From 91927d7450f313cf9d3b0297d24ebf02511c70d0 Mon Sep 17 00:00:00 2001
From: Karen Metts <35154725+karenzone@users.noreply.github.com>
Date: Mon, 10 Mar 2025 18:02:14 -0400
Subject: [PATCH] Doc: Migrate docs from AsciiDoc to Markdown in 9.0 branch
 (#17289)

* Doc: Delete asciidoc files for 9.0 branch
* Add MD files for 9.0 branch
---
docs/docset.yml | 493 ++++++
docs/extend/codec-new-plugin.md | 636 ++++++++
docs/extend/community-maintainer.md | 193 +++
docs/extend/contribute-to-core.md | 11 +
docs/extend/contributing-patch-plugin.md | 386 +++++
docs/extend/create-logstash-plugins.md | 47 +
docs/extend/filter-new-plugin.md | 637 ++++++++
docs/extend/index.md | 58 +
docs/extend/input-new-plugin.md | 674 ++++++++
docs/extend/java-codec-plugin.md | 348 +++++
docs/extend/java-filter-plugin.md | 307 ++++
docs/extend/java-input-plugin.md | 341 ++++
docs/extend/java-output-plugin.md | 311 ++++
docs/extend/output-new-plugin.md | 570 +++++++
docs/extend/plugin-doc.md | 172 ++
docs/extend/plugin-listing.md | 23 +
docs/extend/publish-plugin.md | 62 +
docs/extend/toc.yml | 18 +
docs/gs-index.asciidoc | 38 -
.../images/basic_logstash_pipeline.png | Bin
.../images/centralized_config.png | Bin
.../{static => }/images/dead_letter_queue.png | Bin
docs/{static => }/images/deploy1.png | Bin
docs/{static => }/images/deploy2.png | Bin
docs/{static => }/images/deploy3.png | Bin
docs/{static => }/images/deploy4.png | Bin
.../images/integration-assets-dashboards.png | Bin
.../images/integration-dashboard-overview.png | Bin
.../images/kibana-filebeat-data.png | Bin
.../monitoring => }/images/kibana-home.png | Bin
.../monitoring => }/images/monitoring-ui.png | Bin
.../monitoring => }/images/nodestats.png | Bin
.../monitoring => }/images/overviewstats.png | Bin
.../images/pipeline-input-detail.png | Bin
.../monitoring => }/images/pipeline-tree.png | Bin
.../images/pipeline_correct_load.png | Bin
.../{static => }/images/pipeline_overload.png | Bin
docs/include/attributes-ls.asciidoc | 10 -
docs/include/attributes-lsplugins.asciidoc | 13 -
docs/include/filter.asciidoc | 234 ---
docs/include/input.asciidoc | 172 --
docs/include/output.asciidoc | 94 --
docs/include/plugin_header-core.asciidoc | 14 -
.../plugin_header-integration.asciidoc | 19 -
docs/include/plugin_header.asciidoc | 25 -
docs/include/version-list-intro.asciidoc | 14 -
docs/index.asciidoc | 238 ---
docs/index.x.asciidoc | 1 -
.../advanced-logstash-configurations.md | 14 +
docs/reference/advanced-pipeline.md | 612 ++++++++
docs/reference/codec-plugins.md | 67 +
.../config-examples.md} | 145 +-
docs/reference/config-setting-files.md | 33 +
.../reference/configuration-file-structure.md | 235 +++
.../configuring-centralized-pipelines.md | 157 ++
.../configuring-geoip-database-management.md | 68 +
docs/reference/connecting-to-cloud.md | 47 +
docs/reference/core-operations.md | 92 ++
docs/reference/creating-logstash-pipeline.md | 34 +
...dashboard-monitoring-with-elastic-agent.md | 145 ++
docs/reference/data-deserialization.md | 105 ++
docs/reference/dead-letter-queues.md | 257 +++
docs/reference/deploying-scaling-logstash.md | 165 ++
docs/reference/dir-layout.md | 59 +
docs/reference/docker-config.md | 130 ++
docs/reference/docker.md | 31 +
docs/reference/ecs-ls.md | 78 +
.../environment-variables.md} | 103 +-
docs/reference/event-api.md | 105 ++
.../event-dependent-configuration.md | 371 +++++
docs/reference/execution-model.md | 13 +
docs/reference/field-extraction.md | 102 ++
docs/reference/filter-plugins.md | 113 ++
docs/reference/first-event.md | 70 +
.../getting-started-with-logstash.md | 121 ++
docs/reference/glob-support.md | 44 +
docs/reference/how-logstash-works.md | 59 +
docs/reference/index.md | 8 +
docs/reference/input-plugins.md | 129 ++
docs/reference/installing-logstash.md | 63 +
docs/reference/integration-plugins.md | 27 +
docs/reference/jvm-settings.md | 152 ++
docs/reference/keystore.md | 159 ++
docs/reference/logging.md | 227 +++
...ogstash-centralized-pipeline-management.md | 78 +
.../logstash-geoip-database-management.md | 78 +
docs/reference/logstash-monitoring-ui.md | 28 +
docs/reference/logstash-pipeline-viewer.md | 55 +
docs/reference/logstash-settings-file.md | 96 ++
.../logstash-to-logstash-communications.md | 42 +
docs/reference/lookup-enrichment.md | 236 +++
docs/reference/ls-to-ls-http.md | 137 ++
.../ls-to-ls-lumberjack.md} | 80 +-
docs/reference/ls-to-ls-native.md | 133 ++
docs/reference/managing-geoip-databases.md | 14 +
docs/reference/managing-logstash.md | 13 +
docs/reference/memory-queue.md | 65 +
.../monitoring-internal-collection-legacy.md | 276 ++++
docs/reference/monitoring-logstash-legacy.md | 23 +
.../monitoring-logstash-with-elastic-agent.md | 20 +
docs/reference/monitoring-logstash.md | 97 ++
docs/reference/monitoring-troubleshooting.md | 30 +
.../monitoring-with-elastic-agent.md | 146 ++
docs/reference/monitoring-with-metricbeat.md | 156 ++
docs/reference/multiline.md | 113 ++
.../multiple-input-output-plugins.md | 174 +++
.../multiple-pipelines.md} | 32 +-
docs/reference/offline-plugins.md | 78 +
docs/reference/output-plugins.md | 133 ++
docs/reference/performance-troubleshooting.md | 56 +
docs/reference/performance-tuning.md | 14 +
docs/reference/persistent-queues.md | 368 +++++
.../pipeline-to-pipeline.md} | 125 +-
docs/reference/plugin-concepts.md | 26 +
docs/reference/plugin-generator.md | 18 +
docs/reference/plugins-codecs-avro.md | 148 ++
docs/reference/plugins-codecs-cef.md | 524 +++++++
docs/reference/plugins-codecs-cloudfront.md | 47 +
docs/reference/plugins-codecs-cloudtrail.md | 41 +
docs/reference/plugins-codecs-collectd.md | 153 ++
docs/reference/plugins-codecs-csv.md | 178 +++
docs/reference/plugins-codecs-dots.md | 25 +
docs/reference/plugins-codecs-edn.md | 56 +
docs/reference/plugins-codecs-edn_lines.md | 56 +
docs/reference/plugins-codecs-es_bulk.md | 79 +
docs/reference/plugins-codecs-fluent.md | 87 ++
docs/reference/plugins-codecs-graphite.md | 93 ++
docs/reference/plugins-codecs-gzip_lines.md | 51 +
docs/reference/plugins-codecs-java_line.md | 60 +
docs/reference/plugins-codecs-java_plain.md | 47 +
docs/reference/plugins-codecs-jdots.md | 25 +
docs/reference/plugins-codecs-json.md | 91 ++
docs/reference/plugins-codecs-json_lines.md | 103 ++
docs/reference/plugins-codecs-line.md | 75 +
docs/reference/plugins-codecs-msgpack.md | 62 +
docs/reference/plugins-codecs-multiline.md | 225 +++
docs/reference/plugins-codecs-netflow.md | 207 +++
docs/reference/plugins-codecs-nmap.md | 80 +
docs/reference/plugins-codecs-plain.md | 77 +
docs/reference/plugins-codecs-protobuf.md | 247 +++
docs/reference/plugins-codecs-rubydebug.md | 42 +
docs/reference/plugins-filters-age.md | 231 +++
docs/reference/plugins-filters-aggregate.md | 757 +++++++++
docs/reference/plugins-filters-alter.md | 283 ++++
docs/reference/plugins-filters-bytes.md | 284 ++++
docs/reference/plugins-filters-cidr.md | 277 ++++
docs/reference/plugins-filters-cipher.md | 387 +++++
docs/reference/plugins-filters-clone.md | 317 ++++
docs/reference/plugins-filters-csv.md | 345 ++++
docs/reference/plugins-filters-date.md | 423 +++++
docs/reference/plugins-filters-de_dot.md | 249 +++
docs/reference/plugins-filters-dissect.md | 531 +++++++
docs/reference/plugins-filters-dns.md | 347 +++++
docs/reference/plugins-filters-drop.md | 242 +++
docs/reference/plugins-filters-elapsed.md | 326 ++++
.../plugins-filters-elastic_integration.md | 674 ++++++++
.../plugins-filters-elasticsearch.md | 679 ++++++++
docs/reference/plugins-filters-environment.md | 246 +++
.../plugins-filters-extractnumbers.md | 226 +++
docs/reference/plugins-filters-fingerprint.md | 351 +++++
docs/reference/plugins-filters-geoip.md | 508 ++++++
docs/reference/plugins-filters-grok.md | 586 +++++++
docs/reference/plugins-filters-http.md | 620 ++++++++
docs/reference/plugins-filters-i18n.md | 230 +++
docs/reference/plugins-filters-java_uuid.md | 246 +++
docs/reference/plugins-filters-jdbc_static.md | 672 ++++++++
.../plugins-filters-jdbc_streaming.md | 466 ++++++
docs/reference/plugins-filters-json.md | 305 ++++
docs/reference/plugins-filters-json_encode.md | 243 +++
docs/reference/plugins-filters-kv.md | 687 ++++++++
docs/reference/plugins-filters-memcached.md | 347 +++++
docs/reference/plugins-filters-metricize.md | 280 ++++
docs/reference/plugins-filters-metrics.md | 387 +++++
docs/reference/plugins-filters-mutate.md | 600 +++++++
docs/reference/plugins-filters-prune.md | 318 ++++
docs/reference/plugins-filters-range.md | 249 +++
docs/reference/plugins-filters-ruby.md | 404 +++++
docs/reference/plugins-filters-sleep.md | 276 ++++
docs/reference/plugins-filters-split.md | 261 ++++
docs/reference/plugins-filters-syslog_pri.md | 266 ++++
.../plugins-filters-threats_classifier.md | 32 +
docs/reference/plugins-filters-throttle.md | 396 +++++
docs/reference/plugins-filters-tld.md | 235 +++
docs/reference/plugins-filters-translate.md | 563 +++++++
docs/reference/plugins-filters-truncate.md | 239 +++
docs/reference/plugins-filters-urldecode.md | 244 +++
docs/reference/plugins-filters-useragent.md | 354 +++++
docs/reference/plugins-filters-uuid.md | 250 +++
.../plugins-filters-wurfl_device_detection.md | 35 +
docs/reference/plugins-filters-xml.md | 362 +++++
.../plugins-inputs-azure_event_hubs.md | 538 +++++++
docs/reference/plugins-inputs-beats.md | 480 ++++++
docs/reference/plugins-inputs-cloudwatch.md | 395 +++++
.../plugins-inputs-couchdb_changes.md | 279 ++++
.../plugins-inputs-dead_letter_queue.md | 191 +++
.../reference/plugins-inputs-elastic_agent.md | 443 ++++++
...ins-inputs-elastic_serverless_forwarder.md | 383 +++++
.../reference/plugins-inputs-elasticsearch.md | 683 ++++++++
docs/reference/plugins-inputs-exec.md | 252 +++
docs/reference/plugins-inputs-file.md | 489 ++++++
docs/reference/plugins-inputs-ganglia.md | 138 ++
docs/reference/plugins-inputs-gelf.md | 201 +++
docs/reference/plugins-inputs-generator.md | 226 +++
docs/reference/plugins-inputs-github.md | 162 ++
.../plugins-inputs-google_cloud_storage.md | 359 +++++
.../reference/plugins-inputs-google_pubsub.md | 308 ++++
docs/reference/plugins-inputs-graphite.md | 240 +++
docs/reference/plugins-inputs-heartbeat.md | 221 +++
docs/reference/plugins-inputs-http.md | 519 ++++++
docs/reference/plugins-inputs-http_poller.md | 634 ++++++++
docs/reference/plugins-inputs-imap.md | 303 ++++
docs/reference/plugins-inputs-irc.md | 230 +++
.../plugins-inputs-java_generator.md | 185 +++
docs/reference/plugins-inputs-java_stdin.md | 111 ++
docs/reference/plugins-inputs-jdbc.md | 696 +++++++++
docs/reference/plugins-inputs-jms.md | 821 ++++++++++
docs/reference/plugins-inputs-jmx.md | 237 +++
docs/reference/plugins-inputs-kafka.md | 883 +++++++++++
docs/reference/plugins-inputs-kinesis.md | 282 ++++
docs/reference/plugins-inputs-log4j.md | 248 +++
docs/reference/plugins-inputs-logstash.md | 293 ++++
docs/reference/plugins-inputs-lumberjack.md | 191 +++
docs/reference/plugins-inputs-meetup.md | 190 +++
docs/reference/plugins-inputs-pipe.md | 190 +++
.../reference/plugins-inputs-puppet_facter.md | 174 +++
docs/reference/plugins-inputs-rabbitmq.md | 438 ++++++
docs/reference/plugins-inputs-redis.md | 241 +++
docs/reference/plugins-inputs-relp.md | 204 +++
docs/reference/plugins-inputs-rss.md | 150 ++
docs/reference/plugins-inputs-s3-sns-sqs.md | 32 +
docs/reference/plugins-inputs-s3.md | 409 +++++
docs/reference/plugins-inputs-salesforce.md | 292 ++++
docs/reference/plugins-inputs-snmp.md | 543 +++++++
docs/reference/plugins-inputs-snmptrap.md | 440 ++++++
docs/reference/plugins-inputs-sqlite.md | 208 +++
docs/reference/plugins-inputs-sqs.md | 355 +++++
docs/reference/plugins-inputs-stdin.md | 140 ++
docs/reference/plugins-inputs-stomp.md | 199 +++
docs/reference/plugins-inputs-syslog.md | 264 ++++
docs/reference/plugins-inputs-tcp.md | 408 +++++
docs/reference/plugins-inputs-twitter.md | 341 ++++
docs/reference/plugins-inputs-udp.md | 225 +++
docs/reference/plugins-inputs-unix.md | 223 +++
docs/reference/plugins-inputs-varnishlog.md | 132 ++
docs/reference/plugins-inputs-websocket.md | 144 ++
docs/reference/plugins-inputs-wmi.md | 202 +++
docs/reference/plugins-inputs-xmpp.md | 165 ++
docs/reference/plugins-integrations-aws.md | 35 +
...-integrations-elastic_enterprise_search.md | 27 +
docs/reference/plugins-integrations-jdbc.md | 29 +
docs/reference/plugins-integrations-kafka.md | 30 +
.../plugins-integrations-logstash.md | 77 +
.../plugins-integrations-rabbitmq.md | 28 +
docs/reference/plugins-integrations-snmp.md | 168 ++
docs/reference/plugins-outputs-boundary.md | 168 ++
docs/reference/plugins-outputs-circonus.md | 134 ++
docs/reference/plugins-outputs-cloudwatch.md | 307 ++++
docs/reference/plugins-outputs-csv.md | 190 +++
docs/reference/plugins-outputs-datadog.md | 165 ++
.../plugins-outputs-datadog_metrics.md | 181 +++
docs/reference/plugins-outputs-dynatrace.md | 32 +
.../plugins-outputs-elastic_app_search.md | 250 +++
...lugins-outputs-elastic_workplace_search.md | 262 ++++
.../plugins-outputs-elasticsearch.md | 1242 +++++++++++++++
docs/reference/plugins-outputs-email.md | 293 ++++
docs/reference/plugins-outputs-exec.md | 129 ++
docs/reference/plugins-outputs-file.md | 176 +++
docs/reference/plugins-outputs-ganglia.md | 182 +++
docs/reference/plugins-outputs-gelf.md | 205 +++
.../plugins-outputs-google_bigquery.md | 423 +++++
.../plugins-outputs-google_cloud_storage.md | 307 ++++
.../plugins-outputs-google_pubsub.md | 253 +++
docs/reference/plugins-outputs-graphite.md | 203 +++
docs/reference/plugins-outputs-graphtastic.md | 179 +++
docs/reference/plugins-outputs-http.md | 513 ++++++
docs/reference/plugins-outputs-influxdb.md | 291 ++++
docs/reference/plugins-outputs-irc.md | 202 +++
docs/reference/plugins-outputs-java_stdout.md | 92 ++
docs/reference/plugins-outputs-juggernaut.md | 158 ++
docs/reference/plugins-outputs-kafka.md | 600 +++++++
docs/reference/plugins-outputs-librato.md | 213 +++
docs/reference/plugins-outputs-loggly.md | 231 +++
docs/reference/plugins-outputs-logstash.md | 299 ++++
docs/reference/plugins-outputs-lumberjack.md | 139 ++
.../plugins-outputs-metriccatcher.md | 198 +++
docs/reference/plugins-outputs-mongodb.md | 174 +++
docs/reference/plugins-outputs-nagios.md | 129 ++
docs/reference/plugins-outputs-nagios_nsca.md | 180 +++
docs/reference/plugins-outputs-opentsdb.md | 131 ++
docs/reference/plugins-outputs-pagerduty.md | 145 ++
docs/reference/plugins-outputs-pipe.md | 117 ++
docs/reference/plugins-outputs-rabbitmq.md | 296 ++++
docs/reference/plugins-outputs-redis.md | 312 ++++
docs/reference/plugins-outputs-redmine.md | 229 +++
docs/reference/plugins-outputs-riak.md | 213 +++
docs/reference/plugins-outputs-riemann.md | 215 +++
docs/reference/plugins-outputs-s3.md | 458 ++++++
docs/reference/plugins-outputs-sink.md | 74 +
docs/reference/plugins-outputs-sns.md | 198 +++
docs/reference/plugins-outputs-solr_http.md | 136 ++
docs/reference/plugins-outputs-sqs.md | 266 ++++
docs/reference/plugins-outputs-statsd.md | 229 +++
docs/reference/plugins-outputs-stdout.md | 104 ++
docs/reference/plugins-outputs-stomp.md | 168 ++
docs/reference/plugins-outputs-syslog.md | 267 ++++
docs/reference/plugins-outputs-tcp.md | 250 +++
docs/reference/plugins-outputs-timber.md | 243 +++
docs/reference/plugins-outputs-udp.md | 127 ++
docs/reference/plugins-outputs-webhdfs.md | 332 ++++
docs/reference/plugins-outputs-websocket.md | 112 ++
docs/reference/plugins-outputs-xmpp.md | 149 ++
docs/reference/plugins-outputs-zabbix.md | 190 +++
docs/reference/private-rubygem.md | 55 +
docs/reference/processing.md | 48 +
docs/reference/queues-data-resiliency.md | 21 +
docs/reference/reloading-config.md | 58 +
.../running-logstash-command-line.md | 165 ++
docs/reference/running-logstash-kubernetes.md | 9 +
docs/reference/running-logstash-windows.md | 198 +++
docs/reference/running-logstash.md | 27 +
docs/reference/secure-connection.md | 498 ++++++
...erverless-monitoring-with-elastic-agent.md | 68 +
docs/reference/setting-up-running-logstash.md | 36 +
.../shutdown.md} | 87 +-
docs/reference/tips-best-practices.md | 118 ++
docs/reference/toc.yml | 330 ++++
docs/reference/transforming-data.md | 20 +
docs/reference/tuning-logstash.md | 80 +
docs/reference/upgrading-logstash-9-0.md | 34 +
docs/reference/upgrading-logstash.md | 73 +
docs/reference/upgrading-minor-versions.md | 11 +
.../upgrading-using-direct-download.md | 18 +
.../upgrading-using-package-managers.md | 15 +
docs/reference/use-filebeat-modules-kafka.md | 119 ++
docs/reference/use-ingest-pipelines.md | 65 +
...sing-logstash-with-elastic-integrations.md | 87 ++
.../working-with-filebeat-modules.md | 13 +
docs/reference/working-with-plugins.md | 145 ++
.../working-with-winlogbeat-modules.md | 70 +
docs/release-notes/breaking-changes.md | 206 +++
docs/release-notes/deprecations.md | 28 +
docs/release-notes/index.md | 32 +
docs/release-notes/known-issues.md | 7 +
docs/release-notes/toc.yml | 5 +
docs/static/advanced-pipeline.asciidoc | 868 -----------
docs/static/best-practice.asciidoc | 149 --
docs/static/breaking-changes-60.asciidoc | 58 -
docs/static/breaking-changes-70.asciidoc | 195 ---
docs/static/breaking-changes-80.asciidoc | 73 -
docs/static/breaking-changes-90.asciidoc | 265 ----
docs/static/breaking-changes.asciidoc | 39 -
docs/static/codec.asciidoc | 15 -
docs/static/config-details.asciidoc | 210 ---
docs/static/config-management.asciidoc | 12 -
docs/static/configuration-advanced.asciidoc | 6 -
docs/static/contrib-acceptance.asciidoc | 19 -
docs/static/contribute-core.asciidoc | 10 -
docs/static/contributing-java-plugin.asciidoc | 50 -
docs/static/contributing-patch.asciidoc | 407 -----
docs/static/contributing-to-logstash.asciidoc | 44 -
.../core-plugins/codecs/java_dots.asciidoc | 24 -
.../core-plugins/codecs/java_line.asciidoc | 63 -
.../core-plugins/codecs/java_plain.asciidoc | 51 -
.../core-plugins/filters/java_uuid.asciidoc | 91 --
.../inputs/java_generator.asciidoc | 117 --
.../core-plugins/inputs/java_stdin.asciidoc | 35 -
.../core-plugins/outputs/java_sink.asciidoc | 33 -
.../core-plugins/outputs/java_stdout.asciidoc | 50 -
docs/static/cross-plugin-concepts.asciidoc | 26 -
docs/static/dead-letter-queues.asciidoc | 348 -----
docs/static/deploying.asciidoc | 251 ---
docs/static/doc-for-plugin.asciidoc | 191 ---
docs/static/docker.asciidoc | 229 ---
docs/static/ea-integrations.asciidoc | 95 --
docs/static/ecs-compatibility.asciidoc | 87 --
docs/static/event-api.asciidoc | 120 --
docs/static/event-data.asciidoc | 439 ------
docs/static/fb-ls-kafka-example.asciidoc | 159 --
docs/static/field-reference.asciidoc | 147 --
docs/static/filebeat-modules.asciidoc | 191 ---
docs/static/filter.asciidoc | 14 -
.../static/geoip-database-management.asciidoc | 10 -
.../configuring.asciidoc | 68 -
.../geoip-database-management/index.asciidoc | 19 -
.../metrics.asciidoc | 56 -
.../getting-started-with-logstash.asciidoc | 317 ----
docs/static/glob-support.asciidoc | 50 -
docs/static/images/arcsight-diagram-adp.svg | 414 -----
.../arcsight-diagram-smart-connectors.svg | 308 ----
.../images/arcsight-network-overview.png | Bin 1221194 -> 0 bytes
.../images/arcsight-network-suspicious.png | Bin 947614 -> 0 bytes
docs/static/images/azure-flow.png | Bin 230253 -> 0 bytes
docs/static/images/deploy_1.png | Bin 33995 -> 0 bytes
docs/static/images/deploy_2.png | Bin 46477 -> 0 bytes
docs/static/images/deploy_3.png | Bin 41921 -> 0 bytes
docs/static/images/deploy_4.png | Bin 57848 -> 0 bytes
docs/static/images/deploy_5.png | Bin 107951 -> 0 bytes
docs/static/images/deploy_6.png | Bin 167866 -> 0 bytes
docs/static/images/deploy_7.png | Bin 175897 -> 0 bytes
.../images/logstash-module-overview.png | Bin 31634 -> 0 bytes
docs/static/images/logstash.png | Bin 52106 -> 0 bytes
.../images/netflow-conversation-partners.png | Bin 263522 -> 0 bytes
docs/static/images/netflow-geo-location.png | Bin 685997 -> 0 bytes
docs/static/images/netflow-overview.png | Bin 339232 -> 0 bytes
.../images/netflow-traffic-analysis.png | Bin 411714 -> 0 bytes
docs/static/include/javapluginpkg.asciidoc | 83 -
docs/static/include/javapluginsetup.asciidoc | 52 -
docs/static/include/pluginbody.asciidoc | 1268 ---------------
docs/static/input.asciidoc | 12 -
docs/static/introduction.asciidoc | 122 --
docs/static/java-codec.asciidoc | 357 -----
docs/static/java-filter.asciidoc | 304 ----
docs/static/java-input.asciidoc | 317 ----
docs/static/java-output.asciidoc | 286 ----
docs/static/jvm.asciidoc | 122 --
docs/static/keystore.asciidoc | 184 ---
docs/static/life-of-an-event.asciidoc | 99 --
docs/static/listing-a-plugin.asciidoc | 17 -
docs/static/logging.asciidoc | 251 ---
docs/static/logstash-glossary.asciidoc | 132 --
docs/static/ls-ls-config.asciidoc | 40 -
docs/static/ls-ls-http.asciidoc | 136 --
docs/static/ls-ls-native.asciidoc | 128 --
docs/static/ls-to-cloud.asciidoc | 58 -
docs/static/maintainer-guide.asciidoc | 222 ---
.../management/centralized-pipelines.asciidoc | 112 --
...configuring-centralized-pipelines.asciidoc | 42 -
.../static/management/images/new_pipeline.png | Bin 158231 -> 0 bytes
.../static/managing-multiline-events.asciidoc | 113 --
docs/static/mem-queue.asciidoc | 65 -
.../monitoring/collectors-legacy.asciidoc | 49 -
.../integration-agent-add-standalone.png | Bin 314099 -> 0 bytes
.../images/integration-agent-add.png | Bin 222831 -> 0 bytes
.../images/integration-agent-confirm.png | Bin 97202 -> 0 bytes
.../monitoring/images/pipeline-diagram.png | Bin 285242 -> 0 bytes
.../images/pipeline-filter-detail.png | Bin 9547 -> 0 bytes
.../images/pipeline-output-detail.png | Bin 8034 -> 0 bytes
.../images/pipeline-viewer-detail-drawer.png | Bin 373722 -> 0 bytes
.../images/pipeline-viewer-overview.png | Bin 158608 -> 0 bytes
.../monitoring/monitoring-apis.asciidoc | 1386 -----------------
.../monitoring-ea-dashboards.asciidoc | 117 --
.../monitoring/monitoring-ea-intro.asciidoc | 34 -
.../monitoring-ea-serverless.asciidoc | 73 -
docs/static/monitoring/monitoring-ea.asciidoc | 118 --
.../monitoring/monitoring-install.asciidoc | 11 -
.../monitoring-internal-legacy.asciidoc | 173 --
docs/static/monitoring/monitoring-mb.asciidoc | 205 ---
.../monitoring-output-legacy.asciidoc | 47 -
.../monitoring/monitoring-overview.asciidoc | 36 -
.../monitoring-prereq-create-user.asciidoc | 5 -
.../monitoring-prereq-define-cluster.asciidoc | 10 -
...monitoring-prereq-disable-default.asciidoc | 9 -
.../monitoring-prereq-setup-es.asciidoc | 5 -
docs/static/monitoring/monitoring-ui.asciidoc | 23 -
.../monitoring/monitoring-view.asciidoc | 14 -
docs/static/monitoring/monitoring.asciidoc | 26 -
.../monitoring/pipeline-viewer.asciidoc | 76 -
.../monitoring/troubleshooting.asciidoc | 36 -
docs/static/offline-plugins.asciidoc | 89 --
docs/static/output.asciidoc | 14 -
docs/static/performance-checklist.asciidoc | 179 ---
docs/static/persistent-queues.asciidoc | 425 -----
docs/static/pipeline-configuration.asciidoc | 319 ----
docs/static/pipeline-structure.asciidoc | 0
docs/static/plugin-generator.asciidoc | 19 -
docs/static/plugin-manager.asciidoc | 179 ---
docs/static/private-gem-repo.asciidoc | 53 -
docs/static/processing-info.asciidoc | 48 -
docs/static/redirects.asciidoc | 156 --
docs/static/releasenotes.asciidoc | 117 --
docs/static/reloading-config.asciidoc | 66 -
docs/static/reserved-fields.asciidoc | 39 -
docs/static/resiliency.asciidoc | 22 -
.../running-logstash-command-line.asciidoc | 235 ---
.../running-logstash-kubernetes.asciidoc | 6 -
docs/static/running-logstash-windows.asciidoc | 159 --
docs/static/running-logstash.asciidoc | 27 -
docs/static/security/api-keys.asciidoc | 293 ----
docs/static/security/basic-auth.asciidoc | 82 -
docs/static/security/es-security.asciidoc | 97 --
docs/static/security/grant-access.asciidoc | 42 -
docs/static/security/logstash.asciidoc | 32 -
docs/static/security/ls-monitoring.asciidoc | 35 -
docs/static/security/pipeline-mgmt.asciidoc | 18 -
docs/static/security/pki-auth.asciidoc | 22 -
docs/static/security/tls-encryption.asciidoc | 23 -
docs/static/setting-up-logstash.asciidoc | 196 ---
docs/static/settings-file.asciidoc | 346 ----
...configuration-management-settings.asciidoc | 154 --
...onfiguration-wildcard-pipeline-id.asciidoc | 19 -
...eoip-database-management-settings.asciidoc | 26 -
.../monitoring-settings-legacy.asciidoc | 154 --
docs/static/submitting-a-plugin.asciidoc | 73 -
.../tab-widgets/install-agent-widget.asciidoc | 40 -
.../static/tab-widgets/install-agent.asciidoc | 25 -
docs/static/transforming-data.asciidoc | 648 --------
...-pipeline-flow-worker-utilization.asciidoc | 44 -
.../health-pipeline-status.asciidoc | 37 -
.../troubleshoot/plugin-tracing.asciidoc | 96 --
.../troubleshoot/troubleshooting.asciidoc | 32 -
docs/static/troubleshoot/ts-azure.asciidoc | 82 -
docs/static/troubleshoot/ts-kafka.asciidoc | 191 ---
docs/static/troubleshoot/ts-logstash.asciidoc | 338 ----
.../troubleshoot/ts-other-issues.asciidoc | 14 -
.../troubleshoot/ts-plugins-general.asciidoc | 5 -
docs/static/troubleshoot/ts-plugins.asciidoc | 5 -
docs/static/upgrading.asciidoc | 167 --
docs/static/winlogbeat-modules.asciidoc | 90 --
508 files changed, 67717 insertions(+), 20144 deletions(-)
create mode 100644 docs/docset.yml
create mode 100644 docs/extend/codec-new-plugin.md
create mode 100644 docs/extend/community-maintainer.md
create mode 100644 docs/extend/contribute-to-core.md
create mode 100644 docs/extend/contributing-patch-plugin.md
create mode 100644 docs/extend/create-logstash-plugins.md
create mode 100644 docs/extend/filter-new-plugin.md
create mode 100644 docs/extend/index.md
create mode 100644 docs/extend/input-new-plugin.md
create mode 100644 docs/extend/java-codec-plugin.md
create mode 100644 docs/extend/java-filter-plugin.md
create mode 100644 docs/extend/java-input-plugin.md
create mode 100644 docs/extend/java-output-plugin.md
create mode 100644 docs/extend/output-new-plugin.md
create mode 100644 docs/extend/plugin-doc.md
create mode 100644 docs/extend/plugin-listing.md
create mode 100644 docs/extend/publish-plugin.md
create mode 100644 docs/extend/toc.yml
delete mode 100644 docs/gs-index.asciidoc
rename docs/{static => }/images/basic_logstash_pipeline.png (100%)
rename docs/{static/management => }/images/centralized_config.png (100%)
rename docs/{static => }/images/dead_letter_queue.png (100%)
rename docs/{static => }/images/deploy1.png (100%)
rename docs/{static => }/images/deploy2.png (100%)
rename docs/{static => }/images/deploy3.png (100%)
rename docs/{static => }/images/deploy4.png (100%)
rename docs/{static/monitoring => }/images/integration-assets-dashboards.png (100%)
rename docs/{static/monitoring => }/images/integration-dashboard-overview.png (100%)
rename docs/{static => }/images/kibana-filebeat-data.png (100%)
rename docs/{static/monitoring => }/images/kibana-home.png (100%)
rename docs/{static/monitoring => }/images/monitoring-ui.png (100%)
rename docs/{static/monitoring => }/images/nodestats.png (100%)
rename docs/{static/monitoring => }/images/overviewstats.png (100%)
rename docs/{static/monitoring => }/images/pipeline-input-detail.png (100%)
rename docs/{static/monitoring => }/images/pipeline-tree.png (100%)
rename docs/{static => }/images/pipeline_correct_load.png (100%)
rename docs/{static => }/images/pipeline_overload.png (100%)
delete mode 100644 docs/include/attributes-ls.asciidoc
delete mode 100644 docs/include/attributes-lsplugins.asciidoc
delete mode 100644 docs/include/filter.asciidoc
delete mode 100644 docs/include/input.asciidoc
delete mode 100644 docs/include/output.asciidoc
delete mode 100644 docs/include/plugin_header-core.asciidoc
delete mode 100644 docs/include/plugin_header-integration.asciidoc
delete mode 100644 docs/include/plugin_header.asciidoc
delete mode 100644 docs/include/version-list-intro.asciidoc
delete mode 100644 docs/index.asciidoc
delete mode 100644 docs/index.x.asciidoc
create mode 100644 docs/reference/advanced-logstash-configurations.md
create mode 100644 docs/reference/advanced-pipeline.md
create mode 100644 docs/reference/codec-plugins.md
rename docs/{static/pipeline-config-exps.asciidoc => reference/config-examples.md} (64%)
create mode 100644 docs/reference/config-setting-files.md
create mode 100644 docs/reference/configuration-file-structure.md
create mode 100644 docs/reference/configuring-centralized-pipelines.md
create mode 100644 docs/reference/configuring-geoip-database-management.md
create mode 100644 docs/reference/connecting-to-cloud.md
create mode 100644 docs/reference/core-operations.md
create mode 100644 docs/reference/creating-logstash-pipeline.md
create mode 100644 docs/reference/dashboard-monitoring-with-elastic-agent.md
create mode 100644 docs/reference/data-deserialization.md
create mode 100644 docs/reference/dead-letter-queues.md
create mode 100644 docs/reference/deploying-scaling-logstash.md
create mode 100644 docs/reference/dir-layout.md
create mode 100644 docs/reference/docker-config.md
create mode 100644 docs/reference/docker.md
create mode 100644 docs/reference/ecs-ls.md
rename docs/{static/env-vars.asciidoc => reference/environment-variables.md} (61%)
create mode 100644 docs/reference/event-api.md
create mode 100644 docs/reference/event-dependent-configuration.md
create mode 100644 docs/reference/execution-model.md
create mode 100644 docs/reference/field-extraction.md
create mode 100644 docs/reference/filter-plugins.md
create mode 100644 docs/reference/first-event.md
create mode 100644 docs/reference/getting-started-with-logstash.md
create mode 100644 docs/reference/glob-support.md
create mode 100644 docs/reference/how-logstash-works.md
create mode 100644 docs/reference/index.md
create mode 100644 docs/reference/input-plugins.md
create mode 100644 docs/reference/installing-logstash.md
create mode 100644 docs/reference/integration-plugins.md
create mode 100644 docs/reference/jvm-settings.md
create mode 100644 docs/reference/keystore.md
create mode 100644 docs/reference/logging.md
create mode 100644 docs/reference/logstash-centralized-pipeline-management.md
create mode 100644 docs/reference/logstash-geoip-database-management.md
create mode 100644 docs/reference/logstash-monitoring-ui.md
create mode 100644 docs/reference/logstash-pipeline-viewer.md
create mode 100644 docs/reference/logstash-settings-file.md
create mode 100644 docs/reference/logstash-to-logstash-communications.md
create mode 100644 docs/reference/lookup-enrichment.md
create mode 100644 docs/reference/ls-to-ls-http.md
rename docs/{static/ls-ls-lumberjack.asciidoc => reference/ls-to-ls-lumberjack.md} (61%)
create mode 100644 docs/reference/ls-to-ls-native.md
create mode 100644 docs/reference/managing-geoip-databases.md
create mode 100644 docs/reference/managing-logstash.md
create mode 100644 docs/reference/memory-queue.md
create mode 100644 docs/reference/monitoring-internal-collection-legacy.md
create mode 100644 docs/reference/monitoring-logstash-legacy.md
create mode 100644 docs/reference/monitoring-logstash-with-elastic-agent.md
create mode 100644 docs/reference/monitoring-logstash.md
create mode 100644 docs/reference/monitoring-troubleshooting.md
create mode 100644 docs/reference/monitoring-with-elastic-agent.md
create mode 100644 docs/reference/monitoring-with-metricbeat.md
create mode 100644 docs/reference/multiline.md
create mode 100644 docs/reference/multiple-input-output-plugins.md
rename docs/{static/multiple-pipelines.asciidoc => reference/multiple-pipelines.md} (53%)
create mode 100644 docs/reference/offline-plugins.md
create mode 100644 docs/reference/output-plugins.md
create mode 100644 docs/reference/performance-troubleshooting.md
create mode 100644 docs/reference/performance-tuning.md
create mode 100644 docs/reference/persistent-queues.md
rename docs/{static/pipeline-pipeline-config.asciidoc => reference/pipeline-to-pipeline.md} (68%)
create mode 100644 docs/reference/plugin-concepts.md
create mode 100644 docs/reference/plugin-generator.md
create mode 100644 docs/reference/plugins-codecs-avro.md
create mode 100644 docs/reference/plugins-codecs-cef.md
create mode 100644 docs/reference/plugins-codecs-cloudfront.md
create mode 100644 docs/reference/plugins-codecs-cloudtrail.md
create mode 100644 docs/reference/plugins-codecs-collectd.md
create mode 100644 docs/reference/plugins-codecs-csv.md
create mode 100644 docs/reference/plugins-codecs-dots.md
create mode 100644 docs/reference/plugins-codecs-edn.md
create mode 100644 docs/reference/plugins-codecs-edn_lines.md
create mode 100644 docs/reference/plugins-codecs-es_bulk.md
create mode 100644 docs/reference/plugins-codecs-fluent.md
create mode 100644 docs/reference/plugins-codecs-graphite.md
create mode 100644 docs/reference/plugins-codecs-gzip_lines.md
create mode 100644 docs/reference/plugins-codecs-java_line.md
create mode 100644 docs/reference/plugins-codecs-java_plain.md
create mode 100644 docs/reference/plugins-codecs-jdots.md
create mode 100644 docs/reference/plugins-codecs-json.md
create mode 100644 docs/reference/plugins-codecs-json_lines.md
create mode 100644 docs/reference/plugins-codecs-line.md
create mode 100644 docs/reference/plugins-codecs-msgpack.md
create mode 100644 docs/reference/plugins-codecs-multiline.md
create mode 100644 docs/reference/plugins-codecs-netflow.md
create mode 100644 docs/reference/plugins-codecs-nmap.md
create mode 100644 docs/reference/plugins-codecs-plain.md
create mode 100644 docs/reference/plugins-codecs-protobuf.md
create mode 100644 docs/reference/plugins-codecs-rubydebug.md
create mode 100644 docs/reference/plugins-filters-age.md
create mode 100644 docs/reference/plugins-filters-aggregate.md
create mode 100644 docs/reference/plugins-filters-alter.md
create mode 100644 docs/reference/plugins-filters-bytes.md
create mode 100644 docs/reference/plugins-filters-cidr.md
create mode 100644 docs/reference/plugins-filters-cipher.md
create mode 100644 docs/reference/plugins-filters-clone.md
create mode 100644 docs/reference/plugins-filters-csv.md
create mode 100644 docs/reference/plugins-filters-date.md
create mode 100644 docs/reference/plugins-filters-de_dot.md
create mode 100644 docs/reference/plugins-filters-dissect.md
create mode 100644 docs/reference/plugins-filters-dns.md
create mode 100644 docs/reference/plugins-filters-drop.md
create mode 100644 docs/reference/plugins-filters-elapsed.md
create mode 100644 docs/reference/plugins-filters-elastic_integration.md
create mode 100644 docs/reference/plugins-filters-elasticsearch.md
create mode 100644 docs/reference/plugins-filters-environment.md
create mode 100644 docs/reference/plugins-filters-extractnumbers.md
create mode 100644 docs/reference/plugins-filters-fingerprint.md
create mode 100644 docs/reference/plugins-filters-geoip.md
create mode 100644 docs/reference/plugins-filters-grok.md
create mode 100644 docs/reference/plugins-filters-http.md
create mode 100644 docs/reference/plugins-filters-i18n.md
create mode 100644 docs/reference/plugins-filters-java_uuid.md
create mode 100644 docs/reference/plugins-filters-jdbc_static.md
create mode 100644 docs/reference/plugins-filters-jdbc_streaming.md
create mode 100644 docs/reference/plugins-filters-json.md
create mode 100644 docs/reference/plugins-filters-json_encode.md
create mode 100644 docs/reference/plugins-filters-kv.md
create mode 100644 docs/reference/plugins-filters-memcached.md
create mode 100644 docs/reference/plugins-filters-metricize.md
create mode 100644 docs/reference/plugins-filters-metrics.md
create mode 100644 docs/reference/plugins-filters-mutate.md
create mode 100644 docs/reference/plugins-filters-prune.md
create mode 100644 docs/reference/plugins-filters-range.md
create mode 100644 docs/reference/plugins-filters-ruby.md
create mode 100644 docs/reference/plugins-filters-sleep.md
create mode 100644 docs/reference/plugins-filters-split.md
create mode 100644 docs/reference/plugins-filters-syslog_pri.md
create mode 100644 docs/reference/plugins-filters-threats_classifier.md
create mode 100644 docs/reference/plugins-filters-throttle.md
create mode 100644 docs/reference/plugins-filters-tld.md
create mode 100644 docs/reference/plugins-filters-translate.md
create mode 100644 docs/reference/plugins-filters-truncate.md
create mode 100644 docs/reference/plugins-filters-urldecode.md
create mode 100644 docs/reference/plugins-filters-useragent.md
create mode 100644 docs/reference/plugins-filters-uuid.md
create mode 100644 docs/reference/plugins-filters-wurfl_device_detection.md
create mode 100644 docs/reference/plugins-filters-xml.md
create mode 100644 docs/reference/plugins-inputs-azure_event_hubs.md
create mode 100644 docs/reference/plugins-inputs-beats.md
create mode 100644 docs/reference/plugins-inputs-cloudwatch.md
create mode 100644 docs/reference/plugins-inputs-couchdb_changes.md
create mode 100644 docs/reference/plugins-inputs-dead_letter_queue.md
create mode 100644 docs/reference/plugins-inputs-elastic_agent.md
create mode 100644 docs/reference/plugins-inputs-elastic_serverless_forwarder.md
create mode 100644 docs/reference/plugins-inputs-elasticsearch.md
create mode 100644 docs/reference/plugins-inputs-exec.md
create mode 100644 docs/reference/plugins-inputs-file.md
create mode 100644 docs/reference/plugins-inputs-ganglia.md
create mode 100644 docs/reference/plugins-inputs-gelf.md
create mode 100644 docs/reference/plugins-inputs-generator.md
create mode 100644 docs/reference/plugins-inputs-github.md
create mode 100644 docs/reference/plugins-inputs-google_cloud_storage.md
create mode 100644 docs/reference/plugins-inputs-google_pubsub.md
create mode 100644 docs/reference/plugins-inputs-graphite.md
create mode 100644 docs/reference/plugins-inputs-heartbeat.md
create mode 100644 docs/reference/plugins-inputs-http.md
create mode 100644 docs/reference/plugins-inputs-http_poller.md
create mode 100644 docs/reference/plugins-inputs-imap.md
create mode 100644 docs/reference/plugins-inputs-irc.md
create mode 100644 docs/reference/plugins-inputs-java_generator.md
create mode 100644 docs/reference/plugins-inputs-java_stdin.md
create mode 100644 docs/reference/plugins-inputs-jdbc.md
create mode 100644 docs/reference/plugins-inputs-jms.md
create mode 100644 docs/reference/plugins-inputs-jmx.md
create mode 100644 docs/reference/plugins-inputs-kafka.md
create mode 100644 docs/reference/plugins-inputs-kinesis.md
create mode 100644 docs/reference/plugins-inputs-log4j.md
create mode 100644 docs/reference/plugins-inputs-logstash.md
create mode 100644 docs/reference/plugins-inputs-lumberjack.md
create mode 100644 docs/reference/plugins-inputs-meetup.md
create mode 100644 docs/reference/plugins-inputs-pipe.md
create mode 100644 docs/reference/plugins-inputs-puppet_facter.md
create mode 100644 docs/reference/plugins-inputs-rabbitmq.md
create mode 100644 docs/reference/plugins-inputs-redis.md
create mode 100644 docs/reference/plugins-inputs-relp.md
create mode 100644 docs/reference/plugins-inputs-rss.md
create mode 100644 docs/reference/plugins-inputs-s3-sns-sqs.md
create mode 100644 docs/reference/plugins-inputs-s3.md
create mode 100644 docs/reference/plugins-inputs-salesforce.md
create mode 100644 docs/reference/plugins-inputs-snmp.md
create mode 100644 docs/reference/plugins-inputs-snmptrap.md
create mode 100644 docs/reference/plugins-inputs-sqlite.md
create mode 100644 docs/reference/plugins-inputs-sqs.md
create mode 100644 docs/reference/plugins-inputs-stdin.md
create mode 100644 docs/reference/plugins-inputs-stomp.md
create mode 100644 docs/reference/plugins-inputs-syslog.md
create mode 100644 docs/reference/plugins-inputs-tcp.md
create mode 100644 docs/reference/plugins-inputs-twitter.md
create mode 100644 docs/reference/plugins-inputs-udp.md
create mode 100644 docs/reference/plugins-inputs-unix.md
create mode 100644 docs/reference/plugins-inputs-varnishlog.md
create mode 100644 docs/reference/plugins-inputs-websocket.md
create mode 100644 docs/reference/plugins-inputs-wmi.md
create mode 100644 docs/reference/plugins-inputs-xmpp.md
create mode 100644 docs/reference/plugins-integrations-aws.md
create mode 100644 docs/reference/plugins-integrations-elastic_enterprise_search.md
create mode 100644 docs/reference/plugins-integrations-jdbc.md
create mode 100644 docs/reference/plugins-integrations-kafka.md
create mode 100644 docs/reference/plugins-integrations-logstash.md
create mode 100644 docs/reference/plugins-integrations-rabbitmq.md
create mode 100644 docs/reference/plugins-integrations-snmp.md
create mode 100644 docs/reference/plugins-outputs-boundary.md
create mode 100644 docs/reference/plugins-outputs-circonus.md
create mode 100644 docs/reference/plugins-outputs-cloudwatch.md
create mode 100644 docs/reference/plugins-outputs-csv.md
create mode 100644 docs/reference/plugins-outputs-datadog.md
create mode 100644 docs/reference/plugins-outputs-datadog_metrics.md
create mode 100644 docs/reference/plugins-outputs-dynatrace.md
create mode 100644 docs/reference/plugins-outputs-elastic_app_search.md
create mode 100644 docs/reference/plugins-outputs-elastic_workplace_search.md
create mode 100644 docs/reference/plugins-outputs-elasticsearch.md
create mode 100644 docs/reference/plugins-outputs-email.md
create mode 100644 docs/reference/plugins-outputs-exec.md
create mode 100644 docs/reference/plugins-outputs-file.md
create mode 100644 docs/reference/plugins-outputs-ganglia.md
create mode 100644 docs/reference/plugins-outputs-gelf.md
create mode 100644 docs/reference/plugins-outputs-google_bigquery.md
create mode 100644 docs/reference/plugins-outputs-google_cloud_storage.md
create mode 100644 docs/reference/plugins-outputs-google_pubsub.md
create mode 100644 docs/reference/plugins-outputs-graphite.md
create mode 100644 docs/reference/plugins-outputs-graphtastic.md
create mode 100644 docs/reference/plugins-outputs-http.md
create mode 100644 docs/reference/plugins-outputs-influxdb.md
create mode 100644 docs/reference/plugins-outputs-irc.md
create mode 100644 docs/reference/plugins-outputs-java_stdout.md
create mode 100644 docs/reference/plugins-outputs-juggernaut.md
create mode 100644 docs/reference/plugins-outputs-kafka.md
create mode 100644 docs/reference/plugins-outputs-librato.md
create mode 100644 docs/reference/plugins-outputs-loggly.md
create mode 100644 docs/reference/plugins-outputs-logstash.md
create mode 100644 docs/reference/plugins-outputs-lumberjack.md
create mode 100644 docs/reference/plugins-outputs-metriccatcher.md
create mode 100644 docs/reference/plugins-outputs-mongodb.md
create mode 100644 docs/reference/plugins-outputs-nagios.md
create mode 100644 docs/reference/plugins-outputs-nagios_nsca.md
create mode 100644 docs/reference/plugins-outputs-opentsdb.md
create mode 100644 docs/reference/plugins-outputs-pagerduty.md
create mode 100644 docs/reference/plugins-outputs-pipe.md
create mode 100644 docs/reference/plugins-outputs-rabbitmq.md
create mode 100644 docs/reference/plugins-outputs-redis.md
create mode 100644 docs/reference/plugins-outputs-redmine.md
create mode 100644 docs/reference/plugins-outputs-riak.md
create mode 100644 docs/reference/plugins-outputs-riemann.md
create mode 100644 docs/reference/plugins-outputs-s3.md
create mode 100644 docs/reference/plugins-outputs-sink.md
create mode 100644 docs/reference/plugins-outputs-sns.md
create mode 100644 docs/reference/plugins-outputs-solr_http.md
create mode 100644 docs/reference/plugins-outputs-sqs.md
create mode 100644 docs/reference/plugins-outputs-statsd.md
create mode 100644 docs/reference/plugins-outputs-stdout.md
create mode 100644 docs/reference/plugins-outputs-stomp.md
create mode 100644 docs/reference/plugins-outputs-syslog.md
create mode 100644 docs/reference/plugins-outputs-tcp.md
create mode 100644 docs/reference/plugins-outputs-timber.md
create mode 100644 docs/reference/plugins-outputs-udp.md
create mode 100644 docs/reference/plugins-outputs-webhdfs.md
create mode 100644 docs/reference/plugins-outputs-websocket.md
create mode 100644 docs/reference/plugins-outputs-xmpp.md
create mode 100644 docs/reference/plugins-outputs-zabbix.md
create mode 100644 docs/reference/private-rubygem.md
create mode 100644 docs/reference/processing.md
create mode 100644 docs/reference/queues-data-resiliency.md
create mode 100644 docs/reference/reloading-config.md
create mode 100644 docs/reference/running-logstash-command-line.md
create mode 100644 docs/reference/running-logstash-kubernetes.md
create mode 100644 docs/reference/running-logstash-windows.md
create mode 100644 docs/reference/running-logstash.md
create mode 100644 docs/reference/secure-connection.md
create mode 100644 docs/reference/serverless-monitoring-with-elastic-agent.md
create mode 100644 docs/reference/setting-up-running-logstash.md
rename docs/{static/shutdown.asciidoc => reference/shutdown.md} (56%)
create mode 100644 docs/reference/tips-best-practices.md
create mode 100644 docs/reference/toc.yml
create mode 100644 docs/reference/transforming-data.md
create mode 100644 docs/reference/tuning-logstash.md
create mode 100644 docs/reference/upgrading-logstash-9-0.md
create mode 100644 docs/reference/upgrading-logstash.md
create mode 100644 docs/reference/upgrading-minor-versions.md
create mode 100644 docs/reference/upgrading-using-direct-download.md
create mode 100644 docs/reference/upgrading-using-package-managers.md
create mode 100644 docs/reference/use-filebeat-modules-kafka.md
create mode 100644 docs/reference/use-ingest-pipelines.md
create mode 100644 docs/reference/using-logstash-with-elastic-integrations.md
create mode 100644 docs/reference/working-with-filebeat-modules.md
create mode 100644 docs/reference/working-with-plugins.md
create mode 100644 docs/reference/working-with-winlogbeat-modules.md
create mode 100644 docs/release-notes/breaking-changes.md
create mode 100644 docs/release-notes/deprecations.md
create mode 100644 docs/release-notes/index.md
create mode 100644 docs/release-notes/known-issues.md
create mode 100644 docs/release-notes/toc.yml
delete mode 100644 docs/static/advanced-pipeline.asciidoc
delete mode 100644 docs/static/best-practice.asciidoc
delete mode 100644 docs/static/breaking-changes-60.asciidoc
delete mode 100644 docs/static/breaking-changes-70.asciidoc
delete mode 100644 docs/static/breaking-changes-80.asciidoc
delete mode 100644 docs/static/breaking-changes-90.asciidoc
delete mode 100644 docs/static/breaking-changes.asciidoc
delete mode 100644 docs/static/codec.asciidoc
delete mode 100644 docs/static/config-details.asciidoc
delete mode 100644 docs/static/config-management.asciidoc
delete mode 100644 docs/static/configuration-advanced.asciidoc
delete mode 100644 docs/static/contrib-acceptance.asciidoc
delete mode 100644 docs/static/contribute-core.asciidoc
delete mode 100644 docs/static/contributing-java-plugin.asciidoc
delete mode 100644 docs/static/contributing-patch.asciidoc
delete mode 100644 docs/static/contributing-to-logstash.asciidoc
delete mode 100644 docs/static/core-plugins/codecs/java_dots.asciidoc
delete mode 100644 docs/static/core-plugins/codecs/java_line.asciidoc
delete mode 100644 docs/static/core-plugins/codecs/java_plain.asciidoc
delete mode 100644 docs/static/core-plugins/filters/java_uuid.asciidoc
delete mode 100644 docs/static/core-plugins/inputs/java_generator.asciidoc
delete mode 100644 docs/static/core-plugins/inputs/java_stdin.asciidoc
delete mode 100644 docs/static/core-plugins/outputs/java_sink.asciidoc
delete mode 100644 docs/static/core-plugins/outputs/java_stdout.asciidoc
delete mode 100644 docs/static/cross-plugin-concepts.asciidoc
delete mode 100644 docs/static/dead-letter-queues.asciidoc
delete mode 100644 docs/static/deploying.asciidoc
delete mode 100644 docs/static/doc-for-plugin.asciidoc
delete mode 100644 docs/static/docker.asciidoc
delete mode 100644 docs/static/ea-integrations.asciidoc
delete mode 100644 docs/static/ecs-compatibility.asciidoc
delete mode 100644 docs/static/event-api.asciidoc
delete mode 100644 docs/static/event-data.asciidoc
delete mode 100644 docs/static/fb-ls-kafka-example.asciidoc
delete mode 100644 docs/static/field-reference.asciidoc
delete mode 100644 docs/static/filebeat-modules.asciidoc
delete mode 100644 docs/static/filter.asciidoc
delete mode 100644 docs/static/geoip-database-management.asciidoc
delete mode 100644 docs/static/geoip-database-management/configuring.asciidoc
delete mode 100644 docs/static/geoip-database-management/index.asciidoc
delete mode 100644 docs/static/geoip-database-management/metrics.asciidoc
delete mode 100644 docs/static/getting-started-with-logstash.asciidoc
delete mode 100644 docs/static/glob-support.asciidoc
delete mode 100644 docs/static/images/arcsight-diagram-adp.svg
delete mode 100644 docs/static/images/arcsight-diagram-smart-connectors.svg
delete mode 100644 docs/static/images/arcsight-network-overview.png
delete mode 100644 docs/static/images/arcsight-network-suspicious.png
delete mode 100644 docs/static/images/azure-flow.png
delete mode 100644 docs/static/images/deploy_1.png
delete mode 100644 docs/static/images/deploy_2.png
delete mode 100644 docs/static/images/deploy_3.png
delete mode 100644 docs/static/images/deploy_4.png
delete mode 100644 docs/static/images/deploy_5.png
delete mode 100644 docs/static/images/deploy_6.png
delete mode 100644 docs/static/images/deploy_7.png
delete mode 100644 docs/static/images/logstash-module-overview.png
delete mode 100644 docs/static/images/logstash.png
delete mode 100644 docs/static/images/netflow-conversation-partners.png
delete mode 100644 docs/static/images/netflow-geo-location.png
delete mode 100644 docs/static/images/netflow-overview.png
delete mode 100644 docs/static/images/netflow-traffic-analysis.png
delete mode 100644 docs/static/include/javapluginpkg.asciidoc
delete mode 100644 docs/static/include/javapluginsetup.asciidoc
delete mode 100644 docs/static/include/pluginbody.asciidoc
delete mode 100644 docs/static/input.asciidoc
delete mode 100644 docs/static/introduction.asciidoc
delete mode 100644 docs/static/java-codec.asciidoc
delete mode 100644 docs/static/java-filter.asciidoc
delete mode 100644 docs/static/java-input.asciidoc
delete mode 100644 docs/static/java-output.asciidoc
delete mode 100644 docs/static/jvm.asciidoc
delete mode 100644 docs/static/keystore.asciidoc
delete mode 100644 docs/static/life-of-an-event.asciidoc
delete mode 100644 docs/static/listing-a-plugin.asciidoc
delete mode 100644 docs/static/logging.asciidoc
delete mode 100644 docs/static/logstash-glossary.asciidoc
delete mode 100644 docs/static/ls-ls-config.asciidoc
delete mode 100644 docs/static/ls-ls-http.asciidoc
delete mode 100644 docs/static/ls-ls-native.asciidoc
delete mode 100644 docs/static/ls-to-cloud.asciidoc
delete mode 100644 docs/static/maintainer-guide.asciidoc
delete mode 100644 docs/static/management/centralized-pipelines.asciidoc
delete mode 100644 docs/static/management/configuring-centralized-pipelines.asciidoc
delete mode 100644 docs/static/management/images/new_pipeline.png
delete mode 100644 docs/static/managing-multiline-events.asciidoc
delete mode 100644 docs/static/mem-queue.asciidoc
delete mode 100644 docs/static/monitoring/collectors-legacy.asciidoc
delete mode 100644 docs/static/monitoring/images/integration-agent-add-standalone.png
delete mode 100644 docs/static/monitoring/images/integration-agent-add.png
delete mode 100644 docs/static/monitoring/images/integration-agent-confirm.png
delete mode 100644 docs/static/monitoring/images/pipeline-diagram.png
delete mode 100644 docs/static/monitoring/images/pipeline-filter-detail.png
delete mode 100644 docs/static/monitoring/images/pipeline-output-detail.png
delete mode 100644 docs/static/monitoring/images/pipeline-viewer-detail-drawer.png
delete mode 100644 docs/static/monitoring/images/pipeline-viewer-overview.png
delete mode 100644 docs/static/monitoring/monitoring-apis.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-ea-dashboards.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-ea-intro.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-ea-serverless.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-ea.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-install.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-internal-legacy.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-mb.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-output-legacy.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-overview.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-prereq-create-user.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-prereq-define-cluster.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-prereq-disable-default.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-prereq-setup-es.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-ui.asciidoc
delete mode 100644 docs/static/monitoring/monitoring-view.asciidoc
delete mode 100644 docs/static/monitoring/monitoring.asciidoc
delete mode 100644 docs/static/monitoring/pipeline-viewer.asciidoc
delete mode 100644 docs/static/monitoring/troubleshooting.asciidoc
delete mode 100644 docs/static/offline-plugins.asciidoc
delete mode 100644 docs/static/output.asciidoc
delete mode 100644 docs/static/performance-checklist.asciidoc
delete mode 100644 docs/static/persistent-queues.asciidoc
delete mode 100644 docs/static/pipeline-configuration.asciidoc
delete mode 100644 docs/static/pipeline-structure.asciidoc
delete mode 100644 docs/static/plugin-generator.asciidoc
delete mode 100644 docs/static/plugin-manager.asciidoc
delete mode 100644 docs/static/private-gem-repo.asciidoc
delete mode 100644 docs/static/processing-info.asciidoc
delete mode 100644 docs/static/redirects.asciidoc
delete mode 100644 docs/static/releasenotes.asciidoc
delete mode 100644 docs/static/reloading-config.asciidoc
delete mode 100644 docs/static/reserved-fields.asciidoc
delete mode 100644 docs/static/resiliency.asciidoc
delete mode 100644 docs/static/running-logstash-command-line.asciidoc
delete mode 100644 docs/static/running-logstash-kubernetes.asciidoc
delete mode 100644 docs/static/running-logstash-windows.asciidoc
delete mode 100644 docs/static/running-logstash.asciidoc
delete mode 100644 docs/static/security/api-keys.asciidoc
delete mode 100644 docs/static/security/basic-auth.asciidoc
delete mode 100644 docs/static/security/es-security.asciidoc
delete mode 100644 docs/static/security/grant-access.asciidoc
delete mode 100644 docs/static/security/logstash.asciidoc
delete mode 100644 docs/static/security/ls-monitoring.asciidoc
delete mode 100644 docs/static/security/pipeline-mgmt.asciidoc
delete mode 100644 docs/static/security/pki-auth.asciidoc
delete mode 100644 docs/static/security/tls-encryption.asciidoc
delete mode 100644 docs/static/setting-up-logstash.asciidoc
delete mode 100644 docs/static/settings-file.asciidoc
delete mode 100644 docs/static/settings/configuration-management-settings.asciidoc
delete mode 100644 docs/static/settings/configuration-wildcard-pipeline-id.asciidoc
delete mode 100644 docs/static/settings/geoip-database-management-settings.asciidoc
delete mode 100644 docs/static/settings/monitoring-settings-legacy.asciidoc
delete mode 100644 docs/static/submitting-a-plugin.asciidoc
delete mode 100644 docs/static/tab-widgets/install-agent-widget.asciidoc
delete mode 100644 docs/static/tab-widgets/install-agent.asciidoc
delete mode 100644 docs/static/transforming-data.asciidoc
delete mode 100644 docs/static/troubleshoot/health-pipeline-flow-worker-utilization.asciidoc
delete mode 100644 docs/static/troubleshoot/health-pipeline-status.asciidoc
delete mode 100644 docs/static/troubleshoot/plugin-tracing.asciidoc
delete mode 100644 docs/static/troubleshoot/troubleshooting.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-azure.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-kafka.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-logstash.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-other-issues.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-plugins-general.asciidoc
delete mode 100644 docs/static/troubleshoot/ts-plugins.asciidoc
delete mode 100644 docs/static/upgrading.asciidoc
delete mode 100644 docs/static/winlogbeat-modules.asciidoc
diff --git a/docs/docset.yml b/docs/docset.yml
new file mode 100644
index 000000000..e48937257
--- /dev/null
+++ b/docs/docset.yml
@@ -0,0 +1,493 @@
+project: 'Logstash'
+cross_links:
+ - beats
+ - docs-content
+ - ecs
+ - elasticsearch
+ - integration-docs
+ - logstash-docs
+ - search-ui
+toc:
+ - toc: reference
+ - toc: release-notes
+ - toc: extend
+subs:
+ ref: "https://www.elastic.co/guide/en/elasticsearch/reference/current"
+ ref-bare: "https://www.elastic.co/guide/en/elasticsearch/reference"
+ ref-8x: "https://www.elastic.co/guide/en/elasticsearch/reference/8.1"
+ ref-80: "https://www.elastic.co/guide/en/elasticsearch/reference/8.0"
+ ref-7x: "https://www.elastic.co/guide/en/elasticsearch/reference/7.17"
+ ref-70: "https://www.elastic.co/guide/en/elasticsearch/reference/7.0"
+ ref-60: "https://www.elastic.co/guide/en/elasticsearch/reference/6.0"
+ ref-64: "https://www.elastic.co/guide/en/elasticsearch/reference/6.4"
+ xpack-ref: "https://www.elastic.co/guide/en/x-pack/6.2"
+ logstash-ref: "https://www.elastic.co/guide/en/logstash/current"
+ kibana-ref: "https://www.elastic.co/guide/en/kibana/current"
+ kibana-ref-all: "https://www.elastic.co/guide/en/kibana"
+ beats-ref-root: "https://www.elastic.co/guide/en/beats"
+ beats-ref: "https://www.elastic.co/guide/en/beats/libbeat/current"
+ beats-ref-60: "https://www.elastic.co/guide/en/beats/libbeat/6.0"
+ beats-ref-63: "https://www.elastic.co/guide/en/beats/libbeat/6.3"
+ beats-devguide: "https://www.elastic.co/guide/en/beats/devguide/current"
+ auditbeat-ref: "https://www.elastic.co/guide/en/beats/auditbeat/current"
+ packetbeat-ref: "https://www.elastic.co/guide/en/beats/packetbeat/current"
+ metricbeat-ref: "https://www.elastic.co/guide/en/beats/metricbeat/current"
+ filebeat-ref: "https://www.elastic.co/guide/en/beats/filebeat/current"
+ functionbeat-ref: "https://www.elastic.co/guide/en/beats/functionbeat/current"
+ winlogbeat-ref: "https://www.elastic.co/guide/en/beats/winlogbeat/current"
+ heartbeat-ref: "https://www.elastic.co/guide/en/beats/heartbeat/current"
+ journalbeat-ref: "https://www.elastic.co/guide/en/beats/journalbeat/current"
+ ingest-guide: "https://www.elastic.co/guide/en/ingest/current"
+ fleet-guide: "https://www.elastic.co/guide/en/fleet/current"
+ apm-guide-ref: "https://www.elastic.co/guide/en/apm/guide/current"
+ apm-guide-7x: "https://www.elastic.co/guide/en/apm/guide/7.17"
+ apm-app-ref: "https://www.elastic.co/guide/en/kibana/current"
+ apm-agents-ref: "https://www.elastic.co/guide/en/apm/agent"
+ apm-android-ref: "https://www.elastic.co/guide/en/apm/agent/android/current"
+ apm-py-ref: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-py-ref-3x: "https://www.elastic.co/guide/en/apm/agent/python/3.x"
+ apm-node-ref-index: "https://www.elastic.co/guide/en/apm/agent/nodejs"
+ apm-node-ref: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-node-ref-1x: "https://www.elastic.co/guide/en/apm/agent/nodejs/1.x"
+ apm-rum-ref: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-dotnet-ref: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ apm-ios-ref: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-lambda-ref: "https://www.elastic.co/guide/en/apm/lambda/current"
+ apm-attacher-ref: "https://www.elastic.co/guide/en/apm/attacher/current"
+ docker-logging-ref: "https://www.elastic.co/guide/en/beats/loggingplugin/current"
+ esf-ref: "https://www.elastic.co/guide/en/esf/current"
+ kinesis-firehose-ref: "https://www.elastic.co/guide/en/kinesis/{{kinesis_version}}"
+ estc-welcome-current: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions/current"
+ estc-welcome-all: "https://www.elastic.co/guide/en/starting-with-the-elasticsearch-platform-and-its-solutions"
+ hadoop-ref: "https://www.elastic.co/guide/en/elasticsearch/hadoop/current"
+ stack-ref: "https://www.elastic.co/guide/en/elastic-stack/current"
+ stack-ref-67: "https://www.elastic.co/guide/en/elastic-stack/6.7"
+ stack-ref-68: "https://www.elastic.co/guide/en/elastic-stack/6.8"
+ stack-ref-70: "https://www.elastic.co/guide/en/elastic-stack/7.0"
+ stack-ref-80: "https://www.elastic.co/guide/en/elastic-stack/8.0"
+ stack-ov: "https://www.elastic.co/guide/en/elastic-stack-overview/current"
+ stack-gs: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ stack-gs-current: "https://www.elastic.co/guide/en/elastic-stack-get-started/current"
+ javaclient: "https://www.elastic.co/guide/en/elasticsearch/client/java-api/current"
+ java-api-client: "https://www.elastic.co/guide/en/elasticsearch/client/java-api-client/current"
+ java-rest: "https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current"
+ jsclient: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ jsclient-current: "https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current"
+ es-ruby-client: "https://www.elastic.co/guide/en/elasticsearch/client/ruby-api/current"
+ es-dotnet-client: "https://www.elastic.co/guide/en/elasticsearch/client/net-api/current"
+ es-php-client: "https://www.elastic.co/guide/en/elasticsearch/client/php-api/current"
+ es-python-client: "https://www.elastic.co/guide/en/elasticsearch/client/python-api/current"
+ defguide: "https://www.elastic.co/guide/en/elasticsearch/guide/2.x"
+ painless: "https://www.elastic.co/guide/en/elasticsearch/painless/current"
+ plugins: "https://www.elastic.co/guide/en/elasticsearch/plugins/current"
+ plugins-8x: "https://www.elastic.co/guide/en/elasticsearch/plugins/8.1"
+ plugins-7x: "https://www.elastic.co/guide/en/elasticsearch/plugins/7.17"
+ plugins-6x: "https://www.elastic.co/guide/en/elasticsearch/plugins/6.8"
+ glossary: "https://www.elastic.co/guide/en/elastic-stack-glossary/current"
+ upgrade_guide: "https://www.elastic.co/products/upgrade_guide"
+ blog-ref: "https://www.elastic.co/blog/"
+ curator-ref: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ curator-ref-current: "https://www.elastic.co/guide/en/elasticsearch/client/curator/current"
+ metrics-ref: "https://www.elastic.co/guide/en/metrics/current"
+ metrics-guide: "https://www.elastic.co/guide/en/metrics/guide/current"
+ logs-ref: "https://www.elastic.co/guide/en/logs/current"
+ logs-guide: "https://www.elastic.co/guide/en/logs/guide/current"
+ uptime-guide: "https://www.elastic.co/guide/en/uptime/current"
+ observability-guide: "https://www.elastic.co/guide/en/observability/current"
+ observability-guide-all: "https://www.elastic.co/guide/en/observability"
+ siem-guide: "https://www.elastic.co/guide/en/siem/guide/current"
+ security-guide: "https://www.elastic.co/guide/en/security/current"
+ security-guide-all: "https://www.elastic.co/guide/en/security"
+ endpoint-guide: "https://www.elastic.co/guide/en/endpoint/current"
+ sql-odbc: "https://www.elastic.co/guide/en/elasticsearch/sql-odbc/current"
+ ecs-ref: "https://www.elastic.co/guide/en/ecs/current"
+ ecs-logging-ref: "https://www.elastic.co/guide/en/ecs-logging/overview/current"
+ ecs-logging-go-logrus-ref: "https://www.elastic.co/guide/en/ecs-logging/go-logrus/current"
+ ecs-logging-go-zap-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zap/current"
+ ecs-logging-go-zerolog-ref: "https://www.elastic.co/guide/en/ecs-logging/go-zerolog/current"
+ ecs-logging-java-ref: "https://www.elastic.co/guide/en/ecs-logging/java/current"
+ ecs-logging-dotnet-ref: "https://www.elastic.co/guide/en/ecs-logging/dotnet/current"
+ ecs-logging-nodejs-ref: "https://www.elastic.co/guide/en/ecs-logging/nodejs/current"
+ ecs-logging-php-ref: "https://www.elastic.co/guide/en/ecs-logging/php/current"
+ ecs-logging-python-ref: "https://www.elastic.co/guide/en/ecs-logging/python/current"
+ ecs-logging-ruby-ref: "https://www.elastic.co/guide/en/ecs-logging/ruby/current"
+ ml-docs: "https://www.elastic.co/guide/en/machine-learning/current"
+ eland-docs: "https://www.elastic.co/guide/en/elasticsearch/client/eland/current"
+ eql-ref: "https://eql.readthedocs.io/en/latest/query-guide"
+ extendtrial: "https://www.elastic.co/trialextension"
+ wikipedia: "https://en.wikipedia.org/wiki"
+ forum: "https://discuss.elastic.co/"
+ xpack-forum: "https://discuss.elastic.co/c/50-x-pack"
+ security-forum: "https://discuss.elastic.co/c/x-pack/shield"
+ watcher-forum: "https://discuss.elastic.co/c/x-pack/watcher"
+ monitoring-forum: "https://discuss.elastic.co/c/x-pack/marvel"
+ graph-forum: "https://discuss.elastic.co/c/x-pack/graph"
+ apm-forum: "https://discuss.elastic.co/c/apm"
+ enterprise-search-ref: "https://www.elastic.co/guide/en/enterprise-search/current"
+ app-search-ref: "https://www.elastic.co/guide/en/app-search/current"
+ workplace-search-ref: "https://www.elastic.co/guide/en/workplace-search/current"
+ enterprise-search-node-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/enterprise-search-node/current"
+ enterprise-search-php-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/php/current"
+ enterprise-search-python-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/python/current"
+ enterprise-search-ruby-ref: "https://www.elastic.co/guide/en/enterprise-search-clients/ruby/current"
+ elastic-maps-service: "https://maps.elastic.co"
+ integrations-docs: "https://docs.elastic.co/en/integrations"
+ integrations-devguide: "https://www.elastic.co/guide/en/integrations-developer/current"
+ time-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#time-units"
+ byte-units: "https://www.elastic.co/guide/en/elasticsearch/reference/current/api-conventions.html#byte-units"
+ apm-py-ref-v: "https://www.elastic.co/guide/en/apm/agent/python/current"
+ apm-node-ref-v: "https://www.elastic.co/guide/en/apm/agent/nodejs/current"
+ apm-rum-ref-v: "https://www.elastic.co/guide/en/apm/agent/rum-js/current"
+ apm-ruby-ref-v: "https://www.elastic.co/guide/en/apm/agent/ruby/current"
+ apm-java-ref-v: "https://www.elastic.co/guide/en/apm/agent/java/current"
+ apm-go-ref-v: "https://www.elastic.co/guide/en/apm/agent/go/current"
+ apm-ios-ref-v: "https://www.elastic.co/guide/en/apm/agent/swift/current"
+ apm-dotnet-ref-v: "https://www.elastic.co/guide/en/apm/agent/dotnet/current"
+ apm-php-ref-v: "https://www.elastic.co/guide/en/apm/agent/php/current"
+ ecloud: "Elastic Cloud"
+ esf: "Elastic Serverless Forwarder"
+ ess: "Elasticsearch Service"
+ ece: "Elastic Cloud Enterprise"
+ eck: "Elastic Cloud on Kubernetes"
+ serverless-full: "Elastic Cloud Serverless"
+ serverless-short: "Serverless"
+ es-serverless: "Elasticsearch Serverless"
+ es3: "Elasticsearch Serverless"
+ obs-serverless: "Elastic Observability Serverless"
+ sec-serverless: "Elastic Security Serverless"
+ serverless-docs: "https://docs.elastic.co/serverless"
+ cloud: "https://www.elastic.co/guide/en/cloud/current"
+ ess-utm-params: "?page=docs&placement=docs-body"
+ ess-baymax: "?page=docs&placement=docs-body"
+ ess-trial: "https://cloud.elastic.co/registration?page=docs&placement=docs-body"
+ ess-product: "https://www.elastic.co/cloud/elasticsearch-service?page=docs&placement=docs-body"
+ ess-console: "https://cloud.elastic.co?page=docs&placement=docs-body"
+ ess-console-name: "Elasticsearch Service Console"
+ ess-deployments: "https://cloud.elastic.co/deployments?page=docs&placement=docs-body"
+ ece-ref: "https://www.elastic.co/guide/en/cloud-enterprise/current"
+ eck-ref: "https://www.elastic.co/guide/en/cloud-on-k8s/current"
+ ess-leadin: "You can run Elasticsearch on your own hardware or use our hosted Elasticsearch Service that is available on AWS, GCP, and Azure. https://cloud.elastic.co/registration{ess-utm-params}[Try the Elasticsearch Service for free]."
+ ess-leadin-short: "Our hosted Elasticsearch Service is available on AWS, GCP, and Azure, and you can https://cloud.elastic.co/registration{ess-utm-params}[try it for free]."
+ ess-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"https://cloud.elastic.co/registration{ess-utm-params}\", title=\"Supported on Elasticsearch Service\"]"
+ ece-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud_ece.svg[link=\"https://cloud.elastic.co/registration{ess-utm-params}\", title=\"Supported on Elastic Cloud Enterprise\"]"
+ cloud-only: "This feature is designed for indirect use by https://cloud.elastic.co/registration{ess-utm-params}[Elasticsearch Service], https://www.elastic.co/guide/en/cloud-enterprise/{ece-version-link}[Elastic Cloud Enterprise], and https://www.elastic.co/guide/en/cloud-on-k8s/current[Elastic Cloud on Kubernetes]. Direct use is not supported."
+ ess-setting-change: "image:https://doc-icons.s3.us-east-2.amazonaws.com/logo_cloud.svg[link=\"{ess-trial}\", title=\"Supported on {ess}\"] indicates a change to a supported https://www.elastic.co/guide/en/cloud/current/ec-add-user-settings.html[user setting] for Elasticsearch Service."
+ ess-skip-section: "If you use Elasticsearch Service, skip this section. Elasticsearch Service handles these changes for you."
+ api-cloud: "https://www.elastic.co/docs/api/doc/cloud"
+ api-ece: "https://www.elastic.co/docs/api/doc/cloud-enterprise"
+ api-kibana-serverless: "https://www.elastic.co/docs/api/doc/serverless"
+ es-feature-flag: "This feature is in development and not yet available for use. This documentation is provided for informational purposes only."
+ es-ref-dir: "'{{elasticsearch-root}}/docs/reference'"
+ apm-app: "APM app"
+ uptime-app: "Uptime app"
+ synthetics-app: "Synthetics app"
+ logs-app: "Logs app"
+ metrics-app: "Metrics app"
+ infrastructure-app: "Infrastructure app"
+ siem-app: "SIEM app"
+ security-app: "Elastic Security app"
+ ml-app: "Machine Learning"
+ dev-tools-app: "Dev Tools"
+ ingest-manager-app: "Ingest Manager"
+ stack-manage-app: "Stack Management"
+ stack-monitor-app: "Stack Monitoring"
+ alerts-ui: "Alerts and Actions"
+ rules-ui: "Rules"
+ rac-ui: "Rules and Connectors"
+ connectors-ui: "Connectors"
+ connectors-feature: "Actions and Connectors"
+ stack-rules-feature: "Stack Rules"
+ user-experience: "User Experience"
+ ems: "Elastic Maps Service"
+ ems-init: "EMS"
+ hosted-ems: "Elastic Maps Server"
+ ipm-app: "Index Pattern Management"
+ ingest-pipelines: "ingest pipelines"
+ ingest-pipelines-app: "Ingest Pipelines"
+ ingest-pipelines-cap: "Ingest pipelines"
+ ls-pipelines: "Logstash pipelines"
+ ls-pipelines-app: "Logstash Pipelines"
+ maint-windows: "maintenance windows"
+ maint-windows-app: "Maintenance Windows"
+ maint-windows-cap: "Maintenance windows"
+ custom-roles-app: "Custom Roles"
+ data-source: "data view"
+ data-sources: "data views"
+ data-source-caps: "Data View"
+ data-sources-caps: "Data Views"
+ data-source-cap: "Data view"
+ data-sources-cap: "Data views"
+ project-settings: "Project settings"
+ manage-app: "Management"
+ index-manage-app: "Index Management"
+ data-views-app: "Data Views"
+ rules-app: "Rules"
+ saved-objects-app: "Saved Objects"
+ tags-app: "Tags"
+ api-keys-app: "API keys"
+ transforms-app: "Transforms"
+ connectors-app: "Connectors"
+ files-app: "Files"
+ reports-app: "Reports"
+ maps-app: "Maps"
+ alerts-app: "Alerts"
+ crawler: "Enterprise Search web crawler"
+ ents: "Enterprise Search"
+ app-search-crawler: "App Search web crawler"
+ agent: "Elastic Agent"
+ agents: "Elastic Agents"
+ fleet: "Fleet"
+ fleet-server: "Fleet Server"
+ integrations-server: "Integrations Server"
+ ingest-manager: "Ingest Manager"
+ ingest-management: "ingest management"
+ package-manager: "Elastic Package Manager"
+ integrations: "Integrations"
+ package-registry: "Elastic Package Registry"
+ artifact-registry: "Elastic Artifact Registry"
+ aws: "AWS"
+ stack: "Elastic Stack"
+ xpack: "X-Pack"
+ es: "Elasticsearch"
+ kib: "Kibana"
+ esms: "Elastic Stack Monitoring Service"
+ esms-init: "ESMS"
+ ls: "Logstash"
+ beats: "Beats"
+ auditbeat: "Auditbeat"
+ filebeat: "Filebeat"
+ heartbeat: "Heartbeat"
+ metricbeat: "Metricbeat"
+ packetbeat: "Packetbeat"
+ winlogbeat: "Winlogbeat"
+ functionbeat: "Functionbeat"
+ journalbeat: "Journalbeat"
+ es-sql: "Elasticsearch SQL"
+ esql: "ES|QL"
+ elastic-agent: "Elastic Agent"
+ k8s: "Kubernetes"
+ log-driver-long: "Elastic Logging Plugin for Docker"
+ security: "X-Pack security"
+ security-features: "security features"
+ operator-feature: "operator privileges feature"
+ es-security-features: "Elasticsearch security features"
+ stack-security-features: "Elastic Stack security features"
+ endpoint-sec: "Endpoint Security"
+ endpoint-cloud-sec: "Endpoint and Cloud Security"
+ elastic-defend: "Elastic Defend"
+ elastic-sec: "Elastic Security"
+ elastic-endpoint: "Elastic Endpoint"
+ swimlane: "Swimlane"
+ sn: "ServiceNow"
+ sn-itsm: "ServiceNow ITSM"
+ sn-itom: "ServiceNow ITOM"
+ sn-sir: "ServiceNow SecOps"
+ jira: "Jira"
+ ibm-r: "IBM Resilient"
+ webhook: "Webhook"
+ webhook-cm: "Webhook - Case Management"
+ opsgenie: "Opsgenie"
+ bedrock: "Amazon Bedrock"
+ gemini: "Google Gemini"
+ hive: "TheHive"
+ monitoring: "X-Pack monitoring"
+ monitor-features: "monitoring features"
+ stack-monitor-features: "Elastic Stack monitoring features"
+ watcher: "Watcher"
+ alert-features: "alerting features"
+ reporting: "X-Pack reporting"
+ report-features: "reporting features"
+ graph: "X-Pack graph"
+ graph-features: "graph analytics features"
+ searchprofiler: "Search Profiler"
+ xpackml: "X-Pack machine learning"
+ ml: "machine learning"
+ ml-cap: "Machine learning"
+ ml-init: "ML"
+ ml-features: "machine learning features"
+ stack-ml-features: "Elastic Stack machine learning features"
+ ccr: "cross-cluster replication"
+ ccr-cap: "Cross-cluster replication"
+ ccr-init: "CCR"
+ ccs: "cross-cluster search"
+ ccs-cap: "Cross-cluster search"
+ ccs-init: "CCS"
+ ilm: "index lifecycle management"
+ ilm-cap: "Index lifecycle management"
+ ilm-init: "ILM"
+ dlm: "data lifecycle management"
+ dlm-cap: "Data lifecycle management"
+ dlm-init: "DLM"
+ search-snap: "searchable snapshot"
+ search-snaps: "searchable snapshots"
+ search-snaps-cap: "Searchable snapshots"
+ slm: "snapshot lifecycle management"
+ slm-cap: "Snapshot lifecycle management"
+ slm-init: "SLM"
+ rollup-features: "data rollup features"
+ ipm: "index pattern management"
+ ipm-cap: "Index pattern"
+ rollup: "rollup"
+ rollup-cap: "Rollup"
+ rollups: "rollups"
+ rollups-cap: "Rollups"
+ rollup-job: "rollup job"
+ rollup-jobs: "rollup jobs"
+ rollup-jobs-cap: "Rollup jobs"
+ dfeed: "datafeed"
+ dfeeds: "datafeeds"
+ dfeed-cap: "Datafeed"
+ dfeeds-cap: "Datafeeds"
+ ml-jobs: "machine learning jobs"
+ ml-jobs-cap: "Machine learning jobs"
+ anomaly-detect: "anomaly detection"
+ anomaly-detect-cap: "Anomaly detection"
+ anomaly-job: "anomaly detection job"
+ anomaly-jobs: "anomaly detection jobs"
+ anomaly-jobs-cap: "Anomaly detection jobs"
+ dataframe: "data frame"
+ dataframes: "data frames"
+ dataframe-cap: "Data frame"
+ dataframes-cap: "Data frames"
+ watcher-transform: "payload transform"
+ watcher-transforms: "payload transforms"
+ watcher-transform-cap: "Payload transform"
+ watcher-transforms-cap: "Payload transforms"
+ transform: "transform"
+ transforms: "transforms"
+ transform-cap: "Transform"
+ transforms-cap: "Transforms"
+ dataframe-transform: "transform"
+ dataframe-transform-cap: "Transform"
+ dataframe-transforms: "transforms"
+ dataframe-transforms-cap: "Transforms"
+ dfanalytics-cap: "Data frame analytics"
+ dfanalytics: "data frame analytics"
+ dataframe-analytics-config: "'{dataframe} analytics config'"
+ dfanalytics-job: "'{dataframe} analytics job'"
+ dfanalytics-jobs: "'{dataframe} analytics jobs'"
+ dfanalytics-jobs-cap: "'{dataframe-cap} analytics jobs'"
+ cdataframe: "continuous data frame"
+ cdataframes: "continuous data frames"
+ cdataframe-cap: "Continuous data frame"
+ cdataframes-cap: "Continuous data frames"
+ cdataframe-transform: "continuous transform"
+ cdataframe-transforms: "continuous transforms"
+ cdataframe-transforms-cap: "Continuous transforms"
+ ctransform: "continuous transform"
+ ctransform-cap: "Continuous transform"
+ ctransforms: "continuous transforms"
+ ctransforms-cap: "Continuous transforms"
+ oldetection: "outlier detection"
+ oldetection-cap: "Outlier detection"
+ olscore: "outlier score"
+ olscores: "outlier scores"
+ fiscore: "feature influence score"
+ evaluatedf-api: "evaluate {dataframe} analytics API"
+ evaluatedf-api-cap: "Evaluate {dataframe} analytics API"
+ binarysc: "binary soft classification"
+ binarysc-cap: "Binary soft classification"
+ regression: "regression"
+ regression-cap: "Regression"
+ reganalysis: "regression analysis"
+ reganalysis-cap: "Regression analysis"
+ depvar: "dependent variable"
+ feature-var: "feature variable"
+ feature-vars: "feature variables"
+ feature-vars-cap: "Feature variables"
+ classification: "classification"
+ classification-cap: "Classification"
+ classanalysis: "classification analysis"
+ classanalysis-cap: "Classification analysis"
+ infer-cap: "Inference"
+ infer: "inference"
+ lang-ident-cap: "Language identification"
+ lang-ident: "language identification"
+ data-viz: "Data Visualizer"
+ file-data-viz: "File Data Visualizer"
+ feat-imp: "feature importance"
+ feat-imp-cap: "Feature importance"
+ nlp: "natural language processing"
+ nlp-cap: "Natural language processing"
+ apm-agent: "APM agent"
+ apm-go-agent: "Elastic APM Go agent"
+ apm-go-agents: "Elastic APM Go agents"
+ apm-ios-agent: "Elastic APM iOS agent"
+ apm-ios-agents: "Elastic APM iOS agents"
+ apm-java-agent: "Elastic APM Java agent"
+ apm-java-agents: "Elastic APM Java agents"
+ apm-dotnet-agent: "Elastic APM .NET agent"
+ apm-dotnet-agents: "Elastic APM .NET agents"
+ apm-node-agent: "Elastic APM Node.js agent"
+ apm-node-agents: "Elastic APM Node.js agents"
+ apm-php-agent: "Elastic APM PHP agent"
+ apm-php-agents: "Elastic APM PHP agents"
+ apm-py-agent: "Elastic APM Python agent"
+ apm-py-agents: "Elastic APM Python agents"
+ apm-ruby-agent: "Elastic APM Ruby agent"
+ apm-ruby-agents: "Elastic APM Ruby agents"
+ apm-rum-agent: "Elastic APM Real User Monitoring (RUM) JavaScript agent"
+ apm-rum-agents: "Elastic APM RUM JavaScript agents"
+ apm-lambda-ext: "Elastic APM AWS Lambda extension"
+ project-monitors: "project monitors"
+ project-monitors-cap: "Project monitors"
+ private-location: "Private Location"
+ private-locations: "Private Locations"
+ pwd: "YOUR_PASSWORD"
+ esh: "ES-Hadoop"
+ default-dist: "default distribution"
+ oss-dist: "OSS-only distribution"
+ observability: "Observability"
+ api-request-title: "Request"
+ api-prereq-title: "Prerequisites"
+ api-description-title: "Description"
+ api-path-parms-title: "Path parameters"
+ api-query-parms-title: "Query parameters"
+ api-request-body-title: "Request body"
+ api-response-codes-title: "Response codes"
+ api-response-body-title: "Response body"
+ api-example-title: "Example"
+ api-examples-title: "Examples"
+ api-definitions-title: "Properties"
+ multi-arg: "†footnoteref:[multi-arg,This parameter accepts multiple arguments.]"
+ multi-arg-ref: "†footnoteref:[multi-arg]"
+ yes-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-yes.png[Yes,20,15]"
+ no-icon: "image:https://doc-icons.s3.us-east-2.amazonaws.com/icon-no.png[No,20,15]"
+ es-repo: "https://github.com/elastic/elasticsearch/"
+ es-issue: "https://github.com/elastic/elasticsearch/issues/"
+ es-pull: "https://github.com/elastic/elasticsearch/pull/"
+ es-commit: "https://github.com/elastic/elasticsearch/commit/"
+ kib-repo: "https://github.com/elastic/kibana/"
+ kib-issue: "https://github.com/elastic/kibana/issues/"
+ kibana-issue: "'{kib-repo}issues/'"
+ kib-pull: "https://github.com/elastic/kibana/pull/"
+ kibana-pull: "'{kib-repo}pull/'"
+ kib-commit: "https://github.com/elastic/kibana/commit/"
+ ml-repo: "https://github.com/elastic/ml-cpp/"
+ ml-issue: "https://github.com/elastic/ml-cpp/issues/"
+ ml-pull: "https://github.com/elastic/ml-cpp/pull/"
+ ml-commit: "https://github.com/elastic/ml-cpp/commit/"
+ apm-repo: "https://github.com/elastic/apm-server/"
+ apm-issue: "https://github.com/elastic/apm-server/issues/"
+ apm-pull: "https://github.com/elastic/apm-server/pull/"
+ kibana-blob: "https://github.com/elastic/kibana/blob/current/"
+ apm-get-started-ref: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-server-ref: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-v: "https://www.elastic.co/guide/en/apm/server/current"
+ apm-server-ref-m: "https://www.elastic.co/guide/en/apm/server/master"
+ apm-server-ref-62: "https://www.elastic.co/guide/en/apm/server/6.2"
+ apm-server-ref-64: "https://www.elastic.co/guide/en/apm/server/6.4"
+ apm-server-ref-70: "https://www.elastic.co/guide/en/apm/server/7.0"
+ apm-overview-ref-v: "https://www.elastic.co/guide/en/apm/get-started/current"
+ apm-overview-ref-70: "https://www.elastic.co/guide/en/apm/get-started/7.0"
+ apm-overview-ref-m: "https://www.elastic.co/guide/en/apm/get-started/master"
+ infra-guide: "https://www.elastic.co/guide/en/infrastructure/guide/current"
+ a-data-source: "a data view"
+ icon-bug: "pass:[ ]"
+ icon-checkInCircleFilled: "pass:[ ]"
+ icon-warningFilled: "pass:[ ]"
diff --git a/docs/extend/codec-new-plugin.md b/docs/extend/codec-new-plugin.md
new file mode 100644
index 000000000..00595c640
--- /dev/null
+++ b/docs/extend/codec-new-plugin.md
@@ -0,0 +1,636 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/codec-new-plugin.html
+---
+
+# How to write a Logstash codec plugin [codec-new-plugin]
+
+To develop a new codec for Logstash, build a self-contained Ruby gem whose source code lives in its own GitHub repository. The Ruby gem can then be hosted and shared on RubyGems.org. You can use the example codec implementation as a starting point. (If you’re unfamiliar with Ruby, you can find an excellent quickstart guide at [https://www.ruby-lang.org/en/documentation/quickstart/](https://www.ruby-lang.org/en/documentation/quickstart/).)
+
+## Get started [_get_started_2]
+
+Let’s step through creating a codec plugin using the [example codec plugin](https://github.com/logstash-plugins/logstash-codec-example/).
+
+### Create a GitHub repo for your new plugin [_create_a_github_repo_for_your_new_plugin_2]
+
+Each Logstash plugin lives in its own GitHub repository. To create a new repository for your plugin:
+
+1. Log in to GitHub.
+2. Click the **Repositories** tab. You’ll see a list of other repositories you’ve forked or contributed to.
+3. Click the green **New** button in the upper right.
+4. Specify the following settings for your new repo:
+
+ * **Repository name** — a unique name of the form `logstash-codec-pluginname`.
+ * **Public or Private** — your choice, but the repository must be Public if you want to submit it as an official plugin.
+ * **Initialize this repository with a README** — enables you to immediately clone the repository to your computer.
+
+5. Click **Create Repository**.
+
+
+### Use the plugin generator tool [_use_the_plugin_generator_tool_2]
+
+You can create your own Logstash plugin in seconds! The `generate` subcommand of `bin/logstash-plugin` creates the foundation for a new Logstash plugin with templatized files. It creates the correct directory structure, gemspec files, and dependencies so you can start adding custom code to process data with Logstash.
+
+For more information, see [Generating plugins](/reference/plugin-generator.md)
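+
+For example, a sketch of scaffolding a codec plugin with the generator (the `--type`, `--name`, and `--path` options and the target path follow the plugin generator docs; adjust them for your environment):
+
+```sh
+# Create the skeleton for logstash-codec-mypluginname under ~/workspace
+bin/logstash-plugin generate --type codec --name mypluginname --path ~/workspace
+```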
+
+
+### Copy the codec code [_copy_the_codec_code]
+
+Alternatively, you can use the example repository we host on github.com:
+
+1. **Clone your plugin.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-codec-MYPLUGINNAME.git`
+
+    * alternatively, via ssh: `git clone git@github.com:GITUSERNAME/logstash-codec-MYPLUGINNAME.git`
+
+    * `cd logstash-codec-MYPLUGINNAME`
+
+2. **Clone the codec plugin example and copy it to your plugin branch.**
+
+ You don’t want to include the example .git directory or its contents, so delete it before you copy the example.
+
+ * `cd /tmp`
+    * `git clone https://github.com/logstash-plugins/logstash-codec-example.git`
+ * `cd logstash-codec-example`
+ * `rm -rf .git`
+ * `cp -R * /path/to/logstash-codec-mypluginname/`
+
+3. **Rename the following files to match the name of your plugin.**
+
+ * `logstash-codec-example.gemspec`
+ * `example.rb`
+ * `example_spec.rb`
+
+ ```txt
+ cd /path/to/logstash-codec-mypluginname
+ mv logstash-codec-example.gemspec logstash-codec-mypluginname.gemspec
+ mv lib/logstash/codecs/example.rb lib/logstash/codecs/mypluginname.rb
+ mv spec/codecs/example_spec.rb spec/codecs/mypluginname_spec.rb
+ ```
+
+
+Your file structure should look like this:
+
+```txt
+$ tree logstash-codec-mypluginname
+├── Gemfile
+├── LICENSE
+├── README.md
+├── Rakefile
+├── lib
+│   └── logstash
+│       └── codecs
+│           └── mypluginname.rb
+├── logstash-codec-mypluginname.gemspec
+└── spec
+    └── codecs
+        └── mypluginname_spec.rb
+```
+
+For more information about the Ruby gem file structure and an excellent walkthrough of the Ruby gem creation process, see [http://timelessrepo.com/making-ruby-gems](http://timelessrepo.com/making-ruby-gems)
+
+
+### See what your plugin looks like [_see_what_your_plugin_looks_like_2]
+
+Before we dive into the details, open up the plugin file in your favorite text editor and take a look.
+
+```ruby
+require "logstash/codecs/base"
+require "logstash/codecs/line"
+
+# Add any asciidoc formatted documentation here
+class LogStash::Codecs::Example < LogStash::Codecs::Base
+
+ # This example codec will append a string to the message field
+ # of an event, either in the decoding or encoding methods
+ #
+ # This is only intended to be used as an example.
+ #
+ # input {
+ # stdin { codec => example }
+ # }
+ #
+ # or
+ #
+ # output {
+ # stdout { codec => example }
+ # }
+ config_name "example"
+
+ # Append a string to the message
+ config :append, :validate => :string, :default => ', Hello World!'
+
+ public
+ def register
+ @lines = LogStash::Codecs::Line.new
+ @lines.charset = "UTF-8"
+ end
+
+ public
+ def decode(data)
+ @lines.decode(data) do |line|
+ replace = { "message" => line["message"].to_s + @append }
+ yield LogStash::Event.new(replace)
+ end
+ end # def decode
+
+ public
+ def encode(event)
+ @on_event.call(event, event.get("message").to_s + @append + NL)
+ end # def encode
+
+end # class LogStash::Codecs::Example
+```
+
+
+
+## Coding codec plugins [_coding_codec_plugins]
+
+Now let’s take a line-by-line look at the example plugin.
+
+### `require` Statements [_require_statements_2]
+
+Logstash codec plugins require parent classes defined in `logstash/codecs/base` and `logstash/namespace`:
+
+```ruby
+require "logstash/codecs/base"
+require "logstash/namespace"
+```
+
+Of course, the plugin you build may depend on other code, or even gems. Just put them here along with these Logstash dependencies.
+
+
+
+## Plugin Body [_plugin_body_2]
+
+Let’s go through the various elements of the plugin itself.
+
+### `class` Declaration [_class_declaration_2]
+
+The codec plugin class should be a subclass of `LogStash::Codecs::Base`:
+
+```ruby
+class LogStash::Codecs::Example < LogStash::Codecs::Base
+```
+
+The class name should closely mirror the plugin name, for example:
+
+```ruby
+LogStash::Codecs::Example
+```
+
+
+### `config_name` [_config_name_2]
+
+```ruby
+ config_name "example"
+```
+
+This is the name your plugin will call inside the codec configuration block.
+
+If you set `config_name "example"` in your plugin code, the corresponding Logstash configuration block would need to look like this:
+
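+```ruby
+input {
+  stdin { codec => example }
+}
+```
+
+The `example` name in the `codec =>` setting comes directly from the `config_name` declaration.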
+
+
+## Configuration Parameters [_configuration_parameters_2]
+
+```ruby
+ config :variable_name, :validate => :variable_type, :default => "Default value", :required => boolean, :deprecated => boolean, :obsolete => string
+```
+
+The configuration, or `config` section allows you to define as many (or as few) parameters as are needed to enable Logstash to process events.
+
+There are several configuration attributes:
+
+* `:validate` - allows you to enforce passing a particular data type to Logstash for this configuration option, such as `:string`, `:password`, `:boolean`, `:number`, `:array`, `:hash`, `:path` (a file-system path), `:uri`, `:codec` (since 1.2.0), `:bytes`. Note that this also works as a coercion: if you specify "true" for a boolean (even though it is technically a string), it becomes a valid boolean in the config. This coercion works for the `:number` type as well, where "1.2" becomes a float and "22" is an integer.
+* `:default` - lets you specify a default value for a parameter
+* `:required` - whether or not this parameter is mandatory (a Boolean `true` or `false`)
+* `:list` - whether or not this value should be a list of values. Will typecheck the list members, and convert scalars to one element lists. Note that this mostly obviates the array type, though if you need lists of complex objects that will be more suitable.
+* `:deprecated` - informational (also a Boolean `true` or `false`)
+* `:obsolete` - used to declare that a given setting has been removed and is no longer functioning. The idea is to provide an informed upgrade path to users who are still using a now-removed setting.
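+
+As a sketch, a codec that combines several of these attributes might declare its `config` section like this (the option names are illustrative, not part of the example plugin):
+
+```ruby
+  # Append a string to the message (string, with a default)
+  config :append, :validate => :string, :default => ', Hello World!'
+
+  # A mandatory numeric option
+  config :max_lines, :validate => :number, :required => true
+
+  # A list of strings; scalars are converted to one-element lists
+  config :tags, :validate => :string, :list => true
+
+  # An option kept only for backwards compatibility
+  config :old_append, :validate => :string, :deprecated => true
+```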
+
+
+## Plugin Methods [_plugin_methods_2]
+
+Logstash codecs must implement the `register` method, and the `decode` method or the `encode` method (or both).
+
+### `register` Method [_register_method_2]
+
+```ruby
+ public
+ def register
+ end # def register
+```
+
+The Logstash `register` method is like an `initialize` method. It was originally created to enforce having `super` called, preventing headaches for newbies. (Note: It may go away in favor of `initialize`, in conjunction with some enforced testing to ensure `super` is called.)
+
+`public` means the method can be called anywhere, not just within the class. This is the default behavior for methods in Ruby, but it is specified explicitly here anyway.
+
+You can also assign instance variables here (variables prepended by `@`). Configuration variables are now in scope as instance variables, like `@message`.
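+
+For instance, a sketch of acting on the `:append` option at startup (using the plugin’s standard `@logger`) might look like this:
+
+```ruby
+  public
+  def register
+    # Options declared with `config` are available as instance variables,
+    # so the :append option above is in scope here as @append.
+    @logger.info("example codec registered", :append => @append)
+  end
+```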
+
+
+### `decode` Method [_decode_method]
+
+```ruby
+ public
+ def decode(data)
+ @lines.decode(data) do |line|
+ replace = { "message" => line["message"].to_s + @append }
+ yield LogStash::Event.new(replace)
+ end
+ end # def decode
+```
+
+The codec’s `decode` method is where data coming in from an input is transformed into an event. There are complex examples like the [collectd](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/lib/logstash/codecs/collectd.rb#L386-L484) codec, and simpler examples like the [spool](https://github.com/logstash-plugins/logstash-codec-spool/blob/main/lib/logstash/codecs/spool.rb#L11-L16) codec.
+
+There must be a `yield` statement as part of the `decode` method which will return decoded events to the pipeline.
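+
+Stripped of the line handling above, a minimal sketch of that required shape is:
+
+```ruby
+  public
+  def decode(data)
+    # Turn the raw data into one (or more) events and yield each one to the pipeline
+    yield LogStash::Event.new("message" => data)
+  end
+```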
+
+
+### `encode` Method [_encode_method]
+
+```ruby
+ public
+ def encode(event)
+ @on_event.call(event, event.get("message").to_s + @append + NL)
+ end # def encode
+```
+
+The `encode` method takes an event and serializes it (*encodes*) into another format. Good examples of `encode` methods include the simple [plain](https://github.com/logstash-plugins/logstash-codec-plain/blob/main/lib/logstash/codecs/plain.rb#L39-L46) codec, the slightly more involved [msgpack](https://github.com/logstash-plugins/logstash-codec-msgpack/blob/main/lib/logstash/codecs/msgpack.rb#L38-L46) codec, and even an [avro](https://github.com/logstash-plugins/logstash-codec-avro/blob/main/lib/logstash/codecs/avro.rb#L38-L45) codec.
+
+In most cases, your `encode` method should have an `@on_event.call()` statement. This call will output data per event in the described way.
+
+
+
+## Building the Plugin [_building_the_plugin_2]
+
+At this point in the process you have coded your plugin and are ready to build a Ruby Gem from it. The following information will help you complete the process.
+
+### External dependencies [_external_dependencies_2]
+
+A `require` statement in Ruby is used to include necessary code. In some cases your plugin may require additional files. For example, the collectd plugin [uses](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/lib/logstash/codecs/collectd.rb#L148) the `types.db` file provided by collectd. In the main directory of your plugin, a file called `vendor.json` is where these files are described.
+
+The `vendor.json` file contains an array of JSON objects, each describing a file dependency. This example comes from the [collectd](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/vendor.json) codec plugin:
+
+```txt
+[{
+ "sha1": "a90fe6cc53b76b7bdd56dc57950d90787cb9c96e",
+ "url": "http://collectd.org/files/collectd-5.4.0.tar.gz",
+ "files": [ "/src/types.db" ]
+}]
+```
+
+* `sha1` is the sha1 signature used to verify the integrity of the file referenced by `url`.
+* `url` is the address from where Logstash will download the file.
+* `files` is an optional array of files to extract from the downloaded file. Note that while tar archives can use absolute or relative paths, treat them as absolute in this array. If `files` is not present, all files will be uncompressed and extracted into the vendor directory.
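+
+If you need to produce the `sha1` value for a new dependency, any standard SHA-1 tool will do; for example (not Logstash-specific):
+
+```sh
+# shasum defaults to SHA-1
+shasum collectd-5.4.0.tar.gz
+```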
+
+Another example of the `vendor.json` file is the [`geoip` filter](https://github.com/logstash-plugins/logstash-filter-geoip/blob/main/vendor.json)
+
+To download these dependencies, call `rake vendor`. This is discussed further in the testing section of this document.
+
+Another kind of external dependency is on jar files. This will be described in the "Add a `gemspec` file" section.
+
+
+### Deprecated features [_deprecated_features_2]
+
+As a plugin evolves, an option or feature may no longer serve the intended purpose, and the developer may want to *deprecate* its usage. Deprecation warns users about the option’s status, so they aren’t caught by surprise when it is removed in a later release.
+
+{{ls}} 7.6 introduced a *deprecation logger* to make handling those situations easier. You can use the [adapter](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support) to ensure that your plugin can use the deprecation logger while still supporting older versions of {{ls}}. See the [readme](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support/blob/main/README.md) for more information and for instructions on using the adapter.
+
+Deprecations are noted in the `logstash-deprecation.log` file in the `log` directory.
+
+
+### Add a Gemfile [_add_a_gemfile_2]
+
+Gemfiles allow Ruby’s Bundler to maintain the dependencies for your plugin. Currently, all we need is the Logstash gem for testing, but if you require other gems, add them here.
+
+::::{tip}
+See [Bundler’s Gemfile page](http://bundler.io/gemfile.html) for more details.
+::::
+
+
+```ruby
+source 'https://rubygems.org'
+gemspec
+gem "logstash", :github => "elastic/logstash", :branch => "master"
+```
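+
+If your plugin needs additional gems, they go in the same Gemfile. For example (the gem named here is purely illustrative):
+
+```ruby
+# Purely illustrative: an extra runtime dependency your codec might need
+gem "nokogiri"
+```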
+
+
+
+## Add a `gemspec` file [_add_a_gemspec_file_2]
+
+Gemspecs define the Ruby gem which will be built and contain your plugin.
+
+::::{tip}
+More information can be found on the [Rubygems Specification page](http://guides.rubygems.org/specification-reference/).
+::::
+
+
+```ruby
+Gem::Specification.new do |s|
+ s.name = 'logstash-codec-example'
+ s.version = '0.1.0'
+ s.licenses = ['Apache License (2.0)']
+ s.summary = "This codec does x, y, z in Logstash"
+ s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+ s.authors = ["Elastic"]
+ s.email = 'info@elastic.co'
+ s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+ s.require_paths = ["lib"]
+
+ # Files
+ s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+ # Tests
+ s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+ # Special flag to let us know this is actually a logstash plugin
+ s.metadata = { "logstash_plugin" => "true", "logstash_group" => "codec" }
+
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+end
+```
+
+It is appropriate to change these values to fit your plugin. In particular, `s.name` and `s.summary` should reflect your plugin’s name and behavior.
+
+`s.licenses` and `s.version` are also important and will come into play when you are ready to publish your plugin.
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elastic/logstash/blob/main/LICENSE.txt). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+The gem version, designated by `s.version`, helps track changes to plugins over time. You should use the [semver versioning](http://semver.org/) strategy for version numbers.
+
+### Runtime and Development Dependencies [_runtime_and_development_dependencies_2]
+
+At the bottom of the `gemspec` file is a section with a comment: `Gem dependencies`. This is where any other needed gems must be mentioned. If a gem is necessary for your plugin to function, it is a runtime dependency. If a gem is only used for testing, it is a development dependency.
+
+::::{note}
+You can also have versioning requirements for your dependencies—including other Logstash plugins:
+
+```ruby
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+```
+
+This gemspec has a runtime dependency on the logstash-core-plugin-api and requires that it have a version number greater than or equal to version 1.60 and less than or equal to version 2.99.
+
+::::
+
+
+::::{important}
+All plugins have a runtime dependency on the `logstash-core-plugin-api` gem, and a development dependency on `logstash-devutils`.
+::::
+
+
+
+### Jar dependencies [_jar_dependencies_2]
+
+In some cases, such as the [Elasticsearch output plugin](https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/main/logstash-output-elasticsearch.gemspec#L22-L23), your code may depend on a jar file. In cases such as this, the dependency is added in the gemspec file in this manner:
+
+```ruby
+ # Jar dependencies
+ s.requirements << "jar 'org.elasticsearch:elasticsearch', '5.0.0'"
+ s.add_runtime_dependency 'jar-dependencies'
+```
+
+With these both defined, the install process will search for the required jar file at [http://mvnrepository.com](http://mvnrepository.com) and download the specified version.
+
+
+
+## Document your plugin [_document_your_plugin_2]
+
+Documentation is an important part of your plugin. All plugin documentation is rendered and placed in the [Logstash Reference](/reference/index.md) and the [Versioned plugin docs](logstash-docs://reference/integration-plugins.md).
+
+See [Document your plugin](/extend/plugin-doc.md) for tips and guidelines.
+
+
+## Add Tests [_add_tests_2]
+
+Logstash loves tests. Lots of tests. If you’re using your new codec plugin in a production environment, you’ll want to have some tests to ensure you are not breaking any existing functionality.
+
+::::{note}
+A full exposition on RSpec is outside the scope of this document. Learn more about RSpec at [http://rspec.info](http://rspec.info)
+::::
+
+
+For help learning about tests and testing, look in the `spec/codecs/` directory of several other similar plugins.
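+
+As a rough sketch (assuming the example codec above and the standard `logstash-devutils` spec helper), a spec that exercises `decode` might look like this:
+
+```ruby
+# spec/codecs/example_spec.rb
+require "logstash/devutils/rspec/spec_helper"
+require "logstash/codecs/example"
+
+describe LogStash::Codecs::Example do
+  subject { described_class.new }
+
+  # register sets up the internal line codec used by decode
+  before { subject.register }
+
+  it "appends the configured string when decoding" do
+    decoded = nil
+    subject.decode("Random output string\n") { |event| decoded = event }
+    expect(decoded.get("message")).to eq("Random output string, Hello World!")
+  end
+end
+```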
+
+
+## Clone and test! [_clone_and_test_2]
+
+Now let’s start with a fresh clone of the plugin, build it and run the tests.
+
+* **Clone your plugin into a temporary location.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-codec-MYPLUGINNAME.git`
+
+    * alternatively, via ssh: `git clone git@github.com:GITUSERNAME/logstash-codec-MYPLUGINNAME.git`
+
+    * `cd logstash-codec-MYPLUGINNAME`
+
+
+Then, you’ll need to install your plugin’s dependencies with Bundler:
+
+```sh
+bundle install
+```
+
+::::{important}
+If your plugin has an external file dependency described in `vendor.json`, you must download that dependency before running or testing. You can do this by running:
+
+```sh
+rake vendor
+```
+
+::::
+
+
+And finally, run the tests:
+
+```sh
+bundle exec rspec
+```
+
+You should see a success message, which looks something like this:
+
+```
+Finished in 0.034 seconds
+1 example, 0 failures
+```
+
+Hooray! You’re almost there! (Unless you saw failures… you should fix those first).
+
+
+## Building and Testing [_building_and_testing_2]
+
+Now you’re ready to build your (well-tested) plugin into a Ruby gem.
+
+### Build [_build_2]
+
+You already have all the necessary ingredients, so let’s go ahead and run the build command:
+
+```sh
+gem build logstash-codec-example.gemspec
+```
+
+That’s it! Your gem should be built and located in the same directory, with the name
+
+```sh
+logstash-codec-mypluginname-0.1.0.gem
+```
+
+The `s.version` number from your gemspec file will provide the gem version, in this case, `0.1.0`.
+
+
+### Test installation [_test_installation_2]
+
+You should test install your plugin into a clean installation of Logstash. Download the latest version from the [Logstash downloads page](https://www.elastic.co/downloads/logstash/).
+
+1. Untar and cd into the directory:
+
+ ```sh
+ curl -O https://download.elastic.co/logstash/logstash/logstash-9.0.0.tar.gz
+ tar xzvf logstash-9.0.0.tar.gz
+ cd logstash-9.0.0
+ ```
+
+2. Using the plugin tool, we can install the gem we just built.
+
+ * Replace `/my/logstash/plugins` with the correct path to the gem for your environment, and `0.1.0` with the correct version number from the gemspec file.
+
+ ```sh
+ bin/logstash-plugin install /my/logstash/plugins/logstash-codec-example/logstash-codec-example-0.1.0.gem
+ ```
+
+ * After running this, you should see feedback from Logstash that it was successfully installed:
+
+ ```sh
+ validating /my/logstash/plugins/logstash-codec-example/logstash-codec-example-0.1.0.gem >= 0
+ Valid logstash plugin. Continuing...
+ Successfully installed 'logstash-codec-example' with version '0.1.0'
+ ```
+
+ ::::{tip}
+ You can also use the Logstash plugin tool to determine which plugins are currently available:
+
+ ```sh
+ bin/logstash-plugin list
+ ```
+
+ Depending on what you have installed, you might see a short or long list of plugins: inputs, codecs, filters and outputs.
+
+ ::::
+
+3. Now try running Logstash with a simple configuration passed in via the command-line, using the `-e` flag.
+
+ ::::{note}
+ Your results will depend on what your codec plugin is designed to do.
+ ::::
+
+
+```sh
+bin/logstash -e 'input { stdin{ codec => example{}} } output {stdout { codec => rubydebug }}'
+```
+
+The example codec plugin appends the value of `append` (", Hello World!" by default) to each event’s `message` field.
+
+After starting Logstash, type something, for example "Random output string". The resulting `message` field should contain "Random output string, Hello World!":
+
+```sh
+Random output string
+{
+ "message" => "Random output string, Hello World!",
+ "@version" => "1",
+ "@timestamp" => "2015-01-27T19:17:18.932Z",
+ "host" => "cadenza"
+}
+```
+
+Feel free to experiment and test this by changing the `append` parameter:
+
+```sh
+bin/logstash -e 'input { stdin{ codec => example{ append => ", I am appending this!" }} } output {stdout { codec => rubydebug }}'
+```
+
+Congratulations! You’ve built, deployed and successfully run a Logstash codec.
+
+
+
+## Submitting your plugin to [RubyGems.org](http://rubygems.org) and [logstash-plugins](https://github.com/logstash-plugins) [_submitting_your_plugin_to_rubygems_orghttprubygems_org_and_logstash_pluginshttpsgithub_comlogstash_plugins_2]
+
+Logstash uses [RubyGems.org](http://rubygems.org) as its repository for all plugin artifacts. Once you have developed your new plugin, you can make it available to Logstash users by simply publishing it to RubyGems.org.
+
+### Licensing [_licensing_2]
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elasticsearch/logstash/blob/main/LICENSE). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+
+### Publishing to [RubyGems.org](http://rubygems.org) [_publishing_to_rubygems_orghttprubygems_org_2]
+
+To begin, you’ll need an account on RubyGems.org:
+
+* [Sign-up for a RubyGems account](https://rubygems.org/sign_up).
+
+After creating an account, [obtain](http://guides.rubygems.org/rubygems-org-api/#api-authorization) an API key from RubyGems.org. By default, RubyGems uses the file `~/.gem/credentials` to store your API key. These credentials will be used to publish the gem. Replace `username` and `password` with the credentials you created at RubyGems.org:
+
+```sh
+curl -u username:password https://rubygems.org/api/v1/api_key.yaml > ~/.gem/credentials
+chmod 0600 ~/.gem/credentials
+```
+
+Before proceeding, make sure you have the right version in your gemspec file and commit your changes.
+
+* `s.version = '0.1.0'`
+
+To publish version 0.1.0 of your new logstash gem:
+
+```sh
+bundle install
+bundle exec rake vendor
+bundle exec rspec
+bundle exec rake publish_gem
+```
+
+::::{note}
+Executing `rake publish_gem`:
+
+1. Reads the version from the gemspec file (`s.version = '0.1.0'`)
+2. Checks in your local repository if a tag exists for that version. If the tag already exists, it aborts the process. Otherwise, it creates a new version tag in your local repository.
+3. Builds the gem
+4. Publishes the gem to RubyGems.org
+
+::::
+
+
+That’s it! Your plugin is published! Logstash users can now install your plugin by running:
+
+```sh
+bin/logstash-plugin install logstash-codec-mypluginname
+```
+
+
+
+## Contributing your source code to [logstash-plugins](https://github.com/logstash-plugins) [_contributing_your_source_code_to_logstash_pluginshttpsgithub_comlogstash_plugins_2]
+
+It is not required to contribute your source code to the [logstash-plugins](https://github.com/logstash-plugins) GitHub organization, but we always welcome new plugins!
+
+### Benefits [_benefits_2]
+
+Some of the many benefits of having your plugin in the logstash-plugins repository are:
+
+* **Discovery.** Your plugin will appear in the [Logstash Reference](/reference/index.md), where Logstash users look first for plugins and documentation.
+* **Documentation.** Your plugin documentation will automatically be added to the [Logstash Reference](/reference/index.md).
+* **Testing.** With our testing infrastructure, your plugin will be continuously tested against current and future releases of Logstash. As a result, users will have the assurance that if incompatibilities arise, they will be quickly discovered and corrected.
+
+
+### Acceptance Guidelines [_acceptance_guidelines_2]
+
+* **Code Review.** Your plugin must be reviewed by members of the community for coherence, quality, readability, stability and security.
+* **Tests.** Your plugin must contain tests to be accepted. These tests are also subject to code review for scope and completeness. It’s ok if you don’t know how to write tests — we will guide you. We are working on publishing a guide to creating tests for Logstash which will make it easier. In the meantime, you can refer to [http://betterspecs.org/](http://betterspecs.org/) for examples.
+
+To begin migrating your plugin to logstash-plugins, simply create a new [issue](https://github.com/elasticsearch/logstash/issues) in the Logstash repository. When the acceptance guidelines are completed, we will facilitate the move to the logstash-plugins organization using the recommended [github process](https://help.github.com/articles/transferring-a-repository/#transferring-from-a-user-to-an-organization).
diff --git a/docs/extend/community-maintainer.md b/docs/extend/community-maintainer.md
new file mode 100644
index 000000000..686d0012c
--- /dev/null
+++ b/docs/extend/community-maintainer.md
@@ -0,0 +1,193 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/community-maintainer.html
+---
+
+# Logstash Plugins Community Maintainer Guide [community-maintainer]
+
+This document, to be read by new Maintainers, should explain their responsibilities. It was inspired by the [C4](http://rfc.zeromq.org/spec:22) document from the ZeroMQ project. This document is subject to change and suggestions through Pull Requests and issues are strongly encouraged.
+
+
+## Contribution Guidelines [_contribution_guidelines]
+
+For general guidance around contributing to Logstash Plugins, see the [*Contributing to Logstash*](/extend/index.md) section.
+
+
+## Document Goals [_document_goals]
+
+To help make participation in the Logstash plugins community easy, with positive feedback.
+
+To increase diversity.
+
+To reduce code review, merge and release dependencies on the core team by providing support and tools to the Community and Maintainers.
+
+To support the natural life cycle of a plugin.
+
+To codify the roles and responsibilities of Maintainers and Contributors, with specific focus on patch testing, code review, merging, and release.
+
+
+## Development Workflow [_development_workflow]
+
+All Issues and Pull Requests must be tracked using the Github issue tracker.
+
+The plugin uses the [Apache 2.0 license](http://www.apache.org/licenses/LICENSE-2.0). Maintainers should check whether a patch introduces code which has an incompatible license. Patch ownership and copyright is defined in the Elastic [Contributor License Agreement](https://www.elastic.co/contributor-agreement) (CLA).
+
+
+### Terminology [_terminology_2]
+
+A "Contributor" is a role a person assumes when providing a patch. Contributors will not have commit access to the repository. They need to sign the Elastic [Contributor License Agreement](https://www.elastic.co/contributor-agreement) before a patch can be reviewed. Contributors can add themselves to the plugin Contributor list.
+
+A "Maintainer" is a role a person assumes when maintaining a plugin and keeping it healthy, including triaging issues, and reviewing and merging patches.
+
+
+### Patch Requirements [_patch_requirements]
+
+A patch is a minimal and accurate answer to exactly one identified and agreed upon problem. It must conform to the [code style guidelines](https://github.com/elastic/logstash/blob/main/STYLE.md) and must include RSpec tests that verify the fitness of the solution.
+
+A patch will be automatically tested by a CI system that will report on the Pull Request status.
+
+A patch CLA will be automatically verified and reported on the Pull Request status.
+
+A patch commit message has a single short (less than 50 character) first line summarizing the change, a blank second line, and any additional lines as necessary for change explanation and rationale.
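+
+For example, a commit message following this convention might look like this (the change described is purely illustrative):
+
+```txt
+Fix charset handling in the decode buffer
+
+Previously, multi-byte characters split across two chunks were
+dropped. Buffer incomplete sequences until the full character
+arrives, and add a spec covering the split-character case.
+```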
+
+A patch is mergeable when it satisfies the above requirements and has been reviewed positively by at least one other person.
+
+
+### Development Process [_development_process]
+
+A user will log an issue on the issue tracker describing the problem they face or observe with as much detail as possible.
+
+To work on an issue, a Contributor forks the plugin repository and then works on their forked repository and submits a patch by creating a pull request back to the plugin.
+
+Maintainers must not merge patches where the author has not signed the CLA.
+
+Before a patch can be accepted it should be reviewed. Maintainers should merge accepted patches without delay.
+
+Maintainers should not merge their own patches except in exceptional cases, such as non-responsiveness from other Maintainers or core team for an extended period (more than 2 weeks).
+
+Reviewer’s comments should not be based on personal preferences.
+
+The Maintainers should label Issues and Pull Requests.
+
+Maintainers should involve the core team if help is needed to reach consensus.
+
+Review non-source changes such as documentation in the same way as source code changes.
+
+
+### Branch Management [_branch_management]
+
+The plugin has a main branch that always holds the latest in-progress version and should always build. Topic branches should be kept to a minimum.
+
+
+### Changelog Management [_changelog_management]
+
+Every plugin should have a changelog (CHANGELOG.md). If not, please create one. When changes are made to a plugin, make sure to include a changelog entry under the respective version to document the change. The changelog should be easily understood from a user point of view. As we iterate and release plugins rapidly, users use the changelog as a mechanism for deciding whether to update.
+
+Changes that are not user facing should be tagged as `internal:`. For example:
+
+```markdown
+ - internal: Refactored specs for better testing
+ - config: Default timeout configuration changed from 10s to 5s
+```
+
+
+#### Detailed format of `CHANGELOG.md` [_detailed_format_of_changelog_md]
+
+Sharing a similar `CHANGELOG.md` format across plugins eases readability for users. Please see the following annotated example, and see a concrete example in [logstash-filter-date](https://raw.githubusercontent.com/logstash-plugins/logstash-filter-date/main/CHANGELOG.md).
+
+```markdown
+## 1.0.x <1>
+ - change description <2>
+ - tag: change description <3>
+ - tag1,tag2: change description <4>
+ - tag: Multi-line description <5>
+ must be indented and can use
+ additional markdown syntax
+ <6>
+## 1.0.0 <7>
+[...]
+```
+
+1. The latest version is the first line of `CHANGELOG.md`. Each version identifier should be a level-2 header using `##`
+2. One change description is described as a list item using a dash `-` aligned under the version identifier
+3. One change can be tagged by a word and suffixed by `:`. Common tags are `bugfix`, `feature`, `doc`, `test` or `internal`.
+4. One change can have multiple tags separated by a comma and suffixed by `:`
+5. A multi-line change description must be properly indented
+6. Please take care to **separate versions with an empty line**
+7. Previous version identifier
+
+
+
+### Continuous Integration [_continuous_integration]
+
+Plugins are set up with automated continuous integration (CI) environments, and there should be a corresponding badge on each Github page. If it’s missing, please contact the Logstash core team.
+
+Every Pull Request opened automatically triggers a CI run. To conduct a manual run, comment “Jenkins, please test this.” on the Pull Request.
+
+
+## Versioning Plugins [_versioning_plugins]
+
+Logstash core and its plugins have separate product development lifecycles. Hence, the versioning and release strategies for the core and plugins do not have to be aligned. In fact, this was one of our goals during the great separation of plugins work in Logstash 1.5.
+
+At times, there will be changes to the core API in Logstash that require a mass update of plugins to reflect those changes. However, this does not happen frequently.
+
+For plugins, we would like to adhere to a versioning and release strategy that can better inform our users about any breaking changes to the Logstash configuration formats and functionality.
+
+Plugin releases follow a three-place numbering scheme, X.Y.Z, where X denotes a major release version that may break compatibility with existing configuration or functionality, Y denotes releases that include backward-compatible features, and Z denotes releases that include bug fixes and patches.
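+
+For example, under this scheme:
+
+```txt
+2.0.0 -> 3.0.0   X: breaking change to configuration or functionality
+2.0.0 -> 2.1.0   Y: backward-compatible feature
+2.1.0 -> 2.1.1   Z: bug fix or patch
+```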
+
+
+### Changing the version [_changing_the_version]
+
+The version can be changed in the gemspec, and the change needs to be associated with a changelog entry. Following this, we can publish the gem to RubyGems.org manually. At this point, only the core developers can publish a gem.
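+
+For example, bumping the patch version in a plugin gemspec (the plugin name here is illustrative):
+
+```ruby
+# logstash-filter-example.gemspec
+Gem::Specification.new do |s|
+  s.name    = 'logstash-filter-example'
+  s.version = '3.0.1' # was 3.0.0; add a matching changelog entry for this release
+  # ...
+end
+```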
+
+
+### Labeling [_labeling]
+
+Labeling is a critical aspect of maintaining plugins. All issues in GitHub should be labeled correctly so that the labels can:
+
+* Provide good feedback to users/developers
+* Help prioritize changes
+* Be used in release notes
+
+Most labels are self-explanatory, but here’s a quick recap of a few important labels:
+
+* `bug`: Labels an issue as an unintentional defect
+* `needs details`: If the issue reporter has provided incomplete details, please ask them for more info and label it as `needs details`.
+* `missing cla`: Contributor License Agreement is missing and patch cannot be accepted without it
+* `adopt me`: Ask for help from the community to take over this issue
+* `enhancement`: New feature, not a bug fix
+* `needs tests`: Patch has no tests, and cannot be accepted without unit/integration tests
+* `docs`: Documentation related issue/PR
+
+
+## Logging [_logging]
+
+Although it’s important not to bog down performance with excessive logging, debug level logs can be immensely helpful when diagnosing and troubleshooting issues with Logstash. Please remember to liberally add debug logs wherever it makes sense as users will be forever grateful.
+
+```ruby
+@logger.debug("Logstash loves debug logs!", :actions => actions)
+```
+
+
+## Contributor License Agreement (CLA) Guidance [_contributor_license_agreement_cla_guidance]
+
+Why is a [CLA](https://www.elastic.co/contributor-agreement) required?
+: We ask this of all Contributors in order to assure our users of the origin and continuing existence of the code. We are not asking Contributors to assign copyright to us, but to give us the right to distribute a Contributor’s code without restriction.
+
+Please make sure the CLA is signed by every Contributor prior to reviewing PRs and commits.
+: Contributors only need to sign the CLA once and should sign with the same email as used in Github. If a Contributor signs the CLA after a PR is submitted, they can refresh the automated CLA checker by posting another comment on the PR 5 minutes after signing.
+
+
+## Need Help? [_need_help]
+
+Ping @logstash-core on Github to get the attention of the Logstash core team.
+
+
+## Community Administration [_community_administration]
+
+The core team is there to support the plugin Maintainers and overall ecosystem.
+
+Maintainers should propose Contributors to become a Maintainer.
+
+Contributors and Maintainers should follow the Elastic Community [Code of Conduct](https://www.elastic.co/community/codeofconduct). The core team should block or ban "bad actors".
+
diff --git a/docs/extend/contribute-to-core.md b/docs/extend/contribute-to-core.md
new file mode 100644
index 000000000..fbc7043ca
--- /dev/null
+++ b/docs/extend/contribute-to-core.md
@@ -0,0 +1,11 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/contribute-to-core.html
+---
+
+# Extending Logstash core [contribute-to-core]
+
+We also welcome contributions and bug fixes to the Logstash core feature set.
+
+Please read through our [contribution](https://github.com/elastic/logstash/blob/main/CONTRIBUTING.md) guide, and the Logstash [readme](https://github.com/elastic/logstash/blob/main/README.md) document.
+
diff --git a/docs/extend/contributing-patch-plugin.md b/docs/extend/contributing-patch-plugin.md
new file mode 100644
index 000000000..a1621fd56
--- /dev/null
+++ b/docs/extend/contributing-patch-plugin.md
@@ -0,0 +1,386 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/contributing-patch-plugin.html
+---
+
+# Contributing a patch to a Logstash plugin [contributing-patch-plugin]
+
+This section discusses the information you need to know to successfully contribute a patch to a Logstash plugin.
+
+Each plugin defines its own configuration options. These control the behavior of the plugin to some degree. Configuration option definitions commonly include:
+
+* Data validation
+* Default value
+* Any required flags
+
+Plugins are subclasses of a Logstash base class. A plugin’s base class defines common configuration and methods.
+
+## Input plugins [contrib-patch-input]
+
+Input plugins ingest data from an external source. An input plugin always has an associated codec plugin. Input and codec plugins operate in conjunction to create a Logstash event and add that event to the processing queue. An input plugin is a subclass of the `LogStash::Inputs::Base` class.
+
+### Input API [input-api]
+
+`#register() -> nil`
+: Required. This API sets up resources for the plugin, typically the connection to the external source.
+
+`#run(queue) -> nil`
+: Required. This API fetches or listens for source data, typically looping until stopped. Must handle errors inside the loop. Pushes any created events to the queue object specified in the method argument. Some inputs may receive batched data to minimize the external call overhead.
+
+`#stop() -> nil`
+: Optional. Stops external connections and cleans up.
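+
+A minimal input sketch, assuming the API above (the plugin name and option are hypothetical):
+
+```ruby
+require "logstash/inputs/base"
+require "logstash/namespace"
+require "stud/interval"
+require "socket"
+
+class LogStash::Inputs::Ticker < LogStash::Inputs::Base
+  config_name "ticker"
+
+  # Seconds to wait between generated events.
+  config :interval, :validate => :number, :default => 1
+
+  def register
+    # Set up resources or the connection to the external source here.
+    @host = Socket.gethostname
+  end
+
+  def run(queue)
+    # Loop until the pipeline signals the plugin to stop.
+    until stop?
+      queue << LogStash::Event.new("message" => "tick", "host" => @host)
+      Stud.stoppable_sleep(@interval) { stop? }
+    end
+  end
+
+  def stop
+    # Close connections and clean up external resources here.
+  end
+end
+```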
+
+
+
+## Codec plugins [contrib-patch-codec]
+
+Codec plugins decode input data that has a specific structure, such as JSON input data. A codec plugin is a subclass of `LogStash::Codecs::Base`.
+
+### Codec API [codec-api]
+
+`#register() -> nil`
+: Identical to the API of the same name for input plugins.
+
+`#decode(data){|event| block} -> nil`
+: Must be implemented. Used to create an Event from the raw data given in the method argument. Must handle errors. The caller must provide a Ruby block. The block is called with the created Event.
+
+`#encode(event) -> nil`
+: Required. Used to create a structured data object from the given Event. May handle errors. This method calls a block that was previously stored as `@on_event` with two arguments: the original event and the data object.
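+
+A minimal codec sketch, assuming the API above (the codec name is hypothetical):
+
+```ruby
+require "logstash/codecs/base"
+require "logstash/namespace"
+
+class LogStash::Codecs::Lines < LogStash::Codecs::Base
+  config_name "lines"
+
+  def register
+  end
+
+  # Create one event per line and hand each to the caller's block.
+  def decode(data)
+    data.split("\n").each do |line|
+      yield LogStash::Event.new("message" => line)
+    end
+  end
+
+  # Serialize the event and pass both the event and the data to the stored @on_event block.
+  def encode(event)
+    @on_event.call(event, event.get("message").to_s + "\n")
+  end
+end
+```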
+
+
+
+## Filter plugins [contrib-patch-filter]
+
+A mechanism to change, mutate or merge one or more Events. A filter plugin is a subclass of the `LogStash::Filters::Base` class.
+
+### Filter API [filter-api]
+
+`#register() -> nil`
+: Identical to the API of the same name for input plugins.
+
+`#filter(event) -> nil`
+: Required. May handle errors. Used to apply a mutation function to the given event.
+
+
+
+## Output plugins [contrib-patch-output]
+
+A mechanism to send an event to an external destination. This process may require serialization. An output plugin is a subclass of the `LogStash::Outputs::Base` class.
+
+### Output API [output-api]
+
+`#register() -> nil`
+: Identical to the API of the same name for input plugins.
+
+`#receive(event) -> nil`
+: Required. Must handle errors. Used to prepare the given event for transmission to the external destination. Some outputs may buffer the prepared events to batch transmit to the destination.
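+
+A minimal output sketch, assuming the API above (the output name is hypothetical):
+
+```ruby
+require "logstash/outputs/base"
+require "logstash/namespace"
+
+class LogStash::Outputs::Printer < LogStash::Outputs::Base
+  config_name "printer"
+
+  def register
+    # Open the connection to the external destination here.
+  end
+
+  def receive(event)
+    # Prepare and transmit the event; handle errors so a bad event
+    # does not break the pipeline.
+    $stdout.puts(event.to_json)
+  rescue => e
+    @logger.error("Failed to write event", :exception => e)
+  end
+end
+```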
+
+
+
+## Process [patch-process]
+
+A bug or feature is identified. An issue is created in the plugin repository. A patch is created and a pull request (PR) is submitted. After review and possible rework the PR is merged and the plugin is published.
+
+The [Community Maintainer Guide](/extend/community-maintainer.md) explains, in more detail, the process of getting a patch accepted, merged and published. The Community Maintainer Guide also details the roles that contributors and maintainers are expected to perform.
+
+
+## Testing methodologies [test-methods]
+
+### Test driven development [tdd]
+
+Test driven development (TDD) describes a methodology for using tests to guide evolution of source code. For our purposes, we use only a part of it. Before writing the fix, we create tests that illustrate the bug by failing. We stop when we have written enough code to make the tests pass and submit the fix and tests as a patch. It is not necessary to write the tests before the fix, but it is very easy to write a passing test afterwards that may not actually verify that the fault is really fixed, especially if the fault can be triggered via multiple execution paths or varying input data.
+
+
+### RSpec framework [rspec]
+
+Logstash uses RSpec, a Ruby testing framework, to define and run the test suite. What follows is a summary of various sources.
+
+```ruby
+ 2 require "logstash/devutils/rspec/spec_helper"
+ 3 require "logstash/plugin"
+ 4
+ 5 describe "outputs/riemann" do
+ 6 describe "#register" do
+ 7 let(:output) do
+ 8 LogStash::Plugin.lookup("output", "riemann").new(configuration)
+ 9 end
+10
+11 context "when no protocol is specified" do
+12 let(:configuration) { Hash.new }
+13
+14 it "the method completes without error" do
+15 expect {output.register}.not_to raise_error
+16 end
+17 end
+18
+19 context "when a bad protocol is specified" do
+20 let(:configuration) { {"protocol" => "fake"} }
+21
+22 it "the method fails with error" do
+23 expect {output.register}.to raise_error
+24 end
+25 end
+26
+27 context "when the tcp protocol is specified" do
+28 let(:configuration) { {"protocol" => "tcp"} }
+29
+30 it "the method completes without error" do
+31 expect {output.register}.not_to raise_error
+32 end
+33 end
+34 end
+35
+36 describe "#receive" do
+37 let(:output) do
+38 LogStash::Plugin.lookup("output", "riemann").new(configuration)
+39 end
+40
+41 context "when operating normally" do
+42 let(:configuration) { Hash.new }
+43 let(:event) do
+44 data = {"message"=>"hello", "@version"=>"1",
+45 "@timestamp"=>"2015-06-03T23:34:54.076Z",
+46 "host"=>"vagrant-ubuntu-trusty-64"}
+47 LogStash::Event.new(data)
+48 end
+49
+50 before(:example) do
+51 output.register
+52 end
+53
+54 it "should accept the event" do
+55 expect { output.receive event }.not_to raise_error
+56 end
+57 end
+58 end
+59 end
+```
+
+```ruby
+describe(string){block} -> nil
+describe(Class){block} -> nil
+```
+
+With RSpec, we are always describing the plugin method behavior. The `describe` block is added in logical sections and can accept either an existing class name or a string. The string used in line 5 is the plugin name. Line 6 is the register method, line 36 is the receive method. It is an RSpec convention to prefix instance methods with one hash and class methods with one dot.
+
+```ruby
+context(string){block} -> nil
+```
+
+In RSpec, `context` blocks define sections that group tests by a variation. The string should start with the word `when` and then detail the variation. See line 11. The tests in the context block should only be for that variation.
+
+```ruby
+let(symbol){block} -> nil
+```
+
+In RSpec, `let` blocks define resources for use in the test blocks. These resources are reinitialized for every test block. They are available as method calls inside the test block. Define `let` blocks in `describe` and `context` blocks, which scope the `let` block and any other nested blocks. You can use other `let` methods defined later within the `let` block body. See lines 7-9, which define the output resource and use the configuration method, defined with different variations in lines 12, 20 and 28.
+
+```ruby
+before(symbol){block} -> nil - symbol is one of :suite, :context, :example, but :all and :each are synonyms for :context and :example respectively.
+```
+
+In RSpec, `before` blocks are used to further set up any resources that would have been initialized in a `let` block. You cannot define `let` blocks inside `before` blocks.
+
+You can also define `after` blocks, which are typically used to clean up any setup activity performed by a `before` block.
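+
+For example, a `before` block paired with an `after` block, reusing the `output` resource from the spec above (the cleanup call is illustrative):
+
+```ruby
+before(:example) do
+  output.register   # set up the resource defined by the `let` block
+end
+
+after(:example) do
+  output.do_close   # clean up whatever the before block set up
+end
+```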
+
+```ruby
+it(string){block} -> nil
+```
+
+In RSpec, `it` blocks set the expectations that verify the behavior of the tested code. The string should not start with *it* or *should*, but needs to express the outcome of the expectation. When put together the texts from the enclosing describe, `context` and `it` blocks should form a fairly readable sentence, as in lines 5, 6, 11 and 14:
+
+```ruby
+outputs/riemann
+#register when no protocol is specified the method completes without error
+```
+
+Readable code like this makes the goals of tests easy to understand.
+
+```ruby
+expect(object){block} -> nil
+```
+
+In RSpec, the `expect` method verifies a statement that compares an actual result to an expected result. The `expect` method is usually paired with a call to the `to` or `not_to` methods. Use the block form when expecting errors or observing for changes. The `to` or `not_to` methods require a `matcher` object that encapsulates the expected value. The argument form of the `expect` method encapsulates the actual value. When put together, the whole line tests the actual against the expected value.
+
+```ruby
+raise_error(error class|nil) -> matcher instance
+be(object) -> matcher instance
+eq(object) -> matcher instance
+eql(object) -> matcher instance
+ for more see http://www.relishapp.com/rspec/rspec-expectations/docs/built-in-matchers
+```
+
+In RSpec, a matcher is an object generated by the equivalent method call (`be`, `eq`) that will be used to evaluate the expected against the actual values.
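+
+For example, pairing `expect` with matchers, using the event data from the spec above:
+
+```ruby
+expect { output.register }.not_to raise_error
+expect(event.get("message")).to eq("hello")
+```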
+
+
+
+## Putting it all together [all-together]
+
+This example fixes an [issue](https://github.com/logstash-plugins/logstash-output-zeromq/issues/9) in the ZeroMQ output plugin. The issue does not require knowledge of ZeroMQ.
+
+The activities in this example have the following prerequisites:
+
+* A minimal knowledge of Git and Github. See the [Github boot camp](https://help.github.com/categories/bootcamp/).
+* A text editor.
+* A JRuby [runtime](https://www.ruby-lang.org/en/documentation/installation/#managers) [environment](https://howistart.org/posts/ruby/1). The `chruby` tool manages Ruby versions.
+* JRuby 1.7.22 or later.
+* The `bundler` and `rake` gems installed.
+* ZeroMQ [installed](http://zeromq.org/intro:get-the-software).
+
+1. In Github, fork the ZeroMQ [output plugin repository](https://github.com/logstash-plugins/logstash-output-zeromq).
+2. On your local machine, [clone](https://help.github.com/articles/fork-a-repo/) the fork to a known folder such as `logstash/`.
+3. Open the following files in a text editor:
+
+ * `logstash-output-zeromq/lib/logstash/outputs/zeromq.rb`
+ * `logstash-output-zeromq/lib/logstash/util/zeromq.rb`
+ * `logstash-output-zeromq/spec/outputs/zeromq_spec.rb`
+
+4. According to the issue, log output in server mode must indicate `bound`. Furthermore, the test file contains no tests.
+
+ ::::{note}
+ Line 21 of `util/zeromq.rb` reads `@logger.info("0mq: #{server? ? 'connected' : 'bound'}", :address => address)`
+ ::::
+
+5. In the text editor, require `zeromq.rb` for the file `zeromq_spec.rb` by adding the following lines:
+
+ ```ruby
+ require "logstash/outputs/zeromq"
+ require "logstash/devutils/rspec/spec_helper"
+ ```
+
+6. The desired error message should read:
+
+ ```ruby
+ LogStash::Outputs::ZeroMQ when in server mode a 'bound' info line is logged
+ ```
+
+ To properly generate this message, add a `describe` block with the fully qualified class name as the argument, a context block, and an `it` block.
+
+ ```ruby
+ describe LogStash::Outputs::ZeroMQ do
+ context "when in server mode" do
+ it "a 'bound' info line is logged" do
+ end
+ end
+ end
+ ```
+
+7. To add the missing test, use an instance of the ZeroMQ output and a substitute logger. This example uses an RSpec feature called *test doubles* as the substitute logger.
+
+ Add the following lines to `zeromq_spec.rb`, after `describe LogStash::Outputs::ZeroMQ do` and before `context "when in server mode" do`:
+
+ ```ruby
+    let(:output) { described_class.new("mode" => "server", "topology" => "pushpull") }
+ let(:tracer) { double("logger") }
+ ```
+
+8. Add the body to the `it` block. Add the following five lines inside the `it` block you created earlier:
+
+ ```ruby
+ allow(tracer).to receive(:debug)<1>
+    output.logger = tracer<2>
+ expect(tracer).to receive(:info).with("0mq: bound", {:address=>"tcp://127.0.0.1:2120"})<3>
+ output.register<4>
+ output.do_close<5>
+ ```
+
+
+1. Allow the double to receive `debug` method calls.
+2. Make the output use the test double.
+3. Set an expectation on the test to receive an `info` method call.
+4. Call `register` on the output.
+5. Call `do_close` on the output so the test does not hang.
+
+
+At the end of the modifications, the relevant code section reads:
+
+```ruby
+require "logstash/outputs/zeromq"
+require "logstash/devutils/rspec/spec_helper"
+
+describe LogStash::Outputs::ZeroMQ do
+ let(:output) { described_class.new("mode" => "server", "topology" => "pushpull") }
+ let(:tracer) { double("logger") }
+
+ context "when in server mode" do
+    it "a 'bound' info line is logged" do
+ allow(tracer).to receive(:debug)
+ output.logger = tracer
+ expect(tracer).to receive(:info).with("0mq: bound", {:address=>"tcp://127.0.0.1:2120"})
+ output.register
+ output.do_close
+ end
+ end
+end
+```
+
+To run this test:
+
+1. Open a terminal window
+2. Navigate to the cloned plugin folder
+3. The first time you run the test, run the command `bundle install`
+4. Run the command `bundle exec rspec`
+
+Assuming all prerequisites were installed correctly, the test fails with output similar to:
+
+```shell
+Using Accessor#strict_set for specs
+Run options: exclude {:redis=>true, :socket=>true, :performance=>true, :couchdb=>true, :elasticsearch=>true,
+:elasticsearch_secure=>true, :export_cypher=>true, :integration=>true, :windows=>true}
+
+LogStash::Outputs::ZeroMQ
+ when in server mode
+    a 'bound' info line is logged (FAILED - 1)
+
+Failures:
+
+  1) LogStash::Outputs::ZeroMQ when in server mode a 'bound' info line is logged
+ Failure/Error: output.register
+ Double "logger" received :info with unexpected arguments
+ expected: ("0mq: bound", {:address=>"tcp://127.0.0.1:2120"})
+ got: ("0mq: connected", {:address=>"tcp://127.0.0.1:2120"})
+ # ./lib/logstash/util/zeromq.rb:21:in `setup'
+ # ./lib/logstash/outputs/zeromq.rb:92:in `register'
+ # ./lib/logstash/outputs/zeromq.rb:91:in `register'
+ # ./spec/outputs/zeromq_spec.rb:13:in `(root)'
+ # /Users/guy/.gem/jruby/1.9.3/gems/rspec-wait-0.0.7/lib/rspec/wait.rb:46:in `(root)'
+
+Finished in 0.133 seconds (files took 1.28 seconds to load)
+1 example, 1 failure
+
+Failed examples:
+
+rspec ./spec/outputs/zeromq_spec.rb:10 # LogStash::Outputs::ZeroMQ when in server mode a 'bound' info line is logged
+
+Randomized with seed 2568
+```
+
+To correct the error, open the `util/zeromq.rb` file in your text editor and swap the positions of the words `connected` and `bound` on line 21. Line 21 now reads:
+
+```ruby
+@logger.info("0mq: #{server? ? 'bound' : 'connected'}", :address => address)
+```
+
+Run the test again with the `bundle exec rspec` command.
+
+The test passes with output similar to:
+
+```shell
+Using Accessor#strict_set for specs
+Run options: exclude {:redis=>true, :socket=>true, :performance=>true, :couchdb=>true, :elasticsearch=>true, :elasticsearch_secure=>true, :export_cypher=>true, :integration=>true, :windows=>true}
+
+LogStash::Outputs::ZeroMQ
+ when in server mode
+    a 'bound' info line is logged
+
+Finished in 0.114 seconds (files took 1.22 seconds to load)
+1 example, 0 failures
+
+Randomized with seed 45887
+```
+
+[Commit](https://help.github.com/articles/fork-a-repo/#next-steps) the changes to git and Github.
+
+Your pull request is visible from the [Pull Requests](https://github.com/logstash-plugins/logstash-output-zeromq/pulls) section of the original Github repository. The plugin maintainers review your work, suggest changes if necessary, and merge and publish a new version of the plugin.
+
+
diff --git a/docs/extend/create-logstash-plugins.md b/docs/extend/create-logstash-plugins.md
new file mode 100644
index 000000000..7dc5d5ba5
--- /dev/null
+++ b/docs/extend/create-logstash-plugins.md
@@ -0,0 +1,47 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/contributing-java-plugin.html
+---
+
+# Create Logstash plugins [contributing-java-plugin]
+
+Now you can write your own Java plugin for use with {{ls}}. We have provided instructions and GitHub examples to give you a head start.
+
+Native support for Java plugins in {{ls}} consists of several components:
+
+* Extensions to the Java execution engine to support running Java plugins in Logstash pipelines
+* APIs for developing Java plugins. The APIs are in the `co.elastic.logstash.api` package. A Java plugin might break if it references classes or specific concrete implementations of API interfaces outside that package. The implementation of classes outside of the API package may change at any time.
+* Tooling to automate the packaging and deployment of Java plugins in Logstash.
+
+
+## Process overview [_process_overview]
+
+Here are the steps:
+
+1. Choose the type of plugin you want to create: input, codec, filter, or output.
+2. Set up your environment.
+3. Code the plugin.
+4. Package and deploy the plugin.
+5. Run Logstash with your new plugin.
+
+
+### Let’s get started [_lets_get_started]
+
+Here are the example repos:
+
+* [Input plugin example](https://github.com/logstash-plugins/logstash-input-java_input_example)
+* [Codec plugin example](https://github.com/logstash-plugins/logstash-codec-java_codec_example)
+* [Filter plugin example](https://github.com/logstash-plugins/logstash-filter-java_filter_example)
+* [Output plugin example](https://github.com/logstash-plugins/logstash-output-java_output_example)
+
+Here are the instructions:
+
+* [How to write a Java input plugin](/extend/java-input-plugin.md)
+* [How to write a Java codec plugin](/extend/java-codec-plugin.md)
+* [How to write a Java filter plugin](/extend/java-filter-plugin.md)
+* [How to write a Java output plugin](/extend/java-output-plugin.md)
+
+
+
+
+
diff --git a/docs/extend/filter-new-plugin.md b/docs/extend/filter-new-plugin.md
new file mode 100644
index 000000000..0ea01f0ee
--- /dev/null
+++ b/docs/extend/filter-new-plugin.md
@@ -0,0 +1,637 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/filter-new-plugin.html
+---
+
+# How to write a Logstash filter plugin [filter-new-plugin]
+
+To develop a new filter for Logstash, build a self-contained Ruby gem whose source code lives in its own GitHub repository. The Ruby gem can then be hosted and shared on RubyGems.org. You can use the example filter implementation as a starting point. (If you’re unfamiliar with Ruby, you can find an excellent quickstart guide at [https://www.ruby-lang.org/en/documentation/quickstart/](https://www.ruby-lang.org/en/documentation/quickstart/).)
+
+## Get started [_get_started_3]
+
+Let’s step through creating a filter plugin using the [example filter plugin](https://github.com/logstash-plugins/logstash-filter-example/).
+
+### Create a GitHub repo for your new plugin [_create_a_github_repo_for_your_new_plugin_3]
+
+Each Logstash plugin lives in its own GitHub repository. To create a new repository for your plugin:
+
+1. Log in to GitHub.
+2. Click the **Repositories** tab. You’ll see a list of other repositories you’ve forked or contributed to.
+3. Click the green **New** button in the upper right.
+4. Specify the following settings for your new repo:
+
+ * **Repository name** — a unique name of the form `logstash-filter-pluginname`.
+ * **Public or Private** — your choice, but the repository must be Public if you want to submit it as an official plugin.
+ * **Initialize this repository with a README** — enables you to immediately clone the repository to your computer.
+
+5. Click **Create Repository**.
+
+
+### Use the plugin generator tool [_use_the_plugin_generator_tool_3]
+
+You can create your own Logstash plugin in seconds! The `generate` subcommand of `bin/logstash-plugin` creates the foundation for a new Logstash plugin with templatized files. It creates the correct directory structure, gemspec files, and dependencies so you can start adding custom code to process data with Logstash.
+
+For more information, see [Generating plugins](/reference/plugin-generator.md)
+
+
+### Copy the filter code [_copy_the_filter_code]
+
+Alternatively, you can use the examples repo we host on github.com.
+
+1. **Clone your plugin.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-filter-MYPLUGINNAME.git`
+
+    * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-filter-MYPLUGINNAME.git`
+
+ * `cd logstash-filter-MYPLUGINNAME`
+
+2. **Clone the filter plugin example and copy it to your plugin branch.**
+
+ You don’t want to include the example .git directory or its contents, so delete it before you copy the example.
+
+ * `cd /tmp`
+    * `git clone https://github.com/logstash-plugins/logstash-filter-example.git`
+ * `cd logstash-filter-example`
+ * `rm -rf .git`
+ * `cp -R * /path/to/logstash-filter-mypluginname/`
+
+3. **Rename the following files to match the name of your plugin.**
+
+ * `logstash-filter-example.gemspec`
+ * `example.rb`
+ * `example_spec.rb`
+
+ ```txt
+ cd /path/to/logstash-filter-mypluginname
+ mv logstash-filter-example.gemspec logstash-filter-mypluginname.gemspec
+ mv lib/logstash/filters/example.rb lib/logstash/filters/mypluginname.rb
+ mv spec/filters/example_spec.rb spec/filters/mypluginname_spec.rb
+ ```
+
+
+Your file structure should look like this:
+
+```txt
+$ tree logstash-filter-mypluginname
+├── Gemfile
+├── LICENSE
+├── README.md
+├── Rakefile
+├── lib
+│ └── logstash
+│ └── filters
+│ └── mypluginname.rb
+├── logstash-filter-mypluginname.gemspec
+└── spec
+ └── filters
+ └── mypluginname_spec.rb
+```
+
+For more information about the Ruby gem file structure and an excellent walkthrough of the Ruby gem creation process, see [http://timelessrepo.com/making-ruby-gems](http://timelessrepo.com/making-ruby-gems)
+
+
+### See what your plugin looks like [_see_what_your_plugin_looks_like_3]
+
+Before we dive into the details, open up the plugin file in your favorite text editor and take a look.
+
+```ruby
+require "logstash/filters/base"
+require "logstash/namespace"
+
+# Add any asciidoc formatted documentation here
+# This example filter will replace the contents of the default
+# message field with whatever you specify in the configuration.
+#
+# It is only intended to be used as an example.
+class LogStash::Filters::Example < LogStash::Filters::Base
+
+ # Setting the config_name here is required. This is how you
+ # configure this filter from your Logstash config.
+ #
+ # filter {
+ # example { message => "My message..." }
+ # }
+ config_name "example"
+
+ # Replace the message with this value.
+ config :message, :validate => :string, :default => "Hello World!"
+
+
+ public
+ def register
+ # Add instance variables
+ end # def register
+
+ public
+ def filter(event)
+
+ if @message
+ # Replace the event message with our message as configured in the
+ # config file.
+ event.set("message", @message)
+ end
+
+ # filter_matched should go in the last line of our successful code
+ filter_matched(event)
+ end # def filter
+
+end # class LogStash::Filters::Example
+```
+
+
+
+## Coding filter plugins [_coding_filter_plugins]
+
+Now let’s take a line-by-line look at the example plugin.
+
+### `require` Statements [_require_statements_3]
+
+Logstash filter plugins require parent classes defined in `logstash/filters/base` and `logstash/namespace`:
+
+```ruby
+require "logstash/filters/base"
+require "logstash/namespace"
+```
+
+Of course, the plugin you build may depend on other code, or even gems. Just put them here along with these Logstash dependencies.
+
+
+
+## Plugin Body [_plugin_body_3]
+
+Let’s go through the various elements of the plugin itself.
+
+### `class` Declaration [_class_declaration_3]
+
+The filter plugin class should be a subclass of `LogStash::Filters::Base`:
+
+```ruby
+class LogStash::Filters::Example < LogStash::Filters::Base
+```
+
+The class name should closely mirror the plugin name, for example:
+
+```ruby
+LogStash::Filters::Example
+```
+
+
+### `config_name` [_config_name_3]
+
+```ruby
+ config_name "example"
+```
+
+This is the name your plugin will call inside the filter configuration block.
+
+If you set `config_name "example"` in your plugin code, the corresponding Logstash configuration block would need to look like this:
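+
+```ruby
+filter {
+  example {...}
+}
+```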
+
+
+
+## Configuration Parameters [_configuration_parameters_3]
+
+```ruby
+ config :variable_name, :validate => :variable_type, :default => "Default value", :required => boolean, :deprecated => boolean, :obsolete => string
+```
+
+The configuration, or `config` section allows you to define as many (or as few) parameters as are needed to enable Logstash to process events.
+
+There are several configuration attributes:
+
+* `:validate` - allows you to enforce passing a particular data type to Logstash for this configuration option, such as `:string`, `:password`, `:boolean`, `:number`, `:array`, `:hash`, `:path` (a file-system path), `:uri`, `:codec` (since 1.2.0), `:bytes`. Note that this also works as a coercion in that if you specify "true" for a boolean (even though technically a string), it will become a valid boolean in the config. This coercion works for the `:number` type as well, where "1.2" becomes a float and "22" is an integer.
+* `:default` - lets you specify a default value for a parameter
+* `:required` - whether or not this parameter is mandatory (a Boolean `true` or `false`)
+* `:list` - whether or not this value should be a list of values. Will typecheck the list members, and convert scalars to one element lists. Note that this mostly obviates the array type, though if you need lists of complex objects that will be more suitable.
+* `:deprecated` - informational (also a Boolean `true` or `false`)
+* `:obsolete` - used to declare that a given setting has been removed and is no longer functioning. The idea is to provide an informed upgrade path to users who are still using a now-removed setting.
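+
+For example, a plugin might declare a few options like these (the option names are illustrative):
+
+```ruby
+  # A required string option with no default value.
+  config :host, :validate => :string, :required => true
+
+  # A numeric option with a default value.
+  config :port, :validate => :number, :default => 5000
+
+  # An informational deprecation flag.
+  config :legacy_mode, :validate => :boolean, :default => false, :deprecated => true
+```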
+
+
+## Plugin Methods [_plugin_methods_3]
+
+Logstash filters must implement the `register` and `filter` methods.
+
+### `register` Method [_register_method_3]
+
+```ruby
+ public
+ def register
+ end # def register
+```
+
+The Logstash `register` method is like an `initialize` method. It was originally created to enforce having `super` called, preventing headaches for newbies. (Note: It may go away in favor of `initialize`, in conjunction with some enforced testing to ensure `super` is called.)
+
+`public` means the method can be called anywhere, not just within the class. This is the default behavior for methods in Ruby, but it is specified explicitly here anyway.
+
+You can also assign instance variables here (variables prepended by `@`). Configuration variables are now in scope as instance variables, like `@message`.
+
+
+### `filter` Method [_filter_method]
+
+```ruby
+ public
+ def filter(event)
+
+ if @message
+ # Replace the event message with our message as configured in the
+ # config file.
+ event.set("message", @message)
+ end
+
+ # filter_matched should go in the last line of our successful code
+ filter_matched(event)
+  end # def filter
+```
+
+The plugin’s `filter` method is where the actual filtering work takes place! Inside the `filter` method you can refer to the event data using the `Event` object. Event is the main object that encapsulates data flow internally in Logstash and provides an [API](/reference/event-api.md) for the plugin developers to interact with the event’s content.
+
+The `filter` method should also handle any [event dependent configuration](/reference/event-dependent-configuration.md) by explicitly calling the `sprintf` method available in Event class. For example:
+
+```ruby
+field_foo = event.sprintf(field)
+```
+
+Note that configuration variables are now in scope as instance variables, like `@message`.
+
+```ruby
+ filter_matched(event)
+```
+
+Calling the `filter_matched` method upon successful execution of the plugin will ensure that any fields or tags added through the Logstash configuration for this filter will be handled correctly. For example, any `add_field`, `remove_field`, `add_tag` and/or `remove_tag` actions will be performed at this time.
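+
+For instance, with a configuration like the following (the tag name is illustrative), the `add_tag` action is applied only when the event reaches `filter_matched`:
+
+```ruby
+filter {
+  example {
+    message => "Hello World!"
+    add_tag => ["example_processed"]
+  }
+}
+```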
+
+Event methods such as `event.cancel` are now available to control the workflow of the event being processed.
+
+
+
+## Building the Plugin [_building_the_plugin_3]
+
+At this point in the process you have coded your plugin and are ready to build a Ruby Gem from it. The following information will help you complete the process.
+
+### External dependencies [_external_dependencies_3]
+
+A `require` statement in Ruby is used to include necessary code. In some cases your plugin may require additional files. For example, the collectd plugin [uses](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/lib/logstash/codecs/collectd.rb#L148) the `types.db` file provided by collectd. In the main directory of your plugin, a file called `vendor.json` is where these files are described.
+
+The `vendor.json` file contains an array of JSON objects, each describing a file dependency. This example comes from the [collectd](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/vendor.json) codec plugin:
+
+```txt
+[{
+ "sha1": "a90fe6cc53b76b7bdd56dc57950d90787cb9c96e",
+ "url": "http://collectd.org/files/collectd-5.4.0.tar.gz",
+ "files": [ "/src/types.db" ]
+}]
+```
+
+* `sha1` is the sha1 signature used to verify the integrity of the file referenced by `url`.
+* `url` is the address from where Logstash will download the file.
+* `files` is an optional array of files to extract from the downloaded file. Note that while tar archives can use absolute or relative paths, treat them as absolute in this array. If `files` is not present, all files will be uncompressed and extracted into the vendor directory.
+
+Another example of the `vendor.json` file is the [`geoip` filter](https://github.com/logstash-plugins/logstash-filter-geoip/blob/main/vendor.json)
+
+The process used to download these dependencies is to call `rake vendor`. This will be discussed further in the testing section of this document.
+
+Another kind of external dependency is on jar files. This will be described in the "Add a `gemspec` file" section.
+
+
+### Deprecated features [_deprecated_features_3]
+
+As a plugin evolves, an option or feature may no longer serve the intended purpose, and the developer may want to *deprecate* its usage. Deprecation warns users about the option’s status, so they aren’t caught by surprise when it is removed in a later release.
+
+{{ls}} 7.6 introduced a *deprecation logger* to make handling those situations easier. You can use the [adapter](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support) to ensure that your plugin can use the deprecation logger while still supporting older versions of {{ls}}. See the [readme](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support/blob/main/README.md) for more information and for instructions on using the adapter.
+
+Deprecations are noted in the `logstash-deprecation.log` file in the `log` directory.
+
+
+### Add a Gemfile [_add_a_gemfile_3]
+
+Gemfiles allow Ruby’s Bundler to maintain the dependencies for your plugin. Currently, all we need is the Logstash gem, for testing, but if you require other gems, you should add them here.
+
+::::{tip}
+See [Bundler’s Gemfile page](http://bundler.io/gemfile.html) for more details.
+::::
+
+
+```ruby
+source 'https://rubygems.org'
+gemspec
+gem "logstash", :github => "elastic/logstash", :branch => "master"
+```
+
+
+
+## Add a `gemspec` file [_add_a_gemspec_file_3]
+
+Gemspecs define the Ruby gem which will be built and contain your plugin.
+
+::::{tip}
+More information can be found on the [Rubygems Specification page](http://guides.rubygems.org/specification-reference/).
+::::
+
+
+```ruby
+Gem::Specification.new do |s|
+ s.name = 'logstash-filter-example'
+ s.version = '0.1.0'
+ s.licenses = ['Apache License (2.0)']
+ s.summary = "This filter does x, y, z in Logstash"
+ s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+ s.authors = ["Elastic"]
+ s.email = 'info@elastic.co'
+ s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+ s.require_paths = ["lib"]
+
+ # Files
+ s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+ # Tests
+ s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+ # Special flag to let us know this is actually a logstash plugin
+ s.metadata = { "logstash_plugin" => "true", "logstash_group" => "filter" }
+
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+end
+```
+
+It is appropriate to change these values to fit your plugin. In particular, `s.name` and `s.summary` should reflect your plugin’s name and behavior.
+
+`s.licenses` and `s.version` are also important and will come into play when you are ready to publish your plugin.
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elastic/logstash/blob/main/LICENSE.txt). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+The gem version, designated by `s.version`, helps track changes to plugins over time. You should use [semver versioning](http://semver.org/) strategy for version numbers.
+
+### Runtime and Development Dependencies [_runtime_and_development_dependencies_3]
+
+At the bottom of the `gemspec` file is a section with a comment: `Gem dependencies`. This is where any other needed gems must be mentioned. If a gem is necessary for your plugin to function, it is a runtime dependency. If a gem is only used for testing, then it is a development dependency.
+
+::::{note}
+You can also have versioning requirements for your dependencies—including other Logstash plugins:
+
+```ruby
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+```
+
+This gemspec has a runtime dependency on the logstash-core-plugin-api and requires that it have a version number greater than or equal to version 1.60 and less than or equal to version 2.99.
+
+::::
+
+
+::::{important}
+All plugins have a runtime dependency on the `logstash-core-plugin-api` gem, and a development dependency on `logstash-devutils`.
+::::
+
+
+
+### Jar dependencies [_jar_dependencies_3]
+
+In some cases, such as the [Elasticsearch output plugin](https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/main/logstash-output-elasticsearch.gemspec#L22-L23), your code may depend on a jar file. In cases such as this, the dependency is added in the gemspec file in this manner:
+
+```ruby
+ # Jar dependencies
+ s.requirements << "jar 'org.elasticsearch:elasticsearch', '5.0.0'"
+ s.add_runtime_dependency 'jar-dependencies'
+```
+
+With these both defined, the install process will search for the required jar file at [http://mvnrepository.com](http://mvnrepository.com) and download the specified version.
+
+
+
+## Document your plugin [_document_your_plugin_3]
+
+Documentation is an important part of your plugin. All plugin documentation is rendered and placed in the [Logstash Reference](/reference/index.md) and the [Versioned plugin docs](logstash-docs://reference/integration-plugins.md).
+
+See [Document your plugin](/extend/plugin-doc.md) for tips and guidelines.
+
+
+## Add Tests [_add_tests_3]
+
+Logstash loves tests. Lots of tests. If you’re using your new filter plugin in a production environment, you’ll want to have some tests to ensure you are not breaking any existing functionality.
+
+::::{note}
+A full exposition on RSpec is outside the scope of this document. Learn more about RSpec at [http://rspec.info](http://rspec.info)
+::::
+
+
+For help learning about tests and testing, look in the `spec/filters/` directory of several other similar plugins.
+
+
+## Clone and test! [_clone_and_test_3]
+
+Now let’s start with a fresh clone of the plugin, build it and run the tests.
+
+* **Clone your plugin into a temporary location.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+  * `git clone https://github.com/GITUSERNAME/logstash-filter-MYPLUGINNAME.git`
+
+  * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-filter-MYPLUGINNAME.git`
+
+ * `cd logstash-filter-MYPLUGINNAME`
+
+
+Then, you’ll need to install your plugin’s dependencies with bundler:
+
+```sh
+bundle install
+```
+
+::::{important}
+If your plugin has an external file dependency described in `vendor.json`, you must download that dependency before running or testing. You can do this by running:
+
+```sh
+rake vendor
+```
+
+::::
+
+
+And finally, run the tests:
+
+```sh
+bundle exec rspec
+```
+
+You should see a success message, which looks something like this:
+
+```shell
+Finished in 0.034 seconds
+1 example, 0 failures
+```
+
+Hooray! You’re almost there! (Unless you saw failures… you should fix those first).
+
+
+## Building and Testing [_building_and_testing_3]
+
+Now you’re ready to build your (well-tested) plugin into a Ruby gem.
+
+### Build [_build_3]
+
+You already have all the necessary ingredients, so let’s go ahead and run the build command:
+
+```sh
+gem build logstash-filter-example.gemspec
+```
+
+That’s it! Your gem should be built and located in the same path, with the name
+
+```sh
+logstash-filter-mypluginname-0.1.0.gem
+```
+
+The `s.version` number from your gemspec file will provide the gem version, in this case, `0.1.0`.
+
+
+### Test installation [_test_installation_3]
+
+You should test install your plugin into a clean installation of Logstash. Download the latest version from the [Logstash downloads page](https://www.elastic.co/downloads/logstash/).
+
+1. Untar and cd into the directory:
+
+ ```sh
+ curl -O https://download.elastic.co/logstash/logstash/logstash-9.0.0.tar.gz
+ tar xzvf logstash-9.0.0.tar.gz
+ cd logstash-9.0.0
+ ```
+
+2. Using the plugin tool, we can install the gem we just built.
+
+ * Replace `/my/logstash/plugins` with the correct path to the gem for your environment, and `0.1.0` with the correct version number from the gemspec file.
+
+ ```sh
+ bin/logstash-plugin install /my/logstash/plugins/logstash-filter-example/logstash-filter-example-0.1.0.gem
+ ```
+
+ * After running this, you should see feedback from Logstash that it was successfully installed:
+
+ ```sh
+ validating /my/logstash/plugins/logstash-filter-example/logstash-filter-example-0.1.0.gem >= 0
+ Valid logstash plugin. Continuing...
+ Successfully installed 'logstash-filter-example' with version '0.1.0'
+ ```
+
+ ::::{tip}
+ You can also use the Logstash plugin tool to determine which plugins are currently available:
+
+ ```sh
+ bin/logstash-plugin list
+ ```
+
+ Depending on what you have installed, you might see a short or long list of plugins: inputs, codecs, filters and outputs.
+
+ ::::
+
+3. Now try running Logstash with a simple configuration passed in via the command-line, using the `-e` flag.
+
+ ::::{note}
+ Your results will depend on what your filter plugin is designed to do.
+ ::::
+
+
+```sh
+bin/logstash -e 'input { stdin{} } filter { example {} } output {stdout { codec => rubydebug }}'
+```
+
+Test your filter by sending input through `stdin` and output (after filtering) through `stdout` with the `rubydebug` codec, which enhances readability.
+
+In the case of the example filter plugin, any text you send will be replaced by the contents of the `message` configuration parameter, the default value being "Hello World!":
+
+```sh
+Testing 1, 2, 3
+{
+ "message" => "Hello World!",
+ "@version" => "1",
+ "@timestamp" => "2015-01-27T19:17:18.932Z",
+ "host" => "cadenza"
+}
+```
+
+Feel free to experiment and test this by changing the `message` parameter:
+
+```sh
+bin/logstash -e 'input { stdin{} } filter { example { message => "This is a new message!"} } output {stdout { codec => rubydebug }}'
+```
+
+Congratulations! You’ve built, deployed and successfully run a Logstash filter.
+
+
+
+## Submitting your plugin to [RubyGems.org](http://rubygems.org) and [logstash-plugins](https://github.com/logstash-plugins) [_submitting_your_plugin_to_rubygems_orghttprubygems_org_and_logstash_pluginshttpsgithub_comlogstash_plugins_3]
+
+Logstash uses [RubyGems.org](http://rubygems.org) as its repository for all plugin artifacts. Once you have developed your new plugin, you can make it available to Logstash users by simply publishing it to RubyGems.org.
+
+### Licensing [_licensing_3]
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elasticsearch/logstash/blob/main/LICENSE). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+
+### Publishing to [RubyGems.org](http://rubygems.org) [_publishing_to_rubygems_orghttprubygems_org_3]
+
+To begin, you’ll need an account on RubyGems.org.
+
+* [Sign-up for a RubyGems account](https://rubygems.org/sign_up).
+
+After creating an account, [obtain](http://guides.rubygems.org/rubygems-org-api/#api-authorization) an API key from RubyGems.org. By default, RubyGems uses the file `~/.gem/credentials` to store your API key. These credentials will be used to publish the gem. Replace `username` and `password` with the credentials you created at RubyGems.org:
+
+```sh
+curl -u username:password https://rubygems.org/api/v1/api_key.yaml > ~/.gem/credentials
+chmod 0600 ~/.gem/credentials
+```
+
+Before proceeding, make sure you have the right version in your gemspec file and commit your changes.
+
+* `s.version = '0.1.0'`
+
+To publish version 0.1.0 of your new logstash gem:
+
+```sh
+bundle install
+bundle exec rake vendor
+bundle exec rspec
+bundle exec rake publish_gem
+```
+
+::::{note}
+Executing `rake publish_gem`:
+
+1. Reads the version from the gemspec file (`s.version = '0.1.0'`)
+2. Checks in your local repository if a tag exists for that version. If the tag already exists, it aborts the process. Otherwise, it creates a new version tag in your local repository.
+3. Builds the gem
+4. Publishes the gem to RubyGems.org
+
+::::
+
+
+That’s it! Your plugin is published! Logstash users can now install your plugin by running:
+
+```sh
+bin/logstash-plugin install logstash-filter-mypluginname
+```
+
+
+
+## Contributing your source code to [logstash-plugins](https://github.com/logstash-plugins) [_contributing_your_source_code_to_logstash_pluginshttpsgithub_comlogstash_plugins_3]
+
+It is not required to contribute your source code to the [logstash-plugins](https://github.com/logstash-plugins) GitHub organization, but we always welcome new plugins!
+
+### Benefits [_benefits_3]
+
+Some of the many benefits of having your plugin in the logstash-plugins repository are:
+
+* **Discovery.** Your plugin will appear in the [Logstash Reference](/reference/index.md), where Logstash users look first for plugins and documentation.
+* **Documentation.** Your plugin documentation will automatically be added to the [Logstash Reference](/reference/index.md).
+* **Testing.** With our testing infrastructure, your plugin will be continuously tested against current and future releases of Logstash. As a result, users will have the assurance that if incompatibilities arise, they will be quickly discovered and corrected.
+
+
+### Acceptance Guidelines [_acceptance_guidelines_3]
+
+* **Code Review.** Your plugin must be reviewed by members of the community for coherence, quality, readability, stability and security.
+* **Tests.** Your plugin must contain tests to be accepted. These tests are also subject to code review for scope and completeness. It’s ok if you don’t know how to write tests — we will guide you. We are working on publishing a guide to creating tests for Logstash which will make it easier. In the meantime, you can refer to [http://betterspecs.org/](http://betterspecs.org/) for examples.
+
+To begin migrating your plugin to logstash-plugins, simply create a new [issue](https://github.com/elasticsearch/logstash/issues) in the Logstash repository. When the acceptance guidelines are completed, we will facilitate the move to the logstash-plugins organization using the recommended [github process](https://help.github.com/articles/transferring-a-repository/#transferring-from-a-user-to-an-organization).
diff --git a/docs/extend/index.md b/docs/extend/index.md
new file mode 100644
index 000000000..f4ed32f89
--- /dev/null
+++ b/docs/extend/index.md
@@ -0,0 +1,58 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/contributing-to-logstash.html
+---
+
+# Contributing to Logstash [contributing-to-logstash]
+
+You can add your own input, codec, filter, or output plugins to Logstash.
+
+
+### Acceptance guidelines [plugin-acceptance]
+
+Start with the end in mind. These guidelines and best practices can help you build a better plugin, even if you choose not to share it with the world.
+
+* **Consistency.** Your plugin must be consistent with the quality and naming conventions used by other plugins. The plugin name must be unique and in this format: `logstash-plugintype-pluginname`. If the plugin name is more than one word, separate words after plugin type with underscores. Example: *logstash-output-elastic_app_search*
+* **Documentation.** Documentation is a required component of your plugin. If we list your plugin in the Logstash Reference, we point to your documentation—a readme.md, docs/index.asciidoc, or both—in your plugin repo.
+* **Code Review.** Your plugin must be reviewed by members of the community for coherence, quality, readability, stability and security.
+* **Tests.** Your plugin must contain tests to be accepted. You can refer to [http://betterspecs.org/](http://betterspecs.org/) for examples.
+
+ * Step 1. Enable travis on your account
+ * Step 2. Import our standard travis.yml [https://github.com/logstash-plugins/.ci/blob/1.x/travis/travis.yml](https://github.com/logstash-plugins/.ci/blob/1.x/travis/travis.yml), as shown in the [fingerprint filter example](https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/main/.travis.yml).
+ * Step 3. Have specs in the spec folder.
+
+
+
+## Add a plugin [add-plugin]
+
+Plugins can be developed and deployed independently of the Logstash core. Here are some documents to guide you through the process of coding, deploying, and sharing your plugin:
+
+* Write a new plugin
+
+ * [How to write a Logstash input plugin](/extend/input-new-plugin.md)
+ * [How to write a Logstash codec plugin](/extend/codec-new-plugin.md)
+ * [How to write a Logstash filter plugin](/extend/filter-new-plugin.md)
+ * [How to write a Logstash output plugin](/extend/output-new-plugin.md)
+ * [Community Maintainer’s Guide](/extend/community-maintainer.md)
+
+* [Document your plugin](/extend/plugin-doc.md)
+* [Publish your plugin to RubyGems.org](/extend/publish-plugin.md)
+* [List your plugin](/extend/plugin-listing.md)
+* Contribute a patch
+
+ * [Contributing a patch to a Logstash plugin](/extend/contributing-patch-plugin.md)
+ * [Extending Logstash core](/extend/contribute-to-core.md)
+
+
+
+#### Plugin Shutdown APIs [shutdown-apis]
+
+You have three options for shutting down a plugin: `stop`, `stop?`, and `close`.
+
+* Call the `stop` method from outside the plugin thread. This method signals the plugin to stop.
+* The `stop?` method returns `true` when the `stop` method has already been called for that plugin.
+* The `close` method performs final bookkeeping and cleanup after the plugin’s `run` method and the plugin’s thread both exit. The `close` method is a new name for the method known as `teardown` in previous versions of Logstash.
+
+The `shutdown`, `finished`, `finished?`, `running?`, and `terminating?` methods are redundant and no longer present in the Plugin Base class.
+
+Sample code for the plugin shutdown APIs is [available](https://github.com/logstash-plugins/logstash-input-example/blob/main/lib/logstash/inputs/example.rb).
diff --git a/docs/extend/input-new-plugin.md b/docs/extend/input-new-plugin.md
new file mode 100644
index 000000000..c42daac4e
--- /dev/null
+++ b/docs/extend/input-new-plugin.md
@@ -0,0 +1,674 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/input-new-plugin.html
+---
+
+# How to write a Logstash input plugin [input-new-plugin]
+
+To develop a new input for Logstash, build a self-contained Ruby gem whose source code lives in its own GitHub repository. The Ruby gem can then be hosted and shared on RubyGems.org. You can use the example input implementation as a starting point. (If you’re unfamiliar with Ruby, you can find an excellent quickstart guide at [https://www.ruby-lang.org/en/documentation/quickstart/](https://www.ruby-lang.org/en/documentation/quickstart/).)
+
+## Get started [_get_started]
+
+Let’s step through creating an input plugin using the [example input plugin](https://github.com/logstash-plugins/logstash-input-example/).
+
+### Create a GitHub repo for your new plugin [_create_a_github_repo_for_your_new_plugin]
+
+Each Logstash plugin lives in its own GitHub repository. To create a new repository for your plugin:
+
+1. Log in to GitHub.
+2. Click the **Repositories** tab. You’ll see a list of other repositories you’ve forked or contributed to.
+3. Click the green **New** button in the upper right.
+4. Specify the following settings for your new repo:
+
+ * **Repository name** — a unique name of the form `logstash-input-pluginname`.
+ * **Public or Private** — your choice, but the repository must be Public if you want to submit it as an official plugin.
+ * **Initialize this repository with a README** — enables you to immediately clone the repository to your computer.
+
+5. Click **Create Repository**.
+
+
+### Use the plugin generator tool [_use_the_plugin_generator_tool]
+
+You can create your own Logstash plugin in seconds! The `generate` subcommand of `bin/logstash-plugin` creates the foundation for a new Logstash plugin with templatized files. It creates the correct directory structure, gemspec files, and dependencies so you can start adding custom code to process data with Logstash.
+
+For more information, see [Generating plugins](/reference/plugin-generator.md).
+
+
+### Copy the input code [_copy_the_input_code]
+
+Alternatively, you can start from the example repo we host on github.com:
+
+1. **Clone your plugin.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-input-MYPLUGINNAME.git`
+
+    * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-input-MYPLUGINNAME.git`
+
+ * `cd logstash-input-MYPLUGINNAME`
+
+2. **Clone the input plugin example and copy it to your plugin branch.**
+
+ You don’t want to include the example .git directory or its contents, so delete it before you copy the example.
+
+ * `cd /tmp`
+    * `git clone https://github.com/logstash-plugins/logstash-input-example.git`
+ * `cd logstash-input-example`
+ * `rm -rf .git`
+ * `cp -R * /path/to/logstash-input-mypluginname/`
+
+3. **Rename the following files to match the name of your plugin.**
+
+ * `logstash-input-example.gemspec`
+ * `example.rb`
+ * `example_spec.rb`
+
+ ```txt
+ cd /path/to/logstash-input-mypluginname
+ mv logstash-input-example.gemspec logstash-input-mypluginname.gemspec
+ mv lib/logstash/inputs/example.rb lib/logstash/inputs/mypluginname.rb
+ mv spec/inputs/example_spec.rb spec/inputs/mypluginname_spec.rb
+ ```
+
+
+Your file structure should look like this:
+
+```txt
+$ tree logstash-input-mypluginname
+├── Gemfile
+├── LICENSE
+├── README.md
+├── Rakefile
+├── lib
+│ └── logstash
+│ └── inputs
+│ └── mypluginname.rb
+├── logstash-input-mypluginname.gemspec
+└── spec
+ └── inputs
+ └── mypluginname_spec.rb
+```
+
+For more information about the Ruby gem file structure and an excellent walkthrough of the Ruby gem creation process, see [http://timelessrepo.com/making-ruby-gems](http://timelessrepo.com/making-ruby-gems)
+
+
+### See what your plugin looks like [_see_what_your_plugin_looks_like]
+
+Before we dive into the details, open up the plugin file in your favorite text editor and take a look.
+
+```ruby
+require "logstash/inputs/base"
+require "logstash/namespace"
+require "stud/interval"
+require "socket" # for Socket.gethostname
+
+# Add any asciidoc formatted documentation here
+# Generate a repeating message.
+#
+# This plugin is intended only as an example.
+
+class LogStash::Inputs::Example < LogStash::Inputs::Base
+ config_name "example"
+
+ # If undefined, Logstash will complain, even if codec is unused.
+ default :codec, "plain"
+
+ # The message string to use in the event.
+ config :message, :validate => :string, :default => "Hello World!"
+
+ # Set how frequently messages should be sent.
+ #
+ # The default, `1`, means send a message every second.
+ config :interval, :validate => :number, :default => 1
+
+ public
+ def register
+ @host = Socket.gethostname
+ end # def register
+
+ def run(queue)
+ Stud.interval(@interval) do
+ event = LogStash::Event.new("message" => @message, "host" => @host)
+ decorate(event)
+ queue << event
+ end # loop
+ end # def run
+
+end # class LogStash::Inputs::Example
+```
+
+
+
+## Coding input plugins [_coding_input_plugins]
+
+Now let’s take a line-by-line look at the example plugin.
+
+### `require` Statements [_require_statements]
+
+Logstash input plugins require parent classes defined in `logstash/inputs/base` and `logstash/namespace`:
+
+```ruby
+require "logstash/inputs/base"
+require "logstash/namespace"
+```
+
+Of course, the plugin you build may depend on other code, or even gems. Just put them here along with these Logstash dependencies.
+
+
+
+## Plugin Body [_plugin_body]
+
+Let’s go through the various elements of the plugin itself.
+
+### `class` Declaration [_class_declaration]
+
+The input plugin class should be a subclass of `LogStash::Inputs::Base`:
+
+```ruby
+class LogStash::Inputs::Example < LogStash::Inputs::Base
+```
+
+The class name should closely mirror the plugin name, for example:
+
+```ruby
+LogStash::Inputs::Example
+```
+
+
+### `config_name` [_config_name]
+
+```ruby
+ config_name "example"
+```
+
+This is the name used to reference your plugin inside the input configuration block.
+
+If you set `config_name "example"` in your plugin code, the corresponding Logstash configuration block would need to look like this:
+
+```js
+input {
+ example {...}
+}
+```
+
+
+
+## Configuration Parameters [_configuration_parameters]
+
+```ruby
+ config :variable_name, :validate => :variable_type, :default => "Default value", :required => boolean, :deprecated => boolean, :obsolete => string
+```
+
+The configuration, or `config`, section allows you to define as many (or as few) parameters as are needed to enable Logstash to process events.
+
+There are several configuration attributes:
+
+* `:validate` - allows you to enforce passing a particular data type to Logstash for this configuration option, such as `:string`, `:password`, `:boolean`, `:number`, `:array`, `:hash`, `:path` (a file-system path), `:uri`, `:codec` (since 1.2.0), or `:bytes`. Note that this also works as a coercion: if you specify "true" for a boolean option (even though it is technically a string), it becomes a valid boolean in the config. This coercion works for the `:number` type as well, where "1.2" becomes a float and "22" an integer.
+* `:default` - lets you specify a default value for a parameter
+* `:required` - whether or not this parameter is mandatory (a Boolean `true` or `false`)
+* `:list` - whether or not this value should be a list of values. The list members will be typechecked, and scalars will be converted to one-element lists. Note that this mostly obviates the array type, though if you need lists of complex objects, the array type is more suitable.
+* `:deprecated` - informational (also a Boolean `true` or `false`)
+* `:obsolete` - used to declare that a given setting has been removed and is no longer functioning. The idea is to provide an informed upgrade path to users who are still using a now-removed setting.
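+
+For illustration only, here is a hypothetical set of option declarations (the option names are made up for this example) showing several of these attributes in use:
+
+```ruby
+  # A mandatory string option; Logstash refuses to start the pipeline without it.
+  config :host, :validate => :string, :required => true
+
+  # A number option with a default; "5000" in the config file is coerced to 5000.
+  config :port, :validate => :number, :default => 5000
+
+  # A list of strings; a single scalar value is converted to a one-element list.
+  config :paths, :validate => :string, :list => true, :default => []
+
+  # Still accepted, but logs a deprecation warning when used.
+  config :use_ssl, :validate => :boolean, :default => false, :deprecated => "Set `ssl_enabled` instead."
+
+  # Removed setting; using it raises a configuration error with this message.
+  config :legacy_mode, :obsolete => "This setting has been removed and no longer has any effect."
+```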
+
+
+## Plugin Methods [_plugin_methods]
+
+Logstash inputs must implement two main methods: `register` and `run`.
+
+### `register` Method [_register_method]
+
+```ruby
+ public
+ def register
+ end # def register
+```
+
+The Logstash `register` method is like an `initialize` method. It was originally created to enforce having `super` called, preventing headaches for newbies. (Note: It may go away in favor of `initialize`, in conjunction with some enforced testing to ensure `super` is called.)
+
+`public` means the method can be called anywhere, not just within the class. This is the default behavior for methods in Ruby, but it is specified explicitly here anyway.
+
+You can also assign instance variables here (variables prepended by `@`). Configuration variables are now in scope as instance variables, like `@message`.
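+
+As a small, hypothetical sketch, `register` might pre-compute a value from the plugin’s settings so that `run` does not have to:
+
+```ruby
+  public
+  def register
+    @host = Socket.gethostname
+    # `@message` comes from the `config :message, ...` declaration above;
+    # the derived variable below is purely illustrative.
+    @prepared_message = "#{@message} (from #{@host})"
+  end # def register
+```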
+
+
+### `run` Method [_run_method]
+
+The example input plugin has the following `run` Method:
+
+```ruby
+ def run(queue)
+ Stud.interval(@interval) do
+ event = LogStash::Event.new("message" => @message, "host" => @host)
+ decorate(event)
+ queue << event
+ end # loop
+ end # def run
+```
+
+The `run` method is where a stream of data from an input becomes an event.
+
+The stream can be plain or generated as with the [heartbeat](https://github.com/logstash-plugins/logstash-input-heartbeat/blob/main/lib/logstash/inputs/heartbeat.rb#L43-L61) input plugin. In these cases, though no codec is used, [a default codec](https://github.com/logstash-plugins/logstash-input-heartbeat/blob/main/lib/logstash/inputs/heartbeat.rb#L17) must be set in the code to avoid errors.
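+
+For reference, these are the lines in the example plugin above that declare the default codec:
+
+```ruby
+  # If undefined, Logstash will complain, even if codec is unused.
+  default :codec, "plain"
+```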
+
+Here’s another example `run` method:
+
+```ruby
+ def run(queue)
+ while true
+ begin
+ # Based on some testing, there is no way to interrupt an IO.sysread nor
+ # IO.select call in JRuby.
+ data = $stdin.sysread(16384)
+ @codec.decode(data) do |event|
+ decorate(event)
+ event.set("host", @host) if !event.include?("host")
+ queue << event
+ end
+ rescue IOError, EOFError, LogStash::ShutdownSignal
+ # stdin closed or a requested shutdown
+ break
+ end
+ end # while true
+  end # def run
+```
+
+In this example, the `data` is being sent to the codec defined in the configuration block to `decode` the data stream and return an event.
+
+In both examples, the resulting `event` is passed to the `decorate` method:
+
+```ruby
+ decorate(event)
+```
+
+This applies any tags you might have set in the input configuration block. For example, `tags => ["tag1", "tag2"]`.
+
+Also in both examples, the `event`, after being "decorated," is appended to the queue:
+
+```ruby
+ queue << event
+```
+
+This inserts the event into the pipeline.
+
+::::{tip}
+Because input plugins can range from simple to complex, it is helpful to see more examples of how they have been created:
+
+* [syslog](https://github.com/logstash-plugins/logstash-input-syslog/blob/main/lib/logstash/inputs/syslog.rb)
+* [zeromq](https://github.com/logstash-plugins/logstash-input-zeromq/blob/main/lib/logstash/inputs/zeromq.rb)
+* [stdin](https://github.com/logstash-plugins/logstash-input-stdin/blob/main/lib/logstash/inputs/stdin.rb)
+* [tcp](https://github.com/logstash-plugins/logstash-input-tcp/blob/main/lib/logstash/inputs/tcp.rb)
+
+There are many more examples in the [logstash-plugin github repository](https://github.com/logstash-plugins?query=logstash-input).
+
+::::
+
+
+
+
+## Building the Plugin [_building_the_plugin]
+
+At this point in the process you have coded your plugin and are ready to build a Ruby Gem from it. The following information will help you complete the process.
+
+### External dependencies [_external_dependencies]
+
+A `require` statement in Ruby is used to include necessary code. In some cases your plugin may require additional files. For example, the collectd plugin [uses](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/lib/logstash/codecs/collectd.rb#L148) the `types.db` file provided by collectd. In the main directory of your plugin, a file called `vendor.json` is where these files are described.
+
+The `vendor.json` file contains an array of JSON objects, each describing a file dependency. This example comes from the [collectd](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/vendor.json) codec plugin:
+
+```txt
+[{
+ "sha1": "a90fe6cc53b76b7bdd56dc57950d90787cb9c96e",
+ "url": "http://collectd.org/files/collectd-5.4.0.tar.gz",
+ "files": [ "/src/types.db" ]
+}]
+```
+
+* `sha1` is the sha1 signature used to verify the integrity of the file referenced by `url`.
+* `url` is the address from where Logstash will download the file.
+* `files` is an optional array of files to extract from the downloaded file. Note that while tar archives can use absolute or relative paths, the paths in this array should be written as absolute (as in the example above). If `files` is not present, all files will be uncompressed and extracted into the vendor directory.
+
+Another example of the `vendor.json` file is the [`geoip` filter](https://github.com/logstash-plugins/logstash-filter-geoip/blob/main/vendor.json)
+
+The process used to download these dependencies is to call `rake vendor`. This will be discussed further in the testing section of this document.
+
+Another kind of external dependency is on jar files. This will be described in the "Add a `gemspec` file" section.
+
+
+### Deprecated features [_deprecated_features]
+
+As a plugin evolves, an option or feature may no longer serve the intended purpose, and the developer may want to *deprecate* its usage. Deprecation warns users about the option’s status, so they aren’t caught by surprise when it is removed in a later release.
+
+{{ls}} 7.6 introduced a *deprecation logger* to make handling those situations easier. You can use the [adapter](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support) to ensure that your plugin can use the deprecation logger while still supporting older versions of {{ls}}. See the [readme](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support/blob/main/README.md) for more information and for instructions on using the adapter.
+
+Deprecations are noted in the `logstash-deprecation.log` file in the `log` directory.
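+
+As a brief, hypothetical sketch (assuming the `logstash-mixin-deprecation_logger_support` gem is declared as a runtime dependency in your gemspec, and using a made-up option name), using the adapter might look like this:
+
+```ruby
+require "logstash/inputs/base"
+require "logstash/plugin_mixins/deprecation_logger_support"
+
+class LogStash::Inputs::Example < LogStash::Inputs::Base
+  include LogStash::PluginMixins::DeprecationLoggerSupport
+
+  config_name "example"
+
+  # Hypothetical option that is being phased out.
+  config :old_option, :validate => :string
+
+  def register
+    if @old_option
+      # Writes to logstash-deprecation.log rather than the regular plugin log.
+      deprecation_logger.deprecated("`old_option` is deprecated; use `new_option` instead")
+    end
+  end
+end
+```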
+
+
+### Add a Gemfile [_add_a_gemfile]
+
+Gemfiles allow Ruby’s Bundler to maintain the dependencies for your plugin. Currently, all we need is the Logstash gem, for testing; if you require other gems, add them here.
+
+::::{tip}
+See [Bundler’s Gemfile page](http://bundler.io/gemfile.html) for more details.
+::::
+
+
+```ruby
+source 'https://rubygems.org'
+gemspec
+gem "logstash", :github => "elastic/logstash", :branch => "master"
+```
+
+
+
+## Add a `gemspec` file [_add_a_gemspec_file]
+
+Gemspecs define the Ruby gem which will be built and contain your plugin.
+
+::::{tip}
+More information can be found on the [Rubygems Specification page](http://guides.rubygems.org/specification-reference/).
+::::
+
+
+```ruby
+Gem::Specification.new do |s|
+ s.name = 'logstash-input-example'
+ s.version = '0.1.0'
+ s.licenses = ['Apache License (2.0)']
+ s.summary = "This input does x, y, z in Logstash"
+ s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+ s.authors = ["Elastic"]
+ s.email = 'info@elastic.co'
+ s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+ s.require_paths = ["lib"]
+
+ # Files
+ s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+ # Tests
+ s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+ # Special flag to let us know this is actually a logstash plugin
+ s.metadata = { "logstash_plugin" => "true", "logstash_group" => "input" }
+
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+end
+```
+
+It is appropriate to change these values to fit your plugin. In particular, `s.name` and `s.summary` should reflect your plugin’s name and behavior.
+
+`s.licenses` and `s.version` are also important and will come into play when you are ready to publish your plugin.
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elastic/logstash/blob/main/LICENSE.txt). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+The gem version, designated by `s.version`, helps track changes to plugins over time. You should use [semver versioning](http://semver.org/) strategy for version numbers.
+
+### Runtime and Development Dependencies [_runtime_and_development_dependencies]
+
+At the bottom of the `gemspec` file is a section with a comment: `Gem dependencies`. This is where any other needed gems must be mentioned. If a gem is necessary for your plugin to function, it is a runtime dependency. If a gem is used only for testing, it is a development dependency.
+
+::::{note}
+You can also have versioning requirements for your dependencies—including other Logstash plugins:
+
+```ruby
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+```
+
+This gemspec has a runtime dependency on the logstash-core-plugin-api and requires that it have a version number greater than or equal to version 1.60 and less than or equal to version 2.99.
+
+::::
+
+
+::::{important}
+All plugins have a runtime dependency on the `logstash-core-plugin-api` gem, and a development dependency on `logstash-devutils`.
+::::
+
+
+
+### Jar dependencies [_jar_dependencies]
+
+In some cases, such as the [Elasticsearch output plugin](https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/main/logstash-output-elasticsearch.gemspec#L22-L23), your code may depend on a jar file. In cases such as this, the dependency is added in the gemspec file in this manner:
+
+```ruby
+ # Jar dependencies
+ s.requirements << "jar 'org.elasticsearch:elasticsearch', '5.0.0'"
+ s.add_runtime_dependency 'jar-dependencies'
+```
+
+With these both defined, the install process will search for the required jar file at [http://mvnrepository.com](http://mvnrepository.com) and download the specified version.
+
+
+
+## Document your plugin [_document_your_plugin]
+
+Documentation is an important part of your plugin. All plugin documentation is rendered and placed in the [Logstash Reference](/reference/index.md) and the [Versioned plugin docs](logstash-docs://reference/integration-plugins.md).
+
+See [Document your plugin](/extend/plugin-doc.md) for tips and guidelines.
+
+
+## Add Tests [_add_tests]
+
+Logstash loves tests. Lots of tests. If you’re using your new input plugin in a production environment, you’ll want to have some tests to ensure you are not breaking any existing functionality.
+
+::::{note}
+A full exposition on RSpec is outside the scope of this document. Learn more about RSpec at [http://rspec.info](http://rspec.info)
+::::
+
+
+For help learning about tests and testing, look in the `spec/inputs/` directory of several other similar plugins.
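+
+As a starting point, a minimal spec for the example input might look like the following sketch. It leans on the shared examples shipped with `logstash-devutils`; adjust the class and config to match your plugin.
+
+```ruby
+# spec/inputs/example_spec.rb
+require "logstash/devutils/rspec/spec_helper"
+require "logstash/devutils/rspec/shared_examples"
+require "logstash/inputs/example"
+
+describe LogStash::Inputs::Example do
+  # Shared example from logstash-devutils: runs the input in a thread,
+  # asks it to stop, and verifies that it shuts down cleanly.
+  it_behaves_like "an interruptible input plugin" do
+    let(:config) { { "interval" => 100 } }
+  end
+end
+```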
+
+
+## Clone and test! [_clone_and_test]
+
+Now let’s start with a fresh clone of the plugin, build it and run the tests.
+
+* **Clone your plugin into a temporary location.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-input-MYPLUGINNAME.git`
+
+    * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-input-MYPLUGINNAME.git`
+
+ * `cd logstash-input-MYPLUGINNAME`
+
+
+Then, you’ll need to install your plugin’s dependencies with bundler:
+
+```
+bundle install
+```
+
+::::{important}
+If your plugin has an external file dependency described in `vendor.json`, you must download that dependency before running or testing. You can do this by running:
+
+```
+rake vendor
+```
+
+::::
+
+
+And finally, run the tests:
+
+```
+bundle exec rspec
+```
+
+You should see a success message, which looks something like this:
+
+```
+Finished in 0.034 seconds
+1 example, 0 failures
+```
+
+Hooray! You’re almost there! (Unless you saw failures… you should fix those first).
+
+
+## Building and Testing [_building_and_testing]
+
+Now you’re ready to build your (well-tested) plugin into a Ruby gem.
+
+### Build [_build]
+
+You already have all the necessary ingredients, so let’s go ahead and run the build command:
+
+```sh
+gem build logstash-input-example.gemspec
+```
+
+That’s it! Your gem should be built and placed in the same path, with the name:
+
+```sh
+logstash-input-mypluginname-0.1.0.gem
+```
+
+The `s.version` number from your gemspec file will provide the gem version, in this case, `0.1.0`.
+
+
+### Test installation [_test_installation]
+
+You should test installing your plugin into a clean installation of Logstash. Download the latest version from the [Logstash downloads page](https://www.elastic.co/downloads/logstash/).
+
+1. Untar it and cd into the directory:
+
+ ```sh
+ curl -O https://download.elastic.co/logstash/logstash/logstash-9.0.0.tar.gz
+ tar xzvf logstash-9.0.0.tar.gz
+ cd logstash-9.0.0
+ ```
+
+2. Using the plugin tool, we can install the gem we just built.
+
+ * Replace `/my/logstash/plugins` with the correct path to the gem for your environment, and `0.1.0` with the correct version number from the gemspec file.
+
+ ```sh
+ bin/logstash-plugin install /my/logstash/plugins/logstash-input-example/logstash-input-example-0.1.0.gem
+ ```
+
+ * After running this, you should see feedback from Logstash that it was successfully installed:
+
+ ```sh
+ validating /my/logstash/plugins/logstash-input-example/logstash-input-example-0.1.0.gem >= 0
+ Valid logstash plugin. Continuing...
+ Successfully installed 'logstash-input-example' with version '0.1.0'
+ ```
+
+ ::::{tip}
+ You can also use the Logstash plugin tool to determine which plugins are currently available:
+
+ ```sh
+ bin/logstash-plugin list
+ ```
+
+ Depending on what you have installed, you might see a short or long list of plugins: inputs, codecs, filters and outputs.
+
+ ::::
+
+3. Now try running Logstash with a simple configuration passed in via the command-line, using the `-e` flag.
+
+ ::::{note}
+ Your results will depend on what your input plugin is designed to do.
+ ::::
+
+
+```sh
+bin/logstash -e 'input { example{} } output {stdout { codec => rubydebug }}'
+```
+
+The example input plugin will send the contents of `message` (with a default message of "Hello World!") every second.
+
+```sh
+{
+ "message" => "Hello World!",
+ "@version" => "1",
+ "@timestamp" => "2015-01-27T19:17:18.932Z",
+ "host" => "cadenza"
+}
+```
+
+Feel free to experiment and test this by changing the `message` and `interval` parameters:
+
+```sh
+bin/logstash -e 'input { example{ message => "A different message" interval => 5 } } output {stdout { codec => rubydebug }}'
+```
+
+Congratulations! You’ve built, deployed and successfully run a Logstash input.
+
+
+
+## Submitting your plugin to [RubyGems.org](http://rubygems.org) and [logstash-plugins](https://github.com/logstash-plugins) [_submitting_your_plugin_to_rubygems_orghttprubygems_org_and_logstash_pluginshttpsgithub_comlogstash_plugins]
+
+Logstash uses [RubyGems.org](http://rubygems.org) as its repository for all plugin artifacts. Once you have developed your new plugin, you can make it available to Logstash users by simply publishing it to RubyGems.org.
+
+### Licensing [_licensing]
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elasticsearch/logstash/blob/main/LICENSE). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+
+### Publishing to [RubyGems.org](http://rubygems.org) [_publishing_to_rubygems_orghttprubygems_org]
+
+To begin, you’ll need an account on RubyGems.org.
+
+* [Sign-up for a RubyGems account](https://rubygems.org/sign_up).
+
+After creating an account, [obtain](http://guides.rubygems.org/rubygems-org-api/#api-authorization) an API key from RubyGems.org. By default, RubyGems uses the file `~/.gem/credentials` to store your API key. These credentials will be used to publish the gem. Replace `username` and `password` with the credentials you created at RubyGems.org:
+
+```sh
+curl -u username:password https://rubygems.org/api/v1/api_key.yaml > ~/.gem/credentials
+chmod 0600 ~/.gem/credentials
+```
+
+Before proceeding, make sure you have the right version in your gemspec file and commit your changes.
+
+* `s.version = '0.1.0'`
+
+To publish version 0.1.0 of your new logstash gem:
+
+```sh
+bundle install
+bundle exec rake vendor
+bundle exec rspec
+bundle exec rake publish_gem
+```
+
+::::{note}
+Executing `rake publish_gem`:
+
+1. Reads the version from the gemspec file (`s.version = '0.1.0'`)
+2. Checks in your local repository if a tag exists for that version. If the tag already exists, it aborts the process. Otherwise, it creates a new version tag in your local repository.
+3. Builds the gem
+4. Publishes the gem to RubyGems.org
+
+::::
+
+
+That’s it! Your plugin is published! Logstash users can now install your plugin by running:
+
+```sh
+bin/logstash-plugin install logstash-input-mypluginname
+```
+
+
+
+## Contributing your source code to [logstash-plugins](https://github.com/logstash-plugins) [_contributing_your_source_code_to_logstash_pluginshttpsgithub_comlogstash_plugins]
+
+You are not required to contribute your source code to the [logstash-plugins](https://github.com/logstash-plugins) GitHub organization, but we always welcome new plugins!
+
+### Benefits [_benefits]
+
+Some of the many benefits of having your plugin in the logstash-plugins repository are:
+
+* **Discovery.** Your plugin will appear in the [Logstash Reference](/reference/index.md), where Logstash users look first for plugins and documentation.
+* **Documentation.** Your plugin documentation will automatically be added to the [Logstash Reference](/reference/index.md).
+* **Testing.** With our testing infrastructure, your plugin will be continuously tested against current and future releases of Logstash. As a result, users will have the assurance that if incompatibilities arise, they will be quickly discovered and corrected.
+
+
+### Acceptance Guidelines [_acceptance_guidelines]
+
+* **Code Review.** Your plugin must be reviewed by members of the community for coherence, quality, readability, stability and security.
+* **Tests.** Your plugin must contain tests to be accepted. These tests are also subject to code review for scope and completeness. It’s ok if you don’t know how to write tests — we will guide you. We are working on publishing a guide to creating tests for Logstash which will make it easier. In the meantime, you can refer to [http://betterspecs.org/](http://betterspecs.org/) for examples.
+
+To begin migrating your plugin to logstash-plugins, simply create a new [issue](https://github.com/elasticsearch/logstash/issues) in the Logstash repository. When the acceptance guidelines are completed, we will facilitate the move to the logstash-plugins organization using the recommended [github process](https://help.github.com/articles/transferring-a-repository/#transferring-from-a-user-to-an-organization).
diff --git a/docs/extend/java-codec-plugin.md b/docs/extend/java-codec-plugin.md
new file mode 100644
index 000000000..75c0562ed
--- /dev/null
+++ b/docs/extend/java-codec-plugin.md
@@ -0,0 +1,348 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/java-codec-plugin.html
+---
+
+# How to write a Java codec plugin [java-codec-plugin]
+
+::::{note}
+Java codecs are currently supported only for Java input and output plugins. They will not work with Ruby input or output plugins.
+::::
+
+
+To develop a new Java codec for Logstash, you write a new Java class that conforms to the Logstash Java Codecs API, package it, and install it with the logstash-plugin utility. We’ll go through each of those steps.
+
+
+## Set up your environment [_set_up_your_environment_2]
+
+
+### Copy the example repo [_copy_the_example_repo_2]
+
+Start by copying the [example codec plugin](https://github.com/logstash-plugins/logstash-codec-java_codec_example). The plugin API is currently part of the Logstash codebase so you must have a local copy of that available. You can obtain a copy of the Logstash codebase with the following `git` command:
+
+```shell
+git clone --branch <branch_name> --single-branch https://github.com/elastic/logstash.git <target_folder>
+```
+
+The `branch_name` should correspond to the version of Logstash containing the preferred revision of the Java plugin API.
+
+::::{note}
+The GA version of the Java plugin API is available in the `7.2` and later branches of the Logstash codebase.
+::::
+
+
+Specify the `target_folder` for your local copy of the Logstash codebase. If you do not specify `target_folder`, it defaults to a new folder called `logstash` under your current folder.
+
+
+### Generate the .jar file [_generate_the_jar_file_2]
+
+After you have obtained a copy of the appropriate revision of the Logstash codebase, you need to compile it to generate the .jar file containing the Java plugin API. From the root directory of your Logstash codebase ($LS_HOME), you can compile it with `./gradlew assemble` (or `gradlew.bat assemble` if you’re running on Windows). This should produce the `$LS_HOME/logstash-core/build/libs/logstash-core-x.y.z.jar` where `x`, `y`, and `z` refer to the version of Logstash.
+
+After you have successfully compiled Logstash, you need to tell your Java plugin where to find the `logstash-core-x.y.z.jar` file. Create a new file named `gradle.properties` in the root folder of your plugin project. That file should have a single line:
+
+```txt
+LOGSTASH_CORE_PATH=<target_folder>/logstash-core
+```
+
+where `target_folder` is the root folder of your local copy of the Logstash codebase.
+
+
+## Code the plugin [_code_the_plugin_2]
+
+The example codec plugin decodes messages separated by a configurable delimiter and encodes messages by writing their string representation separated by a delimiter. For example, if the codec were configured with `/` as the delimiter, the input text `event1/event2/` would be decoded into two separate events with `message` fields of `event1` and `event2`, respectively. Note that this is only an example codec and does not cover all the edge cases that a production-grade codec should cover.
+
+Let’s look at the main class in that example codec:
+
+```java
+@LogstashPlugin(name="java_codec_example")
+public class JavaCodecExample implements Codec {
+
+    public static final PluginConfigSpec<String> DELIMITER_CONFIG =
+ PluginConfigSpec.stringSetting("delimiter", ",");
+
+ private final String id;
+ private final String delimiter;
+
+ public JavaCodecExample(final Configuration config, final Context context) {
+ this(config.get(DELIMITER_CONFIG));
+ }
+
+ private JavaCodecExample(String delimiter) {
+ this.id = UUID.randomUUID().toString();
+ this.delimiter = delimiter;
+ }
+
+ @Override
+    public void decode(ByteBuffer byteBuffer, Consumer<Map<String, Object>> consumer) {
+ // a not-production-grade delimiter decoder
+ byte[] byteInput = new byte[byteBuffer.remaining()];
+ byteBuffer.get(byteInput);
+ if (byteInput.length > 0) {
+ String input = new String(byteInput);
+ String[] split = input.split(delimiter);
+ for (String s : split) {
+                Map<String, Object> map = new HashMap<>();
+ map.put("message", s);
+ consumer.accept(map);
+ }
+ }
+ }
+
+ @Override
+    public void flush(ByteBuffer byteBuffer, Consumer<Map<String, Object>> consumer) {
+ // if the codec maintains any internal state such as partially-decoded input, this
+ // method should flush that state along with any additional input supplied in
+ // the ByteBuffer
+
+ decode(byteBuffer, consumer); // this is a simplistic implementation
+ }
+
+ @Override
+ public void encode(Event event, OutputStream outputStream) throws IOException {
+ outputStream.write((event.toString() + delimiter).getBytes(Charset.defaultCharset()));
+ }
+
+ @Override
+    public Collection<PluginConfigSpec<?>> configSchema() {
+ // should return a list of all configuration options for this plugin
+ return Collections.singletonList(DELIMITER_CONFIG);
+ }
+
+ @Override
+ public Codec cloneCodec() {
+ return new JavaCodecExample(this.delimiter);
+ }
+
+ @Override
+ public String getId() {
+ return this.id;
+ }
+
+}
+```
+
+Let’s step through and examine each part of that class.
+
+
+### Class declaration [_class_declaration_6]
+
+```java
+@LogstashPlugin(name="java_codec_example")
+public class JavaCodecExample implements Codec {
+```
+
+Notes about the class declaration:
+
+* All Java plugins must be annotated with the `@LogstashPlugin` annotation. Additionally:
+
+    * The `name` property of the annotation must be supplied and defines the name of the plugin as it will be used in the Logstash pipeline definition. For example, this codec would be referenced in the codec section of an appropriate input or output in the Logstash pipeline definition as `codec => java_codec_example { }`
+ * The value of the `name` property must match the name of the class excluding casing and underscores.
+
+* The class must implement the `co.elastic.logstash.api.Codec` interface.
+* Java plugins may not be created in the `org.logstash` or `co.elastic.logstash` packages to prevent potential clashes with classes in Logstash itself.
+
+
+#### Plugin settings [_plugin_settings_2]
+
+The snippet below contains both the setting definition and the method referencing it:
+
+```java
+public static final PluginConfigSpec<String> DELIMITER_CONFIG =
+ PluginConfigSpec.stringSetting("delimiter", ",");
+
+@Override
+public Collection<PluginConfigSpec<?>> configSchema() {
+ return Collections.singletonList(DELIMITER_CONFIG);
+}
+```
+
+The `PluginConfigSpec` class allows developers to specify the settings that a plugin supports complete with setting name, data type, deprecation status, required status, and default value. In this example, the `delimiter` setting defines the delimiter on which the codec will split events. It is not a required setting and if it is not explicitly set, its default value will be `,`.
+
+The `configSchema` method must return a list of all settings that the plugin supports. The Logstash execution engine will validate that all required settings are present and that no unsupported settings are present.
+
+
+#### Constructor and initialization [_constructor_and_initialization_2]
+
+```java
+private final String id;
+private final String delimiter;
+
+public JavaCodecExample(final Configuration config, final Context context) {
+ this(config.get(DELIMITER_CONFIG));
+}
+
+private JavaCodecExample(String delimiter) {
+ this.id = UUID.randomUUID().toString();
+ this.delimiter = delimiter;
+}
+```
+
+All Java codec plugins must have a constructor taking a `Configuration` and `Context` argument. This is the constructor that will be used to instantiate them at runtime. The retrieval and validation of all plugin settings should occur in this constructor. In this example, the delimiter to be used for delimiting events is retrieved from its setting and stored in a local variable so that it can be used later in the `decode` and `encode` methods. The codec’s ID is initialized to a random UUID, as should be done for most codecs.
+
+Any additional initialization may occur in the constructor as well. If there are any unrecoverable errors encountered in the configuration or initialization of the codec plugin, a descriptive exception should be thrown. The exception will be logged and will prevent Logstash from starting.
+
+
+### Codec methods [_codec_methods]
+
+```java
+@Override
+public void decode(ByteBuffer byteBuffer, Consumer<Map<String, Object>> consumer) {
+ // a not-production-grade delimiter decoder
+ byte[] byteInput = new byte[byteBuffer.remaining()];
+ byteBuffer.get(byteInput);
+ if (byteInput.length > 0) {
+ String input = new String(byteInput);
+ String[] split = input.split(delimiter);
+ for (String s : split) {
+            Map<String, Object> map = new HashMap<>();
+ map.put("message", s);
+ consumer.accept(map);
+ }
+ }
+}
+
+@Override
+public void flush(ByteBuffer byteBuffer, Consumer<Map<String, Object>> consumer) {
+ // if the codec maintains any internal state such as partially-decoded input, this
+ // method should flush that state along with any additional input supplied in
+ // the ByteBuffer
+
+ decode(byteBuffer, consumer); // this is a simplistic implementation
+}
+
+@Override
+public void encode(Event event, OutputStream outputStream) throws IOException {
+ outputStream.write((event.toString() + delimiter).getBytes(Charset.defaultCharset()));
+}
+```
+
+The `decode`, `flush`, and `encode` methods provide the core functionality of the codec. Codecs may be used by inputs to decode a sequence or stream of bytes into events or by outputs to encode events into a sequence of bytes.
+
+The `decode` method decodes events from the specified `ByteBuffer` and passes them to the provided `Consumer`. The input must provide a `ByteBuffer` that is ready for reading with `byteBuffer.position()` indicating the next position to read and `byteBuffer.limit()` indicating the first byte in the buffer that is not safe to read. Codecs must ensure that `byteBuffer.position()` reflects the last-read position before returning control to the input. The input is then responsible for returning the buffer to write mode via either `byteBuffer.clear()` or `byteBuffer.compact()` before resuming writes. In the example above, the `decode` method simply splits the incoming byte stream on the specified delimiter. A production-grade codec such as [`java-line`](https://github.com/elastic/logstash/blob/main/logstash-core/src/main/java/org/logstash/plugins/codecs/Line.java) would not make the simplifying assumption that the end of the supplied byte stream corresponded with the end of an event.
+
+Events should be constructed as instances of `Map<String, Object>` and pushed into the event pipeline via the `Consumer<Map<String, Object>>.accept()` method. To reduce allocations and GC pressure, codecs may reuse the same map instance by modifying its fields between calls to `Consumer<Map<String, Object>>.accept()` because the event pipeline will create events based on a copy of the map’s data.
+
+The `flush` method works in coordination with the `decode` method to decode all remaining events from the specified `ByteBuffer` along with any internal state that may remain after previous calls to the `decode` method. As an example of internal state that a codec might maintain, consider an input stream of bytes `event1/event2/event3` with a delimiter of `/`. Due to buffering or other reasons, the input might supply a partial stream of bytes such as `event1/eve` to the codec’s `decode` method. In this case, the codec could save the beginning three characters `eve` of the second event rather than assuming that the supplied byte stream ends on an event boundary. If the next call to `decode` supplied the `nt2/ev` bytes, the codec would prepend the saved `eve` bytes to produce the full `event2` event and then save the remaining `ev` bytes for decoding when the remainder of the bytes for that event were supplied. A call to `flush` signals the codec that the supplied bytes represent the end of an event stream and all remaining bytes should be decoded to events. The `flush` example above is a simplistic implementation that does not maintain any state about partially-supplied byte streams across calls to `decode`.
+
+The `encode` method encodes an event into a sequence of bytes and writes it into the specified `OutputStream`. Because a single codec instance is shared across all pipeline workers in the output stage of the Logstash pipeline, codecs should *not* retain state across calls to their `encode` methods.
+
+
+### cloneCodec method [_clonecodec_method]
+
+```java
+@Override
+public Codec cloneCodec() {
+ return new JavaCodecExample(this.delimiter);
+}
+```
+
+The `cloneCodec` method should return an identical instance of the codec with the exception of its ID. Because codecs may be stateful across calls to their `decode` methods, input plugins that are multi-threaded should use a separate instance of each codec via the `cloneCodec` method for each of their threads. Because a single codec instance is shared across all pipeline workers in the output stage of the Logstash pipeline, codecs should *not* retain state across calls to their `encode` methods. In the example above, the codec is cloned with the same delimiter but a different ID.
+
+
+### getId method [_getid_method_2]
+
+```java
+@Override
+public String getId() {
+ return id;
+}
+```
+
+For codec plugins, the `getId` method should always return the id that was set at instantiation time. This is typically a UUID.
+
+
+### Unit tests [_unit_tests_2]
+
+Lastly, but certainly not least importantly, unit tests are strongly encouraged. The example codec plugin includes an [example unit test](https://github.com/logstash-plugins/logstash-codec-java_codec_example/blob/main/src/test/java/org/logstashplugins/JavaCodecExampleTest.java) that you can use as a template for your own.
+
+
+## Package and deploy [_package_and_deploy_2]
+
+Java plugins are packaged as Ruby gems for dependency management and interoperability with Ruby plugins. Once they are packaged as gems, they may be installed with the `logstash-plugin` utility just as Ruby plugins are. Because no knowledge of Ruby or its toolchain should be required for Java plugin development, the procedure for packaging Java plugins as Ruby gems has been automated through a custom task in the Gradle build file provided with the example Java plugins. The following sections describe how to configure and execute that packaging task as well as how to install the packaged Java plugin in Logstash.
+
+
+### Configuring the Gradle packaging task [_configuring_the_gradle_packaging_task_2]
+
+The following section appears near the top of the `build.gradle` file supplied with the example Java plugins:
+
+```java
+// ===========================================================================
+// plugin info
+// ===========================================================================
+group 'org.logstashplugins' // must match the package of the main plugin class
+version "${file("VERSION").text.trim()}" // read from required VERSION file
+description = "Example Java filter implementation"
+pluginInfo.licenses = ['Apache-2.0'] // list of SPDX license IDs
+pluginInfo.longDescription = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using \$LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+pluginInfo.authors = ['Elasticsearch']
+pluginInfo.email = ['info@elastic.co']
+pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+pluginInfo.pluginType = "filter"
+pluginInfo.pluginClass = "JavaFilterExample"
+pluginInfo.pluginName = "java_filter_example"
+// ===========================================================================
+```
+
+You should configure the values above for your plugin.
+
+* The `version` value will be automatically read from the `VERSION` file in the root of your plugin’s codebase.
+* `pluginInfo.pluginType` should be set to one of `input`, `filter`, `codec`, or `output`.
+* `pluginInfo.pluginName` must match the name specified on the `@LogstashPlugin` annotation on the main plugin class. The Gradle packaging task will validate that and return an error if they do not match.
+
+
+### Running the Gradle packaging task [_running_the_gradle_packaging_task_2]
+
+Several Ruby source files along with a `gemspec` file and a `Gemfile` are required to package the plugin as a Ruby gem. These Ruby files are used only for defining the Ruby gem structure or at Logstash startup time to register the Java plugin. They are not used during runtime event processing. The Gradle packaging task automatically generates all of these files based on the values configured in the section above.
+
+You run the Gradle packaging task with the following command:
+
+```shell
+./gradlew gem
+```
+
+For Windows platforms: Substitute `gradlew.bat` for `./gradlew` as appropriate in the command.
+
+That task will produce a gem file in the root directory of your plugin’s codebase with the name `logstash-{{plugintype}}-<pluginName>-<version>.gem`
+
+
+### Installing the Java plugin in Logstash [_installing_the_java_plugin_in_logstash_2]
+
+After you have packaged your Java plugin as a Ruby gem, you can install it in Logstash with this command:
+
+```shell
+bin/logstash-plugin install --no-verify --local /path/to/javaPlugin.gem
+```
+
+For Windows platforms: Substitute backslashes for forward slashes as appropriate in the command.
+
+
+## Run Logstash with the Java codec plugin [_run_logstash_with_the_java_codec_plugin]
+
+To test the plugin, start Logstash with:
+
+```shell
+echo "foo,bar" | bin/logstash -e 'input { java_stdin { codec => java_codec_example } }'
+```
+
+The expected Logstash output (excluding initialization) with the configuration above is:
+
+```txt
+{
+ "@version" => "1",
+ "message" => "foo",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ,
+    "host" => "<yourHostName>"
+}
+{
+ "@version" => "1",
+ "message" => "bar\n",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ,
+    "host" => "<yourHostName>"
+}
+```
+
+
+## Feedback [_feedback_2]
+
+If you have any feedback on Java plugin support in Logstash, please comment on our [main Github issue](https://github.com/elastic/logstash/issues/9215) or post in the [Logstash forum](https://discuss.elastic.co/c/logstash).
+
diff --git a/docs/extend/java-filter-plugin.md b/docs/extend/java-filter-plugin.md
new file mode 100644
index 000000000..bc7aba01c
--- /dev/null
+++ b/docs/extend/java-filter-plugin.md
@@ -0,0 +1,307 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/java-filter-plugin.html
+---
+
+# How to write a Java filter plugin [java-filter-plugin]
+
+To develop a new Java filter for Logstash, you write a new Java class that conforms to the Logstash Java Filters API, package it, and install it with the logstash-plugin utility. We’ll go through each of those steps.
+
+
+## Set up your environment [_set_up_your_environment_3]
+
+
+### Copy the example repo [_copy_the_example_repo_3]
+
+Start by copying the [example filter plugin](https://github.com/logstash-plugins/logstash-filter-java_filter_example). The plugin API is currently part of the Logstash codebase so you must have a local copy of that available. You can obtain a copy of the Logstash codebase with the following `git` command:
+
+```shell
+git clone --branch <branch_name> --single-branch https://github.com/elastic/logstash.git <target_folder>
+```
+
+The `branch_name` should correspond to the version of Logstash containing the preferred revision of the Java plugin API.
+
+::::{note}
+The GA version of the Java plugin API is available in the `7.2` and later branches of the Logstash codebase.
+::::
+
+
+Specify the `target_folder` for your local copy of the Logstash codebase. If you do not specify `target_folder`, it defaults to a new folder called `logstash` under your current folder.
+
+
+### Generate the .jar file [_generate_the_jar_file_3]
+
+After you have obtained a copy of the appropriate revision of the Logstash codebase, you need to compile it to generate the .jar file containing the Java plugin API. From the root directory of your Logstash codebase ($LS_HOME), you can compile it with `./gradlew assemble` (or `gradlew.bat assemble` if you’re running on Windows). This should produce the `$LS_HOME/logstash-core/build/libs/logstash-core-x.y.z.jar` where `x`, `y`, and `z` refer to the version of Logstash.
+
+After you have successfully compiled Logstash, you need to tell your Java plugin where to find the `logstash-core-x.y.z.jar` file. Create a new file named `gradle.properties` in the root folder of your plugin project. That file should have a single line:
+
+```txt
+LOGSTASH_CORE_PATH=<target_folder>/logstash-core
+```
+
+where `target_folder` is the root folder of your local copy of the Logstash codebase.
+
+
+## Code the plugin [_code_the_plugin_3]
+
+The example filter plugin allows one to configure a field in each event that will be reversed. For example, if the filter were configured to reverse the `day_of_week` field, an event with `day_of_week: "Monday"` would be transformed to `day_of_week: "yadnoM"`. Let’s look at the main class in that example filter:
+
+```java
+@LogstashPlugin(name = "java_filter_example")
+public class JavaFilterExample implements Filter {
+
+    public static final PluginConfigSpec<String> SOURCE_CONFIG =
+ PluginConfigSpec.stringSetting("source", "message");
+
+ private String id;
+ private String sourceField;
+
+ public JavaFilterExample(String id, Configuration config, Context context) {
+ this.id = id;
+ this.sourceField = config.get(SOURCE_CONFIG);
+ }
+
+ @Override
+    public Collection<Event> filter(Collection<Event> events, FilterMatchListener matchListener) {
+ for (Event e : events) {
+ Object f = e.getField(sourceField);
+ if (f instanceof String) {
+ e.setField(sourceField, StringUtils.reverse((String)f));
+ matchListener.filterMatched(e);
+ }
+ }
+ return events;
+ }
+
+ @Override
+    public Collection<PluginConfigSpec<?>> configSchema() {
+ return Collections.singletonList(SOURCE_CONFIG);
+ }
+
+ @Override
+ public String getId() {
+ return this.id;
+ }
+
+ @Override
+ public void close() {
+ this.sourceField = null;
+ return;
+ }
+}
+```
+
+Let’s step through and examine each part of that class.
+
+
+### Class declaration [_class_declaration_7]
+
+```java
+@LogstashPlugin(name = "java_filter_example")
+public class JavaFilterExample implements Filter {
+```
+
+Notes about the class declaration:
+
+* All Java plugins must be annotated with the `@LogstashPlugin` annotation. Additionally:
+
+    * The `name` property of the annotation must be supplied and defines the name of the plugin as it will be used in the Logstash pipeline definition. For example, this filter would be referenced in the filter section of the Logstash pipeline definition as `filter { java_filter_example { .... } }`
+ * The value of the `name` property must match the name of the class excluding casing and underscores.
+
+* The class must implement the `co.elastic.logstash.api.Filter` interface.
+* Java plugins may not be created in the `org.logstash` or `co.elastic.logstash` packages to prevent potential clashes with classes in Logstash itself.
+
+
+### Plugin settings [_plugin_settings_3]
+
+The snippet below contains both the setting definition and the method referencing it:
+
+```java
+public static final PluginConfigSpec<String> SOURCE_CONFIG =
+ PluginConfigSpec.stringSetting("source", "message");
+
+@Override
+public Collection<PluginConfigSpec<?>> configSchema() {
+ return Collections.singletonList(SOURCE_CONFIG);
+}
+```
+
+The `PluginConfigSpec` class allows developers to specify the settings that a plugin supports complete with setting name, data type, deprecation status, required status, and default value. In this example, the `source` setting defines the name of the field in each event that will be reversed. It is not a required setting and if it is not explicitly set, its default value will be `message`.
+
+The `configSchema` method must return a list of all settings that the plugin supports. In a future phase of the Java plugin project, the Logstash execution engine will validate that all required settings are present and that no unsupported settings are present.
+
+
+### Constructor and initialization [_constructor_and_initialization_3]
+
+```java
+private String id;
+private String sourceField;
+
+public JavaFilterExample(String id, Configuration config, Context context) {
+ this.id = id;
+ this.sourceField = config.get(SOURCE_CONFIG);
+}
+```
+
+All Java filter plugins must have a constructor taking a `String` id and a `Configuration` and `Context` argument. This is the constructor that will be used to instantiate them at runtime. The retrieval and validation of all plugin settings should occur in this constructor. In this example, the name of the field to be reversed in each event is retrieved from its setting and stored in a local variable so that it can be used later in the `filter` method.
+
+Any additional initialization may occur in the constructor as well. If there are any unrecoverable errors encountered in the configuration or initialization of the filter plugin, a descriptive exception should be thrown. The exception will be logged and will prevent Logstash from starting.
+
+
+### Filter method [_filter_method_2]
+
+```java
+@Override
+public Collection<Event> filter(Collection<Event> events, FilterMatchListener matchListener) {
+ for (Event e : events) {
+ Object f = e.getField(sourceField);
+ if (f instanceof String) {
+ e.setField(sourceField, StringUtils.reverse((String)f));
+ matchListener.filterMatched(e);
+ }
+ }
+    return events;
+}
+```
+
+Finally, we come to the `filter` method that is invoked by the Logstash execution engine on batches of events as they flow through the event processing pipeline. The events to be filtered are supplied in the `events` argument and the method should return a collection of filtered events. Filters may perform a variety of actions on events as they flow through the pipeline including:
+
+* Mutation — Fields in events may be added, removed, or changed by a filter. This is the most common scenario for filters that perform various kinds of enrichment on events. In this scenario, the incoming `events` collection may be returned unmodified since the events in the collection are mutated in place.
+* Deletion — Events may be removed from the event pipeline by a filter so that subsequent filters and outputs do not receive them. In this scenario, the events to be deleted must be removed from the collection of filtered events before it is returned.
+* Creation — A filter may insert new events into the event pipeline that will be seen only by subsequent filters and outputs. In this scenario, the new events must be added to the collection of filtered events before it is returned.
+* Observation — Events may pass unchanged by a filter through the event pipeline. This may be useful in scenarios where a filter performs external actions (e.g., updating an external cache) based on the events observed in the event pipeline. In this scenario, the incoming `events` collection may be returned unmodified since no changes were made.
+
+In the example above, the value of the `source` field is retrieved from each event and reversed if it is a string value. Because each event is mutated in place, the incoming `events` collection can be returned.
+
+The `matchListener` is the mechanism by which filters indicate which events "match". The common actions for filters such as `add_field` and `add_tag` are applied only to events that are designated as "matching". Some filters such as the [grok filter](/reference/plugins-filters-grok.md) have a clear definition for what constitutes a matching event and will notify the listener only for matching events. Other filters such as the [UUID filter](/reference/plugins-filters-uuid.md) have no specific match criteria and should notify the listener for every event filtered. In this example, the filter notifies the match listener for any event that had a `String` value in its `source` field and was therefore able to be reversed.
+
+
+### getId method [_getid_method_3]
+
+```java
+@Override
+public String getId() {
+ return id;
+}
+```
+
+For filter plugins, the `getId` method should always return the id that was provided to the plugin through its constructor at instantiation time.
+
+
+### close method [_close_method]
+
+```java
+@Override
+public void close() {
+ // shutdown a resource that was instantiated during the filter initialization phase.
+ this.sourceField = null;
+ return;
+}
+```
+
+Filter plugins can use additional resources to perform operations, such as creating new database connections. Implementing the `close` method will allow the plugins to free up those resources when shutting down the pipeline.
+
+
+### Unit tests [_unit_tests_3]
+
+Lastly, but certainly not least importantly, unit tests are strongly encouraged. The example filter plugin includes an [example unit test](https://github.com/logstash-plugins/logstash-filter-java_filter_example/blob/main/src/test/java/org/logstashplugins/JavaFilterExampleTest.java) that you can use as a template for your own.
+
+
+## Package and deploy [_package_and_deploy_3]
+
+Java plugins are packaged as Ruby gems for dependency management and interoperability with Ruby plugins. Once they are packaged as gems, they may be installed with the `logstash-plugin` utility just as Ruby plugins are. Because no knowledge of Ruby or its toolchain should be required for Java plugin development, the procedure for packaging Java plugins as Ruby gems has been automated through a custom task in the Gradle build file provided with the example Java plugins. The following sections describe how to configure and execute that packaging task as well as how to install the packaged Java plugin in Logstash.
+
+
+### Configuring the Gradle packaging task [_configuring_the_gradle_packaging_task_3]
+
+The following section appears near the top of the `build.gradle` file supplied with the example Java plugins:
+
+```java
+// ===========================================================================
+// plugin info
+// ===========================================================================
+group 'org.logstashplugins' // must match the package of the main plugin class
+version "${file("VERSION").text.trim()}" // read from required VERSION file
+description = "Example Java filter implementation"
+pluginInfo.licenses = ['Apache-2.0'] // list of SPDX license IDs
+pluginInfo.longDescription = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using \$LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+pluginInfo.authors = ['Elasticsearch']
+pluginInfo.email = ['info@elastic.co']
+pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+pluginInfo.pluginType = "filter"
+pluginInfo.pluginClass = "JavaFilterExample"
+pluginInfo.pluginName = "java_filter_example"
+// ===========================================================================
+```
+
+You should configure the values above for your plugin.
+
+* The `version` value will be automatically read from the `VERSION` file in the root of your plugin’s codebase.
+* `pluginInfo.pluginType` should be set to one of `input`, `filter`, `codec`, or `output`.
+* `pluginInfo.pluginName` must match the name specified on the `@LogstashPlugin` annotation on the main plugin class. The Gradle packaging task will validate that and return an error if they do not match.
+
+
+### Running the Gradle packaging task [_running_the_gradle_packaging_task_3]
+
+Several Ruby source files along with a `gemspec` file and a `Gemfile` are required to package the plugin as a Ruby gem. These Ruby files are used only for defining the Ruby gem structure or at Logstash startup time to register the Java plugin. They are not used during runtime event processing. The Gradle packaging task automatically generates all of these files based on the values configured in the section above.
+
+You run the Gradle packaging task with the following command:
+
+```shell
+./gradlew gem
+```
+
+For Windows platforms: Substitute `gradlew.bat` for `./gradlew` as appropriate in the command.
+
+That task will produce a gem file in the root directory of your plugin’s codebase with the name `logstash-{{plugintype}}-<pluginName>-<version>.gem`.
+
+
+### Installing the Java plugin in Logstash [_installing_the_java_plugin_in_logstash_3]
+
+After you have packaged your Java plugin as a Ruby gem, you can install it in Logstash with this command:
+
+```shell
+bin/logstash-plugin install --no-verify --local /path/to/javaPlugin.gem
+```
+
+For Windows platforms: Substitute backslashes for forward slashes as appropriate in the command.
+
+
+## Run Logstash with the Java filter plugin [_run_logstash_with_the_java_filter_plugin]
+
+The following is a minimal Logstash configuration that can be used to test that the Java filter plugin is correctly installed and functioning.
+
+```java
+input {
+ generator { message => "Hello world!" count => 1 }
+}
+filter {
+ java_filter_example {}
+}
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+Copy the above Logstash configuration to a file such as `java_filter.conf`. Start Logstash with:
+
+```shell
+bin/logstash -f /path/to/java_filter.conf
+```
+
+The expected Logstash output (excluding initialization) with the configuration above is:
+
+```txt
+{
+ "sequence" => 0,
+ "@version" => "1",
+ "message" => "!dlrow olleH",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ,
+          "host" => "<yourHostName>"
+}
+```
+
+
+## Feedback [_feedback_3]
+
+If you have any feedback on Java plugin support in Logstash, please comment on our [main Github issue](https://github.com/elastic/logstash/issues/9215) or post in the [Logstash forum](https://discuss.elastic.co/c/logstash).
+
diff --git a/docs/extend/java-input-plugin.md b/docs/extend/java-input-plugin.md
new file mode 100644
index 000000000..240a1be9e
--- /dev/null
+++ b/docs/extend/java-input-plugin.md
@@ -0,0 +1,341 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/java-input-plugin.html
+---
+
+# How to write a Java input plugin [java-input-plugin]
+
+To develop a new Java input for Logstash, you write a new Java class that conforms to the Logstash Java Inputs API, package it, and install it with the logstash-plugin utility. We’ll go through each of those steps.
+
+
+## Set up your environment [_set_up_your_environment]
+
+
+### Copy the example repo [_copy_the_example_repo]
+
+Start by copying the [example input plugin](https://github.com/logstash-plugins/logstash-input-java_input_example). The plugin API is currently part of the Logstash codebase so you must have a local copy of that available. You can obtain a copy of the Logstash codebase with the following `git` command:
+
+```shell
+git clone --branch <branch_name> --single-branch https://github.com/elastic/logstash.git <target_folder>
+```
+
+The `branch_name` should correspond to the version of Logstash containing the preferred revision of the Java plugin API.
+
+::::{note}
+The GA version of the Java plugin API is available in the `7.2` and later branches of the Logstash codebase.
+::::
+
+
+Specify the `target_folder` for your local copy of the Logstash codebase. If you do not specify `target_folder`, it defaults to a new folder called `logstash` under your current folder.
+
+
+### Generate the .jar file [_generate_the_jar_file]
+
+After you have obtained a copy of the appropriate revision of the Logstash codebase, you need to compile it to generate the .jar file containing the Java plugin API. From the root directory of your Logstash codebase ($LS_HOME), you can compile it with `./gradlew assemble` (or `gradlew.bat assemble` if you’re running on Windows). This should produce the `$LS_HOME/logstash-core/build/libs/logstash-core-x.y.z.jar` where `x`, `y`, and `z` refer to the version of Logstash.
+
+After you have successfully compiled Logstash, you need to tell your Java plugin where to find the `logstash-core-x.y.z.jar` file. Create a new file named `gradle.properties` in the root folder of your plugin project. That file should have a single line:
+
+```txt
+LOGSTASH_CORE_PATH=<target_folder>/logstash-core
+```
+
+where `target_folder` is the root folder of your local copy of the Logstash codebase.
+
+
+## Code the plugin [_code_the_plugin]
+
+The example input plugin generates a configurable number of simple events before terminating. Let’s look at the main class in the example input.
+
+```java
+@LogstashPlugin(name="java_input_example")
+public class JavaInputExample implements Input {
+
+    public static final PluginConfigSpec<Long> EVENT_COUNT_CONFIG =
+            PluginConfigSpec.numSetting("count", 3);
+
+    public static final PluginConfigSpec<String> PREFIX_CONFIG =
+            PluginConfigSpec.stringSetting("prefix", "message");
+
+ private String id;
+ private long count;
+ private String prefix;
+ private final CountDownLatch done = new CountDownLatch(1);
+ private volatile boolean stopped;
+
+
+ public JavaInputExample(String id, Configuration config, Context context) {
+ this.id = id;
+ count = config.get(EVENT_COUNT_CONFIG);
+ prefix = config.get(PREFIX_CONFIG);
+ }
+
+ @Override
+    public void start(Consumer<Map<String, Object>> consumer) {
+ int eventCount = 0;
+ try {
+ while (!stopped && eventCount < count) {
+ eventCount++;
+                consumer.accept(Collections.singletonMap("message",
+                        prefix + " " + StringUtils.center(eventCount + " of " + count, 20)));
+ }
+ } finally {
+ stopped = true;
+ done.countDown();
+ }
+ }
+
+ @Override
+ public void stop() {
+ stopped = true; // set flag to request cooperative stop of input
+ }
+
+ @Override
+ public void awaitStop() throws InterruptedException {
+ done.await(); // blocks until input has stopped
+ }
+
+ @Override
+    public Collection<PluginConfigSpec<?>> configSchema() {
+ return Arrays.asList(EVENT_COUNT_CONFIG, PREFIX_CONFIG);
+ }
+
+ @Override
+ public String getId() {
+ return this.id;
+ }
+}
+```
+
+Let’s step through and examine each part of that class.
+
+
+### Class declaration [_class_declaration_5]
+
+```java
+@LogstashPlugin(name="java_input_example")
+public class JavaInputExample implements Input {
+```
+
+Notes about the class declaration:
+
+* All Java plugins must be annotated with the `@LogstashPlugin` annotation. Additionally:
+
+    * The `name` property of the annotation must be supplied and defines the name of the plugin as it will be used in the Logstash pipeline definition. For example, this input would be referenced in the input section of the Logstash pipeline definition as `input { java_input_example { .... } }`
+ * The value of the `name` property must match the name of the class excluding casing and underscores.
+
+* The class must implement the `co.elastic.logstash.api.Input` interface.
+* Java plugins may not be created in the `org.logstash` or `co.elastic.logstash` packages to prevent potential clashes with classes in Logstash itself.
+
+
+### Plugin settings [_plugin_settings]
+
+The snippet below contains both the setting definition and the method referencing it.
+
+```java
+public static final PluginConfigSpec<Long> EVENT_COUNT_CONFIG =
+        PluginConfigSpec.numSetting("count", 3);
+
+public static final PluginConfigSpec<String> PREFIX_CONFIG =
+        PluginConfigSpec.stringSetting("prefix", "message");
+
+@Override
+public Collection<PluginConfigSpec<?>> configSchema() {
+ return Arrays.asList(EVENT_COUNT_CONFIG, PREFIX_CONFIG);
+}
+```
+
+The `PluginConfigSpec` class allows developers to specify the settings that a plugin supports, complete with setting name, data type, deprecation status, required status, and default value. In this example, the `count` setting defines the number of events that will be generated, and the `prefix` setting defines an optional prefix to include in the event field. Neither setting is required; if not explicitly set, they default to `3` and `message`, respectively.
+
+The `configSchema` method must return a list of all settings that the plugin supports. In a future phase of the Java plugin project, the Logstash execution engine will validate that all required settings are present and that no unsupported settings are present.
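+
+For example, a user could override both defaults in a pipeline definition like this (the values shown are illustrative):
+
+```java
+input {
+  java_input_example {
+    count  => 10
+    prefix => "example"
+  }
+}
+```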
+
+
+### Constructor and initialization [_constructor_and_initialization]
+
+```java
+private String id;
+private long count;
+private String prefix;
+
+public JavaInputExample(String id, Configuration config, Context context) {
+ this.id = id;
+ count = config.get(EVENT_COUNT_CONFIG);
+ prefix = config.get(PREFIX_CONFIG);
+}
+```
+
+All Java input plugins must have a constructor taking a `String` id, a `Configuration`, and a `Context` argument. This is the constructor that will be used to instantiate them at runtime. The retrieval and validation of all plugin settings should occur in this constructor. In this example, the values of the two plugin settings are retrieved and stored in local variables for later use in the `start` method.
+
+Any additional initialization may occur in the constructor as well. If there are any unrecoverable errors encountered in the configuration or initialization of the input plugin, a descriptive exception should be thrown. The exception will be logged and will prevent Logstash from starting.
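+
+As a sketch, a range check like the following could be added at the end of the constructor; the check itself is illustrative and not part of the example plugin:
+
+```java
+// Hypothetical validation: fail fast with a descriptive message on a bad setting value
+if (count < 0) {
+    throw new IllegalArgumentException("The 'count' setting must be non-negative, got: " + count);
+}
+```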
+
+
+### Start method [_start_method]
+
+```java
+@Override
+public void start(Consumer<Map<String, Object>> consumer) {
+ int eventCount = 0;
+ try {
+ while (!stopped && eventCount < count) {
+ eventCount++;
+            consumer.accept(Collections.singletonMap("message",
+                    prefix + " " + StringUtils.center(eventCount + " of " + count, 20)));
+ }
+ } finally {
+ stopped = true;
+ done.countDown();
+ }
+}
+```
+
+The `start` method begins the event-producing loop in an input. Inputs are flexible and may produce events through many different mechanisms including:
+
+* a pull mechanism such as periodic queries of external database
+* a push mechanism such as events sent from clients to a local network port
+* a timed computation such as a heartbeat
+* any other mechanism that produces a useful stream of events
+
+Event streams may be either finite or infinite. If the input produces an infinite stream of events, this method should loop until a stop request is made through the `stop` method. If the input produces a finite stream of events, this method should terminate when the last event in the stream is produced or a stop request is made, whichever comes first.
+
+Events should be constructed as instances of `Map<String, Object>` and pushed into the event pipeline via the `Consumer<Map<String, Object>>.accept()` method. To reduce allocations and GC pressure, inputs may reuse the same map instance by modifying its fields between calls to `Consumer<Map<String, Object>>.accept()` because the event pipeline will create events based on a copy of the map’s data.
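+
+A rough sketch of that reuse pattern, using the same fields as the example input, might look like this:
+
+```java
+// Sketch only: one map instance is reused because the pipeline copies its data on accept()
+Map<String, Object> data = new HashMap<>();
+while (!stopped && eventCount < count) {
+    eventCount++;
+    data.put("message", prefix + " " + eventCount + " of " + count);
+    consumer.accept(data);
+}
+```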
+
+
+### Stop and awaitStop methods [_stop_and_awaitstop_methods]
+
+```java
+private final CountDownLatch done = new CountDownLatch(1);
+private volatile boolean stopped;
+
+@Override
+public void stop() {
+ stopped = true; // set flag to request cooperative stop of input
+}
+
+@Override
+public void awaitStop() throws InterruptedException {
+ done.await(); // blocks until input has stopped
+}
+```
+
+The `stop` method notifies the input to stop producing events. The stop mechanism may be implemented in any way that honors the API contract though a `volatile boolean` flag works well for many use cases.
+
+Inputs stop both asynchronously and cooperatively. Use the `awaitStop` method to block until the input has completed the stop process. Note that this method should **not** signal the input to stop as the `stop` method does. The awaitStop mechanism may be implemented in any way that honors the API contract though a `CountDownLatch` works well for many use cases.
+
+
+### getId method [_getid_method]
+
+```java
+@Override
+public String getId() {
+ return id;
+}
+```
+
+For input plugins, the `getId` method should always return the id that was provided to the plugin through its constructor at instantiation time.
+
+
+### Unit tests [_unit_tests]
+
+Last, but certainly not least, unit tests are strongly encouraged. The example input plugin includes an [example unit test](https://github.com/logstash-plugins/logstash-input-java_input_example/blob/main/src/test/java/org/logstashplugins/JavaInputExampleTest.java) that you can use as a template for your own.
+
+
+## Package and deploy [_package_and_deploy]
+
+Java plugins are packaged as Ruby gems for dependency management and interoperability with Ruby plugins. Once they are packaged as gems, they may be installed with the `logstash-plugin` utility just as Ruby plugins are. Because no knowledge of Ruby or its toolchain should be required for Java plugin development, the procedure for packaging Java plugins as Ruby gems has been automated through a custom task in the Gradle build file provided with the example Java plugins. The following sections describe how to configure and execute that packaging task as well as how to install the packaged Java plugin in Logstash.
+
+
+### Configuring the Gradle packaging task [_configuring_the_gradle_packaging_task]
+
+The following section appears near the top of the `build.gradle` file supplied with the example Java plugins:
+
+```java
+// ===========================================================================
+// plugin info
+// ===========================================================================
+group 'org.logstashplugins' // must match the package of the main plugin class
+version "${file("VERSION").text.trim()}" // read from required VERSION file
+description = "Example Java filter implementation"
+pluginInfo.licenses = ['Apache-2.0'] // list of SPDX license IDs
+pluginInfo.longDescription = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using \$LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+pluginInfo.authors = ['Elasticsearch']
+pluginInfo.email = ['info@elastic.co']
+pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+pluginInfo.pluginType = "filter"
+pluginInfo.pluginClass = "JavaFilterExample"
+pluginInfo.pluginName = "java_filter_example"
+// ===========================================================================
+```
+
+You should configure the values above for your plugin.
+
+* The `version` value will be automatically read from the `VERSION` file in the root of your plugin’s codebase.
+* `pluginInfo.pluginType` should be set to one of `input`, `filter`, `codec`, or `output`.
+* `pluginInfo.pluginName` must match the name specified on the `@LogstashPlugin` annotation on the main plugin class. The Gradle packaging task will validate that and return an error if they do not match.
+
+
+### Running the Gradle packaging task [_running_the_gradle_packaging_task]
+
+Several Ruby source files along with a `gemspec` file and a `Gemfile` are required to package the plugin as a Ruby gem. These Ruby files are used only for defining the Ruby gem structure or at Logstash startup time to register the Java plugin. They are not used during runtime event processing. The Gradle packaging task automatically generates all of these files based on the values configured in the section above.
+
+You run the Gradle packaging task with the following command:
+
+```shell
+./gradlew gem
+```
+
+For Windows platforms: Substitute `gradlew.bat` for `./gradlew` as appropriate in the command.
+
+That task will produce a gem file in the root directory of your plugin’s codebase with the name `logstash-{{plugintype}}-<pluginName>-<version>.gem`.
+
+
+### Installing the Java plugin in Logstash [_installing_the_java_plugin_in_logstash]
+
+After you have packaged your Java plugin as a Ruby gem, you can install it in Logstash with this command:
+
+```shell
+bin/logstash-plugin install --no-verify --local /path/to/javaPlugin.gem
+```
+
+For Windows platforms: Substitute backslashes for forward slashes as appropriate in the command.
+
+
+## Running Logstash with the Java input plugin [_running_logstash_with_the_java_input_plugin]
+
+The following is a minimal Logstash configuration that can be used to test that the Java input plugin is correctly installed and functioning.
+
+```java
+input {
+ java_input_example {}
+}
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+Copy the above Logstash configuration to a file such as `java_input.conf`. Start {{ls}} with:
+
+```shell
+bin/logstash -f /path/to/java_input.conf
+```
+
+The expected Logstash output (excluding initialization) with the configuration above is:
+
+```txt
+{
+ "@version" => "1",
+ "message" => "message 1 of 3 ",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ
+}
+{
+ "@version" => "1",
+ "message" => "message 2 of 3 ",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ
+}
+{
+ "@version" => "1",
+ "message" => "message 3 of 3 ",
+ "@timestamp" => yyyy-MM-ddThh:mm:ss.SSSZ
+}
+```
+
+
+## Feedback [_feedback]
+
+If you have any feedback on Java plugin support in Logstash, please comment on our [main Github issue](https://github.com/elastic/logstash/issues/9215) or post in the [Logstash forum](https://discuss.elastic.co/c/logstash).
diff --git a/docs/extend/java-output-plugin.md b/docs/extend/java-output-plugin.md
new file mode 100644
index 000000000..e8e7a6040
--- /dev/null
+++ b/docs/extend/java-output-plugin.md
@@ -0,0 +1,311 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/java-output-plugin.html
+---
+
+# How to write a Java output plugin [java-output-plugin]
+
+To develop a new Java output for Logstash, you write a new Java class that conforms to the Logstash Java Outputs API, package it, and install it with the logstash-plugin utility. We’ll go through each of those steps.
+
+
+## Set up your environment [_set_up_your_environment_4]
+
+
+### Copy the example repo [_copy_the_example_repo_4]
+
+Start by copying the [example output plugin](https://github.com/logstash-plugins/logstash-output-java_output_example). The plugin API is currently part of the Logstash codebase so you must have a local copy of that available. You can obtain a copy of the Logstash codebase with the following `git` command:
+
+```shell
+git clone --branch <branch_name> --single-branch https://github.com/elastic/logstash.git <target_folder>
+```
+
+The `branch_name` should correspond to the version of Logstash containing the preferred revision of the Java plugin API.
+
+::::{note}
+The GA version of the Java plugin API is available in the `7.2` and later branches of the Logstash codebase.
+::::
+
+
+Specify the `target_folder` for your local copy of the Logstash codebase. If you do not specify `target_folder`, it defaults to a new folder called `logstash` under your current folder.
+
+
+### Generate the .jar file [_generate_the_jar_file_4]
+
+After you have obtained a copy of the appropriate revision of the Logstash codebase, you need to compile it to generate the .jar file containing the Java plugin API. From the root directory of your Logstash codebase ($LS_HOME), you can compile it with `./gradlew assemble` (or `gradlew.bat assemble` if you’re running on Windows). This should produce the `$LS_HOME/logstash-core/build/libs/logstash-core-x.y.z.jar` where `x`, `y`, and `z` refer to the version of Logstash.
+
+After you have successfully compiled Logstash, you need to tell your Java plugin where to find the `logstash-core-x.y.z.jar` file. Create a new file named `gradle.properties` in the root folder of your plugin project. That file should have a single line:
+
+```txt
+LOGSTASH_CORE_PATH=<target_folder>/logstash-core
+```
+
+where `target_folder` is the root folder of your local copy of the Logstash codebase.
+
+
+## Code the plugin [_code_the_plugin_4]
+
+The example output plugin prints events to the console using the event’s `toString` method. Let’s look at the main class in the example output:
+
+```java
+@LogstashPlugin(name = "java_output_example")
+public class JavaOutputExample implements Output {
+
+    public static final PluginConfigSpec<String> PREFIX_CONFIG =
+            PluginConfigSpec.stringSetting("prefix", "");
+
+ private final String id;
+ private String prefix;
+ private PrintStream printer;
+ private final CountDownLatch done = new CountDownLatch(1);
+ private volatile boolean stopped = false;
+
+ public JavaOutputExample(final String id, final Configuration configuration, final Context context) {
+ this(id, configuration, context, System.out);
+ }
+
+ JavaOutputExample(final String id, final Configuration config, final Context context, OutputStream targetStream) {
+ this.id = id;
+ prefix = config.get(PREFIX_CONFIG);
+ printer = new PrintStream(targetStream);
+ }
+
+ @Override
+    public void output(final Collection<Event> events) {
+        Iterator<Event> z = events.iterator();
+ while (z.hasNext() && !stopped) {
+ String s = prefix + z.next();
+ printer.println(s);
+ }
+ }
+
+ @Override
+ public void stop() {
+ stopped = true;
+ done.countDown();
+ }
+
+ @Override
+ public void awaitStop() throws InterruptedException {
+ done.await();
+ }
+
+ @Override
+    public Collection<PluginConfigSpec<?>> configSchema() {
+ return Collections.singletonList(PREFIX_CONFIG);
+ }
+
+ @Override
+ public String getId() {
+ return id;
+ }
+}
+```
+
+Let’s step through and examine each part of that class.
+
+
+### Class declaration [_class_declaration_8]
+
+```java
+@LogstashPlugin(name="java_output_example")
+public class JavaOutputExample implements Output {
+```
+
+Notes about the class declaration:
+
+* All Java plugins must be annotated with the `@LogstashPlugin` annotation. Additionally:
+
+    * The `name` property of the annotation must be supplied and defines the name of the plugin as it will be used in the Logstash pipeline definition. For example, this output would be referenced in the output section of the Logstash pipeline definition as `output { java_output_example { .... } }`
+ * The value of the `name` property must match the name of the class excluding casing and underscores.
+
+* The class must implement the `co.elastic.logstash.api.Output` interface.
+* Java plugins may not be created in the `org.logstash` or `co.elastic.logstash` packages to prevent potential clashes with classes in Logstash itself.
+
+
+### Plugin settings [_plugin_settings_4]
+
+The snippet below contains both the setting definition and the method referencing it:
+
+```java
+public static final PluginConfigSpec<String> PREFIX_CONFIG =
+        PluginConfigSpec.stringSetting("prefix", "");
+
+@Override
+public Collection<PluginConfigSpec<?>> configSchema() {
+ return Collections.singletonList(PREFIX_CONFIG);
+}
+```
+
+The `PluginConfigSpec` class allows developers to specify the settings that a plugin supports, complete with setting name, data type, deprecation status, required status, and default value. In this example, the `prefix` setting defines an optional prefix to include in the output of the event. The setting is not required; if it is not explicitly set, it defaults to the empty string.
+
+The `configSchema` method must return a list of all settings that the plugin supports. In a future phase of the Java plugin project, the Logstash execution engine will validate that all required settings are present and that no unsupported settings are present.
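+
+For example, a pipeline definition could set the prefix like this (the value shown is illustrative):
+
+```java
+output {
+  java_output_example {
+    prefix => ">> "
+  }
+}
+```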
+
+
+### Constructor and initialization [_constructor_and_initialization_4]
+
+```java
+private final String id;
+private String prefix;
+private PrintStream printer;
+
+public JavaOutputExample(final String id, final Configuration configuration, final Context context) {
+    this(id, configuration, context, System.out);
+}
+
+JavaOutputExample(final String id, final Configuration config, final Context context, OutputStream targetStream) {
+ this.id = id;
+ prefix = config.get(PREFIX_CONFIG);
+ printer = new PrintStream(targetStream);
+}
+```
+
+All Java output plugins must have a constructor taking a `String` id, a `Configuration`, and a `Context` argument. This is the constructor that will be used to instantiate them at runtime. The retrieval and validation of all plugin settings should occur in this constructor. In this example, the value of the `prefix` setting is retrieved and stored in a local variable for later use in the `output` method. A second, package-private constructor is also defined; it is useful for unit testing with an output stream other than `System.out`.
+
+Any additional initialization may occur in the constructor as well. If there are any unrecoverable errors encountered in the configuration or initialization of the output plugin, a descriptive exception should be thrown. The exception will be logged and will prevent Logstash from starting.
+
+
+### Output method [_output_method]
+
+```java
+@Override
+public void output(final Collection<Event> events) {
+    Iterator<Event> z = events.iterator();
+ while (z.hasNext() && !stopped) {
+ String s = prefix + z.next();
+ printer.println(s);
+ }
+}
+```
+
+Outputs may send events to local sinks such as the console or a file or to remote systems such as Elasticsearch or other external systems. In this example, the events are printed to the local console.
+
+
+### Stop and awaitStop methods [_stop_and_awaitstop_methods_2]
+
+```java
+private final CountDownLatch done = new CountDownLatch(1);
+private volatile boolean stopped;
+
+@Override
+public void stop() {
+ stopped = true;
+ done.countDown();
+}
+
+@Override
+public void awaitStop() throws InterruptedException {
+ done.await();
+}
+```
+
+The `stop` method notifies the output to stop sending events. The stop mechanism may be implemented in any way that honors the API contract, though a `volatile boolean` flag works well for many use cases. In this example, the `output` method checks the `stopped` flag between events so that it stops sending promptly once a stop has been requested.
+
+Outputs stop both asynchronously and cooperatively. Use the `awaitStop` method to block until the output has completed the stop process. Note that this method should **not** signal the output to stop as the `stop` method does. The awaitStop mechanism may be implemented in any way that honors the API contract though a `CountDownLatch` works well for many use cases.
+
+
+### getId method [_getid_method_4]
+
+```java
+@Override
+public String getId() {
+ return id;
+}
+```
+
+For output plugins, the `getId` method should always return the id that was provided to the plugin through its constructor at instantiation time.
+
+
+### Unit tests [_unit_tests_4]
+
+Last, but certainly not least, unit tests are strongly encouraged. The example output plugin includes an [example unit test](https://github.com/logstash-plugins/logstash-output-java_output_example/blob/main/src/test/java/org/logstashplugins/JavaOutputExampleTest.java) that you can use as a template for your own.
+
+
+## Package and deploy [_package_and_deploy_4]
+
+Java plugins are packaged as Ruby gems for dependency management and interoperability with Ruby plugins. Once they are packaged as gems, they may be installed with the `logstash-plugin` utility just as Ruby plugins are. Because no knowledge of Ruby or its toolchain should be required for Java plugin development, the procedure for packaging Java plugins as Ruby gems has been automated through a custom task in the Gradle build file provided with the example Java plugins. The following sections describe how to configure and execute that packaging task as well as how to install the packaged Java plugin in Logstash.
+
+
+### Configuring the Gradle packaging task [_configuring_the_gradle_packaging_task_4]
+
+The following section appears near the top of the `build.gradle` file supplied with the example Java plugins:
+
+```java
+// ===========================================================================
+// plugin info
+// ===========================================================================
+group 'org.logstashplugins' // must match the package of the main plugin class
+version "${file("VERSION").text.trim()}" // read from required VERSION file
+description = "Example Java filter implementation"
+pluginInfo.licenses = ['Apache-2.0'] // list of SPDX license IDs
+pluginInfo.longDescription = "This gem is a Logstash plugin required to be installed on top of the Logstash core pipeline using \$LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+pluginInfo.authors = ['Elasticsearch']
+pluginInfo.email = ['info@elastic.co']
+pluginInfo.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+pluginInfo.pluginType = "filter"
+pluginInfo.pluginClass = "JavaFilterExample"
+pluginInfo.pluginName = "java_filter_example"
+// ===========================================================================
+```
+
+You should configure the values above for your plugin.
+
+* The `version` value will be automatically read from the `VERSION` file in the root of your plugin’s codebase.
+* `pluginInfo.pluginType` should be set to one of `input`, `filter`, `codec`, or `output`.
+* `pluginInfo.pluginName` must match the name specified on the `@LogstashPlugin` annotation on the main plugin class. The Gradle packaging task will validate that and return an error if they do not match.
+
+
+### Running the Gradle packaging task [_running_the_gradle_packaging_task_4]
+
+Several Ruby source files along with a `gemspec` file and a `Gemfile` are required to package the plugin as a Ruby gem. These Ruby files are used only for defining the Ruby gem structure or at Logstash startup time to register the Java plugin. They are not used during runtime event processing. The Gradle packaging task automatically generates all of these files based on the values configured in the section above.
+
+You run the Gradle packaging task with the following command:
+
+```shell
+./gradlew gem
+```
+
+For Windows platforms: Substitute `gradlew.bat` for `./gradlew` as appropriate in the command.
+
+That task will produce a gem file in the root directory of your plugin’s codebase with the name `logstash-{{plugintype}}-<pluginName>-<version>.gem`.
+
+
+### Installing the Java plugin in Logstash [_installing_the_java_plugin_in_logstash_4]
+
+After you have packaged your Java plugin as a Ruby gem, you can install it in Logstash with this command:
+
+```shell
+bin/logstash-plugin install --no-verify --local /path/to/javaPlugin.gem
+```
+
+For Windows platforms: Substitute backslashes for forward slashes as appropriate in the command.
+
+
+## Running Logstash with the Java output plugin [_running_logstash_with_the_java_output_plugin]
+
+The following is a minimal Logstash configuration that can be used to test that the Java output plugin is correctly installed and functioning.
+
+```java
+input {
+ generator { message => "Hello world!" count => 1 }
+}
+output {
+ java_output_example {}
+}
+```
+
+Copy the above Logstash configuration to a file such as `java_output.conf`. Start Logstash with:
+
+```shell
+bin/logstash -f /path/to/java_output.conf
+```
+
+The expected Logstash output (excluding initialization) with the configuration above is:
+
+```txt
+{"@timestamp":"yyyy-MM-ddTHH:mm:ss.SSSZ","message":"Hello world!","@version":"1","host":"","sequence":0}
+```
+
+
+## Feedback [_feedback_4]
+
+If you have any feedback on Java plugin support in Logstash, please comment on our [main Github issue](https://github.com/elastic/logstash/issues/9215) or post in the [Logstash forum](https://discuss.elastic.co/c/logstash).
diff --git a/docs/extend/output-new-plugin.md b/docs/extend/output-new-plugin.md
new file mode 100644
index 000000000..88f9ef369
--- /dev/null
+++ b/docs/extend/output-new-plugin.md
@@ -0,0 +1,570 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/output-new-plugin.html
+---
+
+# How to write a Logstash output plugin [output-new-plugin]
+
+To develop a new output for Logstash, build a self-contained Ruby gem whose source code lives in its own GitHub repository. The Ruby gem can then be hosted and shared on RubyGems.org. You can use the example output implementation as a starting point. (If you’re unfamiliar with Ruby, you can find an excellent quickstart guide at [https://www.ruby-lang.org/en/documentation/quickstart/](https://www.ruby-lang.org/en/documentation/quickstart/).)
+
+## Get started [_get_started_4]
+
+Let’s step through creating an output plugin using the [example output plugin](https://github.com/logstash-plugins/logstash-output-example/).
+
+### Create a GitHub repo for your new plugin [_create_a_github_repo_for_your_new_plugin_4]
+
+Each Logstash plugin lives in its own GitHub repository. To create a new repository for your plugin:
+
+1. Log in to GitHub.
+2. Click the **Repositories** tab. You’ll see a list of other repositories you’ve forked or contributed to.
+3. Click the green **New** button in the upper right.
+4. Specify the following settings for your new repo:
+
+ * **Repository name** — a unique name of the form `logstash-output-pluginname`.
+ * **Public or Private** — your choice, but the repository must be Public if you want to submit it as an official plugin.
+ * **Initialize this repository with a README** — enables you to immediately clone the repository to your computer.
+
+5. Click **Create Repository**.
+
+
+### Use the plugin generator tool [_use_the_plugin_generator_tool_4]
+
+You can create your own Logstash plugin in seconds! The `generate` subcommand of `bin/logstash-plugin` creates the foundation for a new Logstash plugin with templatized files. It creates the correct directory structure, gemspec files, and dependencies so you can start adding custom code to process data with Logstash.
+
+For more information, see [Generating plugins](/reference/plugin-generator.md)
+
+
+### Copy the output code [_copy_the_output_code]
+
+Alternatively, you can use the example repo we host on github.com:
+
+1. **Clone your plugin.** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-output-MYPLUGINNAME.git`
+
+    * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-output-MYPLUGINNAME.git`
+
+ * `cd logstash-output-MYPLUGINNAME`
+
+2. **Clone the output plugin example and copy it to your plugin branch.**
+
+ You don’t want to include the example .git directory or its contents, so delete it before you copy the example.
+
+ * `cd /tmp`
+    * `git clone https://github.com/logstash-plugins/logstash-output-example.git`
+ * `cd logstash-output-example`
+ * `rm -rf .git`
+ * `cp -R * /path/to/logstash-output-mypluginname/`
+
+3. **Rename the following files to match the name of your plugin.**
+
+ * `logstash-output-example.gemspec`
+ * `example.rb`
+ * `example_spec.rb`
+
+ ```txt
+ cd /path/to/logstash-output-mypluginname
+ mv logstash-output-example.gemspec logstash-output-mypluginname.gemspec
+ mv lib/logstash/outputs/example.rb lib/logstash/outputs/mypluginname.rb
+ mv spec/outputs/example_spec.rb spec/outputs/mypluginname_spec.rb
+ ```
+
+
+Your file structure should look like this:
+
+```txt
+$ tree logstash-output-mypluginname
+├── Gemfile
+├── LICENSE
+├── README.md
+├── Rakefile
+├── lib
+│ └── logstash
+│ └── outputs
+│ └── mypluginname.rb
+├── logstash-output-mypluginname.gemspec
+└── spec
+ └── outputs
+ └── mypluginname_spec.rb
+```
+
+For more information about the Ruby gem file structure and an excellent walkthrough of the Ruby gem creation process, see [http://timelessrepo.com/making-ruby-gems](http://timelessrepo.com/making-ruby-gems)
+
+
+### See what your plugin looks like [_see_what_your_plugin_looks_like_4]
+
+Before we dive into the details, open up the plugin file in your favorite text editor and take a look.
+
+```ruby
+require "logstash/outputs/base"
+require "logstash/namespace"
+
+# Add any asciidoc formatted documentation here
+# An example output that does nothing.
+class LogStash::Outputs::Example < LogStash::Outputs::Base
+ config_name "example"
+
+ # This sets the concurrency behavior of this plugin. By default it is :legacy, which was the standard
+ # way concurrency worked before Logstash 2.4
+ #
+ # You should explicitly set it to either :single or :shared as :legacy will be removed in Logstash 6.0
+ #
+ # When configured as :single a single instance of the Output will be shared among the
+ # pipeline worker threads. Access to the `#multi_receive/#multi_receive_encoded/#receive` method will be synchronized
+ # i.e. only one thread will be active at a time making threadsafety much simpler.
+ #
+ # You can set this to :shared if your output is threadsafe. This will maximize
+ # concurrency but you will need to make appropriate uses of mutexes in `#multi_receive/#receive`.
+ #
+ # Only the `#multi_receive/#multi_receive_encoded` methods need to actually be threadsafe, the other methods
+ # will only be executed in a single thread
+ concurrency :single
+
+ public
+ def register
+ end # def register
+
+ public
+ # Takes an array of events
+ # Must be threadsafe if `concurrency :shared` is set
+ def multi_receive(events)
+ end # def multi_receive
+end # class LogStash::Outputs::Example
+```
+
+
+
+## Coding output plugins [_coding_output_plugins]
+
+Now let’s take a line-by-line look at the example plugin.
+
+### `require` Statements [_require_statements_4]
+
+Logstash output plugins require parent classes defined in `logstash/outputs/base` and `logstash/namespace`:
+
+```ruby
+require "logstash/outputs/base"
+require "logstash/namespace"
+```
+
+Of course, the plugin you build may depend on other code, or even gems. Just put them here along with these Logstash dependencies.
+
+
+
+## Plugin Body [_plugin_body_4]
+
+Let’s go through the various elements of the plugin itself.
+
+### `class` Declaration [_class_declaration_4]
+
+The output plugin class should be a subclass of `LogStash::Outputs::Base`:
+
+```ruby
+class LogStash::Outputs::Example < LogStash::Outputs::Base
+```
+
+The class name should closely mirror the plugin name, for example:
+
+```ruby
+LogStash::Outputs::Example
+```
+
+
+### `config_name` [_config_name_4]
+
+```ruby
+ config_name "example"
+```
+
+This is the name used to reference your plugin inside the output configuration block.
+
+If you set `config_name "example"` in your plugin code, the corresponding Logstash configuration block would need to look like this:
+
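+```ruby
+output {
+  example { ... }
+}
+```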
+
+
+## Configuration Parameters [_configuration_parameters_4]
+
+```ruby
+ config :variable_name, :validate => :variable_type, :default => "Default value", :required => boolean, :deprecated => boolean, :obsolete => string
+```
+
+The configuration, or `config` section allows you to define as many (or as few) parameters as are needed to enable Logstash to process events.
+
+There are several configuration attributes:
+
+* `:validate` - allows you to enforce passing a particular data type to Logstash for this configuration option, such as `:string`, `:password`, `:boolean`, `:number`, `:array`, `:hash`, `:path` (a file-system path), `:uri`, `:codec` (since 1.2.0), `:bytes`. Note that this also works as a coercion: if you specify "true" for a boolean setting (even though it is technically a string), it becomes a valid boolean in the config. This coercion works for the `:number` type as well, where "1.2" becomes a float and "22" is an integer.
+* `:default` - lets you specify a default value for a parameter
+* `:required` - whether or not this parameter is mandatory (a Boolean `true` or `false`)
+* `:list` - whether or not this value should be a list of values. Will typecheck the list members, and convert scalars to one element lists. Note that this mostly obviates the array type, though if you need lists of complex objects that will be more suitable.
+* `:deprecated` - informational (also a Boolean `true` or `false`)
+* `:obsolete` - used to declare that a given setting has been removed and is no longer functioning. The idea is to provide an informed upgrade path to users who are still using a now-removed setting.
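+
+For instance, a couple of hypothetical settings using these attributes could be declared like this (the option names and values are illustrative, not from the example plugin):
+
+```ruby
+  # Illustrative declarations only
+  config :host, :validate => :string, :required => true
+  config :port, :validate => :number, :default => 9200
+  config :tags, :validate => :array, :default => []
+```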
+
+
+## Plugin Methods [_plugin_methods_4]
+
+Logstash outputs must implement the `register` and `multi_receive` methods.
+
+### `register` Method [_register_method_4]
+
+```ruby
+ public
+ def register
+ end # def register
+```
+
+The Logstash `register` method is like an `initialize` method. It was originally created to enforce having `super` called, preventing headaches for newbies. (Note: It may go away in favor of `initialize`, in conjunction with some enforced testing to ensure `super` is called.)
+
+`public` means the method can be called anywhere, not just within the class. This is the default behavior for methods in Ruby, but it is specified explicitly here anyway.
+
+You can also assign instance variables here (variables prepended by `@`). Configuration variables are now in scope as instance variables, like `@message`.
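+
+As a hedged sketch, the two required methods might look like this; the `@prefix` variable assumes a `config :prefix, :validate => :string` declaration, which the bare example plugin does not have:
+
+```ruby
+  public
+  def register
+    # A config option declared as `config :prefix, ...` is available here as @prefix
+    @event_count = 0
+  end
+
+  public
+  # Takes an array of events; must be threadsafe if `concurrency :shared` is set
+  def multi_receive(events)
+    events.each do |event|
+      @event_count += 1
+      puts "#{@prefix}#{event.get('message')}"
+    end
+  end
+```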
+
+
+
+## Building the Plugin [_building_the_plugin_4]
+
+At this point in the process you have coded your plugin and are ready to build a Ruby Gem from it. The following information will help you complete the process.
+
+### External dependencies [_external_dependencies_4]
+
+A `require` statement in Ruby is used to include necessary code. In some cases your plugin may require additional files. For example, the collectd plugin [uses](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/lib/logstash/codecs/collectd.rb#L148) the `types.db` file provided by collectd. In the main directory of your plugin, a file called `vendor.json` is where these files are described.
+
+The `vendor.json` file contains an array of JSON objects, each describing a file dependency. This example comes from the [collectd](https://github.com/logstash-plugins/logstash-codec-collectd/blob/main/vendor.json) codec plugin:
+
+```txt
+[{
+ "sha1": "a90fe6cc53b76b7bdd56dc57950d90787cb9c96e",
+ "url": "http://collectd.org/files/collectd-5.4.0.tar.gz",
+ "files": [ "/src/types.db" ]
+}]
+```
+
+* `sha1` is the sha1 signature used to verify the integrity of the file referenced by `url`.
+* `url` is the address from where Logstash will download the file.
+* `files` is an optional array of files to extract from the downloaded file. Note that while tar archives can use absolute or relative paths, treat them as absolute in this array. If `files` is not present, all files will be uncompressed and extracted into the vendor directory.
+
+Another example of the `vendor.json` file is the [`geoip` filter](https://github.com/logstash-plugins/logstash-filter-geoip/blob/main/vendor.json)
+
+The process used to download these dependencies is to call `rake vendor`. This will be discussed further in the testing section of this document.
+
+Another kind of external dependency is on jar files. This will be described in the "Add a `gemspec` file" section.
+
+
+### Deprecated features [_deprecated_features_4]
+
+As a plugin evolves, an option or feature may no longer serve the intended purpose, and the developer may want to *deprecate* its usage. Deprecation warns users about the option’s status, so they aren’t caught by surprise when it is removed in a later release.
+
+{{ls}} 7.6 introduced a *deprecation logger* to make handling those situations easier. You can use the [adapter](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support) to ensure that your plugin can use the deprecation logger while still supporting older versions of {{ls}}. See the [readme](https://github.com/logstash-plugins/logstash-mixin-deprecation_logger_support/blob/main/README.md) for more information and for instructions on using the adapter.
+
+Deprecations are noted in the `logstash-deprecation.log` file in the `log` directory.
+
+
+### Add a Gemfile [_add_a_gemfile_4]
+
+Gemfiles allow Ruby’s Bundler to maintain the dependencies for your plugin. Currently, all we need is the Logstash gem, for testing, but if you require other gems, you should add them here.
+
+::::{tip}
+See [Bundler’s Gemfile page](http://bundler.io/gemfile.html) for more details.
+::::
+
+
+```ruby
+source 'https://rubygems.org'
+gemspec
+gem "logstash", :github => "elastic/logstash", :branch => "master"
+```
+
+
+
+## Add a `gemspec` file [_add_a_gemspec_file_4]
+
+Gemspecs define the Ruby gem which will be built and contain your plugin.
+
+::::{tip}
+More information can be found on the [Rubygems Specification page](http://guides.rubygems.org/specification-reference/).
+::::
+
+
+```ruby
+Gem::Specification.new do |s|
+ s.name = 'logstash-output-example'
+ s.version = '0.1.0'
+ s.licenses = ['Apache License (2.0)']
+ s.summary = "This output does x, y, z in Logstash"
+ s.description = "This gem is a logstash plugin required to be installed on top of the Logstash core pipeline using $LS_HOME/bin/logstash-plugin install gemname. This gem is not a stand-alone program"
+ s.authors = ["Elastic"]
+ s.email = 'info@elastic.co'
+ s.homepage = "http://www.elastic.co/guide/en/logstash/current/index.html"
+ s.require_paths = ["lib"]
+
+ # Files
+ s.files = Dir['lib/**/*','spec/**/*','vendor/**/*','*.gemspec','*.md','CONTRIBUTORS','Gemfile','LICENSE','NOTICE.TXT']
+ # Tests
+ s.test_files = s.files.grep(%r{^(test|spec|features)/})
+
+ # Special flag to let us know this is actually a logstash plugin
+ s.metadata = { "logstash_plugin" => "true", "logstash_group" => "output" }
+
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+end
+```
+
+It is appropriate to change these values to fit your plugin. In particular, `s.name` and `s.summary` should reflect your plugin’s name and behavior.
+
+`s.licenses` and `s.version` are also important and will come into play when you are ready to publish your plugin.
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elastic/logstash/blob/main/LICENSE.txt). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+The gem version, designated by `s.version`, helps track changes to plugins over time. You should use [semver versioning](http://semver.org/) strategy for version numbers.
+
+### Runtime and Development Dependencies [_runtime_and_development_dependencies_4]
+
+At the bottom of the `gemspec` file is a section with a comment: `Gem dependencies`. This is where any other needed gems must be mentioned. If a gem is necessary for your plugin to function, it is a runtime dependency. If a gem is only used for testing, it is a development dependency.
+
+::::{note}
+You can also have versioning requirements for your dependencies—including other Logstash plugins:
+
+```ruby
+ # Gem dependencies
+ s.add_runtime_dependency "logstash-core-plugin-api", ">= 1.60", "<= 2.99"
+ s.add_development_dependency 'logstash-devutils'
+```
+
+This gemspec has a runtime dependency on the logstash-core-plugin-api and requires that it have a version number greater than or equal to version 1.60 and less than or equal to version 2.99.
+
+::::
+
+
+::::{important}
+All plugins have a runtime dependency on the `logstash-core-plugin-api` gem, and a development dependency on `logstash-devutils`.
+::::
+
+
+
+### Jar dependencies [_jar_dependencies_4]
+
+In some cases, such as the [Elasticsearch output plugin](https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/main/logstash-output-elasticsearch.gemspec#L22-L23), your code may depend on a jar file. In cases such as this, the dependency is added in the gemspec file in this manner:
+
+```ruby
+ # Jar dependencies
+ s.requirements << "jar 'org.elasticsearch:elasticsearch', '5.0.0'"
+ s.add_runtime_dependency 'jar-dependencies'
+```
+
+With these both defined, the install process will search for the required jar file at [http://mvnrepository.com](http://mvnrepository.com) and download the specified version.
+
+
+
+## Document your plugin [_document_your_plugin_4]
+
+Documentation is an important part of your plugin. All plugin documentation is rendered and placed in the [Logstash Reference](/reference/index.md) and the [Versioned plugin docs](logstash-docs://reference/integration-plugins.md).
+
+See [Document your plugin](/extend/plugin-doc.md) for tips and guidelines.
+
+
+## Add Tests [_add_tests_4]
+
+Logstash loves tests. Lots of tests. If you’re using your new output plugin in a production environment, you’ll want to have some tests to ensure you are not breaking any existing functionality.
+
+::::{note}
+A full exposition on RSpec is outside the scope of this document. Learn more about RSpec at [http://rspec.info](http://rspec.info)
+::::
+
+
+For help learning about tests and testing, look in the `spec/outputs/` directory of several other similar plugins.
+
+
+## Clone and test! [_clone_and_test_4]
+
+Now let’s start with a fresh clone of the plugin, build it and run the tests.
+
+* **Clone your plugin into a temporary location** Replace `GITUSERNAME` with your github username, and `MYPLUGINNAME` with your plugin name.
+
+    * `git clone https://github.com/GITUSERNAME/logstash-output-MYPLUGINNAME.git`
+
+    * alternately, via ssh: `git clone git@github.com:GITUSERNAME/logstash-output-MYPLUGINNAME.git`
+
+ * `cd logstash-output-MYPLUGINNAME`
+
+
+Then, you’ll need to install your plugin’s dependencies with bundler:
+
+```sh
+bundle install
+```
+
+::::{important}
+If your plugin has an external file dependency described in `vendor.json`, you must download that dependency before running or testing. You can do this by running:
+
+```sh
+rake vendor
+```
+
+::::
+
+
+And finally, run the tests:
+
+```sh
+bundle exec rspec
+```
+
+You should see a success message, which looks something like this:
+
+```txt
+Finished in 0.034 seconds
+1 example, 0 failures
+```
+
+Hooray! You’re almost there! (Unless you saw failures… you should fix those first).
+
+
+## Building and Testing [_building_and_testing_4]
+
+Now you’re ready to build your (well-tested) plugin into a Ruby gem.
+
+### Build [_build_4]
+
+You already have all the necessary ingredients, so let’s go ahead and run the build command:
+
+```sh
+gem build logstash-output-example.gemspec
+```
+
+That’s it! Your gem should now be built and located in the same path, with the name
+
+```sh
+logstash-output-mypluginname-0.1.0.gem
+```
+
+The `s.version` number from your gemspec file will provide the gem version, in this case, `0.1.0`.
+
+
+### Test installation [_test_installation_4]
+
+You should test install your plugin into a clean installation of Logstash. Download the latest version from the [Logstash downloads page](https://www.elastic.co/downloads/logstash/).
+
+1. Untar and cd into the directory:
+
+ ```sh
+ curl -O https://download.elastic.co/logstash/logstash/logstash-9.0.0.tar.gz
+ tar xzvf logstash-9.0.0.tar.gz
+ cd logstash-9.0.0
+ ```
+
+2. Using the plugin tool, we can install the gem we just built.
+
+ * Replace `/my/logstash/plugins` with the correct path to the gem for your environment, and `0.1.0` with the correct version number from the gemspec file.
+
+ ```sh
+ bin/logstash-plugin install /my/logstash/plugins/logstash-output-example/logstash-output-example-0.1.0.gem
+ ```
+
+ * After running this, you should see feedback from Logstash that it was successfully installed:
+
+ ```sh
+ validating /my/logstash/plugins/logstash-output-example/logstash-output-example-0.1.0.gem >= 0
+ Valid logstash plugin. Continuing...
+ Successfully installed 'logstash-output-example' with version '0.1.0'
+ ```
+
+ ::::{tip}
+ You can also use the Logstash plugin tool to determine which plugins are currently available:
+
+ ```sh
+ bin/logstash-plugin list
+ ```
+
+ Depending on what you have installed, you might see a short or long list of plugins: inputs, codecs, filters and outputs.
+
+ ::::
+
+3. Now try running Logstash with a simple configuration passed in via the command-line, using the `-e` flag.
+
+ ::::{note}
+ Your results will depend on what your output plugin is designed to do.
+ ::::
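+
+    For example, to read lines from stdin and send them to your new output (using the hypothetical plugin name from the earlier steps):
+
+    ```sh
+    bin/logstash -e 'input { stdin { } } output { mypluginname { } }'
+    ```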
+
+
+Congratulations! You’ve built, deployed and successfully run a Logstash output.
+
+
+
+## Submitting your plugin to [RubyGems.org](http://rubygems.org) and [logstash-plugins](https://github.com/logstash-plugins) [_submitting_your_plugin_to_rubygems_orghttprubygems_org_and_logstash_pluginshttpsgithub_comlogstash_plugins_4]
+
+Logstash uses [RubyGems.org](http://rubygems.org) as its repository for all plugin artifacts. Once you have developed your new plugin, you can make it available to Logstash users by simply publishing it to RubyGems.org.
+
+### Licensing [_licensing_4]
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elasticsearch/logstash/blob/main/LICENSE). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+
+### Publishing to [RubyGems.org](http://rubygems.org) [_publishing_to_rubygems_orghttprubygems_org_4]
+
+To begin, you’ll need an account on RubyGems.org
+
+* [Sign-up for a RubyGems account](https://rubygems.org/sign_up).
+
+After creating an account, [obtain](http://guides.rubygems.org/rubygems-org-api/#api-authorization) an API key from RubyGems.org. By default, RubyGems uses the file `~/.gem/credentials` to store your API key. These credentials will be used to publish the gem. Replace `username` and `password` with the credentials you created at RubyGems.org:
+
+```sh
+curl -u username:password https://rubygems.org/api/v1/api_key.yaml > ~/.gem/credentials
+chmod 0600 ~/.gem/credentials
+```
+
+Before proceeding, make sure you have the right version in your gemspec file and commit your changes.
+
+* `s.version = '0.1.0'`
+
+To publish version 0.1.0 of your new logstash gem:
+
+```sh
+bundle install
+bundle exec rake vendor
+bundle exec rspec
+bundle exec rake publish_gem
+```
+
+::::{note}
+Executing `rake publish_gem`:
+
+1. Reads the version from the gemspec file (`s.version = '0.1.0'`)
+2. Checks in your local repository if a tag exists for that version. If the tag already exists, it aborts the process. Otherwise, it creates a new version tag in your local repository.
+3. Builds the gem
+4. Publishes the gem to RubyGems.org
+
+::::
+
+
+That’s it! Your plugin is published! Logstash users can now install your plugin by running:
+
+```sh
+bin/logstash-plugin install logstash-output-mypluginname
+```
+
+
+
+## Contributing your source code to [logstash-plugins](https://github.com/logstash-plugins) [_contributing_your_source_code_to_logstash_pluginshttpsgithub_comlogstash_plugins_4]
+
+You are not required to contribute your source code to the [logstash-plugins](https://github.com/logstash-plugins) GitHub organization, but we always welcome new plugins!
+
+### Benefits [_benefits_4]
+
+Some of the many benefits of having your plugin in the logstash-plugins repository are:
+
+* **Discovery.** Your plugin will appear in the [Logstash Reference](/reference/index.md), where Logstash users look first for plugins and documentation.
+* **Documentation.** Your plugin documentation will automatically be added to the [Logstash Reference](/reference/index.md).
+* **Testing.** With our testing infrastructure, your plugin will be continuously tested against current and future releases of Logstash. As a result, users will have the assurance that if incompatibilities arise, they will be quickly discovered and corrected.
+
+
+### Acceptance Guidelines [_acceptance_guidelines_4]
+
+* **Code Review.** Your plugin must be reviewed by members of the community for coherence, quality, readability, stability and security.
+* **Tests.** Your plugin must contain tests to be accepted. These tests are also subject to code review for scope and completeness. It’s ok if you don’t know how to write tests — we will guide you. We are working on publishing a guide to creating tests for Logstash which will make it easier. In the meantime, you can refer to [http://betterspecs.org/](http://betterspecs.org/) for examples.
+
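+If you’re starting from scratch, a minimal spec for the example output plugin might look like the sketch below. The file, class, and plugin names are placeholders, and it assumes the `logstash-devutils` spec helper that plugin gemspecs typically pull in:
+
+```ruby
+# spec/outputs/mypluginname_spec.rb (illustrative only)
+require "logstash/devutils/rspec/spec_helper"
+require "logstash/outputs/mypluginname"
+
+describe LogStash::Outputs::Mypluginname do
+  let(:output) { described_class.new }
+  let(:event)  { LogStash::Event.new("message" => "hello world") }
+
+  it "registers without raising an error" do
+    expect { output.register }.not_to raise_error
+  end
+
+  it "accepts an event" do
+    output.register
+    expect { output.receive(event) }.not_to raise_error
+  end
+end
+```
+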
+To begin migrating your plugin to logstash-plugins, simply create a new [issue](https://github.com/elasticsearch/logstash/issues) in the Logstash repository. When the acceptance guidelines are completed, we will facilitate the move to the logstash-plugins organization using the recommended [github process](https://help.github.com/articles/transferring-a-repository/#transferring-from-a-user-to-an-organization).
diff --git a/docs/extend/plugin-doc.md b/docs/extend/plugin-doc.md
new file mode 100644
index 000000000..b3c4b3271
--- /dev/null
+++ b/docs/extend/plugin-doc.md
@@ -0,0 +1,172 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugin-doc.html
+---
+
+# Document your plugin [plugin-doc]
+
+Documentation is a required component of your plugin. Quality documentation with good examples contributes to the adoption of your plugin.
+
+The documentation that you write for your plugin will be generated and published in the [Logstash Reference](/reference/index.md) and the [Logstash Versioned Plugin Reference](logstash-docs://reference/integration-plugins.md).
+
+::::{admonition} Plugin listing in {{ls}} Reference
+:class: note
+
+We may list your plugin in the [Logstash Reference](/reference/index.md) if it meets our [requirements and quality standards](/extend/index.md#plugin-acceptance). When we list your plugin, we point to *your* documentation—a readme.md, docs/index.asciidoc, or both—in your plugin repo. For more info on this option, see [List your plugin](/extend/plugin-listing.md).
+
+::::
+
+
+The following sections contain guidelines for documenting plugins hosted in the GitHub [logstash-plugins](https://github.com/logstash-plugins/) organization.
+
+## Documentation file [plugin-doc-file]
+
+Documentation belongs in a single file called *docs/index.asciidoc*. The [plugin generation utility](/reference/plugin-generator.md) creates a starter file for you.
+
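+As a rough orientation, the top of a plugin’s `docs/index.asciidoc` usually declares the attributes that the doc build substitutes into headings and links, along these lines (the values are placeholders; the starter file created by the generator is the authoritative template):
+
+```txt
+:plugin: mypluginname
+:type: output
+
+[id="plugins-{type}s-{plugin}"]
+=== Mypluginname output plugin
+
+==== Description
+
+A short description of what the plugin does.
+```
+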
+
+## Heading IDs [heading-ids]
+
+Format heading anchors with variables that can support generated IDs. This approach creates unique IDs when the [Logstash Versioned Plugin Reference](logstash-docs://reference/integration-plugins.md) is built. Unique heading IDs are required to avoid duplication over multiple versions of a plugin.
+
+**Example**
+
+Don’t hardcode a plugin heading ID like this: `[[config_models]]`
+
+Instead, use variables to define it:
+
+```txt
+[id="plugins-{type}s-{plugin}-config_models"]
+==== Configuration models
+```
+
+If you hardcode an ID, the [Logstash Versioned Plugin Reference](logstash-docs://reference/integration-plugins.md) builds correctly the first time. The second time the doc build runs, the ID is flagged as a duplicate, and the build fails.
+
+
+## Link formats [link-format]
+
+Correct link formatting is essential for directing users to the content you want them to see. Incorrect link formatting or duplicate links can break the documentation build. Let’s not do that.
+
+### Link to content in the same file [_link_to_content_in_the_same_file]
+
+Use angle brackets to format links to content in the same asciidoc file.
+
+**Example**
+
+This link:
+
+```txt
+<<plugins-{type}s-{plugin}-config_models>>
+```
+
+Points to this heading in the same file:
+
+```txt
+[id="plugins-{type}s-{plugin}-config_models"]
+==== Configuration models
+```
+
+
+### Link to content in the Logstash Reference Guide [_link_to_content_in_the_logstash_reference_guide]
+
+Use external link syntax for links that point to documentation for other plugins or content in the Logstash Reference Guide.
+
+**Examples**
+
+```txt
+{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin]
+```
+
+```txt
+{logstash-ref}/getting-started-with-logstash.html
+```
+
+
+### Link text [_link_text]
+
+If you don’t specify link text, the URL is used as the link text.
+
+**Examples**
+
+If you want your link to display as {{logstash-ref}}/getting-started-with-logstash.html, use this format:
+
+```txt
+{logstash-ref}/getting-started-with-logstash.html
+```
+
+If you want your link to display as [Getting Started with Logstash](/reference/getting-started-with-logstash.md), use this format:
+
+```txt
+{logstash-ref}/getting-started-with-logstash.html[Getting Started with Logstash]
+```
+
+
+### Link to data type descriptions [_link_to_data_type_descriptions]
+
+We make an exception for links that point to data type descriptions, such as `<<string,string>>`, because they are used so frequently. We have a cleanup step in the conversion script that converts these links to the correct syntax.
+
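+In practice, these data type links appear in the option descriptions of a plugin doc, for example (an illustrative snippet, not taken from a specific plugin):
+
+```txt
+[id="plugins-{type}s-{plugin}-some_option"]
+===== `some_option`
+
+* Value type is <<string,string>>
+* Default value is `"plain"`
+```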
+
+
+## Code samples [format-code]
+
+We all love code samples. Asciidoc supports code blocks and config examples. To include Ruby code, use the asciidoc `[source,ruby]` directive.
+
+Note that the hashmarks (#) are present to make the example render correctly. Don’t include the hashmarks in your asciidoc file.
+
+```txt
+# [source,ruby]
+# -----
+# match => {
+# "field1" => "value1"
+# "field2" => "value2"
+# ...
+# }
+# -----
+```
+
+The sample above (with hashmarks removed) renders in the documentation like this:
+
+```ruby
+match => {
+ "field1" => "value1"
+ "field2" => "value2"
+ ...
+}
+```
+
+
+## Where’s my doc? [_wheres_my_doc]
+
+Plugin documentation goes through several steps before it gets published in the [Logstash Versioned Plugin Reference](logstash-docs://reference/integration-plugins.md) and the [Logstash Reference](/reference/index.md).
+
+Here’s an overview of the workflow:
+
+* Be sure that you have signed the contributor license agreement (CLA) and have all necessary approvals and sign-offs.
+* Merge the pull request for your plugin (including the `index.asciidoc` file, the `changelog.md` file, and the gemspec).
+* Wait for the continuous integration build to complete successfully.
+* Publish the plugin to [https://rubygems.org](https://rubygems.org).
+* A script detects the new or changed version, and picks up the `index.asciidoc` file for inclusion in the doc build.
+* The documentation for your new plugin is published in the [Logstash Versioned Plugin Reference](logstash-docs://reference/integration-plugins.md).
+
+We’re not done yet.
+
+* For each release, we package the new and changed documentation files into a pull request to add or update content. (We sometimes package plugin docs between releases if we make significant changes to plugin documentation or add a new plugin.)
+* The script detects the new or changed version, and picks up the `index.asciidoc` file for inclusion in the doc build.
+* We create a pull request, and merge the new and changed content into the appropriate version branches.
+* For a new plugin, we add a link to the list of plugins in the [Logstash Reference](/reference/index.md).
+* The documentation for your new (or changed) plugin is published in the [Logstash Reference](/reference/index.md).
+
+### Documentation or plugin updates [_documentation_or_plugin_updates]
+
+When you make updates to your plugin or the documentation, consider bumping the version number in the changelog and gemspec (or version file). The version change triggers the doc build to pick up your changes for publishing.
+
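+For example, a docs-only fix is typically released by adding a changelog entry and bumping the patch version (the entry text and version numbers here are illustrative):
+
+```txt
+## 0.1.1
+  - Docs: Clarify the description of the `some_option` setting
+```
+
+Then bump the version in the gemspec (or version file) to match, for example `s.version = '0.1.1'`.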
+
+
+## Resources [_resources]
+
+For more asciidoc formatting tips, see the excellent reference at [https://github.com/elastic/docs#asciidoc-guide](https://github.com/elastic/docs#asciidoc-guide).
+
+For tips on contributing and changelog guidelines, see [CONTRIBUTING.md](https://github.com/elastic/logstash/blob/main/CONTRIBUTING.md#logstash-plugin-changelog-guidelines).
+
+For general information about contributing, see [Contributing to Logstash](/extend/index.md).
+
+
diff --git a/docs/extend/plugin-listing.md b/docs/extend/plugin-listing.md
new file mode 100644
index 000000000..48807a404
--- /dev/null
+++ b/docs/extend/plugin-listing.md
@@ -0,0 +1,23 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugin-listing.html
+---
+
+# List your plugin [plugin-listing]
+
+The [Logstash Reference](/reference/index.md) is the first place {{ls}} users look for plugins and documentation. If your plugin meets the [quality and acceptance guidelines](/extend/index.md#plugin-acceptance), we may be able to list it in the guide.
+
+The plugin source and documentation will continue to live in your repo, and we will direct users there.
+
+If you would like to have your plugin included in the [Logstash Reference](/reference/index.md), create a new [issue](https://github.com/elasticsearch/logstash/issues) in the Logstash repository with the following information:
+
+* Title: `PluginListing: <your-plugin-name>`
+* Body:
+
+ * Brief description of the plugin (what it is and what it does).
+ * Link to the plugin repository.
+ * Link to the README.md or docs/index.asciidoc.
+ * Describe how your plugin meets our [quality and acceptance guidelines](/extend/index.md#plugin-acceptance).
+
+* Labels: `docs`, `new-plugin`
+
diff --git a/docs/extend/publish-plugin.md b/docs/extend/publish-plugin.md
new file mode 100644
index 000000000..13294b2b5
--- /dev/null
+++ b/docs/extend/publish-plugin.md
@@ -0,0 +1,62 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/publish-plugin.html
+---
+
+# Publish your plugin to RubyGems.org [publish-plugin]
+
+Logstash uses [RubyGems.org](http://rubygems.org) as its repository for all plugin artifacts. After you have developed your new plugin, you can make it available to Logstash users by publishing it to RubyGems.org.
+
+## Licensing [_licensing_5]
+
+Logstash and all its plugins are licensed under [Apache License, version 2 ("ALv2")](https://github.com/elasticsearch/logstash/blob/main/LICENSE). If you make your plugin publicly available via [RubyGems.org](http://rubygems.org), please make sure to have this line in your gemspec:
+
+* `s.licenses = ['Apache License (2.0)']`
+
+
+## Publish to [RubyGems.org](http://rubygems.org) [_publish_to_rubygems_orghttprubygems_org]
+
+You’ll need an account on RubyGems.org:
+
+* [Sign-up for a RubyGems account](https://rubygems.org/sign_up).
+
+After creating an account, [obtain](http://guides.rubygems.org/rubygems-org-api/#api-authorization) an API key from RubyGems.org. By default, RubyGems uses the file `~/.gem/credentials` to store your API key. These credentials will be used to publish the gem. Replace `username` and `password` with the credentials you created at RubyGems.org:
+
+```sh
+curl -u username:password https://rubygems.org/api/v1/api_key.yaml > ~/.gem/credentials
+chmod 0600 ~/.gem/credentials
+```
+
+Before proceeding, make sure you have the right version in your gemspec file and commit your changes.
+
+* `s.version = '0.1.0'`
+
+To publish version 0.1.0 of your new logstash gem:
+
+```sh
+bundle install
+bundle exec rake vendor
+bundle exec rspec
+bundle exec rake publish_gem
+```
+
+::::{note}
+Executing `rake publish_gem`:
+
+1. Reads the version from the gemspec file (`s.version = '0.1.0'`)
+2. Checks in your local repository if a tag exists for that version. If the tag already exists, it aborts the process. Otherwise, it creates a new version tag in your local repository.
+3. Builds the gem
+4. Publishes the gem to RubyGems.org
+
+::::
+
+
+That’s it! Your plugin is published! Logstash users can now install your plugin by running:
+
+```sh
+bin/logstash-plugin install logstash-output-mypluginname
+```
+
+Where `output` is the plugin type (`input`, `output`, `filter`, or `codec`), and `mypluginname` is the name of your new plugin.
+
+
diff --git a/docs/extend/toc.yml b/docs/extend/toc.yml
new file mode 100644
index 000000000..225f36590
--- /dev/null
+++ b/docs/extend/toc.yml
@@ -0,0 +1,18 @@
+toc:
+ - file: index.md
+ - file: input-new-plugin.md
+ - file: codec-new-plugin.md
+ - file: filter-new-plugin.md
+ - file: output-new-plugin.md
+ - file: community-maintainer.md
+ - file: plugin-doc.md
+ - file: publish-plugin.md
+ - file: plugin-listing.md
+ - file: contributing-patch-plugin.md
+ - file: contribute-to-core.md
+ - file: create-logstash-plugins.md
+ children:
+ - file: java-input-plugin.md
+ - file: java-codec-plugin.md
+ - file: java-filter-plugin.md
+ - file: java-output-plugin.md
\ No newline at end of file
diff --git a/docs/gs-index.asciidoc b/docs/gs-index.asciidoc
deleted file mode 100644
index feec48b87..000000000
--- a/docs/gs-index.asciidoc
+++ /dev/null
@@ -1,38 +0,0 @@
-[[logstash-reference]]
-= Logstash Reference
-
-:branch: 5.4
-:major-version: 5.4
-:logstash_version: 5.4.0
-:elasticsearch_version: 5.4.0
-:docker-image: docker.elastic.co/logstash/logstash:{logstash_version}
-
-//////////
-release-state can be: released | prerelease | unreleased
-//////////
-:release-state: released
-
-:jdk: 1.8.0
-:guide: https://www.elastic.co/guide/en/elasticsearch/guide/current/
-:ref: https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/
-:xpack: https://www.elastic.co/guide/en/x-pack/{branch}/
-:logstash: https://www.elastic.co/guide/en/logstash/{branch}/
-:filebeat: https://www.elastic.co/guide/en/beats/filebeat/{branch}/
-:lsissue: https://github.com/elastic/logstash/issues/
-:security: X-Pack Security
-
-[[introduction]]
-== Logstash Introduction
-
-Logstash is an open source data collection engine with real-time pipelining capabilities. Logstash can dynamically
-unify data from disparate sources and normalize the data into destinations of your choice. Cleanse and democratize all
-your data for diverse advanced downstream analytics and visualization use cases.
-
-While Logstash originally drove innovation in log collection, its capabilities extend well beyond that use case. Any
-type of event can be enriched and transformed with a broad array of input, filter, and output plugins, with many
-native codecs further simplifying the ingestion process. Logstash accelerates your insights by harnessing a greater
-volume and variety of data.
-
-include::static/introduction.asciidoc[]
-
-include::static/getting-started-with-logstash.asciidoc[]
\ No newline at end of file
diff --git a/docs/static/images/basic_logstash_pipeline.png b/docs/images/basic_logstash_pipeline.png
similarity index 100%
rename from docs/static/images/basic_logstash_pipeline.png
rename to docs/images/basic_logstash_pipeline.png
diff --git a/docs/static/management/images/centralized_config.png b/docs/images/centralized_config.png
similarity index 100%
rename from docs/static/management/images/centralized_config.png
rename to docs/images/centralized_config.png
diff --git a/docs/static/images/dead_letter_queue.png b/docs/images/dead_letter_queue.png
similarity index 100%
rename from docs/static/images/dead_letter_queue.png
rename to docs/images/dead_letter_queue.png
diff --git a/docs/static/images/deploy1.png b/docs/images/deploy1.png
similarity index 100%
rename from docs/static/images/deploy1.png
rename to docs/images/deploy1.png
diff --git a/docs/static/images/deploy2.png b/docs/images/deploy2.png
similarity index 100%
rename from docs/static/images/deploy2.png
rename to docs/images/deploy2.png
diff --git a/docs/static/images/deploy3.png b/docs/images/deploy3.png
similarity index 100%
rename from docs/static/images/deploy3.png
rename to docs/images/deploy3.png
diff --git a/docs/static/images/deploy4.png b/docs/images/deploy4.png
similarity index 100%
rename from docs/static/images/deploy4.png
rename to docs/images/deploy4.png
diff --git a/docs/static/monitoring/images/integration-assets-dashboards.png b/docs/images/integration-assets-dashboards.png
similarity index 100%
rename from docs/static/monitoring/images/integration-assets-dashboards.png
rename to docs/images/integration-assets-dashboards.png
diff --git a/docs/static/monitoring/images/integration-dashboard-overview.png b/docs/images/integration-dashboard-overview.png
similarity index 100%
rename from docs/static/monitoring/images/integration-dashboard-overview.png
rename to docs/images/integration-dashboard-overview.png
diff --git a/docs/static/images/kibana-filebeat-data.png b/docs/images/kibana-filebeat-data.png
similarity index 100%
rename from docs/static/images/kibana-filebeat-data.png
rename to docs/images/kibana-filebeat-data.png
diff --git a/docs/static/monitoring/images/kibana-home.png b/docs/images/kibana-home.png
similarity index 100%
rename from docs/static/monitoring/images/kibana-home.png
rename to docs/images/kibana-home.png
diff --git a/docs/static/monitoring/images/monitoring-ui.png b/docs/images/monitoring-ui.png
similarity index 100%
rename from docs/static/monitoring/images/monitoring-ui.png
rename to docs/images/monitoring-ui.png
diff --git a/docs/static/monitoring/images/nodestats.png b/docs/images/nodestats.png
similarity index 100%
rename from docs/static/monitoring/images/nodestats.png
rename to docs/images/nodestats.png
diff --git a/docs/static/monitoring/images/overviewstats.png b/docs/images/overviewstats.png
similarity index 100%
rename from docs/static/monitoring/images/overviewstats.png
rename to docs/images/overviewstats.png
diff --git a/docs/static/monitoring/images/pipeline-input-detail.png b/docs/images/pipeline-input-detail.png
similarity index 100%
rename from docs/static/monitoring/images/pipeline-input-detail.png
rename to docs/images/pipeline-input-detail.png
diff --git a/docs/static/monitoring/images/pipeline-tree.png b/docs/images/pipeline-tree.png
similarity index 100%
rename from docs/static/monitoring/images/pipeline-tree.png
rename to docs/images/pipeline-tree.png
diff --git a/docs/static/images/pipeline_correct_load.png b/docs/images/pipeline_correct_load.png
similarity index 100%
rename from docs/static/images/pipeline_correct_load.png
rename to docs/images/pipeline_correct_load.png
diff --git a/docs/static/images/pipeline_overload.png b/docs/images/pipeline_overload.png
similarity index 100%
rename from docs/static/images/pipeline_overload.png
rename to docs/images/pipeline_overload.png
diff --git a/docs/include/attributes-ls.asciidoc b/docs/include/attributes-ls.asciidoc
deleted file mode 100644
index 714982cad..000000000
--- a/docs/include/attributes-ls.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-/////
-These settings control attributes for Logstash core content
-in the Logstash Reference (LSR) only.
-
-Shared attributes for the plugin docs (in the LSR and VPR) should
-go in /docs/include/attributes-lsplugins.asciidoc instead
-with a corresponding change to the VPR settings in
-logstash-docs/docs/versioned-plugins/include/attributes-ls-vpr.asciidoc
-/////
-
diff --git a/docs/include/attributes-lsplugins.asciidoc b/docs/include/attributes-lsplugins.asciidoc
deleted file mode 100644
index 674bcc03c..000000000
--- a/docs/include/attributes-lsplugins.asciidoc
+++ /dev/null
@@ -1,13 +0,0 @@
-/////
-These settings control attributes in the LSR only.
-They correspond to the VPR settings in logstash-docs/docs/versioned-plugins/include/attributes-ls-vpr.asciidoc
-When we update one, we must update settings in the other location,
-
-Attribute text formatted without hard wrap is deliberate.
-Otherwise, text breaks at return and content after the return is dropped.
-
-Text is written to accommodate multiple versions because plugins are not stack versioned.
-/////
-
-
-:ecs-default: When the `ecs_compatibility` option for this plugin is not explicitly set, its effective value depends on the `pipeline.ecs_compatibility` setting for the pipeline in `pipelines.yml`, or globally in {logstash-ref}/logstash-settings-file.html[`logstash.yml`], allowing you to specify your preferred behavior at the plugin, pipeline, or system level. If no preference is specified, the default value is `v8` for Logstash 8 or `disabled` for all earlier releases of Logstash. For more information about ECS compatibility settings in Logstash and plugins, see {logstash-ref}/ecs-ls.html[ECS in Logstash].
diff --git a/docs/include/filter.asciidoc b/docs/include/filter.asciidoc
deleted file mode 100644
index 9cd984f29..000000000
--- a/docs/include/filter.asciidoc
+++ /dev/null
@@ -1,234 +0,0 @@
-==== Common options
-
-// Contributors: You must conditionally code all internal links and IDs in this
-// file to make the common files work in both the LS Reference and the versioned
-// plugin docs
-
-These configuration options are supported by all filter plugins:
-
-ifeval::["{versioned_docs}"!="true"]
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
-| <> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-|=======================================================================
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
-| <<{version}-plugins-{type}s-{plugin}-add_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <<{version}-plugins-{type}s-{plugin}-periodic_flush>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <<{version}-plugins-{type}s-{plugin}-remove_field>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <<{version}-plugins-{type}s-{plugin}-remove_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-|=======================================================================
-endif::[]
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-add_field"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-add_field"]
-endif::[]
-===== `add_field`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
- * Default value is `{}`
-
-If this filter is successful, add any arbitrary fields to this event.
-Field names can be dynamic and include parts of the event using the `%{field}`.
-
-Example:
-
-["source","json",subs="attributes"]
- filter {
- {plugin} {
- add_field => { "foo_%\{somefield\}" => "Hello world, from %\{host\}" }
- }
- }
-
-["source","json",subs="attributes"]
- # You can also add multiple fields at once:
- filter {
- {plugin} {
- add_field => {
- "foo_%\{somefield\}" => "Hello world, from %\{host\}"
- "new_field" => "new_static_value"
- }
- }
- }
-
-If the event has field `"somefield" == "hello"` this filter, on success,
-would add field `foo_hello` if it is present, with the
-value above and the `%{host}` piece replaced with that value from the
-event. The second example would also add a hardcoded field.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-add_tag"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-add_tag"]
-endif::[]
-===== `add_tag`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
- * Default value is `[]`
-
-If this filter is successful, add arbitrary tags to the event.
-Tags can be dynamic and include parts of the event using the `%{field}`
-syntax.
-
-Example:
-
-["source","json",subs="attributes"]
- filter {
- {plugin} {
- add_tag => [ "foo_%\{somefield\}" ]
- }
- }
-
-["source","json",subs="attributes"]
- # You can also add multiple tags at once:
- filter {
- {plugin} {
- add_tag => [ "foo_%\{somefield\}", "taggedy_tag"]
- }
- }
-
-If the event has field `"somefield" == "hello"` this filter, on success,
-would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-===== `enable_metric`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Disable or enable metric logging for this specific plugin instance.
-By default we record all the metrics we can, but you can disable metrics collection
-for a specific plugin.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-id"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-id"]
-endif::[]
-===== `id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one.
-It is strongly recommended to set this ID in your configuration. This is particularly useful
-when you have two or more plugins of the same type, for example, if you have 2 {plugin} filters.
-Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
-
-
-["source","json",subs="attributes"]
- filter {
- {plugin} {
- id => "ABC"
- }
- }
-
-NOTE: Variable substitution in the `id` field only supports environment variables
- and does not support the use of values from the secret store.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-periodic_flush"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-periodic_flush"]
-endif::[]
-===== `periodic_flush`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-Call the filter flush method at regular interval.
-Optional.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-remove_field"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-remove_field"]
-endif::[]
-===== `remove_field`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
- * Default value is `[]`
-
-If this filter is successful, remove arbitrary fields from this event.
-Fields names can be dynamic and include parts of the event using the %{field}
-Example:
-
-["source","json",subs="attributes"]
- filter {
- {plugin} {
- remove_field => [ "foo_%\{somefield\}" ]
- }
- }
-
-["source","json",subs="attributes"]
- # You can also remove multiple fields at once:
- filter {
- {plugin} {
- remove_field => [ "foo_%\{somefield\}", "my_extraneous_field" ]
- }
- }
-
-If the event has field `"somefield" == "hello"` this filter, on success,
-would remove the field with name `foo_hello` if it is present. The second
-example would remove an additional, non-dynamic field.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-remove_tag"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-remove_tag"]
-endif::[]
-===== `remove_tag`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
- * Default value is `[]`
-
-If this filter is successful, remove arbitrary tags from the event.
-Tags can be dynamic and include parts of the event using the `%{field}`
-syntax.
-
-Example:
-
-["source","json",subs="attributes"]
- filter {
- {plugin} {
- remove_tag => [ "foo_%\{somefield\}" ]
- }
- }
-
-["source","json",subs="attributes"]
- # You can also remove multiple tags at once:
- filter {
- {plugin} {
- remove_tag => [ "foo_%\{somefield\}", "sad_unwanted_tag"]
- }
- }
-
-If the event has field `"somefield" == "hello"` this filter, on success,
-would remove the tag `foo_hello` if it is present. The second example
-would remove a sad, unwanted tag as well.
diff --git a/docs/include/input.asciidoc b/docs/include/input.asciidoc
deleted file mode 100644
index 5ef643aac..000000000
--- a/docs/include/input.asciidoc
+++ /dev/null
@@ -1,172 +0,0 @@
-==== Common options
-
-// Contributors: You must conditionally code all internal links and IDs in this
-// file to make the common files work in both the LS Reference and the versioned
-// plugin docs
-
-These configuration options are supported by all input plugins:
-
-[cols="<,<,<",options="header",]
-ifeval::["{versioned_docs}"!="true"]
-|=======================================================================
-|Setting |Input type|Required
-| <> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
-ifndef::no_codec[]
-| <> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No
-endif::no_codec[]
-| <> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-|=======================================================================
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
-ifndef::no_codec[]
-| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No
-endif::no_codec[]
-| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <<{version}-plugins-{type}s-{plugin}-tags>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
-| <<{version}-plugins-{type}s-{plugin}-type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-|=======================================================================
-endif::[]
-
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-add_field"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-add_field"]
-endif::[]
-===== `add_field`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
- * Default value is `{}`
-
-Add a field to an event
-
-ifndef::no_codec[]
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-codec"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-codec"]
-endif::[]
-===== `codec`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec]
-ifdef::default_codec[]
- * Default value is +"{default_codec}"+
-endif::[]
-ifndef::default_codec[]
- * Default value is `"plain"`
-endif::[]
-
-The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
-endif::no_codec[]
-
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-===== `enable_metric`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Disable or enable metric logging for this specific plugin instance
-by default we record all the metrics we can, but you can disable metrics collection
-for a specific plugin.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-id"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-id"]
-endif::[]
-===== `id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one.
-It is strongly recommended to set this ID in your configuration. This is particularly useful
-when you have two or more plugins of the same type, for example, if you have 2 {plugin} inputs.
-Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
-
-["source","json",subs="attributes"]
----------------------------------------------------------------------------------------------------
-input {
- {plugin} {
- id => "my_plugin_id"
- }
-}
----------------------------------------------------------------------------------------------------
-
-NOTE: Variable substitution in the `id` field only supports environment variables
- and does not support the use of values from the secret store.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-tags"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-tags"]
-endif::[]
-===== `tags`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
- * There is no default value for this setting.
-
-Add any number of arbitrary tags to your event.
-
-This can help with processing later.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-type"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-type"]
-endif::[]
-===== `type`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Add a `type` field to all events handled by this input.
-
-Types are used mainly for filter activation.
-
-The type is stored as part of the event itself, so you can
-also use the type to search for it in Kibana.
-
-If you try to set a type on an event that already has one (for
-example when you send an event from a shipper to an indexer) then
-a new input will not override the existing type. A type set at
-the shipper stays with that event for its life even
-when sent to another Logstash server.
-
-ifeval::["{type}"=="input"]
-ifeval::["{plugin}"=="beats"]
-
-ifeval::["{versioned_docs}"!="true"]
-NOTE: The Beats shipper automatically sets the `type` field on the event.
-You cannot override this setting in the Logstash config. If you specify
-a setting for the <> config option in
-Logstash, it is ignored.
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-NOTE: The Beats shipper automatically sets the `type` field on the event.
-You cannot override this setting in the Logstash config. If you specify
-a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in
-Logstash, it is ignored.
-endif::[]
-
-endif::[]
-endif::[]
-
diff --git a/docs/include/output.asciidoc b/docs/include/output.asciidoc
deleted file mode 100644
index 8e9453c4e..000000000
--- a/docs/include/output.asciidoc
+++ /dev/null
@@ -1,94 +0,0 @@
-==== Common options
-
-// Contributors: You must conditionally code all internal links and IDs in this
-// file to make the common files work in both the LS Reference and the versioned
-// plugin docs
-
-These configuration options are supported by all output plugins:
-
-ifeval::["{versioned_docs}"!="true"]
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-ifndef::no_codec[]
-| <> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No
-endif::no_codec[]
-| <> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-|=======================================================================
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-ifndef::no_codec[]
-| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No
-endif::no_codec[]
-| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-|=======================================================================
-endif::[]
-
-ifndef::no_codec[]
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-codec"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-codec"]
-endif::[]
-===== `codec`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec]
-ifdef::default_codec[]
- * Default value is +"{default_codec}"+
-endif::[]
-ifndef::default_codec[]
- * Default value is `"plain"`
-endif::[]
-
-The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
-endif::no_codec[]
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-enable_metric"]
-endif::[]
-===== `enable_metric`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Disable or enable metric logging for this specific plugin instance.
-By default we record all the metrics we can, but you can disable metrics collection
-for a specific plugin.
-
-ifeval::["{versioned_docs}"!="true"]
-[id="plugins-{type}s-{plugin}-id"]
-endif::[]
-ifeval::["{versioned_docs}"=="true"]
-[id="{version}-plugins-{type}s-{plugin}-id"]
-endif::[]
-===== `id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one.
-It is strongly recommended to set this ID in your configuration. This is particularly useful
-when you have two or more plugins of the same type. For example, if you have 2 {plugin} outputs.
-Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
-
-["source","json",subs="attributes"]
----------------------------------------------------------------------------------------------------
-output {
- {plugin} {
- id => "my_plugin_id"
- }
-}
----------------------------------------------------------------------------------------------------
-
-NOTE: Variable substitution in the `id` field only supports environment variables
- and does not support the use of values from the secret store.
-
diff --git a/docs/include/plugin_header-core.asciidoc b/docs/include/plugin_header-core.asciidoc
deleted file mode 100644
index eec4d5901..000000000
--- a/docs/include/plugin_header-core.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-[subs="attributes"]
-++++
-{plugin}
-++++
-
-*{ls} Core Plugin.* The {plugin} {type} plugin cannot be
-installed or uninstalled independently of {ls}.
-
-==== Getting help
-
-For questions about the plugin, open a topic in the
-http://discuss.elastic.co[Discuss] forums. For bugs or feature requests, open an
-issue in https://github.com/logstash[Github].
-
diff --git a/docs/include/plugin_header-integration.asciidoc b/docs/include/plugin_header-integration.asciidoc
deleted file mode 100644
index 525f84181..000000000
--- a/docs/include/plugin_header-integration.asciidoc
+++ /dev/null
@@ -1,19 +0,0 @@
-[subs="attributes"]
-++++
-{plugin}
-++++
-
-* A component of the <>
-* Integration version: {version}
-* Released on: {release_date}
-* {changelog_url}[Changelog]
-
-For other versions, see the
-{lsplugindocs}/{type}-{plugin}-index.html[Versioned plugin docs].
-
-==== Getting help
-
-For questions about the plugin, open a topic in the http://discuss.elastic.co[Discuss] forums.
-For bugs or feature requests, open an issue in https://github.com/logstash-plugins/logstash-integration-{integration}[Github].
-For the list of Elastic supported plugins, please consult the https://www.elastic.co/support/matrix#logstash_plugins[Elastic Support Matrix].
-
diff --git a/docs/include/plugin_header.asciidoc b/docs/include/plugin_header.asciidoc
deleted file mode 100644
index c8a169810..000000000
--- a/docs/include/plugin_header.asciidoc
+++ /dev/null
@@ -1,25 +0,0 @@
-[subs="attributes"]
-++++
-{plugin}
-++++
-
-* Plugin version: {version}
-* Released on: {release_date}
-* {changelog_url}[Changelog]
-
-For other versions, see the
-{lsplugindocs}/{type}-{plugin}-index.html[Versioned plugin docs].
-
-ifeval::["{default_plugin}"=="0"]
-
-==== Installation
-
-For plugins not bundled by default, it is easy to install by running +bin/logstash-plugin install logstash-{type}-{plugin}+. See {logstash-ref}/working-with-plugins.html[Working with plugins] for more details.
-
-endif::[]
-
-==== Getting help
-
-For questions about the plugin, open a topic in the http://discuss.elastic.co[Discuss] forums. For bugs or feature requests, open an issue in https://github.com/logstash-plugins/logstash-{type}-{plugin}[Github].
-For the list of Elastic supported plugins, please consult the https://www.elastic.co/support/matrix#logstash_plugins[Elastic Support Matrix].
-
diff --git a/docs/include/version-list-intro.asciidoc b/docs/include/version-list-intro.asciidoc
deleted file mode 100644
index c396d201c..000000000
--- a/docs/include/version-list-intro.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-[id="{type}-{plugin}-index"]
-
-== Versioned {plugin} {type} plugin docs
-[subs="attributes"]
-++++
-{plugin}
-++++
-
-This page lists all available versions of the documentation for this plugin.
-To see which version of the plugin you have installed, run `bin/logstash-plugin
-list --verbose`.
-
-NOTE: Versioned plugin documentation is not available for plugins released prior
-to Logstash 6.0.
diff --git a/docs/index.asciidoc b/docs/index.asciidoc
deleted file mode 100644
index cc7ce9da6..000000000
--- a/docs/index.asciidoc
+++ /dev/null
@@ -1,238 +0,0 @@
-[[logstash-reference]]
-= Logstash Reference
-
-include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[]
-include::{docs-root}/shared/attributes.asciidoc[]
-include::./include/attributes-ls.asciidoc[]
-include::./include/attributes-lsplugins.asciidoc[]
-
-:include-xpack: true
-:lang: en
-:xls-repo-dir: {docdir}/../x-pack/docs/{lang}
-:log-repo-dir: {docdir}
-:plugins-repo-dir: {docdir}/../../logstash-docs/docs
-:docker-repo: docker.elastic.co/logstash/logstash
-:docker-image: {docker-repo}:{logstash_version}
-
-:versioned_docs: false
-
-:jdk: 1.8.0
-:lsissue: https://github.com/elastic/logstash/issues
-:lsplugindocs: https://www.elastic.co/guide/en/logstash-versioned-plugins/current
-:tab-widget-dir: {docdir}/static/tab-widgets
-
-
-[[introduction]]
-== Logstash Introduction
-
-Logstash is an open source data collection engine with real-time pipelining capabilities. Logstash can dynamically
-unify data from disparate sources and normalize the data into destinations of your choice. Cleanse and democratize all
-your data for diverse advanced downstream analytics and visualization use cases.
-
-While Logstash originally drove innovation in log collection, its capabilities extend well beyond that use case. Any
-type of event can be enriched and transformed with a broad array of input, filter, and output plugins, with many
-native codecs further simplifying the ingestion process. Logstash accelerates your insights by harnessing a greater
-volume and variety of data.
-
-
-[serverless]
-.Logstash to {serverless-full}
-****
-You'll use the {ls} <> to send data to {serverless-full}.
-Note these differences between {es-serverless} and both {ess} and self-managed {es}:
-
-* Use *API keys* to access {serverless-full} from {ls}.
-Any user-based security settings in your in your <> configuration are ignored and may cause errors.
-* {serverless-full} uses *data streams* and {ref}/data-stream-lifecycle.html[{dlm} ({dlm-init})] instead of {ilm} ({ilm-init}).
-Any {ilm-init} settings in your <> configuration are ignored and may cause errors.
-* *{ls} monitoring* is available through the https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md[{ls} Integration] in {serverless-docs}/observability/what-is-observability-serverless[Elastic Observability] on {serverless-full}.
-
-
-.Known issue for {ls} to {es-serverless}.
-
-The logstash-output-elasticsearch `hosts` setting defaults to port :9200. Set the value to port :443 instead.
-****
-
-
-// The pass blocks here point to the correct repository for the edit links in the guide.
-
-// Introduction
-
-// Getting Started with Logstash
-include::static/getting-started-with-logstash.asciidoc[]
-
-// Advanced LS Pipelines
-include::static/advanced-pipeline.asciidoc[]
-
-// Processing Pipeline
-include::static/life-of-an-event.asciidoc[]
-
-// Elastic Common Schema (ECS)
-include::static/ecs-compatibility.asciidoc[]
-
-// Processing details
-include::static/processing-info.asciidoc[]
-
-// Logstash setup
-include::static/setting-up-logstash.asciidoc[]
-
-include::static/settings-file.asciidoc[]
-
-include::static/keystore.asciidoc[]
-
-include::static/running-logstash-command-line.asciidoc[]
-
-include::static/running-logstash.asciidoc[]
-
-include::static/docker.asciidoc[]
-
-include::static/running-logstash-kubernetes.asciidoc[]
-
-include::static/running-logstash-windows.asciidoc[]
-
-include::static/logging.asciidoc[]
-
-include::static/shutdown.asciidoc[]
-
-// Upgrading Logstash
-include::static/upgrading.asciidoc[]
-
-// Configuring pipelines
-include::static/pipeline-configuration.asciidoc[]
-
-// Security
-include::static/security/logstash.asciidoc[]
-
-// Advanced Logstash Configuration
-include::static/configuration-advanced.asciidoc[]
-
-include::static/multiple-pipelines.asciidoc[]
-
-include::static/pipeline-pipeline-config.asciidoc[]
-
-include::static/reloading-config.asciidoc[]
-
-include::static/managing-multiline-events.asciidoc[]
-
-include::static/glob-support.asciidoc[]
-
-include::static/field-reference.asciidoc[]
-
-//The `field-reference.asciidoc` file (included above) contains a
-//`role="exclude"` attribute to pull in the topic and make it linkable in the LS
-//Ref, but not appear in the main TOC. The `exclude`attribute was carrying
-//forward for all subsequent topics under the `configuration.asciidoc` heading.
-//This include should remain after includes for all other topics under the
-//`Advanced Logstash Configuration` heading.
-
-// Logstash-to-Logstash
-include::static/ls-ls-config.asciidoc[]
-
-// Centralized configuration managements
-include::static/config-management.asciidoc[]
-
-include::static/management/configuring-centralized-pipelines.asciidoc[]
-
-// EA Integrations to Logstash
-// (Planting near module content for now. Will likely move it up in info architecture.)
-include::static/ea-integrations.asciidoc[]
-
-
-// Working with Filebeat Modules
-include::static/filebeat-modules.asciidoc[]
-
-// Working with Winlogbeat Modules
-include::static/winlogbeat-modules.asciidoc[]
-
-// Data resiliency
-include::static/resiliency.asciidoc[]
-
-include::static/mem-queue.asciidoc[]
-
-include::static/persistent-queues.asciidoc[]
-
-include::static/dead-letter-queues.asciidoc[]
-
-// Transforming Data
-include::static/transforming-data.asciidoc[]
-
-// Deploying & Scaling
-include::static/deploying.asciidoc[]
-
-// GeoIP Database Management
-include::static/geoip-database-management.asciidoc[]
-
-// Troubleshooting performance
-include::static/performance-checklist.asciidoc[]
-
-// Monitoring
-include::static/monitoring/monitoring-ea-intro.asciidoc[]
-
-include::static/monitoring/monitoring-overview.asciidoc[]
-
-include::static/monitoring/monitoring.asciidoc[]
-
-// Working with Plugins
-include::static/plugin-manager.asciidoc[]
-
-// These files do their own pass blocks
-
-include::{plugins-repo-dir}/plugins/integrations.asciidoc[]
-
-include::{plugins-repo-dir}/plugins/inputs.asciidoc[]
-
-include::{plugins-repo-dir}/plugins/outputs.asciidoc[]
-
-include::{plugins-repo-dir}/plugins/filters.asciidoc[]
-
-include::{plugins-repo-dir}/plugins/codecs.asciidoc[]
-
-// FAQ and Troubleshooting
-:edit_url!:
-include::static/best-practice.asciidoc[]
-
-include::static/config-details.asciidoc[]
-
-include::static/troubleshoot/troubleshooting.asciidoc[]
-
-// Contributing to Logstash
-:edit_url:
-include::static/contributing-to-logstash.asciidoc[]
-
-include::static/input.asciidoc[]
-
-include::static/codec.asciidoc[]
-
-include::static/filter.asciidoc[]
-
-include::static/output.asciidoc[]
-
-// Logstash Community Maintainer Guide
-include::static/maintainer-guide.asciidoc[]
-
-// Plugin doc guidelines
-include::static/doc-for-plugin.asciidoc[]
-
-// Submitting a Plugin
-include::static/submitting-a-plugin.asciidoc[]
-
-include::static/listing-a-plugin.asciidoc[]
-
-include::static/contributing-patch.asciidoc[]
-
-include::static/contribute-core.asciidoc[]
-
-// Contributing to Logstash - JAVA EDITION
-:edit_url:
-include::static/contributing-java-plugin.asciidoc[]
-
-// Breaking Changes
-include::static/breaking-changes.asciidoc[]
-
-// Release Notes
-include::static/releasenotes.asciidoc[]
-
-:edit_url:
-include::static/redirects.asciidoc[]
-
-:edit_url!:
diff --git a/docs/index.x.asciidoc b/docs/index.x.asciidoc
deleted file mode 100644
index 35204eef5..000000000
--- a/docs/index.x.asciidoc
+++ /dev/null
@@ -1 +0,0 @@
-include::index.asciidoc[]
diff --git a/docs/reference/advanced-logstash-configurations.md b/docs/reference/advanced-logstash-configurations.md
new file mode 100644
index 000000000..13e46133c
--- /dev/null
+++ b/docs/reference/advanced-logstash-configurations.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuration-advanced.html
+---
+
+# Advanced Logstash configurations [configuration-advanced]
+
+You can take {{ls}} beyond basic configuration to handle more advanced requirements, such as multiple pipelines, communication between {{ls}} pipelines, and multiple line events.
+
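+For example, running several pipelines side by side is configured in `pipelines.yml`; a minimal sketch (pipeline IDs and config paths are placeholders) looks roughly like this:
+
+```yaml
+- pipeline.id: apache-logs
+  path.config: "/usr/share/logstash/pipeline/apache.conf"
+- pipeline.id: syslog
+  path.config: "/usr/share/logstash/pipeline/syslog.conf"
+```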
+
+
+
+
+
diff --git a/docs/reference/advanced-pipeline.md b/docs/reference/advanced-pipeline.md
new file mode 100644
index 000000000..c9ae6ec10
--- /dev/null
+++ b/docs/reference/advanced-pipeline.md
@@ -0,0 +1,612 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/advanced-pipeline.html
+---
+
+# Parsing Logs with Logstash [advanced-pipeline]
+
+In [Stashing Your First Event](/reference/first-event.md), you created a basic Logstash pipeline to test your Logstash setup. In the real world, a Logstash pipeline is a bit more complex: it typically has one or more input, filter, and output plugins.
+
+In this section, you create a Logstash pipeline that uses Filebeat to take Apache web logs as input, parses those logs to create specific, named fields from the logs, and writes the parsed data to an Elasticsearch cluster. Rather than defining the pipeline configuration at the command line, you’ll define the pipeline in a config file.
+
+To get started, download the sample data set used in this example from [logstash-tutorial.log.gz](https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz), and then unpack the file.
+
+## Configuring Filebeat to Send Log Lines to Logstash [configuring-filebeat]
+
+Before you create the Logstash pipeline, you’ll configure Filebeat to send log lines to Logstash. The [Filebeat](https://github.com/elastic/beats/tree/main/filebeat) client is a lightweight, resource-friendly tool that collects logs from files on the server and forwards these logs to your Logstash instance for processing. Filebeat is designed for reliability and low latency. Filebeat has a light resource footprint on the host machine, and the [`Beats input`](/reference/plugins-inputs-beats.md) plugin minimizes the resource demands on the Logstash instance.
+
+::::{note}
+In a typical use case, Filebeat runs on a separate machine from the machine running your Logstash instance. For the purposes of this tutorial, Logstash and Filebeat are running on the same machine.
+::::
+
+
+The default Logstash installation includes the [`Beats input`](/reference/plugins-inputs-beats.md) plugin. The Beats input plugin enables Logstash to receive events from the Elastic Beats framework, which means that any Beat written to work with the Beats framework, such as Packetbeat and Metricbeat, can also send event data to Logstash.
+
+To install Filebeat on your data source machine, download the appropriate package from the Filebeat [product page](https://www.elastic.co/downloads/beats/filebeat). You can also refer to [Filebeat quick start](beats://reference/filebeat/filebeat-installation-configuration.md) for additional installation instructions.
+
+After installing Filebeat, you need to configure it. Open the `filebeat.yml` file located in your Filebeat installation directory, and replace the contents with the following lines. Make sure `paths` points to the example Apache log file, `logstash-tutorial.log`, that you downloaded earlier:
+
+```yaml
+filebeat.inputs:
+- type: log
+ paths:
+ - /path/to/file/logstash-tutorial.log <1>
+output.logstash:
+ hosts: ["localhost:5044"]
+```
+
+1. Absolute path to the file or files that Filebeat processes.
+
+
+Save your changes.
+
+To keep the configuration simple, you won’t specify TLS/SSL settings as you would in a real-world scenario.
+
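+In a production deployment, the `output.logstash` section of `filebeat.yml` would typically also carry TLS settings along these lines (host and file paths are placeholders; this tutorial leaves them out):
+
+```yaml
+output.logstash:
+  hosts: ["logstash.example.com:5044"]
+  ssl.certificate_authorities: ["/etc/pki/tls/certs/logstash-ca.crt"]
+  ssl.certificate: "/etc/pki/tls/certs/filebeat.crt"
+  ssl.key: "/etc/pki/tls/private/filebeat.key"
+```
+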
+At the data source machine, run Filebeat with the following command:
+
+```shell
+sudo ./filebeat -e -c filebeat.yml -d "publish"
+```
+
+::::{note}
+If you run Filebeat as root, you need to change ownership of the configuration file (see [Config File Ownership and Permissions](beats://reference/libbeat/config-file-permissions.md) in the *Beats Platform Reference*).
+::::
+
+
+Filebeat will attempt to connect on port 5044. Until Logstash starts with an active Beats plugin, there won’t be any answer on that port, so any messages you see regarding failure to connect on that port are normal for now.
+
+
+## Configuring Logstash for Filebeat Input [_configuring_logstash_for_filebeat_input]
+
+Next, you create a Logstash configuration pipeline that uses the Beats input plugin to receive events from Beats.
+
+The following text represents the skeleton of a configuration pipeline:
+
+```json
+# The # character at the beginning of a line indicates a comment. Use
+# comments to describe your configuration.
+input {
+}
+# The filter part of this file is commented out to indicate that it is
+# optional.
+# filter {
+#
+# }
+output {
+}
+```
+
+This skeleton is non-functional, because the input and output sections don’t have any valid options defined.
+
+To get started, copy and paste the skeleton configuration pipeline into a file named `first-pipeline.conf` in your home Logstash directory.
+
+Next, configure your Logstash instance to use the Beats input plugin by adding the following lines to the `input` section of the `first-pipeline.conf` file:
+
+```json
+ beats {
+ port => "5044"
+ }
+```
+
+You’ll configure Logstash to write to Elasticsearch later. For now, you can add the following line to the `output` section so that the output is printed to stdout when you run Logstash:
+
+```json
+ stdout { codec => rubydebug }
+```
+
+When you’re done, the contents of `first-pipeline.conf` should look like this:
+
+```json
+input {
+ beats {
+ port => "5044"
+ }
+}
+# The filter part of this file is commented out to indicate that it is
+# optional.
+# filter {
+#
+# }
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+To verify your configuration, run the following command:
+
+```shell
+bin/logstash -f first-pipeline.conf --config.test_and_exit
+```
+
+The `--config.test_and_exit` option parses your configuration file and reports any errors.
+
+If the configuration file passes the configuration test, start Logstash with the following command:
+
+```shell
+bin/logstash -f first-pipeline.conf --config.reload.automatic
+```
+
+The `--config.reload.automatic` option enables automatic config reloading so that you don’t have to stop and restart Logstash every time you modify the configuration file.
+
+As Logstash starts up, you might see one or more warning messages about Logstash ignoring the `pipelines.yml` file. You can safely ignore this warning. The `pipelines.yml` file is used for running [multiple pipelines](/reference/multiple-pipelines.md) in a single Logstash instance. For the examples shown here, you are running a single pipeline.
+
+If your pipeline is working correctly, you should see a series of events like the following written to the console:
+
+```json
+{
+ "@timestamp" => 2017-11-09T01:44:20.071Z,
+ "offset" => 325,
+ "@version" => "1",
+ "beat" => {
+ "name" => "My-MacBook-Pro.local",
+ "hostname" => "My-MacBook-Pro.local",
+ "version" => "6.0.0"
+ },
+ "host" => "My-MacBook-Pro.local",
+ "prospector" => {
+ "type" => "log"
+ },
+ "input" => {
+ "type" => "log"
+ },
+ "source" => "/path/to/file/logstash-tutorial.log",
+ "message" => "83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] \"GET /presentations/logstash-monitorama-2013/images/kibana-search.png HTTP/1.1\" 200 203023 \"http://semicomplete.com/presentations/logstash-monitorama-2013/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
+ "tags" => [
+ [0] "beats_input_codec_plain_applied"
+ ]
+}
+...
+```
+
+
+### Parsing Web Logs with the Grok Filter Plugin [configuring-grok-filter]
+
+Now you have a working pipeline that reads log lines from Filebeat. However, you’ll notice that the format of the log messages is not ideal. You want to parse the log messages to create specific, named fields from the logs. To do this, you’ll use the `grok` filter plugin.
+
+The [`grok`](/reference/plugins-filters-grok.md) filter plugin is one of several plugins that are available by default in Logstash. For details on how to manage Logstash plugins, see the [reference documentation](/reference/working-with-plugins.md) for the plugin manager.
+
+The `grok` filter plugin enables you to parse the unstructured log data into something structured and queryable.
+
+Because the `grok` filter plugin looks for patterns in the incoming log data, configuring the plugin requires you to make decisions about how to identify the patterns that are of interest to your use case. A representative line from the web server log sample looks like this:
+
+```shell
+83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] "GET /presentations/logstash-monitorama-2013/images/kibana-search.png
+HTTP/1.1" 200 203023 "http://semicomplete.com/presentations/logstash-monitorama-2013/" "Mozilla/5.0 (Macintosh; Intel
+Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
+```
+
+The IP address at the beginning of the line is easy to identify, as is the timestamp in brackets. To parse the data, you can use the `%{{COMBINEDAPACHELOG}}` grok pattern, which structures lines from the Apache log using the following schema:
+
+| Information | Field Name |
+| --- | --- |
+| IP Address | `clientip` |
+| User ID | `ident` |
+| User Authentication | `auth` |
+| Timestamp | `timestamp` |
+| HTTP Verb | `verb` |
+| Request body | `request` |
+| HTTP Version | `httpversion` |
+| HTTP Status Code | `response` |
+| Bytes served | `bytes` |
+| Referrer URL | `referrer` |
+| User agent | `agent` |
+
+::::{tip}
+If you need help building grok patterns, try out the [Grok Debugger](docs-content://explore-analyze/query-filter/tools/grok-debugger.md). The Grok Debugger is an {{xpack}} feature under the Basic License and is therefore **free to use**.
+::::
+
+
+Edit the `first-pipeline.conf` file and replace the entire `filter` section with the following text:
+
+```json
+filter {
+ grok {
+ match => { "message" => "%{COMBINEDAPACHELOG}"}
+ }
+}
+```
+
+When you’re done, the contents of `first-pipeline.conf` should look like this:
+
+```json
+input {
+ beats {
+ port => "5044"
+ }
+}
+filter {
+ grok {
+ match => { "message" => "%{COMBINEDAPACHELOG}"}
+ }
+}
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+Save your changes. Because you’ve enabled automatic config reloading, you don’t have to restart Logstash to pick up your changes. However, you do need to force Filebeat to read the log file from scratch. To do this, go to the terminal window where Filebeat is running and press Ctrl+C to shut down Filebeat. Then delete the Filebeat registry file. For example, run:
+
+```shell
+sudo rm data/registry
+```
+
+Since Filebeat stores the state of each file it harvests in the registry, deleting the registry file forces Filebeat to read all the files it’s harvesting from scratch.
+
+Next, restart Filebeat with the following command:
+
+```shell
+sudo ./filebeat -e -c filebeat.yml -d "publish"
+```
+
+There might be a slight delay before Filebeat begins processing events if it needs to wait for Logstash to reload the config file.
+
+After Logstash applies the grok pattern, the events will have the following JSON representation:
+
+```json
+{
+ "request" => "/presentations/logstash-monitorama-2013/images/kibana-search.png",
+ "agent" => "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
+ "offset" => 325,
+ "auth" => "-",
+ "ident" => "-",
+ "verb" => "GET",
+ "prospector" => {
+ "type" => "log"
+ },
+ "input" => {
+ "type" => "log"
+ },
+ "source" => "/path/to/file/logstash-tutorial.log",
+ "message" => "83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] \"GET /presentations/logstash-monitorama-2013/images/kibana-search.png HTTP/1.1\" 200 203023 \"http://semicomplete.com/presentations/logstash-monitorama-2013/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
+ "tags" => [
+ [0] "beats_input_codec_plain_applied"
+ ],
+ "referrer" => "\"http://semicomplete.com/presentations/logstash-monitorama-2013/\"",
+ "@timestamp" => 2017-11-09T02:51:12.416Z,
+ "response" => "200",
+ "bytes" => "203023",
+ "clientip" => "83.149.9.216",
+ "@version" => "1",
+ "beat" => {
+ "name" => "My-MacBook-Pro.local",
+ "hostname" => "My-MacBook-Pro.local",
+ "version" => "6.0.0"
+ },
+ "host" => "My-MacBook-Pro.local",
+ "httpversion" => "1.1",
+ "timestamp" => "04/Jan/2015:05:13:42 +0000"
+}
+```
+
+Notice that the event includes the original message, but the log message is also broken down into specific fields.
+
+
+### Enhancing Your Data with the Geoip Filter Plugin [configuring-geoip-plugin]
+
+In addition to parsing log data for better searches, filter plugins can derive supplementary information from existing data. As an example, the [`geoip`](/reference/plugins-filters-geoip.md) plugin looks up IP addresses, derives geographic location information from the addresses, and adds that location information to the logs.
+
+Configure your Logstash instance to use the `geoip` filter plugin by adding the following lines to the `filter` section of the `first-pipeline.conf` file:
+
+```json
+ geoip {
+ source => "clientip"
+ }
+```
+
+The `geoip` plugin configuration requires you to specify the name of the source field that contains the IP address to look up. In this example, the `clientip` field contains the IP address.
+
+Since filters are evaluated in sequence, make sure that the `geoip` section is after the `grok` section of the configuration file and that both the `grok` and `geoip` sections are nested within the `filter` section.
+
+When you’re done, the contents of `first-pipeline.conf` should look like this:
+
+```json
+input {
+ beats {
+ port => "5044"
+ }
+}
+ filter {
+ grok {
+ match => { "message" => "%{COMBINEDAPACHELOG}"}
+ }
+ geoip {
+ source => "clientip"
+ }
+}
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+Save your changes. To force Filebeat to read the log file from scratch, as you did earlier, shut down Filebeat (press Ctrl+C), delete the registry file, and then restart Filebeat with the following command:
+
+```shell
+sudo ./filebeat -e -c filebeat.yml -d "publish"
+```
+
+Notice that the event now contains geographic location information:
+
+```json
+{
+ "request" => "/presentations/logstash-monitorama-2013/images/kibana-search.png",
+ "agent" => "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
+ "geoip" => {
+ "timezone" => "Europe/Moscow",
+ "ip" => "83.149.9.216",
+ "latitude" => 55.7485,
+ "continent_code" => "EU",
+ "city_name" => "Moscow",
+ "country_name" => "Russia",
+ "country_code2" => "RU",
+ "country_code3" => "RU",
+ "region_name" => "Moscow",
+ "location" => {
+ "lon" => 37.6184,
+ "lat" => 55.7485
+ },
+ "postal_code" => "101194",
+ "region_code" => "MOW",
+ "longitude" => 37.6184
+ },
+ ...
+```
+
+
+### Indexing Your Data into Elasticsearch [indexing-parsed-data-into-elasticsearch]
+
+Now that the web logs are broken down into specific fields, you’re ready to get your data into Elasticsearch.
+
+::::{tip}
+{ess-leadin}
+::::
+
+
+The Logstash pipeline can index the data into an Elasticsearch cluster. Edit the `first-pipeline.conf` file and replace the entire `output` section with the following text:
+
+```json
+output {
+ elasticsearch {
+ hosts => [ "localhost:9200" ]
+ }
+}
+```
+
+With this configuration, Logstash uses the HTTP protocol to connect to Elasticsearch. This example assumes that Logstash and Elasticsearch are running on the same instance. You can specify a remote Elasticsearch instance by using the `hosts` configuration to specify something like `hosts => [ "es-machine:9092" ]`.
+
+At this point, your `first-pipeline.conf` file has input, filter, and output sections properly configured, and looks something like this:
+
+```json
+input {
+ beats {
+ port => "5044"
+ }
+}
+ filter {
+ grok {
+ match => { "message" => "%{COMBINEDAPACHELOG}"}
+ }
+ geoip {
+ source => "clientip"
+ }
+}
+output {
+ elasticsearch {
+ hosts => [ "localhost:9200" ]
+ }
+}
+```
+
+Save your changes. To force Filebeat to read the log file from scratch, as you did earlier, shut down Filebeat (press Ctrl+C), delete the registry file, and then restart Filebeat with the following command:
+
+```shell
+sudo ./filebeat -e -c filebeat.yml -d "publish"
+```
+
+
+#### Testing Your Pipeline [testing-initial-pipeline]
+
+Now that the Logstash pipeline is configured to index the data into an Elasticsearch cluster, you can query Elasticsearch.
+
+Try a test query to Elasticsearch based on the fields created by the `grok` filter plugin. Replace $DATE with the current date, in YYYY.MM.DD format:
+
+```shell
+curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=response=200'
+```
+
+::::{note}
+The date used in the index name is based on UTC, not the timezone where Logstash is running. If the query returns `index_not_found_exception`, make sure that `logstash-$DATE` reflects the actual name of the index. To see a list of available indexes, use this query: `curl 'localhost:9200/_cat/indices?v'`.
+::::
+
+
+You should get multiple hits back. For example:
+
+```json
+{
+ "took": 50,
+ "timed_out": false,
+ "_shards": {
+ "total": 5,
+ "successful": 5,
+ "skipped": 0,
+ "failed": 0
+ },
+ "hits": {
+ "total": 98,
+ "max_score": 2.793642,
+ "hits": [
+ {
+ "_index": "logstash-2017.11.09",
+ "_type": "doc",
+ "_id": "3IzDnl8BW52sR0fx5wdV",
+ "_score": 2.793642,
+ "_source": {
+ "request": "/presentations/logstash-monitorama-2013/images/frontend-response-codes.png",
+ "agent": """"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"""",
+ "geoip": {
+ "timezone": "Europe/Moscow",
+ "ip": "83.149.9.216",
+ "latitude": 55.7485,
+ "continent_code": "EU",
+ "city_name": "Moscow",
+ "country_name": "Russia",
+ "country_code2": "RU",
+ "country_code3": "RU",
+ "region_name": "Moscow",
+ "location": {
+ "lon": 37.6184,
+ "lat": 55.7485
+ },
+ "postal_code": "101194",
+ "region_code": "MOW",
+ "longitude": 37.6184
+ },
+ "offset": 2932,
+ "auth": "-",
+ "ident": "-",
+ "verb": "GET",
+ "prospector": {
+ "type": "log"
+ },
+ "input": {
+ "type": "log"
+ },
+ "source": "/path/to/file/logstash-tutorial.log",
+ "message": """83.149.9.216 - - [04/Jan/2015:05:13:45 +0000] "GET /presentations/logstash-monitorama-2013/images/frontend-response-codes.png HTTP/1.1" 200 52878 "http://semicomplete.com/presentations/logstash-monitorama-2013/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"""",
+ "tags": [
+ "beats_input_codec_plain_applied"
+ ],
+ "referrer": """"http://semicomplete.com/presentations/logstash-monitorama-2013/"""",
+ "@timestamp": "2017-11-09T03:11:35.304Z",
+ "response": "200",
+ "bytes": "52878",
+ "clientip": "83.149.9.216",
+ "@version": "1",
+ "beat": {
+ "name": "My-MacBook-Pro.local",
+ "hostname": "My-MacBook-Pro.local",
+ "version": "6.0.0"
+ },
+ "host": "My-MacBook-Pro.local",
+ "httpversion": "1.1",
+ "timestamp": "04/Jan/2015:05:13:45 +0000"
+ }
+ },
+ ...
+```
+
+Try another search for the geographic information derived from the IP address. Replace $DATE with the current date, in YYYY.MM.DD format:
+
+```shell
+curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=geoip.city_name=Buffalo'
+```
+
+A few log entries come from Buffalo, so the query produces the following response:
+
+```json
+{
+ "took": 9,
+ "timed_out": false,
+ "_shards": {
+ "total": 5,
+ "successful": 5,
+ "skipped": 0,
+ "failed": 0
+ },
+ "hits": {
+ "total": 2,
+ "max_score": 2.6390574,
+ "hits": [
+ {
+ "_index": "logstash-2017.11.09",
+ "_type": "doc",
+ "_id": "L4zDnl8BW52sR0fx5whY",
+ "_score": 2.6390574,
+ "_source": {
+ "request": "/blog/geekery/disabling-battery-in-ubuntu-vms.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+semicomplete%2Fmain+%28semicomplete.com+-+Jordan+Sissel%29",
+ "agent": """"Tiny Tiny RSS/1.11 (http://tt-rss.org/)"""",
+ "geoip": {
+ "timezone": "America/New_York",
+ "ip": "198.46.149.143",
+ "latitude": 42.8864,
+ "continent_code": "NA",
+ "city_name": "Buffalo",
+ "country_name": "United States",
+ "country_code2": "US",
+ "dma_code": 514,
+ "country_code3": "US",
+ "region_name": "New York",
+ "location": {
+ "lon": -78.8781,
+ "lat": 42.8864
+ },
+ "postal_code": "14202",
+ "region_code": "NY",
+ "longitude": -78.8781
+ },
+ "offset": 22795,
+ "auth": "-",
+ "ident": "-",
+ "verb": "GET",
+ "prospector": {
+ "type": "log"
+ },
+ "input": {
+ "type": "log"
+ },
+ "source": "/path/to/file/logstash-tutorial.log",
+ "message": """198.46.149.143 - - [04/Jan/2015:05:29:13 +0000] "GET /blog/geekery/disabling-battery-in-ubuntu-vms.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+semicomplete%2Fmain+%28semicomplete.com+-+Jordan+Sissel%29 HTTP/1.1" 200 9316 "-" "Tiny Tiny RSS/1.11 (http://tt-rss.org/)"""",
+ "tags": [
+ "beats_input_codec_plain_applied"
+ ],
+ "referrer": """"-"""",
+ "@timestamp": "2017-11-09T03:11:35.321Z",
+ "response": "200",
+ "bytes": "9316",
+ "clientip": "198.46.149.143",
+ "@version": "1",
+ "beat": {
+ "name": "My-MacBook-Pro.local",
+ "hostname": "My-MacBook-Pro.local",
+ "version": "6.0.0"
+ },
+ "host": "My-MacBook-Pro.local",
+ "httpversion": "1.1",
+ "timestamp": "04/Jan/2015:05:29:13 +0000"
+ }
+ },
+ ...
+```
+
+If you are using Kibana to visualize your data, you can also explore the Filebeat data in Kibana:
+
+:::{image} ../images/kibana-filebeat-data.png
+:alt: Discovering Filebeat data in Kibana
+:::
+
+See the [Filebeat quick start docs](beats://reference/filebeat/filebeat-installation-configuration.md) for info about loading the Kibana index pattern for Filebeat.
+
+You’ve successfully created a pipeline that uses Filebeat to take Apache web logs as input, parses those logs to create specific, named fields from the logs, and writes the parsed data to an Elasticsearch cluster. Next, you learn how to create a pipeline that uses multiple input and output plugins.
+
+
diff --git a/docs/reference/codec-plugins.md b/docs/reference/codec-plugins.md
new file mode 100644
index 000000000..17622dc03
--- /dev/null
+++ b/docs/reference/codec-plugins.md
@@ -0,0 +1,67 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/codec-plugins.html
+---
+
+# Codec plugins [codec-plugins]
+
+A codec plugin changes the data representation of an event. Codecs are essentially stream filters that can operate as part of an input or output.
+
+The following codec plugins are available. For a list of Elastic supported plugins, please consult the [Support Matrix](https://www.elastic.co/support/matrix#show_logstash_plugins).
+
+| Plugin | Description | GitHub repository |
+| --- | --- | --- |
+| [avro](/reference/plugins-codecs-avro.md) | Reads serialized Avro records as Logstash events | [logstash-codec-avro](https://github.com/logstash-plugins/logstash-codec-avro) |
+| [cef](/reference/plugins-codecs-cef.md) | Reads the ArcSight Common Event Format (CEF). | [logstash-codec-cef](https://github.com/logstash-plugins/logstash-codec-cef) |
+| [cloudfront](/reference/plugins-codecs-cloudfront.md) | Reads AWS CloudFront reports | [logstash-codec-cloudfront](https://github.com/logstash-plugins/logstash-codec-cloudfront) |
+| [cloudtrail](/reference/plugins-codecs-cloudtrail.md) | Reads AWS CloudTrail log files | [logstash-codec-cloudtrail](https://github.com/logstash-plugins/logstash-codec-cloudtrail) |
+| [collectd](/reference/plugins-codecs-collectd.md) | Reads events from the `collectd` binary protocol using UDP. | [logstash-codec-collectd](https://github.com/logstash-plugins/logstash-codec-collectd) |
+| [csv](/reference/plugins-codecs-csv.md) | Takes CSV data, parses it, and passes it along. | [logstash-codec-csv](https://github.com/logstash-plugins/logstash-codec-csv) |
+| [dots](/reference/plugins-codecs-dots.md) | Sends 1 dot per event to `stdout` for performance tracking | [logstash-codec-dots](https://github.com/logstash-plugins/logstash-codec-dots) |
+| [edn](/reference/plugins-codecs-edn.md) | Reads EDN format data | [logstash-codec-edn](https://github.com/logstash-plugins/logstash-codec-edn) |
+| [edn_lines](/reference/plugins-codecs-edn_lines.md) | Reads newline-delimited EDN format data | [logstash-codec-edn_lines](https://github.com/logstash-plugins/logstash-codec-edn_lines) |
+| [es_bulk](/reference/plugins-codecs-es_bulk.md) | Reads the Elasticsearch bulk format into separate events, along with metadata | [logstash-codec-es_bulk](https://github.com/logstash-plugins/logstash-codec-es_bulk) |
+| [fluent](/reference/plugins-codecs-fluent.md) | Reads the `fluentd` `msgpack` schema | [logstash-codec-fluent](https://github.com/logstash-plugins/logstash-codec-fluent) |
+| [graphite](/reference/plugins-codecs-graphite.md) | Reads `graphite` formatted lines | [logstash-codec-graphite](https://github.com/logstash-plugins/logstash-codec-graphite) |
+| [gzip_lines](/reference/plugins-codecs-gzip_lines.md) | Reads `gzip` encoded content | [logstash-codec-gzip_lines](https://github.com/logstash-plugins/logstash-codec-gzip_lines) |
+| [jdots](/reference/plugins-codecs-jdots.md) | Renders each processed event as a dot | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/codecs/Dots.java) |
+| [java_line](/reference/plugins-codecs-java_line.md) | Encodes and decodes line-oriented text data | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/codecs/Line.java) |
+| [java_plain](/reference/plugins-codecs-java_plain.md) | Processes text data with no delimiters between events | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/codecs/Plain.java) |
+| [json](/reference/plugins-codecs-json.md) | Reads JSON formatted content, creating one event per element in a JSON array | [logstash-codec-json](https://github.com/logstash-plugins/logstash-codec-json) |
+| [json_lines](/reference/plugins-codecs-json_lines.md) | Reads newline-delimited JSON | [logstash-codec-json_lines](https://github.com/logstash-plugins/logstash-codec-json_lines) |
+| [line](/reference/plugins-codecs-line.md) | Reads line-oriented text data | [logstash-codec-line](https://github.com/logstash-plugins/logstash-codec-line) |
+| [msgpack](/reference/plugins-codecs-msgpack.md) | Reads MessagePack encoded content | [logstash-codec-msgpack](https://github.com/logstash-plugins/logstash-codec-msgpack) |
+| [multiline](/reference/plugins-codecs-multiline.md) | Merges multiline messages into a single event | [logstash-codec-multiline](https://github.com/logstash-plugins/logstash-codec-multiline) |
+| [netflow](/reference/plugins-codecs-netflow.md) | Reads Netflow v5 and Netflow v9 data | [logstash-codec-netflow](https://github.com/logstash-plugins/logstash-codec-netflow) |
+| [nmap](/reference/plugins-codecs-nmap.md) | Reads Nmap data in XML format | [logstash-codec-nmap](https://github.com/logstash-plugins/logstash-codec-nmap) |
+| [plain](/reference/plugins-codecs-plain.md) | Reads plaintext with no delimiting between events | [logstash-codec-plain](https://github.com/logstash-plugins/logstash-codec-plain) |
+| [protobuf](/reference/plugins-codecs-protobuf.md) | Reads protobuf messages and converts to Logstash Events | [logstash-codec-protobuf](https://github.com/logstash-plugins/logstash-codec-protobuf) |
+| [rubydebug](/reference/plugins-codecs-rubydebug.md) | Applies the Ruby Awesome Print library to Logstash events | [logstash-codec-rubydebug](https://github.com/logstash-plugins/logstash-codec-rubydebug) |
+
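+As a quick illustration of where codecs fit, the following sketch (the plugin choices are only examples) decodes newline-delimited JSON on an input and pretty-prints events on an output:
+
+```json
+input {
+  stdin {
+    codec => json_lines    # decode each incoming line as a JSON event
+  }
+}
+output {
+  stdout {
+    codec => rubydebug     # pretty-print each event for inspection
+  }
+}
+```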
diff --git a/docs/static/pipeline-config-exps.asciidoc b/docs/reference/config-examples.md
similarity index 64%
rename from docs/static/pipeline-config-exps.asciidoc
rename to docs/reference/config-examples.md
index e13649659..966576d67 100644
--- a/docs/static/pipeline-config-exps.asciidoc
+++ b/docs/reference/config-examples.md
@@ -1,18 +1,23 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/config-examples.html
+---
+
+# Logstash configuration examples [config-examples]
-[[config-examples]]
-=== Logstash configuration examples
These examples illustrate how you can configure Logstash to filter events, process Apache logs and syslog messages, and use conditionals to control what events are processed by a filter or output.
-TIP: If you need help building grok patterns, try out the
-{kibana-ref}/xpack-grokdebugger.html[Grok Debugger].
+::::{tip}
+If you need help building grok patterns, try out the [Grok Debugger](docs-content://explore-analyze/query-filter/tools/grok-debugger.md).
+::::
-[discrete]
-[[filter-example]]
-==== Configuring filters
-Filters are an in-line processing mechanism that provide the flexibility to slice and dice your data to fit your needs. Let's take a look at some filters in action. The following configuration file sets up the `grok` and `date` filters.
-[source,ruby]
-----------------------------------
+
+## Configuring filters [filter-example]
+
+Filters are an in-line processing mechanism that provide the flexibility to slice and dice your data to fit your needs. Let’s take a look at some filters in action. The following configuration file sets up the `grok` and `date` filters.
+
+```ruby
input { stdin { } }
filter {
@@ -28,27 +33,23 @@ output {
elasticsearch { hosts => ["localhost:9200"] }
stdout { codec => rubydebug }
}
-----------------------------------
+```
Run Logstash with this configuration:
-[source,ruby]
-----------------------------------
+```ruby
bin/logstash -f logstash-filter.conf
-----------------------------------
+```
-Now, paste the following line into your terminal and press Enter so it will be
-processed by the stdin input:
+Now, paste the following line into your terminal and press Enter so it will be processed by the stdin input:
-[source,ruby]
-----------------------------------
+```ruby
127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] "GET /xampp/status.php HTTP/1.1" 200 3891 "http://cadenza/xampp/navi.php" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0"
-----------------------------------
+```
You should see something returned to stdout that looks like this:
-[source,ruby]
-----------------------------------
+```ruby
{
"message" => "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"",
"@timestamp" => "2013-12-11T08:01:45.000Z",
@@ -66,18 +67,18 @@ You should see something returned to stdout that looks like this:
"referrer" => "\"http://cadenza/xampp/navi.php\"",
"agent" => "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\""
}
-----------------------------------
+```
-As you can see, Logstash (with help from the `grok` filter) was able to parse the log line (which happens to be in Apache "combined log" format) and break it up into many different discrete bits of information. This is extremely useful once you start querying and analyzing our log data. For example, you'll be able to easily run reports on HTTP response codes, IP addresses, referrers, and so on. There are quite a few grok patterns included with Logstash out-of-the-box, so it's quite likely if you need to parse a common log format, someone has already done the work for you. For more information, see the list of https://github.com/logstash-plugins/logstash-patterns-core/tree/main/patterns[Logstash grok patterns] on GitHub.
+As you can see, Logstash (with help from the `grok` filter) was able to parse the log line (which happens to be in Apache "combined log" format) and break it up into many different discrete bits of information. This is extremely useful once you start querying and analyzing your log data. For example, you’ll be able to easily run reports on HTTP response codes, IP addresses, referrers, and so on. There are quite a few grok patterns included with Logstash out-of-the-box, so it’s quite likely that if you need to parse a common log format, someone has already done the work for you. For more information, see the list of [Logstash grok patterns](https://github.com/logstash-plugins/logstash-patterns-core/tree/main/patterns) on GitHub.
-The other filter used in this example is the `date` filter. This filter parses out a timestamp and uses it as the timestamp for the event (regardless of when you're ingesting the log data). You'll notice that the `@timestamp` field in this example is set to December 11, 2013, even though Logstash is ingesting the event at some point afterwards. This is handy when backfilling logs. It gives you the ability to tell Logstash "use this value as the timestamp for this event".
+The other filter used in this example is the `date` filter. This filter parses out a timestamp and uses it as the timestamp for the event (regardless of when you’re ingesting the log data). You’ll notice that the `@timestamp` field in this example is set to December 11, 2013, even though Logstash is ingesting the event at some point afterwards. This is handy when backfilling logs. It gives you the ability to tell Logstash "use this value as the timestamp for this event".
-[discrete]
-==== Processing Apache logs
-Let's do something that's actually *useful*: process apache2 access log files! We are going to read the input from a file on the localhost, and use a <> to process the event according to our needs. First, create a file called something like 'logstash-apache.conf' with the following contents (you can change the log's file path to suit your needs):
-[source,js]
-----------------------------------
+## Processing Apache logs [_processing_apache_logs]
+
+Let’s do something that’s actually **useful**: process apache2 access log files! We are going to read the input from a file on the localhost, and use a [conditional](/reference/event-dependent-configuration.md#conditionals) to process the event according to our needs. First, create a file called something like *logstash-apache.conf* with the following contents (you can change the log’s file path to suit your needs):
+
+```js
input {
file {
path => "/tmp/access_log"
@@ -103,48 +104,43 @@ output {
}
stdout { codec => rubydebug }
}
-
-----------------------------------
+```
Then, create the input file you configured above (in this example, "/tmp/access_log") with the following log entries (or use some from your own webserver):
-[source,js]
-----------------------------------
+```js
71.141.244.242 - kurt [18/May/2011:01:48:10 -0700] "GET /admin HTTP/1.1" 301 566 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3"
134.39.72.245 - - [18/May/2011:12:40:18 -0700] "GET /favicon.ico HTTP/1.1" 200 1189 "-" "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; InfoPath.2; .NET4.0C; .NET4.0E)"
98.83.179.51 - - [18/May/2011:19:35:08 -0700] "GET /css/main.css HTTP/1.1" 200 1837 "http://www.safesand.com/information.htm" "Mozilla/5.0 (Windows NT 6.0; WOW64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"
-----------------------------------
+```
Now, run Logstash with the -f flag to pass in the configuration file:
-[source,js]
-----------------------------------
+```js
bin/logstash -f logstash-apache.conf
-----------------------------------
+```
-Now you should see your apache log data in Elasticsearch! Logstash opened and read the specified input file, processing each event it encountered. Any additional lines logged to this file will also be captured, processed by Logstash as events, and stored in Elasticsearch. As an added bonus, they are stashed with the field "type" set to "apache_access" (this is done by the type => "apache_access" line in the input configuration).
+Now you should see your apache log data in Elasticsearch! Logstash opened and read the specified input file, processing each event it encountered. Any additional lines logged to this file will also be captured, processed by Logstash as events, and stored in Elasticsearch. As an added bonus, they are stashed with the field "type" set to "apache_access" (this is done by the `type => "apache_access"` line in the input configuration).
-In this configuration, Logstash is only watching the apache access_log, but it's easy enough to watch both the access_log and the error_log (actually, any file matching `*log`), by changing one line in the above configuration:
+In this configuration, Logstash is only watching the apache access_log, but it’s easy enough to watch both the access_log and the error_log (actually, any file matching `*log`), by changing one line in the above configuration:
-[source,js]
-----------------------------------
+```js
input {
file {
path => "/tmp/*_log"
...
-----------------------------------
+```
-When you restart Logstash, it will process both the error and access logs. However, if you inspect your data (using elasticsearch-kopf, perhaps), you'll see that the access_log is broken up into discrete fields, but the error_log isn't. That's because we used a `grok` filter to match the standard combined apache log format and automatically split the data into separate fields. Wouldn't it be nice *if* we could control how a line was parsed, based on its format? Well, we can...
+When you restart Logstash, it will process both the error and access logs. However, if you inspect your data (using elasticsearch-kopf, perhaps), you’ll see that the access_log is broken up into discrete fields, but the error_log isn’t. That’s because we used a `grok` filter to match the standard combined apache log format and automatically split the data into separate fields. Wouldn’t it be nice **if** we could control how a line was parsed, based on its format? Well, we can…
Note that Logstash did not reprocess the events that were already seen in the access_log file. When reading from a file, Logstash saves its position and only processes new lines as they are added. Neat!
-[discrete]
-[[using-conditionals]]
-==== Using conditionals
+
+## Using conditionals [using-conditionals]
+
You use conditionals to control what events are processed by a filter or output. For example, you could label each event according to which file it appeared in (access_log, error_log, and other random files that end with "log").
-[source,ruby]
-----------------------------------
+```ruby
input {
file {
path => "/tmp/*_log"
@@ -171,9 +167,9 @@ output {
elasticsearch { hosts => ["localhost:9200"] }
stdout { codec => rubydebug }
}
-----------------------------------
+```
-This example labels all events using the `type` field, but doesn't actually parse the `error` or `random` files. There are so many types of error logs that how they should be labeled really depends on what logs you're working with.
+This example labels all events using the `type` field, but doesn’t actually parse the `error` or `random` files. There are so many types of error logs that how they should be labeled really depends on what logs you’re working with.
Similarly, you can use conditionals to direct events to particular outputs. For example, you could:
@@ -181,14 +177,9 @@ Similarly, you can use conditionals to direct events to particular outputs. For
* record any 4xx status to Elasticsearch
* record all status code hits via statsd
-To tell nagios about any http event that has a 5xx status code, you
-first need to check the value of the `type` field. If it's apache, then you can
-check to see if the `status` field contains a 5xx error. If it is, send it to nagios. If it isn't
-a 5xx error, check to see if the `status` field contains a 4xx error. If so, send it to Elasticsearch.
-Finally, send all apache status codes to statsd no matter what the `status` field contains:
+To tell nagios about any http event that has a 5xx status code, you first need to check the value of the `type` field. If it’s apache, then you can check to see if the `status` field contains a 5xx error. If it is, send it to nagios. If it isn’t a 5xx error, check to see if the `status` field contains a 4xx error. If so, send it to Elasticsearch. Finally, send all apache status codes to statsd no matter what the `status` field contains:
-[source,js]
-----------------------------------
+```js
output {
if [type] == "apache" {
if [status] =~ /^5\d\d/ {
@@ -199,16 +190,16 @@ output {
statsd { increment => "apache.%{status}" }
}
}
-----------------------------------
+```
-[discrete]
-==== Processing Syslog messages
-Syslog is one of the most common use cases for Logstash, and one it handles exceedingly well (as long as the log lines conform roughly to RFC3164). Syslog is the de facto UNIX networked logging standard, sending messages from client machines to a local file, or to a centralized log server via rsyslog. For this example, you won't need a functioning syslog instance; we'll fake it from the command line so you can get a feel for what happens.
-First, let's make a simple configuration file for Logstash + syslog, called 'logstash-syslog.conf'.
+## Processing Syslog messages [_processing_syslog_messages]
-[source,ruby]
-----------------------------------
+Syslog is one of the most common use cases for Logstash, and one it handles exceedingly well (as long as the log lines conform roughly to RFC3164). Syslog is the de facto UNIX networked logging standard, sending messages from client machines to a local file, or to a centralized log server via rsyslog. For this example, you won’t need a functioning syslog instance; we’ll fake it from the command line so you can get a feel for what happens.
+
+First, let’s make a simple configuration file for Logstash + syslog, called *logstash-syslog.conf*.
+
+```ruby
input {
tcp {
port => 5000
@@ -237,36 +228,32 @@ output {
elasticsearch { hosts => ["localhost:9200"] }
stdout { codec => rubydebug }
}
-----------------------------------
+```
Run Logstash with this new configuration:
-[source,ruby]
-----------------------------------
+```ruby
bin/logstash -f logstash-syslog.conf
-----------------------------------
+```
-Normally, a client machine would connect to the Logstash instance on port 5000 and send its message. For this example, we'll just telnet to Logstash and enter a log line (similar to how we entered log lines into STDIN earlier). Open another shell window to interact with the Logstash syslog input and enter the following command:
+Normally, a client machine would connect to the Logstash instance on port 5000 and send its message. For this example, we’ll just telnet to Logstash and enter a log line (similar to how we entered log lines into STDIN earlier). Open another shell window to interact with the Logstash syslog input and enter the following command:
-[source,ruby]
-----------------------------------
+```ruby
telnet localhost 5000
-----------------------------------
+```
Copy and paste the following lines as samples. (Feel free to try some of your own, but keep in mind they might not parse if the `grok` filter is not correct for your data).
-[source,ruby]
-----------------------------------
+```ruby
Dec 23 12:11:43 louis postfix/smtpd[31499]: connect from unknown[95.75.93.154]
Dec 23 14:42:56 louis named[16000]: client 199.48.164.7#64817: query (cache) 'amsterdamboothuren.com/MX/IN' denied
Dec 23 14:30:01 louis CRON[619]: (www-data) CMD (php /usr/share/cacti/site/poller.php >/dev/null 2>/var/log/cacti/poller-error.log)
Dec 22 18:28:06 louis rsyslogd: [origin software="rsyslogd" swVersion="4.2.0" x-pid="2253" x-info="http://www.rsyslog.com"] rsyslogd was HUPed, type 'lightweight'.
-----------------------------------
+```
Now you should see the output of Logstash in your original shell as it processes and parses messages!
-[source,ruby]
-----------------------------------
+```ruby
{
"message" => "Dec 23 14:30:01 louis CRON[619]: (www-data) CMD (php /usr/share/cacti/site/poller.php >/dev/null 2>/var/log/cacti/poller-error.log)",
"@timestamp" => "2013-12-23T22:30:01.000Z",
@@ -285,7 +272,5 @@ Now you should see the output of Logstash in your original shell as it processes
"syslog_facility" => "user-level",
"syslog_severity" => "notice"
}
-----------------------------------
-
-
+```
diff --git a/docs/reference/config-setting-files.md b/docs/reference/config-setting-files.md
new file mode 100644
index 000000000..5ace59d73
--- /dev/null
+++ b/docs/reference/config-setting-files.md
@@ -0,0 +1,33 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/config-setting-files.html
+---
+
+# Logstash Configuration Files [config-setting-files]
+
+Logstash has two types of configuration files: *pipeline configuration files*, which define the Logstash processing pipeline, and *settings files*, which specify options that control Logstash startup and execution.
+
+## Pipeline Configuration Files [pipeline-config-files]
+
+You create pipeline configuration files when you define the stages of your Logstash processing pipeline. On deb and rpm, you place the pipeline configuration files in the `/etc/logstash/conf.d` directory. Logstash tries to load only files with a `.conf` extension in the `/etc/logstash/conf.d` directory and ignores all other files.
+
+See [*Creating a {{ls}} pipeline*](/reference/creating-logstash-pipeline.md) for more info.
+
+
+## Settings Files [settings-files]
+
+The settings files are already defined in the Logstash installation. Logstash includes the following settings files:
+
+**`logstash.yml`**
+: Contains Logstash configuration flags. You can set flags in this file instead of passing the flags at the command line. Any flags that you set at the command line override the corresponding settings in the `logstash.yml` file. See [logstash.yml](/reference/logstash-settings-file.md) for more info.
+
+**`pipelines.yml`**
+: Contains the framework and instructions for running multiple pipelines in a single Logstash instance. See [Multiple Pipelines](/reference/multiple-pipelines.md) for more info.
+
+**`jvm.options`**
+: Contains JVM configuration flags. Use this file to set initial and maximum values for total heap space. You can also use this file to set the locale for Logstash. Specify each flag on a separate line. All other settings in this file are considered expert settings. An example heap setting appears after this list.
+
+**`log4j2.properties`**
+: Contains default settings for `log4j 2` library. See [Log4j2 configuration](/reference/logging.md#log4j2) for more info.
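+
+For example, a minimal heap configuration in `jvm.options` might look like the following sketch (the `4g` value is illustrative; size the heap for your own workload):
+
+```shell
+## Set the initial and maximum heap size to the same value
+-Xms4g
+-Xmx4g
+```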
+
+
diff --git a/docs/reference/configuration-file-structure.md b/docs/reference/configuration-file-structure.md
new file mode 100644
index 000000000..f98459640
--- /dev/null
+++ b/docs/reference/configuration-file-structure.md
@@ -0,0 +1,235 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuration-file-structure.html
+---
+
+# Structure of a pipeline [configuration-file-structure]
+
+A {{ls}} pipeline config file has a separate section for each type of plugin you want to add to the event processing pipeline. For example:
+
+```js
+# This is a comment. You should use comments to describe
+# parts of your configuration.
+input {
+ ...
+}
+
+filter {
+ ...
+}
+
+output {
+ ...
+}
+```
+
+Each section contains configuration options for one or more plugins. If you specify multiple filters, they are applied in the order they appear in the configuration file. If you specify multiple outputs, events are sent to each destination sequentially, in the order they appear in the configuration file.
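+
+For example, in the following sketch the `grok` filter runs before the `mutate` filter, and each event goes to the Elasticsearch output first and then to stdout:
+
+```js
+filter {
+  grok   { match => { "message" => "%{COMBINEDAPACHELOG}" } }
+  mutate { add_tag => ["parsed"] }                  # runs after grok
+}
+
+output {
+  elasticsearch { hosts => ["localhost:9200"] }     # events are sent here first
+  stdout { codec => rubydebug }                     # and then printed to stdout
+}
+```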
+
+::::{tip}
+When you are ready to deploy a pipeline beyond your local machine, add the pipeline config file to [`logstash.yml`](/reference/logstash-settings-file.md) using the `pipeline.id` setting. When you are ready to deploy [multiple pipelines](/reference/multiple-pipelines.md), set up and configure your pipelines in the `pipelines.yml` file.
+::::
+
+
+
+## Plugin configuration [plugin_configuration]
+
+A plugin configuration consists of the plugin name followed by a block of settings for that plugin. For example, this input section configures two `http` inputs:
+
+```js
+input {
+ http {
+ port => 3333
+ tags => gateway
+ }
+ http {
+ port => 4444
+ tags => billing
+ }
+}
+```
+
+In this example, two settings are configured for each of the `http` inputs: `port` and `tags`.
+
+The settings you can configure vary according to the plugin type. For information about each plugin, see [Input Plugins](/reference/input-plugins.md), [Output Plugins](/reference/output-plugins.md), [Filter Plugins](/reference/filter-plugins.md), and [Codec Plugins](/reference/codec-plugins.md).
+
+
+## Value types [plugin-value-types]
+
+A plugin can require that the value for a setting be a certain type, such as boolean, list, or hash. The following value types are supported.
+
+### Array [array]
+
+This type is now mostly deprecated in favor of using a standard type like `string` with the plugin defining the `:list => true` property for better type checking. It is still needed to handle lists of hashes or mixed types where type checking is not desired.
+
+Example:
+
+```js
+ users => [ {id => 1, name => bob}, {id => 2, name => jane} ]
+```
+
+
+### Lists [list]
+
+Not a type in and of itself, but a property that types can have. This makes it possible to type check multiple values. Plugin authors can enable list checking by specifying `:list => true` when declaring an argument.
+
+Example:
+
+```js
+ path => [ "/var/log/messages", "/var/log/*.log" ]
+ uris => [ "http://elastic.co", "http://example.net" ]
+```
+
+This example configures `path`, which is a `string`, to be a list that contains an element for each of the two strings. It also configures the `uris` parameter to be a list of URIs, failing if any of the URIs provided are not valid.
+
+
+### Boolean [boolean]
+
+A boolean must be either `true` or `false`. Note that the `true` and `false` keywords are not enclosed in quotes.
+
+Example:
+
+```js
+ ssl_enable => true
+```
+
+
+### Bytes [bytes]
+
+A bytes field is a string field that represents a valid unit of bytes. It is a convenient way to declare specific sizes in your plugin options. Both SI (k M G T P E Z Y) and Binary (Ki Mi Gi Ti Pi Ei Zi Yi) units are supported. Binary units are in base-1024 and SI units are in base-1000. This field is case-insensitive and accepts space between the value and the unit. If no unit is specified, the integer string represents the number of bytes.
+
+Examples:
+
+```js
+ my_bytes => "1113" # 1113 bytes
+ my_bytes => "10MiB" # 10485760 bytes
+ my_bytes => "100kib" # 102400 bytes
+ my_bytes => "180 mb" # 180000000 bytes
+```
+
+
+### Codec [codec]
+
+A codec is the name of a Logstash codec used to represent the data. Codecs can be used in both inputs and outputs.
+
+Input codecs provide a convenient way to decode your data before it enters the input. Output codecs provide a convenient way to encode your data before it leaves the output. Using an input or output codec eliminates the need for a separate filter in your Logstash pipeline.
+
+A list of available codecs can be found at the [Codec Plugins](/reference/codec-plugins.md) page.
+
+Example:
+
+```js
+ codec => "json"
+```
+
+
+### Hash [hash]
+
+A hash is a collection of key-value pairs specified in the format `"field1" => "value1"`. Note that multiple key-value entries are separated by spaces rather than commas.
+
+Example:
+
+```js
+match => {
+ "field1" => "value1"
+ "field2" => "value2"
+ ...
+}
+# or as a single line. No commas between entries:
+match => { "field1" => "value1" "field2" => "value2" }
+```
+
+
+### Number [number]
+
+Numbers must be valid numeric values (floating point or integer).
+
+Example:
+
+```js
+ port => 33
+```
+
+
+### Password [password]
+
+A password is a string with a single value that is not logged or printed.
+
+Example:
+
+```js
+ my_password => "password"
+```
+
+
+### URI [uri]
+
+A URI can be anything from a full URL like *http://elastic.co/* to a simple identifier like *foobar*. If the URI contains a password such as *http://user:pass@example.net*, the password portion of the URI will not be logged or printed.
+
+Example:
+
+```js
+ my_uri => "http://foo:bar@example.net"
+```
+
+
+### Path [path]
+
+A path is a string that represents a valid operating system path.
+
+Example:
+
+```js
+ my_path => "/tmp/logstash"
+```
+
+
+### String [string]
+
+A string must be a single character sequence. Note that string values are enclosed in quotes, either double or single.
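+
+Example (both quoting styles produce the same string value):
+
+```js
+  name => "Hello world"
+  name => 'Hello world'
+```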
+
+### Escape sequences [_escape_sequences]
+
+By default, escape sequences are not enabled. If you wish to use escape sequences in quoted strings, you will need to set `config.support_escapes: true` in your `logstash.yml`. When `true`, quoted strings (double and single) will have this transformation:
+
+| Text | Result |
+| --- | --- |
+| `\r` | carriage return (ASCII 13) |
+| `\n` | new line (ASCII 10) |
+| `\t` | tab (ASCII 9) |
+| `\\` | backslash (ASCII 92) |
+| `\"` | double quote (ASCII 34) |
+| `\'` | single quote (ASCII 39) |
+
+Example:
+
+```js
+ name => "Hello world"
+ name => 'It\'s a beautiful day'
+```
+
+
+### Field reference [field-reference]
+
+A Field Reference is a special [String](#string) value representing the path to a field in an event, such as `@timestamp` or `[@timestamp]` to reference a top-level field, or `[client][ip]` to access a nested field. The [*Field References Deep Dive*](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html) provides detailed information about the structure of Field References. When provided as a configuration option, Field References need to be quoted and special characters must be escaped following the same rules as [String](#string).
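+
+For example, a quoted field reference can be used anywhere a setting expects a field name, and the same path can be interpolated with the `%{}` sprintf syntax; this is a minimal sketch:
+
+```js
+filter {
+  if [client][ip] {                       # conditional on a nested field
+    mutate {
+      # copy the nested value into a hypothetical top-level field
+      add_field => { "client_ip" => "%{[client][ip]}" }
+    }
+  }
+}
+```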
+
+
+## Comments [comments]
+
+Comments are the same as in Perl, Ruby, and Python. A comment starts with a `#` character and does not need to be at the beginning of a line. For example:
+
+```js
+# this is a comment
+
+input { # comments can appear at the end of a line, too
+ # ...
+}
+```
+
+::::{note}
+Comments containing environment variable `${var}` references in `config.string` are still evaluated. Remove the `$` sign to avoid pipeline loading failures.
+::::
+
+
+
+
diff --git a/docs/reference/configuring-centralized-pipelines.md b/docs/reference/configuring-centralized-pipelines.md
new file mode 100644
index 000000000..fc56a4a4f
--- /dev/null
+++ b/docs/reference/configuring-centralized-pipelines.md
@@ -0,0 +1,157 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuring-centralized-pipelines.html
+---
+
+# Configure Centralized Pipeline Management [configuring-centralized-pipelines]
+
+To configure [centralized pipeline management](/reference/logstash-centralized-pipeline-management.md):
+
+1. Verify that you are using a license that includes the pipeline management feature.
+
+ For more information, see [https://www.elastic.co/subscriptions](https://www.elastic.co/subscriptions) and [License management](docs-content://deploy-manage/license/manage-your-license-in-self-managed-cluster.md).
+
+2. Specify [configuration management settings](#configuration-management-settings) in the `logstash.yml` file. At a minimum, set:
+
+ * `xpack.management.enabled: true` to enable centralized configuration management.
+ * `xpack.management.elasticsearch.hosts` to specify the Elasticsearch instance that will store the Logstash pipeline configurations and metadata.
+ * `xpack.management.pipeline.id` to register the pipelines that you want to centrally manage.
+
+3. Restart Logstash.
+4. If your Elasticsearch cluster is protected with basic authentication, assign the built-in `logstash_admin` role as well as the `logstash_writer` role to any users who will use centralized pipeline management. See [Secure your connection](/reference/secure-connection.md) for more information.
+
+::::{note}
+Centralized management is disabled until you configure and enable {{security-features}}.
+::::
+
+
+::::{important}
+After you’ve configured Logstash to use centralized pipeline management, you can no longer specify local pipeline configurations. This means that the `pipelines.yml` file and settings like `path.config` and `config.string` are inactive when this feature is enabled.
+::::
+
+
+## Configuration Management Settings in Logstash [configuration-management-settings]
+
+
+You can set the following `xpack.management` settings in `logstash.yml` to enable [centralized pipeline management](/reference/logstash-centralized-pipeline-management.md). For more information about configuring Logstash, see [logstash.yml](/reference/logstash-settings-file.md).
+
+The following example shows basic settings that assume {{es}} and {{kib}} are installed on the localhost with basic AUTH enabled, but no SSL. If you’re using SSL, you need to specify additional SSL settings.
+
+```shell
+xpack.management.enabled: true
+xpack.management.elasticsearch.hosts: "http://localhost:9200/"
+xpack.management.elasticsearch.username: logstash_admin_user
+xpack.management.elasticsearch.password: t0p.s3cr3t
+xpack.management.logstash.poll_interval: 5s
+xpack.management.pipeline.id: ["apache", "cloudwatch_logs"]
+```
+
+`xpack.management.enabled`
+: Set to `true` to enable {{xpack}} centralized configuration management for Logstash.
+
+`xpack.management.logstash.poll_interval`
+: How often the Logstash instance polls for pipeline changes from Elasticsearch. The default is 5s.
+
+`xpack.management.pipeline.id`
+: Specify a comma-separated list of pipeline IDs to register for centralized pipeline management. After changing this setting, you need to restart Logstash to pick up changes. Pipeline IDs support `*` as a [wildcard](#wildcard-in-pipeline-id) for matching multiple IDs.
+
+`xpack.management.elasticsearch.hosts`
+: The {{es}} instance that will store the Logstash pipeline configurations and metadata. This might be the same {{es}} instance specified in the `outputs` section in your Logstash configuration, or a different one. Defaults to `http://localhost:9200`.
+
+`xpack.management.elasticsearch.username` and `xpack.management.elasticsearch.password`
+: If your {{es}} cluster is protected with basic authentication, these settings provide the username and password that the Logstash instance uses to authenticate for accessing the configuration data. The username you specify here should have the built-in `logstash_admin` and `logstash_system` roles. These roles provide access to system indices for managing configurations.
+
+::::{note}
+Starting with Elasticsearch version 7.10.0, the `logstash_admin` role inherits the `manage_logstash_pipelines` cluster privilege for centralized pipeline management. If a user has created their own roles and granted them access to the .logstash index, those roles will continue to work in 7.x but will need to be updated for 8.0.
+::::
+
+
+`xpack.management.elasticsearch.proxy`
+: Optional setting that allows you to specify a proxy URL if Logstash needs to use a proxy to reach your Elasticsearch cluster.
+
+`xpack.management.elasticsearch.ssl.ca_trusted_fingerprint`
+: Optional setting that enables you to specify the hex-encoded SHA-256 fingerprint of the certificate authority for your {{es}} instance.
+
+::::{note}
+A self-secured Elasticsearch cluster will provide the fingerprint of its CA to the console during setup.
+
+You can also get the SHA-256 fingerprint of an Elasticsearch CA using the `openssl` command-line utility on the Elasticsearch host:
+
+```shell
+openssl x509 -fingerprint -sha256 -in $ES_HOME/config/certs/http_ca.crt
+```
+
+::::
+
+
+`xpack.management.elasticsearch.ssl.certificate_authority`
+: Optional setting that enables you to specify a path to the `.pem` file for the certificate authority for your {{es}} instance.
+
+`xpack.management.elasticsearch.ssl.truststore.path`
+: Optional setting that provides the path to the Java keystore (JKS) to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and `xpack.management.elasticsearch.ssl.certificate_authority` at the same time.
+::::
+
+
+`xpack.management.elasticsearch.ssl.truststore.password`
+: Optional setting that provides the password to the truststore.
+
+`xpack.management.elasticsearch.ssl.keystore.path`
+: Optional setting that provides the path to the Java keystore (JKS) to validate the client’s certificate.
+
+::::{note}
+You cannot use this setting and `xpack.management.elasticsearch.ssl.keystore.certificate` at the same time.
+::::
+
+
+`xpack.management.elasticsearch.ssl.keystore.password`
+: Optional setting that provides the password to the keystore.
+
+`xpack.management.elasticsearch.ssl.certificate`
+: Optional setting that provides the path to an SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if `xpack.management.elasticsearch.ssl.key` is set.
+::::
+
+
+`xpack.management.elasticsearch.ssl.key`
+: Optional setting that provides the path to an OpenSSL-style RSA private key that corresponds to the `xpack.management.elasticsearch.ssl.certificate`.
+
+::::{note}
+This setting can be used only if `xpack.management.elasticsearch.ssl.certificate` is set.
+::::
+
+
+`xpack.management.elasticsearch.ssl.verification_mode`
+: Option to validate the server’s certificate. Defaults to `full`. To disable, set to `none`. Disabling this severely compromises security.
+
+`xpack.management.elasticsearch.ssl.cipher_suites`
+: Optional setting that provides the list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+`xpack.management.elasticsearch.cloud_id`
+: If you’re using {{es}} in {{ecloud}}, you should specify the identifier here. This setting is an alternative to `xpack.management.elasticsearch.hosts`. If `cloud_id` is configured, `xpack.management.elasticsearch.hosts` should not be used. This {{es}} instance will store the Logstash pipeline configurations and metadata.
+
+`xpack.management.elasticsearch.cloud_auth`
+: If you’re using {{es}} in {{ecloud}}, you can set your auth credentials here. This setting is an alternative to both `xpack.management.elasticsearch.username` and `xpack.management.elasticsearch.password`. If `cloud_auth` is configured, those settings should not be used. The credentials you specify here should be for a user with the `logstash_admin` and `logstash_system` roles, which provide access to system indices for managing configurations. See the example after these settings.
+
+`xpack.management.elasticsearch.api_key`
+: Authenticate using an Elasticsearch API key. Note that this option also requires using SSL. The API key format is `id:api_key`, where `id` and `api_key` are as returned by the Elasticsearch [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
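+
+For example, a sketch of the {{ecloud}} variant of the settings shown earlier (the `cloud_id` and credentials are placeholders for your own deployment):
+
+```shell
+xpack.management.enabled: true
+xpack.management.pipeline.id: ["apache", "cloudwatch_logs"]
+xpack.management.elasticsearch.cloud_id: "<deployment-name>:<base64-cloud-data>"
+xpack.management.elasticsearch.cloud_auth: "logstash_admin_user:t0p.s3cr3t"
+```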
+
+
+## Wildcard support in pipeline ID [wildcard-in-pipeline-id]
+
+
+Pipeline IDs must begin with a letter or underscore and contain only letters, underscores, dashes, and numbers. You can use `*` in `xpack.management.pipeline.id` to match any number of letters, underscores, dashes, and numbers.
+
+```shell
+xpack.management.pipeline.id: ["*logs", "*apache*", "tomcat_log"]
+```
+
+In this example, `"*logs"` matches all IDs ending in `logs`. `"*apache*"` matches any IDs with `apache` in the name.
+
+Wildcards in pipeline IDs are available starting with Elasticsearch 7.10. Logstash can pick up a new pipeline without a restart if the new pipeline ID matches the wildcard pattern.
+
+
diff --git a/docs/reference/configuring-geoip-database-management.md b/docs/reference/configuring-geoip-database-management.md
new file mode 100644
index 000000000..df0f84a2a
--- /dev/null
+++ b/docs/reference/configuring-geoip-database-management.md
@@ -0,0 +1,68 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuring-geoip-database-management.html
+---
+
+# Configure GeoIP Database Management [configuring-geoip-database-management]
+
+To configure [GeoIP Database Management](/reference/logstash-geoip-database-management.md):
+
+1. Verify that you are using a license that includes the geoip database management feature.
+
+ For more information, see [https://www.elastic.co/subscriptions](https://www.elastic.co/subscriptions) and [License management](docs-content://deploy-manage/license/manage-your-license-in-self-managed-cluster.md).
+
+2. Specify [geoip database management settings](#geoip-database-management-settings) in the `logstash.yml` file to tune the configuration as needed.
+
+## GeoIP Database Management settings in {{ls}} [geoip-database-management-settings]
+
+
+You can set the following `xpack.geoip` settings in `logstash.yml` to configure the [geoip database manager](/reference/logstash-geoip-database-management.md). For more information about configuring Logstash, see [logstash.yml](/reference/logstash-settings-file.md).
+
+`xpack.geoip.downloader.enabled`
+: (Boolean) If `true`, Logstash automatically downloads and manages updates for GeoIP2 databases from the `xpack.geoip.downloader.endpoint`. If `false`, Logstash does not manage GeoIP2 databases and plugins that need a GeoIP2 database must be configured to provide their own.
+
+`xpack.geoip.downloader.endpoint`
+: (String) Endpoint URL used to download updates for GeoIP2 databases. For example, `https://mydomain.com/overview.json`. Defaults to `https://geoip.elastic.co/v1/database`. Note that Logstash will periodically make a GET request to `${xpack.geoip.downloader.endpoint}?elastic_geoip_service_tos=agree`, expecting the list of metadata about databases typically found in `overview.json`.
+
+`xpack.geoip.downloader.poll.interval`
+: (Time Value) How often Logstash checks for GeoIP2 database updates at the `xpack.geoip.downloader.endpoint`. For example, `6h` to check every six hours. Defaults to `24h` (24 hours).
+
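+For example, a minimal sketch of these settings in `logstash.yml` (the values shown are illustrative, not recommendations):
+
+```yaml
+xpack.geoip.downloader.enabled: true
+xpack.geoip.downloader.poll.interval: 12h
+```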
+
+## Offline and air-gapped environments [configuring-geoip-database-management-offline]
+
+If Logstash does not have access to the internet, or if you want to disable the database manager, set the `xpack.geoip.downloader.enabled` value to `false` in `logstash.yml`. When the database manager is disabled, plugins that require GeoIP lookups must be configured with their own source of GeoIP databases.
+
+### Using an HTTP proxy [_using_an_http_proxy]
+
+If you can’t connect directly to the Elastic GeoIP endpoint, consider setting up an HTTP proxy server. You can then specify the proxy with the `http_proxy` environment variable.
+
+```sh
+export http_proxy="http://PROXY_IP:PROXY_PORT"
+```
+
+
+### Using a custom endpoint [_using_a_custom_endpoint]
+
+If you work in an air-gapped environment and can’t update your databases from the Elastic endpoint, you can download databases from MaxMind and bootstrap the service.
+
+1. Download both `GeoLite2-ASN.mmdb` and `GeoLite2-City.mmdb` database files from the [MaxMind site](http://dev.maxmind.com/geoip/geoip2/geolite2).
+2. Copy both database files to a single directory.
+3. [Download {{es}}](https://www.elastic.co/downloads/elasticsearch).
+4. From your {{es}} directory, run:
+
+ ```sh
+ ./bin/elasticsearch-geoip -s my/database/dir
+ ```
+
+5. Serve the static database files from your directory. For example, you can use Docker to serve the files from an nginx server:
+
+ ```sh
+ docker run -p 8080:80 -v my/database/dir:/usr/share/nginx/html:ro nginx
+ ```
+
+6. Specify the service’s endpoint URL in Logstash using the `xpack.geoip.downloader.endpoint` setting in `logstash.yml` (see the example below).
+
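+A corresponding `logstash.yml` entry might look like this, assuming the nginx container from the previous step serves the files on port 8080 of the same host:
+
+```yaml
+xpack.geoip.downloader.endpoint: "http://localhost:8080/overview.json"
+```
+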
+Logstash gets automatic updates from this service.
+
+
+
diff --git a/docs/reference/connecting-to-cloud.md b/docs/reference/connecting-to-cloud.md
new file mode 100644
index 000000000..7bb7fcc24
--- /dev/null
+++ b/docs/reference/connecting-to-cloud.md
@@ -0,0 +1,47 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/connecting-to-cloud.html
+---
+
+# Sending data to Elastic Cloud (hosted Elasticsearch Service) [connecting-to-cloud]
+
+Our hosted {{ess}} on [Elastic Cloud](https://cloud.elastic.co/) simplifies safe, secure communication between {{ls}} and {{es}}. When you configure the Elasticsearch output plugin to use [`cloud_id`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_id) with either the [`cloud_auth` option](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_auth) or the [`api_key` option](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-api_key), no additional SSL configuration is needed.
+
+Examples:
+
+* `output {elasticsearch { cloud_id => "" cloud_auth => "" } }`
+* `output {elasticsearch { cloud_id => "" api_key => "" } }`
+
+{ess-leadin-short}
+
+## Cloud ID [cloud-id]
+
+{{ls}} uses the Cloud ID, found in the Elastic Cloud web console, to build the Elasticsearch and Kibana hosts settings. It is a base64 encoded text value of about 120 characters made up of upper and lower case letters and numbers. If you have several Cloud IDs, you can add a label, which is ignored internally, to help you tell them apart. To add a label, prefix your Cloud ID with the label and a `:` separator, in the format `<label>:<cloud-id>`.
+
+
+## Cloud Auth [cloud-auth]
+
+Cloud Auth is optional. Construct this value in the format `<username>:<password>`. Use your Cloud username for the first part. Use your Cloud password for the second part, which is given once in the Cloud UI when you create a cluster. If you change your Cloud password in the Cloud UI, remember to change it here, too.
+
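+For example, a sketch of the {{es}} output with a labeled Cloud ID and Cloud Auth (all values below are placeholders):
+
+```ruby
+output {
+  elasticsearch {
+    # "staging" is an arbitrary label; the part before the ":" is ignored internally
+    cloud_id => "staging:<base64-cloud-id>"
+    # Format is "<username>:<password>"
+    cloud_auth => "elastic:<password>"
+  }
+}
+```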
+
+## Using Cloud ID and Cloud Auth with plugins [cloud-id-plugins]
+
+The Elasticsearch input, output, and filter plugins support `cloud_id` and `cloud_auth` in their configurations.
+
+* [Elasticsearch input plugin](/reference/plugins-inputs-elasticsearch.md#plugins-inputs-elasticsearch-cloud_id)
+* [Elasticsearch filter plugin](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-cloud_id)
+* [Elasticsearch output plugin](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_id)
+
+
+## Sending {{ls}} management data to {{es}} Services [cloud-id-mgmt]
+
+These settings in the `logstash.yml` config file can help you get set up to send management data to Elastic Cloud:
+
+* `xpack.management.elasticsearch.cloud_id`
+* `xpack.management.elasticsearch.cloud_auth`
+
+You can use the `xpack.management.elasticsearch.cloud_id` setting as an alternative to `xpack.management.elasticsearch.hosts`.
+
+You can use the `xpack.management.elasticsearch.cloud_auth` setting as an alternative to both `xpack.management.elasticsearch.username` and `xpack.management.elasticsearch.password`. The credentials you specify here should be for a user with the `logstash_admin` role, which provides access to `.logstash-*` indices for managing configurations.
+
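+For example, in `logstash.yml` (the values below are placeholders):
+
+```yaml
+xpack.management.elasticsearch.cloud_id: "management-cluster:<base64-cloud-id>"
+xpack.management.elasticsearch.cloud_auth: "logstash_admin_user:<password>"
+```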
+
diff --git a/docs/reference/core-operations.md b/docs/reference/core-operations.md
new file mode 100644
index 000000000..2f0c3ac05
--- /dev/null
+++ b/docs/reference/core-operations.md
@@ -0,0 +1,92 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/core-operations.html
+---
+
+# Performing Core Operations [core-operations]
+
+The plugins described in this section are useful for core operations, such as mutating and dropping events.
+
+[date filter](/reference/plugins-filters-date.md)
+: Parses dates from fields to use as Logstash timestamps for events.
+
+ The following config parses a field called `logdate` to set the Logstash timestamp:
+
+ ```json
+ filter {
+ date {
+ match => [ "logdate", "MMM dd yyyy HH:mm:ss" ]
+ }
+ }
+ ```
+
+
+[drop filter](/reference/plugins-filters-drop.md)
+: Drops events. This filter is typically used in combination with conditionals.
+
+ The following config drops `debug` level log messages:
+
+ ```json
+ filter {
+ if [loglevel] == "debug" {
+ drop { }
+ }
+ }
+ ```
+
+
+[fingerprint filter](/reference/plugins-filters-fingerprint.md)
+: Fingerprints fields by applying a consistent hash.
+
+ The following config fingerprints the `IP`, `@timestamp`, and `message` fields and adds the hash to a metadata field called `generated_id`:
+
+ ```json
+ filter {
+ fingerprint {
+ source => ["IP", "@timestamp", "message"]
+ method => "SHA1"
+ key => "0123"
+ target => "[@metadata][generated_id]"
+ }
+ }
+ ```
+
+
+[mutate filter](/reference/plugins-filters-mutate.md)
+: Performs general mutations on fields. You can rename, remove, replace, and modify fields in your events.
+
+ The following config renames the `HOSTORIP` field to `client_ip`:
+
+ ```json
+ filter {
+ mutate {
+ rename => { "HOSTORIP" => "client_ip" }
+ }
+ }
+ ```
+
+ The following config strips leading and trailing whitespace from the specified fields:
+
+ ```json
+ filter {
+ mutate {
+ strip => ["field1", "field2"]
+ }
+ }
+ ```
+
+
+[ruby filter](/reference/plugins-filters-ruby.md)
+: Executes Ruby code.
+
+ The following config executes Ruby code that cancels 90% of the events:
+
+ ```json
+ filter {
+ ruby {
+ code => "event.cancel if rand <= 0.90"
+ }
+ }
+ ```
+
+
diff --git a/docs/reference/creating-logstash-pipeline.md b/docs/reference/creating-logstash-pipeline.md
new file mode 100644
index 000000000..337887532
--- /dev/null
+++ b/docs/reference/creating-logstash-pipeline.md
@@ -0,0 +1,34 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuration.html
+---
+
+# Creating a Logstash Pipeline [configuration]
+
+You can create a pipeline by stringing together plugins ([inputs](/reference/input-plugins.md), [outputs](/reference/output-plugins.md), [filters](/reference/filter-plugins.md), and sometimes [codecs](/reference/codec-plugins.md)) to process data. To build a Logstash pipeline, create a config file to specify which plugins you want to use and the settings for each plugin.
+
+A very basic pipeline might contain only an input and an output. Most pipelines include at least one filter plugin because that’s where the "transform" part of the ETL (extract, transform, load) magic happens. You can reference event fields in a pipeline and use conditionals to process events when they meet certain criteria.
+
+Let’s step through creating a simple pipeline config on your local machine and then using it to run Logstash. Create a file named "logstash-simple.conf" and save it in the same directory as Logstash.
+
+```ruby
+input { stdin { } }
+output {
+ elasticsearch { cloud_id => "" api_key => "" }
+ stdout { codec => rubydebug }
+}
+```
+
+Then, run {{ls}} and specify the configuration file with the `-f` flag.
+
+```sh
+bin/logstash -f logstash-simple.conf
+```
+
+Et voilà! Logstash reads the specified configuration file and outputs to both Elasticsearch and stdout. Before we move on to [more complex examples](/reference/config-examples.md), let’s take a look at what’s in a pipeline config file.
+
+
+
+
+
+
diff --git a/docs/reference/dashboard-monitoring-with-elastic-agent.md b/docs/reference/dashboard-monitoring-with-elastic-agent.md
new file mode 100644
index 000000000..273af5293
--- /dev/null
+++ b/docs/reference/dashboard-monitoring-with-elastic-agent.md
@@ -0,0 +1,145 @@
+---
+navigation_title: "Collect monitoring data for dashboards"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/dashboard-monitoring-with-elastic-agent.html
+---
+
+# Collect {{ls}} monitoring data for dashboards [dashboard-monitoring-with-elastic-agent]
+
+
+{{agent}} collects monitoring data from your {{ls}} instance, sends it directly to your monitoring cluster, and shows the data in {{ls}} dashboards.
+
+You can enroll {{agent}} in [{{fleet}}](docs-content://reference/ingestion-tools/fleet/install-fleet-managed-elastic-agent.md) for management from a central location, or you can run [{{agent}} standalone](docs-content://reference/ingestion-tools/fleet/install-standalone-elastic-agent.md).
+
+**Prerequisites**
+
+Complete these steps as you prepare to collect and ship monitoring data for dashboards:
+
+::::{dropdown} Disable default collection of {{ls}} monitoring metrics
+:name: disable-default-db
+
+Set `monitoring.enabled` to `false` in logstash.yml to disable default collection:
+
+```yaml
+monitoring.enabled: false
+```
+
+::::
+
+
+::::{dropdown} Specify the target cluster_uuid (optional)
+:name: define-cluster__uuid-db
+
+To bind the metrics of {{ls}} to a specific cluster, optionally define the `monitoring.cluster_uuid` in the configuration file (logstash.yml):
+
+```yaml
+monitoring.cluster_uuid: PRODUCTION_ES_CLUSTER_UUID
+```
+
+::::
+
+
+::::{dropdown} Create a monitoring user (standalone agent only)
+:name: create-user-db
+
+Create a user on the production cluster that has the `remote_monitoring_collector` [built-in role](elasticsearch://reference/elasticsearch/roles.md).
+
+::::
+
+
+
+## Install and configure {{agent}} [install-and-configure-db]
+
+Install and configure {{agent}} to collect {{ls}} monitoring data for dashboards. We’ll walk you through the process in these steps:
+
+* [Add the {{agent}} {{ls}} integration to monitor host logs and metrics](#add-logstash-integration-ead)
+* [Install and run an {{agent}} on your machine](#add-agent-to-fleet-ead)
+* [View assets](#view-assets-ead)
+* [Monitor {{ls}} logs and metrics](#view-data-dashboard)
+
+Check out [Installing {{agent}}](docs-content://reference/ingestion-tools/fleet/install-elastic-agents.md) in the *Fleet and Elastic Agent Guide* for more info.
+
+
+### Add the {{agent}} {{ls}} integration to monitor host logs and metrics [add-logstash-integration-ead]
+
+1. Go to the {{kib}} home page, and click **Add integrations**.
+
+ :::{image} ../images/kibana-home.png
+ :alt: {{kib}} home page
+ :class: screenshot
+ :::
+
+2. In the query bar, search for **{{ls}}** and select the integration to see more details.
+3. Click **Add {{ls}}**.
+4. Configure the integration name and add a description (optional).
+5. Configure the integration to collect logs.
+
+ * Make sure that **Logs** is turned on if you want to collect logs from your {{ls}} instance. Be sure that the required settings are correctly configured.
+ * Under **Logs**, modify the log paths to match your {{ls}} environment.
+
+6. Configure the integration to collect metrics.
+
+ * Make sure that **Metrics (Technical Preview)** is turned on, and **Metrics (Stack Monitoring)** is turned off.
+    * Under **Metrics (Technical Preview)**, make sure the {{ls}} URL setting points to your {{ls}} instance URL. By default, the integration collects {{ls}} monitoring metrics from `https://localhost:9600`. If that host and port number are not correct, update the `Logstash URL` setting. If you configured {{ls}} to use encrypted communications or require a username and password, you must access it via HTTPS. Expand the **Advanced Settings** options and fill in the appropriate values for your {{ls}} instance.
+
+7. Click **Save and continue**. This step takes a minute or two to complete. When it’s done, you’ll have an agent policy that contains a system integration policy for the configuration you just specified.
+8. In the popup, click **Add {{agent}} to your hosts** to open the **Add agent** flyout.
+
+ ::::{tip}
+ If you accidentally close the popup, go to **{{fleet}} > Agents** and click **Add agent**.
+ ::::
+
+
+
+## Install and run an {{agent}} on your machine [add-agent-to-fleet-ead]
+
+The **Add agent** flyout has two options: **Enroll in {{fleet}}** and **Run standalone**. Enrolling agents in {{fleet}} (default) provides a centralized management tool in {{kib}}, reducing management overhead.
+
+:::::::{tab-set}
+
+::::::{tab-item} Fleet-managed
+1. When the **Add Agent flyout** appears, stay on the **Enroll in fleet** tab.
+2. Skip the **Select enrollment token** step. The enrollment token you need is already selected.
+
+ ::::{note}
+ The enrollment token is specific to the {{agent}} policy that you just created. When you run the command to enroll the agent in {{fleet}}, you will pass in the enrollment token.
+ ::::
+
+3. Download, install, and enroll the {{agent}} on your host by selecting your host operating system and following the **Install {{agent}} on your host** step.
+
+It takes about a minute for {{agent}} to enroll in {{fleet}}, download the configuration specified in the policy you just created, and start collecting data.
+::::::
+
+::::::{tab-item} Run standalone
+1. When the **Add Agent flyout** appears, navigate to the **Run standalone** tab.
+2. Configure the agent. Follow the instructions in **Install Elastic Agent on your host**.
+3. After unpacking the binary, replace the `elastic-agent.yml` file with the one supplied in the **Add agent** flyout on the "Run standalone" tab, replacing the values of `ES_USERNAME` and `ES_PASSWORD` appropriately.
+4. Run `sudo ./elastic-agent install`.
+::::::
+
+:::::::
+
+## View assets [view-assets-ead]
+
+After you have confirmed enrollment and data is coming in, click **View assets** to access dashboards related to the {{ls}} integration.
+
+For traditional Stack Monitoring UI, the dashboards marked **[Logs {{ls}}]** are used to visualize the logs produced by your {{ls}} instances, and those marked **[Metrics {{ls}}]** are the technical preview metrics dashboards. The metrics dashboards are populated with data only if you selected the **Metrics (Technical Preview)** checkbox.
+
+:::{image} ../images/integration-assets-dashboards.png
+:alt: Integration assets
+:class: screenshot
+:::
+
+A number of dashboards are included to view {{ls}} as a whole, along with dashboards that allow you to drill down into how {{ls}} is performing on a node, pipeline, and plugin basis.
+
+
+## Monitor {{ls}} logs and metrics [view-data-dashboard]
+
+From the list of assets, open the **[Metrics {{ls}}] {{ls}} overview** dashboard to view overall performance. Then follow the navigation panel to further drill down into {{ls}} performance.
+
+:::{image} ../images/integration-dashboard-overview.png
+:alt: The {{ls}} Overview dashboard in {{kib}} with various metrics from your monitored {{ls}}
+:class: screenshot
+:::
+
+You can hover over any visualization to adjust its settings, or click the **Edit** button to make changes to the dashboard. To learn more, refer to [Dashboard and visualizations](docs-content://explore-analyze/dashboards.md).
diff --git a/docs/reference/data-deserialization.md b/docs/reference/data-deserialization.md
new file mode 100644
index 000000000..488008ec1
--- /dev/null
+++ b/docs/reference/data-deserialization.md
@@ -0,0 +1,105 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/data-deserialization.html
+---
+
+# Deserializing Data [data-deserialization]
+
+The plugins described in this section are useful for deserializing data into Logstash events.
+
+[avro codec](/reference/plugins-codecs-avro.md)
+: Reads serialized Avro records as Logstash events. This plugin deserializes individual Avro records. It is not for reading Avro files. Avro files have a unique format that must be handled upon input.
+
+ The following config deserializes input from Kafka:
+
+ ```json
+ input {
+ kafka {
+ codec => {
+ avro => {
+ schema_uri => "/tmp/schema.avsc"
+ }
+ }
+ }
+ }
+ ...
+ ```
+
+
+[csv filter](/reference/plugins-filters-csv.md)
+: Parses comma-separated value data into individual fields. By default, the filter autogenerates field names (column1, column2, and so on), or you can specify a list of names. You can also change the column separator.
+
+ The following config parses CSV data into the field names specified in the `columns` field:
+
+ ```json
+ filter {
+ csv {
+ separator => ","
+ columns => [ "Transaction Number", "Date", "Description", "Amount Debit", "Amount Credit", "Balance" ]
+ }
+ }
+ ```
+
+
+[fluent codec](/reference/plugins-codecs-fluent.md)
+: Reads the Fluentd `msgpack` schema.
+
+ The following config decodes logs received from `fluent-logger-ruby`:
+
+ ```json
+ input {
+ tcp {
+ codec => fluent
+ port => 4000
+ }
+ }
+ ```
+
+
+[json codec](/reference/plugins-codecs-json.md)
+: Decodes (via inputs) and encodes (via outputs) JSON formatted content, creating one event per element in a JSON array.
+
+ The following config decodes the JSON formatted content in a file:
+
+ ```json
+ input {
+ file {
+ path => "/path/to/myfile.json"
+        codec => "json"
+      }
+    }
+    ```
+
+
+[protobuf codec](/reference/plugins-codecs-protobuf.md)
+: Reads protobuf encoded messages and converts them to Logstash events. Requires the protobuf definitions to be compiled as Ruby files. You can compile them by using the [ruby-protoc compiler](https://github.com/codekitchen/ruby-protocol-buffers).
+
+ The following config decodes events from a Kafka stream:
+
+ ```json
+    input {
+ kafka {
+ zk_connect => "127.0.0.1"
+ topic_id => "your_topic_goes_here"
+ codec => protobuf {
+ class_name => "Animal::Unicorn"
+ include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb']
+ }
+ }
+ }
+ ```
+
+
+[xml filter](/reference/plugins-filters-xml.md)
+: Parses XML into fields.
+
+ The following config parses the whole XML document stored in the `message` field:
+
+ ```json
+ filter {
+ xml {
+ source => "message"
+ }
+ }
+ ```
+
+
diff --git a/docs/reference/dead-letter-queues.md b/docs/reference/dead-letter-queues.md
new file mode 100644
index 000000000..0baaf6946
--- /dev/null
+++ b/docs/reference/dead-letter-queues.md
@@ -0,0 +1,257 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/dead-letter-queues.html
+---
+
+# Dead letter queues (DLQ) [dead-letter-queues]
+
+The dead letter queue (DLQ) is designed as a place to temporarily write events that cannot be processed. The DLQ gives you flexibility to investigate problematic events without blocking the pipeline or losing the events. Your pipeline keeps flowing, and the immediate problem is averted. But those events still need to be addressed.
+
+You can [process events from the DLQ](#es-proc-dlq) with the [`dead_letter_queue` input plugin](/reference/plugins-inputs-dead_letter_queue.md) .
+
+Processing events does not delete items from the queue, and the DLQ sometimes needs attention. See [Track dead letter queue size](#dlq-size) and [Clear the dead letter queue](#dlq-clear) for more info.
+
+## How the dead letter queue works [dead-letter-how]
+
+By default, when Logstash encounters an event that it cannot process because the data contains a mapping error or some other issue, the Logstash pipeline either hangs or drops the unsuccessful event. In order to protect against data loss in this situation, you can [configure Logstash](#configuring-dlq) to write unsuccessful events to a dead letter queue instead of dropping them.
+
+::::{note}
+The dead letter queue is currently supported only for the [{{es}} output](/reference/plugins-outputs-elasticsearch.md) and [conditional statements evaluation](/reference/event-dependent-configuration.md#conditionals). The dead letter queue is used for documents with response codes of 400 or 404, both of which indicate an event that cannot be retried. It’s also used when a conditional evaluation encounters an error.
+::::
+
+
+Each event written to the dead letter queue includes the original event, metadata that describes the reason the event could not be processed, information about the plugin that wrote the event, and the timestamp when the event entered the dead letter queue.
+
+To process events in the dead letter queue, create a Logstash pipeline configuration that uses the [`dead_letter_queue` input plugin](/reference/plugins-inputs-dead_letter_queue.md) to read from the queue. See [Processing events in the dead letter queue](#processing-dlq-events) for more information.
+
+:::{image} ../images/dead_letter_queue.png
+:alt: Diagram showing pipeline reading from the dead letter queue
+:::
+
+
+## {{es}} processing and the dead letter queue [es-proc-dlq]
+
+**HTTP request failure.** If the HTTP request fails (because {{es}} is unreachable or because it returned an HTTP error code), the {{es}} output retries the entire request indefinitely. In these scenarios, the dead letter queue has no opportunity to intercept.
+
+**HTTP request success.** The [{{es}} Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) can perform multiple actions using the same request. If the Bulk API request is successful, it returns `200 OK`, even if some documents in the batch have [failed](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk#bulk-failures-ex). In this situation, the `errors` flag for the request will be `true`.
+
+The response body can include metadata indicating that one or more specific actions in the bulk request could not be performed, along with an HTTP-style status code per entry to indicate why the action could not be performed. If the DLQ is configured, individual indexing failures are routed there.
+
+Even if you regularly process events, events remain in the dead letter queue. The dead letter queue requires [manual intervention](#dlq-clear) to clear it.
+
+
+## Conditional statements and the dead letter queue [conditionals-dlq]
+
+When a conditional statement reaches an error in processing an event, such as comparing string and integer values, the event, as it is at the time of evaluation, is inserted into the dead letter queue.
+
+
+## Configuring {{ls}} to use dead letter queues [configuring-dlq]
+
+Dead letter queues are disabled by default. To enable dead letter queues, set the `dead_letter_queue.enable` option in the `logstash.yml` [settings file](/reference/logstash-settings-file.md):
+
+```yaml
+dead_letter_queue.enable: true
+```
+
+Dead letter queues are stored as files in the local directory of the Logstash instance. By default, the dead letter queue files are stored in `path.data/dead_letter_queue`. Each pipeline has a separate queue. For example, the dead letter queue for the `main` pipeline is stored in `LOGSTASH_HOME/data/dead_letter_queue/main` by default. The queue files are numbered sequentially: `1.log`, `2.log`, and so on.
+
+You can set `path.dead_letter_queue` in the `logstash.yml` file to specify a different path for the files:
+
+```yaml
+path.dead_letter_queue: "path/to/data/dead_letter_queue"
+```
+
+::::{tip}
+Use the local filesystem for data integrity and performance. Network File System (NFS) is not supported.
+::::
+
+
+Dead letter queue entries are written to a temporary file, which is then renamed to a dead letter queue segment file, which is then eligible for ingestion. The rename happens either when this temporary file is considered *full*, or when a period of time has elapsed since the last dead letter queue eligible event was written to the temporary file.
+
+This length of time can be set using the `dead_letter_queue.flush_interval` setting, which is specified in milliseconds and defaults to 5000ms. When writes to the dead letter queue are infrequent, a low value means more, smaller queue files may be written, while a larger value introduces more latency between items being written to the dead letter queue and being made available for reading by the `dead_letter_queue` input.
+
+::::{note}
+This value cannot be set to lower than 1000ms.
+::::
+
+```yaml
+dead_letter_queue.flush_interval: 5000
+```
+
+::::{note}
+You may not use the same `dead_letter_queue` path for two different Logstash instances.
+::::
+
+
+### File rotation [file-rotation]
+
+Dead letter queues have a built-in file rotation policy that manages the file size of the queue. When the file size reaches a preconfigured threshold, a new file is created automatically.
+
+
+### Size management [size-management]
+
+By default, the maximum size of each dead letter queue is set to 1024mb. To change this setting, use the `dead_letter_queue.max_bytes` option. Entries will be dropped if they would increase the size of the dead letter queue beyond this setting. Use the `dead_letter_queue.storage_policy` option to control which entries should be dropped to avoid exceeding the size limit. Set the value to `drop_newer` (default) to stop accepting new values that would push the file size over the limit. Set the value to `drop_older` to remove the oldest events to make space for new ones.
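+
+For example, a sketch of the relevant `logstash.yml` settings (the values are illustrative, not recommendations):
+
+```yaml
+dead_letter_queue.max_bytes: 2048mb
+dead_letter_queue.storage_policy: drop_older
+```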
+
+#### Age policy [age-policy]
+
+You can use the age policy to automatically control the volume of events in the dead letter queue. Use the `dead_letter_queue.retain.age` setting (in `logstash.yml` or `pipelines.yml`) to have {{ls}} remove events that are older than a value you define. Available time units are `d`, `h`, `m`, `s` respectively for days, hours, minutes and seconds. There is no default time unit, so you need to specify it.
+
+```yaml
+dead_letter_queue.retain.age: 2d
+```
+
+The age policy is verified and applied on event writes and during pipeline shutdown. For that reason, your dead-letter-queue folder may store expired events for longer than specified, and the reader pipeline could possibly encounter outdated events.
+
+
+
+### Automatic cleaning of consumed events [auto-clean]
+
+By default, the dead letter queue input plugin does not remove the events that it consumes. Instead, it commits a reference to avoid re-processing events. Use the `clean_consumed` setting in the dead letter queue input plugin in order to remove segments that have been fully consumed, freeing space while processing.
+
+```yaml
+input {
+ dead_letter_queue {
+ path => "/path/to/data/dead_letter_queue"
+ pipeline_id => "main"
+ clean_consumed => true
+ }
+}
+```
+
+
+
+## Processing events in the dead letter queue [processing-dlq-events]
+
+When you are ready to process events in the dead letter queue, you create a pipeline that uses the [`dead_letter_queue` input plugin](/reference/plugins-inputs-dead_letter_queue.md) to read from the dead letter queue. The pipeline configuration that you use depends, of course, on what you need to do. For example, if the dead letter queue contains events that resulted from a mapping error in Elasticsearch, you can create a pipeline that reads the "dead" events, removes the field that caused the mapping issue, and re-indexes the clean events into Elasticsearch.
+
+The following example shows a simple pipeline that reads events from the dead letter queue and writes the events, including metadata, to standard output:
+
+```yaml
+input {
+ dead_letter_queue {
+ path => "/path/to/data/dead_letter_queue" <1>
+ commit_offsets => true <2>
+ pipeline_id => "main" <3>
+ }
+}
+
+output {
+ stdout {
+ codec => rubydebug { metadata => true }
+ }
+}
+```
+
+1. The path to the top-level directory containing the dead letter queue. This directory contains a separate folder for each pipeline that writes to the dead letter queue. To find the path to this directory, look at the `logstash.yml` [settings file](/reference/logstash-settings-file.md). By default, Logstash creates the `dead_letter_queue` directory under the location used for persistent storage (`path.data`), for example, `LOGSTASH_HOME/data/dead_letter_queue`. However, if `path.dead_letter_queue` is set, it uses that location instead.
+2. When `true`, saves the offset. When the pipeline restarts, it will continue reading from the position where it left off rather than reprocessing all the items in the queue. You can set `commit_offsets` to `false` when you are exploring events in the dead letter queue and want to iterate over the events multiple times.
+3. The ID of the pipeline that’s writing to the dead letter queue. The default is `"main"`.
+
+
+For another example, see [Example: Processing data that has mapping errors](#dlq-example).
+
+When the pipeline has finished processing all the events in the dead letter queue, it will continue to run and process new events as they stream into the queue. This means that you do not need to stop your production system to handle events in the dead letter queue.
+
+::::{note}
+Events emitted from the [`dead_letter_queue` input plugin](/reference/plugins-inputs-dead_letter_queue.md) plugin will not be resubmitted to the dead letter queue if they cannot be processed correctly.
+::::
+
+
+
+## Reading from a timestamp [dlq-timestamp]
+
+When you read from the dead letter queue, you might not want to process all the events in the queue, especially if there are a lot of old events in the queue. You can start processing events at a specific point in the queue by using the `start_timestamp` option. This option configures the pipeline to start processing events based on the timestamp of when they entered the queue:
+
+```yaml
+input {
+ dead_letter_queue {
+ path => "/path/to/data/dead_letter_queue"
+ start_timestamp => "2017-06-06T23:40:37"
+ pipeline_id => "main"
+ }
+}
+```
+
+For this example, the pipeline starts reading all events that were delivered to the dead letter queue on or after June 6, 2017, at 23:40:37.
+
+
+## Example: Processing data that has mapping errors [dlq-example]
+
+In this example, the user attempts to index a document that includes geo_ip data, but the data cannot be processed because it contains a mapping error:
+
+```json
+{"geoip":{"location":"home"}}
+```
+
+Indexing fails because the Logstash output plugin expects a `geo_point` object in the `location` field, but the value is a string. The failed event is written to the dead letter queue, along with metadata about the error that caused the failure:
+
+```json
+{
+ "@metadata" => {
+ "dead_letter_queue" => {
+ "entry_time" => #,
+ "plugin_id" => "fb80f1925088497215b8d037e622dec5819b503e-4",
+ "plugin_type" => "elasticsearch",
+ "reason" => "Could not index event to Elasticsearch. status: 400, action: [\"index\", {:_id=>nil, :_index=>\"logstash-2017.06.22\", :_type=>\"doc\", :_routing=>nil}, 2017-06-22T01:29:29.804Z My-MacBook-Pro-2.local {\"geoip\":{\"location\":\"home\"}}], response: {\"index\"=>{\"_index\"=>\"logstash-2017.06.22\", \"_type\"=>\"doc\", \"_id\"=>\"AVzNayPze1iR9yDdI2MD\", \"status\"=>400, \"error\"=>{\"type\"=>\"mapper_parsing_exception\", \"reason\"=>\"failed to parse\", \"caused_by\"=>{\"type\"=>\"illegal_argument_exception\", \"reason\"=>\"illegal latitude value [266.30859375] for geoip.location\"}}}}"
+ }
+ },
+ "@timestamp" => 2017-06-22T01:29:29.804Z,
+ "@version" => "1",
+ "geoip" => {
+ "location" => "home"
+ },
+ "host" => "My-MacBook-Pro-2.local",
+ "message" => "{\"geoip\":{\"location\":\"home\"}}"
+}
+```
+
+To process the failed event, you create the following pipeline that reads from the dead letter queue and removes the mapping problem:
+
+```json
+input {
+ dead_letter_queue {
+ path => "/path/to/data/dead_letter_queue/" <1>
+ }
+}
+filter {
+ mutate {
+ remove_field => "[geoip][location]" <2>
+ }
+}
+output {
+ elasticsearch{
+ hosts => [ "localhost:9200" ] <3>
+ }
+}
+```
+
+1. The [`dead_letter_queue` input](/reference/plugins-inputs-dead_letter_queue.md) reads from the dead letter queue.
+2. The `mutate` filter removes the problem field called `location`.
+3. The clean event is sent to Elasticsearch, where it can be indexed because the mapping issue is resolved.
+
+
+
+## Track dead letter queue size [dlq-size]
+
+Monitor the size of the dead letter queue before it becomes a problem. By checking it periodically, you can determine the maximum queue size that makes sense for each pipeline.
+
+The size of the DLQ for each pipeline is available in the node stats API.
+
+```txt
+pipelines.${pipeline_id}.dead_letter_queue.queue_size_in_bytes
+```
+
+Where `${pipeline_id}` is the name of a pipeline with DLQ enabled.
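+
+For example, assuming the default API port of 9600, you can query the node stats API and look for this value in the response:
+
+```sh
+curl -XGET 'localhost:9600/_node/stats/pipelines?pretty'
+```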
+
+
+## Clear the dead letter queue [dlq-clear]
+
+The dead letter queue cannot be cleared with the upstream pipeline running.
+
+The dead letter queue is a directory of pages. To clear it, stop the pipeline and delete the directory at this location:
+
+```txt
+${path.data}/dead_letter_queue/${pipeline_id}
+```
+
+Where `${pipeline_id}` is the name of a pipeline with DLQ enabled.
+
+The pipeline creates a new dead letter queue when it starts again.
diff --git a/docs/reference/deploying-scaling-logstash.md b/docs/reference/deploying-scaling-logstash.md
new file mode 100644
index 000000000..4c5f18280
--- /dev/null
+++ b/docs/reference/deploying-scaling-logstash.md
@@ -0,0 +1,165 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/deploying-and-scaling.html
+---
+
+# Deploying and scaling Logstash [deploying-and-scaling]
+
+The Elastic Stack is used for tons of use cases, from operational log and metrics analytics, to enterprise and application search. Making sure your data gets scalably, durably, and securely transported to Elasticsearch is extremely important, especially for mission critical environments.
+
+The goal of this document is to highlight the most common architecture patterns for Logstash and how to effectively scale as your deployment grows. The focus will be around the operational log, metrics, and security analytics use cases because they tend to require larger scale deployments. The deploying and scaling recommendations provided here may vary based on your own requirements.
+
+
+## Getting Started [deploying-getting-started]
+
+For first time users, if you simply want to tail a log file to grasp the power of the Elastic Stack, we recommend trying [Filebeat Modules](beats://reference/filebeat/filebeat-modules-overview.md). Filebeat Modules enable you to quickly collect, parse, and index popular log types and view pre-built Kibana dashboards within minutes. [Metricbeat Modules](beats://reference/metricbeat/metricbeat-modules.md) provide a similar experience, but with metrics data. In this context, Beats will ship data directly to Elasticsearch where [Ingest Nodes](docs-content://manage-data/ingest/transform-enrich/ingest-pipelines.md) will process and index your data.
+
+:::{image} ../images/deploy1.png
+:alt: deploy1
+:::
+
+
+### Introducing Logstash [_introducing_logstash]
+
+What are the main benefits for integrating Logstash into your architecture?
+
+* Scale through ingestion spikes - Logstash has an adaptive disk-based buffering system that will absorb incoming throughput, therefore mitigating backpressure
+* Ingest from other data sources like databases, S3, or messaging queues
+* Emit data to multiple destinations like S3, HDFS, or write to a file
+* Compose more sophisticated processing pipelines with conditional dataflow logic
+
+
+## Scaling Ingest [scaling-ingest]
+
+Beats and Logstash make ingest awesome. Together, they provide a comprehensive solution that is scalable and resilient. What can you expect?
+
+* Horizontal scalability, high availability, and variable load handling
+* Message durability with at-least-once delivery guarantees
+* End-to-end secure transport with authentication and wire encryption
+
+
+### Beats and Logstash [_beats_and_logstash]
+
+Beats run across thousands of edge host servers, collecting, tailing, and shipping logs to Logstash. Logstash serves as the centralized streaming engine for data unification and enrichment. The [Beats input plugin](/reference/plugins-inputs-beats.md) exposes a secure, acknowledgement-based endpoint for Beats to send data to Logstash.
+
+:::{image} ../images/deploy2.png
+:alt: deploy2
+:::
+
+::::{note}
+Enabling persistent queues is strongly recommended, and these architecture characteristics assume that they are enabled. We encourage you to review the [Persistent queues (PQ)](/reference/persistent-queues.md) documentation for feature benefits and more details on resiliency.
+::::
+
+
+
+### Scalability [_scalability]
+
+Logstash is horizontally scalable and can form groups of nodes running the same pipeline. Logstash’s adaptive buffering capabilities will facilitate smooth streaming even through variable throughput loads. If the Logstash layer becomes an ingestion bottleneck, simply add more nodes to scale out. Here are a few general recommendations:
+
+* Beats should [load balance](beats://reference/filebeat/elasticsearch-output.md#_loadbalance) across a group of Logstash nodes.
+* A minimum of two Logstash nodes are recommended for high availability.
+* It’s common to deploy just one Beats input per Logstash node, but multiple Beats inputs can also be deployed per Logstash node to expose independent endpoints for different data sources.
+
+
+### Resiliency [_resiliency]
+
+When using [Filebeat](https://www.elastic.co/products/beats/filebeat) or [Winlogbeat](https://www.elastic.co/products/beats/winlogbeat) for log collection within this ingest flow, **at-least-once delivery** is guaranteed. Both the communication protocols, from Filebeat or Winlogbeat to Logstash, and from Logstash to Elasticsearch, are synchronous and support acknowledgements. The other Beats don’t yet have support for acknowledgements.
+
+Logstash persistent queues provide protection across node failures. For disk-level resiliency in Logstash, it’s important to ensure disk redundancy. For on-premise deployments, it’s recommended that you configure RAID. When running in the cloud or a containerized environment, it’s recommended that you use persistent disks with replication strategies that reflect your data SLAs.
+
+::::{note}
+Make sure `queue.checkpoint.writes: 1` is set for at-least-once guarantees. For more details, see the [persistent queue durability](/reference/persistent-queues.md#durability-persistent-queues) documentation.
+::::
+
+
+
+### Processing [_processing]
+
+Logstash will commonly extract fields with [grok](/reference/plugins-filters-grok.md) or [dissect](/reference/plugins-filters-dissect.md), augment [geographical](/reference/plugins-filters-geoip.md) info, and can further enrich events with [file](/reference/plugins-filters-translate.md), [database](/reference/plugins-filters-jdbc_streaming.md), or [Elasticsearch](/reference/plugins-filters-elasticsearch.md) lookup datasets. Be aware that processing complexity can affect overall throughput and CPU utilization. Make sure to check out the other [available filter plugins](/reference/filter-plugins.md).
+
+
+### Secure Transport [_secure_transport]
+
+Enterprise-grade security is available across the entire delivery chain.
+
+* Wire encryption is recommended for both the transport from [Beats to Logstash](beats://reference/filebeat/configuring-ssl-logstash.md) and from [Logstash to Elasticsearch](/reference/secure-connection.md).
+* There’s a wealth of security options when communicating with Elasticsearch including basic authentication, TLS, PKI, LDAP, AD, and other custom realms. To enable Elasticsearch security, see [Secure a cluster](docs-content://deploy-manage/security.md).
+
+
+### Monitoring [_monitoring]
+
+When running Logstash 5.2 or greater, the [Monitoring UI](https://www.elastic.co/products/x-pack/monitoring) provides deep visibility into your deployment metrics, helping observe performance and alleviate bottlenecks as you scale. Monitoring is an X-Pack feature under the Basic License and is therefore **free to use**. To get started, see [Monitoring Logstash](https://www.elastic.co/docs/api/doc/logstash/).
+
+If external monitoring is preferred, there are [monitoring APIs](monitoring-logstash.md) that return point-in-time metrics snapshots.
+
+
+## Adding Other Popular Sources [adding-other-sources]
+
+Users may have other mechanisms of collecting logging data, and it’s easy to integrate and centralize them into the Elastic Stack. Let’s walk through a few scenarios:
+
+:::{image} ../images/deploy3.png
+:alt: deploy3
+:::
+
+
+### TCP, UDP, and HTTP Protocols [_tcp_udp_and_http_protocols]
+
+The TCP, UDP, and HTTP protocols are common ways to feed data into Logstash. Logstash can expose endpoint listeners with the respective [TCP](/reference/plugins-inputs-tcp.md), [UDP](/reference/plugins-inputs-udp.md), and [HTTP](/reference/plugins-inputs-http.md) input plugins. The data sources enumerated below are typically ingested through one of these three protocols.
+
+::::{note}
+The TCP and UDP protocols do not support application-level acknowledgements, so connectivity issues may result in data loss.
+::::
+
+
+For high availability scenarios, a third-party hardware or software load balancer, like HAProxy, should be added to fan out traffic to a group of Logstash nodes.
+
+
+### Network and Security Data [_network_and_security_data]
+
+Although Beats may already satisfy your data ingest use case, network and security datasets come in a variety of forms. Let’s touch on a few other ingestion points.
+
+* Network wire data - collect and analyze network traffic with [Packetbeat](https://www.elastic.co/products/beats/packetbeat).
+* Netflow v5/v9/v10 - Logstash understands data from Netflow/IPFIX exporters with the [Netflow codec](/reference/plugins-codecs-netflow.md).
+* Nmap - Logstash accepts and parses Nmap XML data with the [Nmap codec](/reference/plugins-codecs-nmap.md).
+* SNMP trap - Logstash has a native [SNMP trap input](/reference/plugins-inputs-snmptrap.md).
+* CEF - Logstash accepts and parses CEF data from systems like Arcsight SmartConnectors with the [CEF codec](/reference/plugins-codecs-cef.md).
+
+
+### Centralized Syslog Servers [_centralized_syslog_servers]
+
+Existing syslog server technologies like rsyslog and syslog-ng generally send syslog over to Logstash TCP or UDP endpoints for extraction, processing, and persistence. If the data format conforms to RFC3164, it can be fed directly to the [Logstash syslog input](/reference/plugins-inputs-syslog.md).
+
+
+### Infrastructure & Application Data and IoT [_infrastructure_application_data_and_iot]
+
+Infrastructure and application metrics can be collected with [Metricbeat](https://www.elastic.co/products/beats/metricbeat), but applications can also send webhooks to a Logstash HTTP input or have metrics polled from an HTTP endpoint with the [HTTP poller input plugin](/reference/plugins-inputs-http_poller.md).
+
+For applications that log with log4j2, it’s recommended to use the SocketAppender to send JSON to the Logstash TCP input. Alternatively, log4j2 can also log to a file for collection with Filebeat. Usage of the log4j1 SocketAppender is not recommended.
+
+IoT devices like Raspberry Pis, smartphones, and connected vehicles often send telemetry data through one of these protocols.
+
+
+## Integrating with Messaging Queues [integrating-with-messaging-queues]
+
+If you are leveraging message queuing technologies as part of your existing infrastructure, getting that data into the Elastic Stack is easy. For existing users who are utilizing an external queuing layer like Redis or RabbitMQ just for data buffering with Logstash, it’s recommended to use Logstash persistent queues instead of an external queuing layer. This will help with overall ease of management by removing an unnecessary layer of complexity in your ingest architecture.
+
+For users who want to integrate data from existing Kafka deployments or require the underlying usage of ephemeral storage, Kafka can serve as a data hub where Beats can persist to and Logstash nodes can consume from.
+
+:::{image} ../images/deploy4.png
+:alt: deploy4
+:::
+
+The other TCP, UDP, and HTTP sources can persist to Kafka with Logstash as a conduit to achieve high availability in lieu of a load balancer. A group of Logstash nodes can then consume from topics with the [Kafka input](/reference/plugins-inputs-kafka.md) to further transform and enrich the data in transit.
+
+
+### Resiliency and Recovery [_resiliency_and_recovery]
+
+When Logstash consumes from Kafka, persistent queues should be enabled and will add transport resiliency to mitigate the need for reprocessing during Logstash node failures. In this context, it’s recommended to use the default persistent queue disk allocation size `queue.max_bytes: 1GB`.
+
+If Kafka is configured to retain data for an extended period of time, data can be reprocessed from Kafka in the case of disaster recovery and reconciliation.
+
+
+### Other Messaging Queue Integrations [_other_messaging_queue_integrations]
+
+Although an additional queuing layer is not required, Logstash can consume from a myriad of other message queuing technologies like [RabbitMQ](/reference/plugins-inputs-rabbitmq.md) and [Redis](/reference/plugins-inputs-redis.md). It also supports ingestion from hosted queuing services like [Pub/Sub](/reference/plugins-inputs-google_pubsub.md), [Kinesis](/reference/plugins-inputs-kinesis.md), and [SQS](/reference/plugins-inputs-sqs.md).
+
diff --git a/docs/reference/dir-layout.md b/docs/reference/dir-layout.md
new file mode 100644
index 000000000..be4c4eb85
--- /dev/null
+++ b/docs/reference/dir-layout.md
@@ -0,0 +1,59 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/dir-layout.html
+---
+
+# Logstash Directory Layout [dir-layout]
+
+This section describes the default directory structure that is created when you unpack the Logstash installation packages.
+
+## Directory Layout of `.zip` and `.tar.gz` Archives [zip-targz-layout]
+
+The `.zip` and `.tar.gz` packages are entirely self-contained. All files and directories are, by default, contained within the home directory — the directory created when unpacking the archive.
+
+This is very convenient because you don’t have to create any directories to start using Logstash, and uninstalling Logstash is as easy as removing the home directory. However, it is advisable to change the default locations of the config and the logs directories so that you do not delete important data later on.
+
+| Type | Description | Default Location | Setting |
+| --- | --- | --- | --- |
+| home | Home directory of the Logstash installation. | `{extract.path}` - Directory created by unpacking the archive | |
+| bin | Binary scripts, including `logstash` to start Logstash and `logstash-plugin` to install plugins | ``{extract.path}/bin`` | |
+| settings | Configuration files, including `logstash.yml` and `jvm.options` | ``{extract.path}/config`` | ``path.settings`` |
+| logs | Log files | ``{extract.path}/logs`` | ``path.logs`` |
+| plugins | Local, non Ruby-Gem plugin files. Each plugin is contained in a subdirectory. Recommended for development only. | ``{extract.path}/plugins`` | ``path.plugins`` |
+| data | Data files used by logstash and its plugins for any persistence needs. | ``{extract.path}/data`` | ``path.data`` |
+
+
+## Directory Layout of Debian and RPM Packages [deb-layout]
+
+The Debian package and the RPM package each place config files, logs, and the settings files in the appropriate locations for the system:
+
+| Type | Description | Default Location | Setting |
+| --- | --- | --- | --- |
+| home | Home directory of the Logstash installation. | ``/usr/share/logstash`` | |
+| bin | Binary scripts including `logstash` to start Logstash and `logstash-plugin` to install plugins | ``/usr/share/logstash/bin`` | |
+| settings | Configuration files, including `logstash.yml` and `jvm.options` | ``/etc/logstash`` | ``path.settings`` |
+| conf | Logstash pipeline configuration files | ``/etc/logstash/conf.d/*.conf`` | See `/etc/logstash/pipelines.yml` |
+| logs | Log files | ``/var/log/logstash`` | ``path.logs`` |
+| plugins | Local, non Ruby-Gem plugin files. Each plugin is contained in a subdirectory. Recommended for development only. | ``/usr/share/logstash/plugins`` | ``path.plugins`` |
+| data | Data files used by logstash and its plugins for any persistence needs. | ``/var/lib/logstash`` | ``path.data`` |
+
+
+## Directory Layout of Docker Images [docker-layout]
+
+The Docker images are created from the `.tar.gz` packages, and follow a similar directory layout.
+
+| Type | Description | Default Location | Setting |
+| --- | --- | --- | --- |
+| home | Home directory of the Logstash installation. | ``/usr/share/logstash`` | |
+| bin | Binary scripts, including `logstash` to start Logstash and `logstash-plugin` to install plugins | ``/usr/share/logstash/bin`` | |
+| settings | Configuration files, including `logstash.yml` and `jvm.options` | ``/usr/share/logstash/config`` | ``path.settings`` |
+| conf | Logstash pipeline configuration files | ``/usr/share/logstash/pipeline`` | ``path.config`` |
+| plugins | Local, non Ruby-Gem plugin files. Each plugin is contained in a subdirectory. Recommended for development only. | ``/usr/share/logstash/plugins`` | ``path.plugins`` |
+| data | Data files used by logstash and its plugins for any persistence needs. | ``/usr/share/logstash/data`` | ``path.data`` |
+
+::::{note}
+Logstash Docker containers do not create log files by default. They log to standard output.
+::::
+
+
+
diff --git a/docs/reference/docker-config.md b/docs/reference/docker-config.md
new file mode 100644
index 000000000..0c92d08fc
--- /dev/null
+++ b/docs/reference/docker-config.md
@@ -0,0 +1,130 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/docker-config.html
+---
+
+# Configuring Logstash for Docker [docker-config]
+
+Logstash differentiates between two types of configuration: [Settings and Pipeline Configuration](/reference/config-setting-files.md).
+
+## Pipeline Configuration [_pipeline_configuration]
+
+It is essential to place your pipeline configuration where it can be found by Logstash. By default, the container will look in `/usr/share/logstash/pipeline/` for pipeline configuration files.
+
+In this example we use a bind-mounted volume to provide the configuration via the `docker run` command:
+
+```sh
+docker run --rm -it -v ~/pipeline/:/usr/share/logstash/pipeline/ docker.elastic.co/logstash/logstash:9.0.0
+```
+
+Every file in the host directory `~/pipeline/` will then be parsed by Logstash as pipeline configuration.
+
+If you don’t provide configuration to Logstash, it will run with a minimal config that listens for messages from the [Beats input plugin](/reference/plugins-inputs-beats.md) and echoes any that are received to `stdout`. In this case, the startup logs will be similar to the following:
+
+```text
+Sending Logstash logs to /usr/share/logstash/logs which is now configured via log4j2.properties.
+[2016-10-26T05:11:34,992][INFO ][logstash.inputs.beats ] Beats inputs: Starting input listener {:address=>"0.0.0.0:5044"}
+[2016-10-26T05:11:35,068][INFO ][logstash.pipeline ] Starting pipeline {"id"=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>500}
+[2016-10-26T05:11:35,078][INFO ][org.logstash.beats.Server] Starting server on port: 5044
+[2016-10-26T05:11:35,078][INFO ][logstash.pipeline ] Pipeline main started
+[2016-10-26T05:11:35,105][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
+```
+
+This is the default configuration for the image, defined in `/usr/share/logstash/pipeline/logstash.conf`. If this is the behaviour that you are observing, ensure that your pipeline configuration is being picked up correctly, and that you are replacing either `logstash.conf` or the entire `pipeline` directory.
+
+
+## Settings [_settings]
+
+The image provides several methods for configuring settings. The conventional approach is to provide a custom `logstash.yml` file, but it’s also possible to use environment variables to define settings.
+
+### Bind-mounted settings files [docker-bind-mount-settings]
+
+Settings files can also be provided through bind-mounts. Logstash expects to find them at `/usr/share/logstash/config/`.
+
+It’s possible to provide an entire directory containing all needed files:
+
+```sh
+docker run --rm -it -v ~/settings/:/usr/share/logstash/config/ docker.elastic.co/logstash/logstash:9.0.0
+```
+
+Alternatively, a single file can be mounted:
+
+```sh
+docker run --rm -it -v ~/settings/logstash.yml:/usr/share/logstash/config/logstash.yml docker.elastic.co/logstash/logstash:9.0.0
+```
+
+::::{note}
+Bind-mounted configuration files will retain the same permissions and ownership within the container that they have on the host system. Be sure to set permissions such that the files will be readable and, ideally, not writeable by the container’s `logstash` user (UID 1000).
+::::
+
+
+
+### Custom Images [_custom_images]
+
+Bind-mounted configuration is not the only option, naturally. If you prefer the *Immutable Infrastructure* approach, you can prepare a custom image containing your configuration by using a `Dockerfile` like this one:
+
+```dockerfile
+FROM docker.elastic.co/logstash/logstash:9.0.0
+RUN rm -f /usr/share/logstash/pipeline/logstash.conf
+COPY pipeline/ /usr/share/logstash/pipeline/
+COPY config/ /usr/share/logstash/config/
+```
+
+Be sure to replace or delete `logstash.conf` in your custom image, so that you don’t retain the example config from the base image.
+
+
+### Environment variable configuration [docker-env-config]
+
+Under Docker, Logstash settings can be configured via environment variables. When the container starts, a helper process checks the environment for variables that can be mapped to Logstash settings. Settings that are found in the environment override those in the `logstash.yml` as the container starts up.
+
+For compatibility with container orchestration systems, these environment variables are written in all capitals, with underscores as word separators.
+
+Some example translations are shown here:
+
+| Environment Variable | Logstash Setting |
+| --- | --- |
+| `PIPELINE_WORKERS` | `pipeline.workers` |
+| `LOG_LEVEL` | `log.level` |
+| `MONITORING_ENABLED` | `monitoring.enabled` |
+
+In general, any setting listed in the [settings documentation](/reference/logstash-settings-file.md) can be configured with this technique.
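+
+As a sketch, the same kind of settings can be passed on the `docker run` command line:
+
+```sh
+docker run --rm -it -e PIPELINE_WORKERS=2 -e LOG_LEVEL=debug docker.elastic.co/logstash/logstash:9.0.0
+```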
+
+::::{note}
+Defining settings with environment variables causes `logstash.yml` to be modified in place. This behaviour is likely undesirable if `logstash.yml` was bind-mounted from the host system. Thus, it is not recommended to combine the bind-mount technique with the environment variable technique. It is best to choose a single method for defining Logstash settings.
+::::
+
+
+
+
+## Docker defaults [_docker_defaults]
+
+The following settings have different default values when using the Docker images:
+
+`api.http.host`
+: `0.0.0.0`
+
+`monitoring.elasticsearch.hosts`
+: `http://elasticsearch:9200`
+
+::::{note}
+The setting `monitoring.elasticsearch.hosts` is not defined in the `-oss` image.
+::::
+
+
+These settings are defined in the default `logstash.yml`. They can be overridden with a [custom `logstash.yml`](#docker-bind-mount-settings) or via [environment variables](#docker-env-config).
+
+::::{important}
+If replacing `logstash.yml` with a custom version, be sure to copy the above defaults to the custom file if you want to retain them. If not, they will be "masked" by the new file.
+::::
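+
+For example, a custom `logstash.yml` that retains these defaults while adding your own settings might start like this (the `pipeline.workers` value is only illustrative):
+
+```yaml
+api.http.host: "0.0.0.0"
+monitoring.elasticsearch.hosts: "http://elasticsearch:9200"
+pipeline.workers: 2
+```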
+
+
+
+## Logging Configuration [_logging_configuration]
+
+Under Docker, Logstash logs go to standard output by default. To change this behaviour, use any of the techniques above to replace the file at `/usr/share/logstash/config/log4j2.properties`.
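+
+For example, a replacement `log4j2.properties` can be bind-mounted in the same way as the settings files described above:
+
+```sh
+docker run --rm -it -v ~/settings/log4j2.properties:/usr/share/logstash/config/log4j2.properties docker.elastic.co/logstash/logstash:9.0.0
+```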
diff --git a/docs/reference/docker.md b/docs/reference/docker.md
new file mode 100644
index 000000000..207e75785
--- /dev/null
+++ b/docs/reference/docker.md
@@ -0,0 +1,31 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/docker.html
+---
+
+# Running Logstash on Docker [docker]
+
+Docker images for Logstash are available from the Elastic Docker registry. The base image is [Red Hat Universal Base Image 9 Minimal](https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/61832888c0d15aff4912fe0d).
+
+A list of all published Docker images and tags is available at [www.docker.elastic.co](https://www.docker.elastic.co). The source code is available on [GitHub](https://github.com/elastic/logstash/tree/master).
+
+These images are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. [Start a 30-day trial](docs-content://deploy-manage/license/manage-your-license-in-self-managed-cluster.md) to try out all of the paid commercial features. See the [Subscriptions](https://www.elastic.co/subscriptions) page for information about Elastic license levels.
+
+## Pulling the image [_pulling_the_image]
+
+Obtaining Logstash for Docker is as simple as issuing a `docker pull` command against the Elastic Docker registry.
+
+However, version 9.0.0 of Logstash has not yet been released, so no Docker image is currently available for this version.
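+
+Once an image for this version is published, pulling it should look something like this:
+
+```sh
+docker pull docker.elastic.co/logstash/logstash:9.0.0
+```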
+
+
+## Verifying the image [_verifying_the_image]
+
+Although it’s optional, we highly recommend verifying the signatures included with your downloaded Docker images to ensure that the images are valid.
+
+Elastic images are signed with [Cosign](https://docs.sigstore.dev/cosign/overview/) which is part of the [Sigstore](https://www.sigstore.dev/) project. Cosign supports container signing, verification, and storage in an OCI registry. Install the appropriate [Cosign application](https://docs.sigstore.dev/cosign/installation/) for your operating system.
+
+Run the following commands to verify the container image signature for {{ls}} v9.0.0-beta1:
+
+Version 9.0.0 of Logstash has not yet been released, so no Docker image is currently available for this version.
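+
+Once an image is available, verification with Cosign typically follows a pattern like the sketch below. The public key URL here is an assumption; confirm it against the official release announcement before relying on it:
+
+```sh
+wget https://artifacts.elastic.co/cosign.pub        # assumed location of the Elastic public signing key
+cosign verify --key cosign.pub docker.elastic.co/logstash/logstash:9.0.0
+```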
+
+
diff --git a/docs/reference/ecs-ls.md b/docs/reference/ecs-ls.md
new file mode 100644
index 000000000..0254acf0a
--- /dev/null
+++ b/docs/reference/ecs-ls.md
@@ -0,0 +1,78 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ecs-ls.html
+---
+
+# ECS in Logstash [ecs-ls]
+
+The [Elastic Common Schema (ECS)](ecs://reference/index.md) is an open source specification, developed with support from the Elastic user community. ECS defines a common set of fields to be used for storing event data, such as logs and metrics, in {{es}}. With ECS, users can normalize event data to better analyze, visualize, and correlate the data represented in their events.
+
+## ECS compatibility [ecs-compatibility]
+
+Many plugins implement an ECS-compatibility mode, which causes them to produce and manipulate events in a manner that is compatible with the Elastic Common Schema (ECS).
+
+Any plugin that supports this mode will also have an `ecs_compatibility` option, which allows you to configure which mode the individual plugin instance should operate in. If left unspecified for an individual plugin, the pipeline’s `pipeline.ecs_compatibility` setting will be observed. This allows you to configure plugins to use a specific version of ECS or to use their legacy non-ECS behavior.
+
+ECS compatibility modes do not prevent you from explicitly configuring a plugin in a manner that conflicts with ECS. Instead, they ensure that *implicit* configuration avoids conflicts.
+
+### Configuring ECS [ecs-configuration]
+
+In {{ls}} 8, all plugins are run in ECS compatibility v8 mode by default, but you can opt out at the plugin, pipeline, or system level to maintain legacy behavior. This can be helpful if you have very complex pipelines that were defined pre-ECS, to allow you to either upgrade them or to avoid doing so independently of your {{ls}} 8.x upgrade.
+
+#### Specific plugin instance [_specific_plugin_instance]
+
+Use a plugin’s `ecs_compatibility` option to override the default value on the plugin instance.
+
+For example, if you want a specific instance of the GeoIP Filter to behave without ECS compatibility, you can adjust its definition in your pipeline without affecting any other plugin instances.
+
+```text
+filter {
+ geoip {
+ source => "[host][ip]"
+ ecs_compatibility => disabled
+ }
+}
+```
+
+Alternatively, if you had a UDP input with a CEF codec, and wanted both to use an ECS mode while still running {{ls}} 7, you can adjust their definitions to specify the major version of ECS to use.
+
+```text
+input {
+ udp {
+ port => 1234
+ ecs_compatibility => v8
+ codec => cef {
+ ecs_compatibility => v8
+ }
+ }
+}
+```
+
+
+#### All plugins in a given pipeline [ecs-configuration-pipeline]
+
+If you wish to provide a specific default value for `ecs_compatibility` to *all* plugins in a pipeline, you can do so with the `pipeline.ecs_compatibility` setting in your pipeline definition in `config/pipelines.yml` or Central Management. This setting will be used unless overridden by a specific plugin instance. If unspecified for an individual pipeline, the global value will be used.
+
+For example, setting `pipeline.ecs_compatibility: disabled` for a pipeline *locks in* that pipeline’s pre-{{ls}} 8 behavior.
+
+```yaml
+- pipeline.id: my-legacy-pipeline
+ path.config: "/etc/path/to/legacy-pipeline.config"
+ pipeline.ecs_compatibility: disabled
+- pipeline.id: my-ecs-pipeline
+ path.config: "/etc/path/to/ecs-pipeline.config"
+ pipeline.ecs_compatibility: v8
+```
+
+
+#### All plugins in all pipelines [ecs-configuration-all]
+
+Similarly, you can set the default value for the whole {{ls}} process by setting the `pipeline.ecs_compatibility` value in `config/logstash.yml`.
+
+```yaml
+pipeline.ecs_compatibility: disabled
+```
+
+
+
+
diff --git a/docs/static/env-vars.asciidoc b/docs/reference/environment-variables.md
similarity index 61%
rename from docs/static/env-vars.asciidoc
rename to docs/reference/environment-variables.md
index 2c555fe6c..9fc8d8cdc 100644
--- a/docs/static/env-vars.asciidoc
+++ b/docs/reference/environment-variables.md
@@ -1,119 +1,114 @@
-[[environment-variables]]
-=== Using environment variables
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/environment-variables.html
+---
-==== Overview
+# Using environment variables [environment-variables]
+
+## Overview [_overview]
* You can set environment variable references in the configuration for Logstash plugins by using `${var}`.
* At Logstash startup, each reference is replaced by the value of the environment variable.
* The replacement is case-sensitive.
* References to undefined variables raise a Logstash configuration error.
-* You can give a default value by using the form `${var:default value}`. Logstash uses the default value if the
-environment variable is undefined.
+* You can give a default value by using the form `${var:default value}`. Logstash uses the default value if the environment variable is undefined.
* You can add environment variable references in any plugin option type: string, number, boolean, array, or hash.
-* Environment variables for list-type URI parameters can support lists of space-delimited values. Currently, other non-URI based options do not support lists of values. See <>
-* Environment variables are immutable. If you update the environment variable, you'll have to restart Logstash to pick up the updated value.
-* References to environment variables in `config.string` comments are evaluated during configuration parsing, and are therefore discouraged.
-Remove the `$` sign to avoid pipeline loading failures.
+* Environment variables for list-type URI parameters can support lists of space-delimited values. Currently, other non-URI-based options do not support lists of values. See [Cross-plugin concepts and features](/reference/plugin-concepts.md).
+* Environment variables are immutable. If you update the environment variable, you’ll have to restart Logstash to pick up the updated value.
+* References to environment variables in `config.string` comments are evaluated during configuration parsing, and are therefore discouraged. Remove the `$` sign to avoid pipeline loading failures.
-==== Examples
-These examples show you how to use environment variables to set the values of some commonly used
-configuration options.
+## Examples [_examples]
-===== Setting the TCP port
+These examples show you how to use environment variables to set the values of some commonly used configuration options.
-Here's an example that uses an environment variable to set the TCP port:
+### Setting the TCP port [_setting_the_tcp_port]
-[source,ruby]
-----------------------------------
+Here’s an example that uses an environment variable to set the TCP port:
+
+```ruby
input {
tcp {
port => "${TCP_PORT}"
}
}
-----------------------------------
+```
-Now let's set the value of `TCP_PORT`:
+Now let’s set the value of `TCP_PORT`:
-[source,shell]
-----
+```shell
export TCP_PORT=12345
-----
+```
At startup, Logstash uses this configuration:
-[source,ruby]
-----------------------------------
+```ruby
input {
tcp {
port => 12345
}
}
-----------------------------------
+```
If the `TCP_PORT` environment variable is not set, Logstash returns a configuration error.
You can fix this problem by specifying a default value:
-[source,ruby]
-----
+```ruby
input {
tcp {
port => "${TCP_PORT:54321}"
}
}
-----
+```
Now, instead of returning a configuration error if the variable is undefined, Logstash uses the default:
-[source,ruby]
-----
+```ruby
input {
tcp {
port => 54321
}
}
-----
+```
If the environment variable is defined, Logstash uses the value specified for the variable instead of the default.
-===== Setting the value of a tag
-Here's an example that uses an environment variable to set the value of a tag:
+### Setting the value of a tag [_setting_the_value_of_a_tag]
-[source,ruby]
-----
+Here’s an example that uses an environment variable to set the value of a tag:
+
+```ruby
filter {
mutate {
add_tag => [ "tag1", "${ENV_TAG}" ]
}
}
-----
+```
-Let's set the value of `ENV_TAG`:
+Let’s set the value of `ENV_TAG`:
-[source,shell]
-----
+```shell
export ENV_TAG="tag2"
-----
+```
At startup, Logstash uses this configuration:
-[source,ruby]
-----
+```ruby
filter {
mutate {
add_tag => [ "tag1", "tag2" ]
}
}
-----
+```
-===== Setting a file path
-Here's an example that uses an environment variable to set the path to a log file:
+### Setting a file path [_setting_a_file_path]
-[source,ruby]
-----
+Here’s an example that uses an environment variable to set the path to a log file:
+
+```ruby
filter {
mutate {
add_field => {
@@ -121,19 +116,17 @@ filter {
}
}
}
-----
+```
-Let's set the value of `HOME`:
+Let’s set the value of `HOME`:
-[source,shell]
-----
+```shell
export HOME="/path"
-----
+```
At startup, Logstash uses the following configuration:
-[source,ruby]
-----
+```ruby
filter {
mutate {
add_field => {
@@ -141,5 +134,7 @@ filter {
}
}
}
-----
+```
+
+
diff --git a/docs/reference/event-api.md b/docs/reference/event-api.md
new file mode 100644
index 000000000..2bf7a09ac
--- /dev/null
+++ b/docs/reference/event-api.md
@@ -0,0 +1,105 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/event-api.html
+---
+
+# Event API [event-api]
+
+This section is targeted at plugin developers and users of Logstash’s Ruby filter. Below we document the changes, introduced in version 5.0, in the way users access Logstash’s event-based data in custom plugins and in the Ruby filter. Note that [accessing event data and fields](/reference/event-dependent-configuration.md) in Logstash’s config files, using [Field references](/reference/event-dependent-configuration.md#logstash-config-field-references), is not affected by this change and continues to use the existing syntax.
+
+
+## Event Object [_event_object]
+
+Event is the main object that encapsulates data flow internally in Logstash and provides an API for plugin developers to interact with the event’s content. Typically, this API is used in plugins and in a Ruby filter to retrieve data and use it for transformations. The Event object contains the original data sent to Logstash and any additional fields created during Logstash’s filter stages.
+
+In 5.0, we’ve re-implemented the Event class and its supporting classes in pure Java. Since Event is a critical component in data processing, a rewrite in Java improves performance and provides efficient serialization when storing data on disk. For the most part, this change aims at keeping backward compatibility and is transparent to users. To this end, we’ve updated and published most of the plugins in Logstash’s ecosystem to adhere to the new API changes. However, if you are maintaining a custom plugin, or have a Ruby filter, this change will affect you. The aim of this guide is to describe the new API and provide examples to help you migrate to the new changes.
+
+
+## Event API [_event_api]
+
+Prior to version 5.0, developers could access and manipulate event data by directly using Ruby hash syntax. For example, `event[field] = foo`. While this is powerful, our goal is to abstract the internal implementation details and provide well-defined getter and setter APIs.
+
+**Get API**
+
+The getter is a read-only access of field-based data in an Event.
+
+**Syntax:** `event.get(field)`
+
+**Returns:** Value for this field or nil if the field does not exist. Returned values could be a string, numeric or timestamp scalar value.
+
+`field` is a structured field sent to Logstash or created after the transformation process. `field` can also be a nested [field reference](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html) such as `[field][bar]`.
+
+Examples:
+
+```ruby
+event.get("foo") # => "baz"
+event.get("[foo]") # => "zab"
+event.get("[foo][bar]") # => 1
+event.get("[foo][bar]") # => 1.0
+event.get("[foo][bar]") # => [1, 2, 3]
+event.get("[foo][bar]") # => {"a" => 1, "b" => 2}
+event.get("[foo][bar]") # => {"a" => 1, "b" => 2, "c" => [1, 2]}
+```
+
+Accessing @metadata
+
+```ruby
+event.get("[@metadata][foo]") # => "baz"
+```
+
+**Set API**
+
+This API can be used to mutate data in an Event.
+
+**Syntax:** `event.set(field, value)`
+
+**Returns:** The current Event after the mutation, which can be used for chainable calls.
+
+Examples:
+
+```ruby
+event.set("foo", "baz")
+event.set("[foo]", "zab")
+event.set("[foo][bar]", 1)
+event.set("[foo][bar]", 1.0)
+event.set("[foo][bar]", [1, 2, 3])
+event.set("[foo][bar]", {"a" => 1, "b" => 2})
+event.set("[foo][bar]", {"a" => 1, "b" => 2, "c" => [1, 2]})
+event.set("[@metadata][foo]", "baz")
+```
+
+Mutating a collection after setting it in the Event results in undefined behaviour and is not allowed.
+
+```ruby
+h = {"a" => 1, "b" => 2, "c" => [1, 2]}
+event.set("[foo][bar]", h)
+
+h["c"] = [3, 4]
+event.get("[foo][bar][c]") # => undefined
+```
+
+Suggested way of mutating collections:
+
+```ruby
+h = {"a" => 1, "b" => 2, "c" => [1, 2]}
+event.set("[foo][bar]", h)
+
+h["c"] = [3, 4]
+event.set("[foo][bar]", h)
+
+# Alternatively,
+event.set("[foo][bar][c]", [3, 4])
+```
+
+
+## Ruby Filter [_ruby_filter]
+
+The [Ruby Filter](/reference/plugins-filters-ruby.md) can be used to execute any ruby code and manipulate event data using the API described above. For example, using the new API:
+
+```ruby
+filter {
+ ruby {
+ code => 'event.set("lowercase_field", event.get("message").downcase)'
+ }
+}
+```
+
+This filter lowercases the value of the `message` field and stores it in a new field called `lowercase_field`.
diff --git a/docs/reference/event-dependent-configuration.md b/docs/reference/event-dependent-configuration.md
new file mode 100644
index 000000000..5f1474910
--- /dev/null
+++ b/docs/reference/event-dependent-configuration.md
@@ -0,0 +1,371 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/event-dependent-configuration.html
+---
+
+# Accessing event data and fields [event-dependent-configuration]
+
+A Logstash pipeline usually has three stages: inputs → filters → outputs. Inputs generate events, filters modify them, and outputs ship them elsewhere.
+
+All events have properties. For example, an Apache access log has properties like status code (200, 404), request path ("/", "index.html"), HTTP verb (GET, POST), client IP address, and so forth. Logstash calls these properties "fields".
+
+Some configuration options in Logstash require the existence of fields in order to function. Because inputs generate events, there are no fields to evaluate within the input block—they do not exist yet!
+
+::::{important}
+[Field references](#logstash-config-field-references), [sprintf format](#sprintf), and [conditionals](#conditionals) do not work in input blocks. These configuration options depend on events and fields, and therefore, work only within filter and output blocks.
+::::
+
+
+
+## Field references [logstash-config-field-references]
+
+When you need to refer to a field by name, you can use the Logstash [field reference syntax](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html).
+
+The basic syntax to access a field is `[fieldname]`. If you are referring to a **top-level field**, you can omit the `[]` and simply use `fieldname`. To refer to a **nested field**, specify the full path to that field: `[top-level field][nested field]`.
+
+For example, this event has five top-level fields (agent, ip, request, response, ua) and three nested fields (status, bytes, os).
+
+```js
+{
+  "agent": "Mozilla/5.0 (compatible; MSIE 9.0)",
+  "ip": "192.168.24.44",
+  "request": "/index.html",
+  "response": {
+    "status": 200,
+    "bytes": 52353
+  },
+  "ua": {
+    "os": "Windows 7"
+  }
+}
+```
+
+To reference the `os` field, specify `[ua][os]`. To reference a top-level field such as `request`, you can simply specify the field name.
+
+For more detailed information, see [*Field References Deep Dive*](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html).
+
+
+## sprintf format [sprintf]
+
+The field reference format is also used in what Logstash calls *sprintf format*. This format enables you to embed field values in other strings. For example, the statsd output has an *increment* setting that enables you to keep a count of apache logs by status code:
+
+```js
+output {
+ statsd {
+ increment => "apache.%{[response][status]}"
+ }
+}
+```
+
+Similarly, you can convert the UTC timestamp in the `@timestamp` field into a string.
+
+Instead of specifying a field name inside the curly braces, use the `%{{FORMAT}}` syntax where `FORMAT` is a [java time format](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/format/DateTimeFormatter.html#patterns).
+
+For example, if you want to use the file output to write logs based on the event’s UTC date and hour and the `type` field:
+
+```js
+output {
+ file {
+ path => "/var/log/%{type}.%{{yyyy.MM.dd.HH}}"
+ }
+}
+```
+
+::::{note}
+The sprintf format continues to support [deprecated joda time format](http://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html) strings as well, using the `%{+FORMAT}` syntax. These formats are not directly interchangeable, and we advise you to begin using the more modern Java Time format.
+::::
+
+
+::::{note}
+A Logstash timestamp represents an instant on the UTC-timeline, so using sprintf formatters will produce results that may not align with your machine-local timezone.
+::::
+
+
+You can generate a fresh timestamp by using `%{{TIME_NOW}}` syntax instead of relying on the value in `@timestamp`. This is particularly useful when you need to estimate the time span of each plugin.
+
+```js
+input {
+ heartbeat {
+ add_field => { "heartbeat_time" => "%{{TIME_NOW}}" }
+ }
+}
+filter {
+ mutate {
+ add_field => { "mutate_time" => "%{{TIME_NOW}}" }
+ }
+}
+```
+
+
+## Conditionals [conditionals]
+
+Sometimes you want to filter or output an event only under certain conditions. For that, you can use a conditional.
+
+Conditionals in Logstash look and act the same way they do in programming languages. Conditionals support `if`, `else if` and `else` statements and can be nested.
+
+The conditional syntax is:
+
+```js
+if EXPRESSION {
+ ...
+} else if EXPRESSION {
+ ...
+} else {
+ ...
+}
+```
+
+What’s an expression? Comparison tests, boolean logic, and so on!
+
+You can use these comparison operators:
+
+* equality: `==`, `!=`, `<`, `>`, `<=`, `>=`
+* regexp: `=~`, `!~` (checks a pattern on the right against a string value on the left)
+* inclusion: `in`, `not in`
+
+Supported boolean operators are:
+
+* `and`, `or`, `nand`, `xor`
+
+Supported unary operators are:
+
+* `!`
+
+Expressions can be long and complex. Expressions can contain other expressions, you can negate expressions with `!`, and you can group them with parentheses `(...)`.
+
+For example, this conditional uses the mutate filter to remove the field `secret` if the field `action` has a value of `login`:
+
+```js
+filter {
+ if [action] == "login" {
+ mutate { remove_field => "secret" }
+ }
+}
+```
+
+If an expression generates an error when it is evaluated, event processing stops and a warning message is written to the log. For example, comparing integer value `100` with string value `"100"` cannot be evaluated with certainty, and so processing stops and the error is logged.
+
+To capture the full content of the message at the time the error occurs, set the log level to `debug`. Check out [Logging](/reference/logging.md) for more information about how to configure logging and available log levels.
+
+You can specify multiple expressions in a single condition:
+
+```js
+output {
+ # Send production errors to pagerduty
+ if [loglevel] == "ERROR" and [deployment] == "production" {
+ pagerduty {
+ ...
+ }
+ }
+}
+```
+
+You can use the `in` operator to test whether a field contains a specific string, key, or list element. Note that the semantic meaning of `in` can vary, based on the target type. For example, when applied to a string, `in` means "is a substring of". When applied to a collection type, `in` means "collection contains the exact value".
+
+```js
+filter {
+ if [foo] in [foobar] {
+ mutate { add_tag => "field in field" }
+ }
+ if [foo] in "foo" {
+ mutate { add_tag => "field in string" }
+ }
+ if "hello" in [greeting] {
+ mutate { add_tag => "string in field" }
+ }
+ if [foo] in ["hello", "world", "foo"] {
+ mutate { add_tag => "field in list" }
+ }
+ if [missing] in [alsomissing] {
+ mutate { add_tag => "shouldnotexist" }
+ }
+ if !("foo" in ["hello", "world"]) {
+ mutate { add_tag => "shouldexist" }
+ }
+}
+```
+
+You use the `not in` conditional the same way. For example, you could use `not in` to only route events to Elasticsearch when `grok` is successful:
+
+```js
+output {
+ if "_grokparsefailure" not in [tags] {
+ elasticsearch { ... }
+ }
+}
+```
+
+You can check for the existence of a specific field, but there’s currently no way to differentiate between a field that doesn’t exist versus a field that’s simply false. The expression `if [foo]` returns `false` when:
+
+* `[foo]` doesn’t exist in the event,
+* `[foo]` exists in the event, but is false, or
+* `[foo]` exists in the event, but is null
+
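+For example, a minimal sketch that adds a tag only when `[foo]` exists and is truthy:
+
+```js
+filter {
+  if [foo] {
+    mutate { add_tag => "foo_present" }
+  }
+}
+```
+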
+For more complex examples, see [Using Conditionals](/reference/config-examples.md#using-conditionals).
+
+::::{note}
+Sprintf date/time format in conditionals is not currently supported. A workaround using the `@metadata` field is available. See [sprintf date/time format in conditionals](#date-time) for more details and an example.
+::::
+
+
+
+## The @metadata field [metadata]
+
+In Logstash, there is a special field called `@metadata`. The contents of `@metadata` are not part of any of your events at output time, which makes it great to use for conditionals, or extending and building event fields with field reference and `sprintf` formatting.
+
+This configuration file yields events from STDIN. Whatever you type becomes the `message` field in the event. The `mutate` events in the filter block add a few fields, some nested in the `@metadata` field.
+
+```ruby
+input { stdin { } }
+
+filter {
+ mutate { add_field => { "show" => "This data will be in the output" } }
+ mutate { add_field => { "[@metadata][test]" => "Hello" } }
+ mutate { add_field => { "[@metadata][no_show]" => "This data will not be in the output" } }
+}
+
+output {
+ if [@metadata][test] == "Hello" {
+ stdout { codec => rubydebug }
+ }
+}
+```
+
+Let’s see what comes out:
+
+```ruby
+$ bin/logstash -f ../test.conf
+Pipeline main started
+asdf
+{
+ "@timestamp" => 2016-06-30T02:42:51.496Z,
+ "@version" => "1",
+ "host" => "example.com",
+ "show" => "This data will be in the output",
+ "message" => "asdf"
+}
+```
+
+The "asdf" typed in became the `message` field contents, and the conditional successfully evaluated the contents of the `test` field nested within the `@metadata` field. But the output did not show a field called `@metadata`, or its contents.
+
+The `rubydebug` codec allows you to reveal the contents of the `@metadata` field if you add a config flag, `metadata => true`:
+
+```ruby
+ stdout { codec => rubydebug { metadata => true } }
+```
+
+Let’s see what the output looks like with this change:
+
+```ruby
+$ bin/logstash -f ../test.conf
+Pipeline main started
+asdf
+{
+ "@timestamp" => 2016-06-30T02:46:48.565Z,
+ "@metadata" => {
+ "test" => "Hello",
+ "no_show" => "This data will not be in the output"
+ },
+ "@version" => "1",
+ "host" => "example.com",
+ "show" => "This data will be in the output",
+ "message" => "asdf"
+}
+```
+
+Now you can see the `@metadata` field and its sub-fields.
+
+::::{important}
+Only the `rubydebug` codec allows you to show the contents of the `@metadata` field.
+::::
+
+
+Make use of the `@metadata` field any time you need a temporary field but do not want it to be in the final output.
+
+Perhaps one of the most common use cases for this new field is with the `date` filter and having a temporary timestamp.
+
+This configuration file has been simplified, but uses the timestamp format common to Apache and Nginx web servers. In the past, you’d have to delete the timestamp field yourself, after using it to overwrite the `@timestamp` field. With the `@metadata` field, this is no longer necessary:
+
+```ruby
+input { stdin { } }
+
+filter {
+ grok { match => [ "message", "%{HTTPDATE:[@metadata][timestamp]}" ] }
+ date { match => [ "[@metadata][timestamp]", "dd/MMM/yyyy:HH:mm:ss Z" ] }
+}
+
+output {
+ stdout { codec => rubydebug }
+}
+```
+
+Notice that this configuration puts the extracted date into the `[@metadata][timestamp]` field in the `grok` filter. Let’s feed this configuration a sample date string and see what comes out:
+
+```ruby
+$ bin/logstash -f ../test.conf
+Pipeline main started
+02/Mar/2014:15:36:43 +0100
+{
+ "@timestamp" => 2014-03-02T14:36:43.000Z,
+ "@version" => "1",
+ "host" => "example.com",
+ "message" => "02/Mar/2014:15:36:43 +0100"
+}
+```
+
+That’s it! No extra fields in the output, and a cleaner config file because you do not have to delete a "timestamp" field after conversion in the `date` filter.
+
+Another use case is the [CouchDB Changes input plugin](https://github.com/logstash-plugins/logstash-input-couchdb_changes). This plugin automatically captures CouchDB document field metadata into the `@metadata` field within the input plugin itself. When the events pass through to be indexed by Elasticsearch, the Elasticsearch output plugin allows you to specify the `action` (delete, update, insert, etc.) and the `document_id`, like this:
+
+```ruby
+output {
+ elasticsearch {
+ action => "%{[@metadata][action]}"
+ document_id => "%{[@metadata][_id]}"
+ hosts => ["example.com"]
+ index => "index_name"
+ protocol => "http"
+ }
+}
+```
+
+
+### sprintf date/time format in conditionals [date-time]
+
+Sprintf date/time format in conditionals is not currently supported, but a workaround is available. Put the date calculation in a field so that you can use the field reference in a conditional.
+
+**Example**
+
+Using sprintf time format directly to add a field based on ingestion time *will not work*:
+
+```js
+# non-working example
+filter {
+  if "%{+HH}:%{+mm}" < "16:30" {
+    mutate {
+      add_field => { "string_compare" => "%{+HH}:%{+mm} is before 16:30" }
+    }
+  }
+}
+```
+
+This workaround gives you the intended results:
+
+```js
+filter {
+ mutate{
+ add_field => {
+ "[@metadata][time]" => "%{+HH}:%{+mm}"
+ }
+ }
+ if [@metadata][time] < "16:30" {
+ mutate {
+ add_field => {
+ "string_compare" => "%{+HH}:%{+mm} is before 16:30"
+ }
+ }
+ }
+}
+```
diff --git a/docs/reference/execution-model.md b/docs/reference/execution-model.md
new file mode 100644
index 000000000..a56d2ba6c
--- /dev/null
+++ b/docs/reference/execution-model.md
@@ -0,0 +1,13 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/execution-model.html
+---
+
+# Execution Model [execution-model]
+
+The Logstash event processing pipeline coordinates the execution of inputs, filters, and outputs.
+
+Each input stage in the Logstash pipeline runs in its own thread. Inputs write events to a central queue that is either in memory (default) or on disk. Each pipeline worker thread takes a batch of events off this queue, runs the batch of events through the configured filters, and then runs the filtered events through any outputs. The size of the batch and number of pipeline worker threads are configurable (see [Tuning and profiling logstash pipeline performance](/reference/tuning-logstash.md)).
+
+By default, Logstash uses in-memory bounded queues between pipeline stages (input → filter and filter → output) to buffer events. If Logstash terminates unsafely, any events that are stored in memory will be lost. To help prevent data loss, you can enable Logstash to persist in-flight events to disk. See [Persistent queues (PQ)](/reference/persistent-queues.md) for more information.
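+
+For example, the following `logstash.yml` settings (values shown are illustrative, not recommendations) control the worker count, batch size, and queue type:
+
+```yaml
+pipeline.workers: 4
+pipeline.batch.size: 125
+queue.type: persisted
+```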
+
diff --git a/docs/reference/field-extraction.md b/docs/reference/field-extraction.md
new file mode 100644
index 000000000..6d7f984c7
--- /dev/null
+++ b/docs/reference/field-extraction.md
@@ -0,0 +1,102 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/field-extraction.html
+---
+
+# Extracting Fields and Wrangling Data [field-extraction]
+
+The plugins described in this section are useful for extracting fields and parsing unstructured data into fields.
+
+[dissect filter](/reference/plugins-filters-dissect.md)
+: Extracts unstructured event data into fields by using delimiters. The dissect filter does not use regular expressions and is very fast. However, if the structure of the data varies from line to line, the grok filter is more suitable.
+
+ For example, let’s say you have a log that contains the following message:
+
+ ```json
+ Apr 26 12:20:02 localhost systemd[1]: Starting system activity accounting tool...
+ ```
+
+ The following config dissects the message:
+
+ ```json
+ filter {
+ dissect {
+ mapping => { "message" => "%{ts} %{+ts} %{+ts} %{src} %{prog}[%{pid}]: %{msg}" }
+ }
+ }
+ ```
+
+ After the dissect filter is applied, the event will be dissected into the following fields:
+
+ ```json
+ {
+ "msg" => "Starting system activity accounting tool...",
+ "@timestamp" => 2017-04-26T19:33:39.257Z,
+ "src" => "localhost",
+ "@version" => "1",
+ "host" => "localhost.localdomain",
+ "pid" => "1",
+ "message" => "Apr 26 12:20:02 localhost systemd[1]: Starting system activity accounting tool...",
+ "type" => "stdin",
+ "prog" => "systemd",
+ "ts" => "Apr 26 12:20:02"
+ }
+ ```
+
+
+[kv filter](/reference/plugins-filters-kv.md)
+: Parses key-value pairs.
+
+ For example, let’s say you have a log message that contains the following key-value pairs:
+
+ ```json
+ ip=1.2.3.4 error=REFUSED
+ ```
+
+ The following config parses the key-value pairs into fields:
+
+ ```json
+ filter {
+ kv { }
+ }
+ ```
+
+ After the filter is applied, the event in the example will have these fields:
+
+ * `ip: 1.2.3.4`
+ * `error: REFUSED`
+
+
+[grok filter](/reference/plugins-filters-grok.md)
+: Parses unstructured event data into fields. This tool is perfect for syslog logs, Apache and other webserver logs, MySQL logs, and in general, any log format that is generally written for humans and not computer consumption. Grok works by combining text patterns into something that matches your logs.
+
+ For example, let’s say you have an HTTP request log that contains the following message:
+
+ ```json
+ 55.3.244.1 GET /index.html 15824 0.043
+ ```
+
+ The following config parses the message into fields:
+
+ ```json
+ filter {
+ grok {
+ match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+ }
+ }
+ ```
+
+ After the filter is applied, the event in the example will have these fields:
+
+ * `client: 55.3.244.1`
+ * `method: GET`
+ * `request: /index.html`
+ * `bytes: 15824`
+ * `duration: 0.043`
+
+
+::::{tip}
+If you need help building grok patterns, try the [Grok Debugger](docs-content://explore-analyze/query-filter/tools/grok-debugger.md). The Grok Debugger is an {{xpack}} feature under the Basic License and is therefore **free to use**.
+::::
+
+
diff --git a/docs/reference/filter-plugins.md b/docs/reference/filter-plugins.md
new file mode 100644
index 000000000..da550dacb
--- /dev/null
+++ b/docs/reference/filter-plugins.md
@@ -0,0 +1,113 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/filter-plugins.html
+---
+
+# Filter plugins [filter-plugins]
+
+A filter plugin performs intermediary processing on an event. Filters are often applied conditionally depending on the characteristics of the event.
+
+The following filter plugins are available. For a list of Elastic supported plugins, please consult the [Support Matrix](https://www.elastic.co/support/matrix#show_logstash_plugins).
+
+| Plugin | Description | Github repository |
+| --- | --- | --- |
+| [age](/reference/plugins-filters-age.md) | Calculates the age of an event by subtracting the event timestamp from the current timestamp | [logstash-filter-age](https://github.com/logstash-plugins/logstash-filter-age) |
+| [aggregate](/reference/plugins-filters-aggregate.md) | Aggregates information from several events originating with a single task | [logstash-filter-aggregate](https://github.com/logstash-plugins/logstash-filter-aggregate) |
+| [alter](/reference/plugins-filters-alter.md) | Performs general alterations to fields that the `mutate` filter does not handle | [logstash-filter-alter](https://github.com/logstash-plugins/logstash-filter-alter) |
+| [bytes](/reference/plugins-filters-bytes.md) | Parses string representations of computer storage sizes, such as "123 MB" or "5.6gb", into their numeric value in bytes | [logstash-filter-bytes](https://github.com/logstash-plugins/logstash-filter-bytes) |
+| [cidr](/reference/plugins-filters-cidr.md) | Checks IP addresses against a list of network blocks | [logstash-filter-cidr](https://github.com/logstash-plugins/logstash-filter-cidr) |
+| [cipher](/reference/plugins-filters-cipher.md) | Applies or removes a cipher to an event | [logstash-filter-cipher](https://github.com/logstash-plugins/logstash-filter-cipher) |
+| [clone](/reference/plugins-filters-clone.md) | Duplicates events | [logstash-filter-clone](https://github.com/logstash-plugins/logstash-filter-clone) |
+| [csv](/reference/plugins-filters-csv.md) | Parses comma-separated value data into individual fields | [logstash-filter-csv](https://github.com/logstash-plugins/logstash-filter-csv) |
+| [date](/reference/plugins-filters-date.md) | Parses dates from fields to use as the Logstash timestamp for an event | [logstash-filter-date](https://github.com/logstash-plugins/logstash-filter-date) |
+| [de_dot](/reference/plugins-filters-de_dot.md) | Computationally expensive filter that removes dots from a field name | [logstash-filter-de_dot](https://github.com/logstash-plugins/logstash-filter-de_dot) |
+| [dissect](/reference/plugins-filters-dissect.md) | Extracts unstructured event data into fields using delimiters | [logstash-filter-dissect](https://github.com/logstash-plugins/logstash-filter-dissect) |
+| [dns](/reference/plugins-filters-dns.md) | Performs a standard or reverse DNS lookup | [logstash-filter-dns](https://github.com/logstash-plugins/logstash-filter-dns) |
+| [drop](/reference/plugins-filters-drop.md) | Drops all events | [logstash-filter-drop](https://github.com/logstash-plugins/logstash-filter-drop) |
+| [elapsed](/reference/plugins-filters-elapsed.md) | Calculates the elapsed time between a pair of events | [logstash-filter-elapsed](https://github.com/logstash-plugins/logstash-filter-elapsed) |
+| [elastic_integration](/reference/plugins-filters-elastic_integration.md) | Provides additional {{ls}} processing on data from Elastic integrations | [logstash-filter-elastic_integration](https://github.com/elastic/logstash-filter-elastic_integration) |
+| [elasticsearch](/reference/plugins-filters-elasticsearch.md) | Copies fields from previous log events in Elasticsearch to current events | [logstash-filter-elasticsearch](https://github.com/logstash-plugins/logstash-filter-elasticsearch) |
+| [environment](/reference/plugins-filters-environment.md) | Stores environment variables as metadata sub-fields | [logstash-filter-environment](https://github.com/logstash-plugins/logstash-filter-environment) |
+| [extractnumbers](/reference/plugins-filters-extractnumbers.md) | Extracts numbers from a string | [logstash-filter-extractnumbers](https://github.com/logstash-plugins/logstash-filter-extractnumbers) |
+| [fingerprint](/reference/plugins-filters-fingerprint.md) | Fingerprints fields by replacing values with a consistent hash | [logstash-filter-fingerprint](https://github.com/logstash-plugins/logstash-filter-fingerprint) |
+| [geoip](/reference/plugins-filters-geoip.md) | Adds geographical information about an IP address | [logstash-filter-geoip](https://github.com/logstash-plugins/logstash-filter-geoip) |
+| [grok](/reference/plugins-filters-grok.md) | Parses unstructured event data into fields | [logstash-filter-grok](https://github.com/logstash-plugins/logstash-filter-grok) |
+| [http](/reference/plugins-filters-http.md) | Provides integration with external web services/REST APIs | [logstash-filter-http](https://github.com/logstash-plugins/logstash-filter-http) |
+| [i18n](/reference/plugins-filters-i18n.md) | Removes special characters from a field | [logstash-filter-i18n](https://github.com/logstash-plugins/logstash-filter-i18n) |
+| [java_uuid](/reference/plugins-filters-java_uuid.md) | Generates a UUID and adds it to each processed event | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/filters/Uuid.java) |
+| [jdbc_static](/reference/plugins-filters-jdbc_static.md) | Enriches events with data pre-loaded from a remote database | [logstash-integration-jdbc](https://github.com/logstash-plugins/logstash-integration-jdbc) |
+| [jdbc_streaming](/reference/plugins-filters-jdbc_streaming.md) | Enrich events with your database data | [logstash-integration-jdbc](https://github.com/logstash-plugins/logstash-integration-jdbc) |
+| [json](/reference/plugins-filters-json.md) | Parses JSON events | [logstash-filter-json](https://github.com/logstash-plugins/logstash-filter-json) |
+| [json_encode](/reference/plugins-filters-json_encode.md) | Serializes a field to JSON | [logstash-filter-json_encode](https://github.com/logstash-plugins/logstash-filter-json_encode) |
+| [kv](/reference/plugins-filters-kv.md) | Parses key-value pairs | [logstash-filter-kv](https://github.com/logstash-plugins/logstash-filter-kv) |
+| [memcached](/reference/plugins-filters-memcached.md) | Provides integration with external data in Memcached | [logstash-filter-memcached](https://github.com/logstash-plugins/logstash-filter-memcached) |
+| [metricize](/reference/plugins-filters-metricize.md) | Takes complex events containing a number of metrics and splits these up into multiple events, each holding a single metric | [logstash-filter-metricize](https://github.com/logstash-plugins/logstash-filter-metricize) |
+| [metrics](/reference/plugins-filters-metrics.md) | Aggregates metrics | [logstash-filter-metrics](https://github.com/logstash-plugins/logstash-filter-metrics) |
+| [mutate](/reference/plugins-filters-mutate.md) | Performs mutations on fields | [logstash-filter-mutate](https://github.com/logstash-plugins/logstash-filter-mutate) |
+| [prune](/reference/plugins-filters-prune.md) | Prunes event data based on a list of fields to blacklist or whitelist | [logstash-filter-prune](https://github.com/logstash-plugins/logstash-filter-prune) |
+| [range](/reference/plugins-filters-range.md) | Checks that specified fields stay within given size or length limits | [logstash-filter-range](https://github.com/logstash-plugins/logstash-filter-range) |
+| [ruby](/reference/plugins-filters-ruby.md) | Executes arbitrary Ruby code | [logstash-filter-ruby](https://github.com/logstash-plugins/logstash-filter-ruby) |
+| [sleep](/reference/plugins-filters-sleep.md) | Sleeps for a specified time span | [logstash-filter-sleep](https://github.com/logstash-plugins/logstash-filter-sleep) |
+| [split](/reference/plugins-filters-split.md) | Splits multi-line messages, strings, or arrays into distinct events | [logstash-filter-split](https://github.com/logstash-plugins/logstash-filter-split) |
+| [syslog_pri](/reference/plugins-filters-syslog_pri.md) | Parses the `PRI` (priority) field of a `syslog` message | [logstash-filter-syslog_pri](https://github.com/logstash-plugins/logstash-filter-syslog_pri) |
+| [threats_classifier](/reference/plugins-filters-threats_classifier.md) | Enriches security logs with information about the attacker’s intent | [logstash-filter-threats_classifier](https://github.com/empow/logstash-filter-threats_classifier) |
+| [throttle](/reference/plugins-filters-throttle.md) | Throttles the number of events | [logstash-filter-throttle](https://github.com/logstash-plugins/logstash-filter-throttle) |
+| [tld](/reference/plugins-filters-tld.md) | Replaces the contents of the default message field with whatever you specify in the configuration | [logstash-filter-tld](https://github.com/logstash-plugins/logstash-filter-tld) |
+| [translate](/reference/plugins-filters-translate.md) | Replaces field contents based on a hash or YAML file | [logstash-filter-translate](https://github.com/logstash-plugins/logstash-filter-translate) |
+| [truncate](/reference/plugins-filters-truncate.md) | Truncates fields longer than a given length | [logstash-filter-truncate](https://github.com/logstash-plugins/logstash-filter-truncate) |
+| [urldecode](/reference/plugins-filters-urldecode.md) | Decodes URL-encoded fields | [logstash-filter-urldecode](https://github.com/logstash-plugins/logstash-filter-urldecode) |
+| [useragent](/reference/plugins-filters-useragent.md) | Parses user agent strings into fields | [logstash-filter-useragent](https://github.com/logstash-plugins/logstash-filter-useragent) |
+| [uuid](/reference/plugins-filters-uuid.md) | Adds a UUID to events | [logstash-filter-uuid](https://github.com/logstash-plugins/logstash-filter-uuid) |
+| [wurfl_device_detection](/reference/plugins-filters-wurfl_device_detection.md) | Enriches logs with device information such as brand, model, OS | [logstash-filter-wurfl_device_detection](https://github.com/WURFL/logstash-filter-wurfl_device_detection) |
+| [xml](/reference/plugins-filters-xml.md) | Parses XML into fields | [logstash-filter-xml](https://github.com/logstash-plugins/logstash-filter-xml) |
+
+
diff --git a/docs/reference/first-event.md b/docs/reference/first-event.md
new file mode 100644
index 000000000..fb6905e67
--- /dev/null
+++ b/docs/reference/first-event.md
@@ -0,0 +1,70 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/first-event.html
+---
+
+# Stashing Your First Event [first-event]
+
+First, let’s test your Logstash installation by running the most basic *Logstash pipeline*.
+
+A Logstash pipeline has two required elements, `input` and `output`, and one optional element, `filter`. The input plugins consume data from a source, the filter plugins modify the data as you specify, and the output plugins write the data to a destination.
+
+:::{image} ../images/basic_logstash_pipeline.png
+:alt: basic logstash pipeline
+:::
+
+To test your Logstash installation, run the most basic Logstash pipeline.
+
+**MacOS, Linux**
+
+```sh
+cd logstash-9.0.0
+bin/logstash -e 'input { stdin { } } output { stdout {} }'
+```
+
+**Windows**
+
+```sh
+cd logstash-9.0.0
+.\bin\logstash.bat -e "input { stdin { } } output { stdout {} }"
+```
+
+The command might vary slightly, depending on the terminal or shell you are using.
+
+::::{note}
+The location of the `bin` directory varies by platform. See [Directory layout](/reference/dir-layout.md) to find the location of `bin\logstash` on your system.
+::::
+
+
+::::{admonition} macOS Gatekeeper warnings
+:class: important
+
+Apple’s rollout of stricter notarization requirements affected the notarization of the 9.0.0-beta1 {{ls}} artifacts. If macOS Catalina displays a dialog when you first run {{ls}} that interrupts it, you will need to take an action to allow it to run. To prevent Gatekeeper checks on the {{ls}} files, run the following command on the downloaded `.tar.gz` archive or the directory to which it was extracted:
+
+```sh
+xattr -d -r com.apple.quarantine
+```
+
+For example, if the `.tar.gz` file was extracted to the default logstash-9.0.0-beta1 directory, the command is:
+
+```sh
+xattr -d -r com.apple.quarantine logstash-9.0.0-beta1
+```
+
+Alternatively, you can add a security override if a Gatekeeper popup appears by following the instructions in the *How to open an app that hasn’t been notarized or is from an unidentified developer* section of [Safely open apps on your Mac](https://support.apple.com/en-us/HT202491).
+
+::::
+
+
+The `-e` flag enables you to specify a configuration directly from the command line. Specifying configurations at the command line lets you quickly test configurations without having to edit a file between iterations. The pipeline in the example takes input from the standard input, `stdin`, and moves that input to the standard output, `stdout`, in a structured format.
+
+After starting Logstash, wait until you see "Pipeline main started" and then enter `hello world` at the command prompt:
+
+```shell
+hello world
+2013-11-21T01:22:14.405+0000 0.0.0.0 hello world
+```
+
+Logstash adds timestamp and IP address information to the message. Exit Logstash by issuing a **CTRL-D** command in the shell where Logstash is running.
+
+Congratulations! You’ve created and run a basic Logstash pipeline. Next, you learn how to create a more realistic pipeline.
diff --git a/docs/reference/getting-started-with-logstash.md b/docs/reference/getting-started-with-logstash.md
new file mode 100644
index 000000000..d4bded5ba
--- /dev/null
+++ b/docs/reference/getting-started-with-logstash.md
@@ -0,0 +1,121 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/getting-started-with-logstash.html
+---
+
+# Getting started with Logstash [getting-started-with-logstash]
+
+This section guides you through the process of installing Logstash and verifying that everything is running properly. After learning how to stash your first event, you go on to create a more advanced pipeline that takes Apache web logs as input, parses the logs, and writes the parsed data to an Elasticsearch cluster. Then you learn how to stitch together multiple input and output plugins to unify data from a variety of disparate sources.
+
+This section includes the following topics:
+
+* [Java (JVM) version](#ls-jvm)
+* [Installing Logstash](/reference/installing-logstash.md)
+* [Stashing Your First Event](/reference/first-event.md)
+* [Parsing Logs with Logstash](/reference/advanced-pipeline.md)
+* [Stitching Together Multiple Input and Output Plugins](/reference/multiple-input-output-plugins.md)
+
+
+### Java (JVM) version [ls-jvm]
+
+{{ls}} requires one of these versions:
+
+* Java 17 (default). Check out [Using JDK 17](#jdk17-upgrade) for settings info.
+* Java 21
+
+Use the [official Oracle distribution](http://www.oracle.com/technetwork/java/javase/downloads/index.html) or an open-source distribution, such as [OpenJDK](http://openjdk.java.net/). See the [Elastic Support Matrix](https://www.elastic.co/support/matrix#matrix_jvm) for the official word on supported versions across releases.
+
+::::{admonition} Bundled JDK
+:class: note
+
+:name: bundled-jdk
+
+{{ls}} offers architecture-specific [downloads](https://www.elastic.co/downloads/logstash) that include Adoptium Eclipse Temurin 17, a long term support (LTS) release of the JDK.
+
+Use the `LS_JAVA_HOME` environment variable if you want to use a JDK other than the version that is bundled. If you have the `LS_JAVA_HOME` environment variable set to use a custom JDK, Logstash will continue to use the JDK version you have specified, even after you upgrade.
+
+::::
+
+
+
+#### Check your Java version [check-jvm]
+
+Run the following command:
+
+```shell
+java -version
+```
+
+On systems with Java installed, this command produces output similar to the following:
+
+```shell
+openjdk version "17.0.12" 2024-07-16
+OpenJDK Runtime Environment Temurin-17.0.12+7 (build 17.0.12+7)
+OpenJDK 64-Bit Server VM Temurin-17.0.12+7 (build 17.0.12+7, mixed mode)
+```
+
+
+#### `LS_JAVA_HOME` [java-home]
+
+{{ls}} includes a bundled JDK which has been verified to work with each specific version of {{ls}}, and generally provides the best performance and reliability. If you need to use a JDK other than the bundled version, then set the `LS_JAVA_HOME` environment variable to the version you want to use.
+
+On some Linux systems, you may need to have the `LS_JAVA_HOME` environment exported before installing {{ls}}, particularly if you installed Java from a tarball. {{ls}} uses Java during installation to automatically detect your environment and install the correct startup method (SysV init scripts, Upstart, or systemd). If {{ls}} is unable to find the `LS_JAVA_HOME` environment variable during package installation, you may get an error message, and {{ls}} will not start properly.
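+
+For example, a sketch of pointing {{ls}} at a custom JDK before starting it (the path is a placeholder):
+
+```shell
+export LS_JAVA_HOME="/opt/jdk-17"
+bin/logstash
+```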
+
+
+#### Using JDK 17 [jdk17-upgrade]
+
+{{ls}} uses JDK 17 by default, but you need to update settings in `jvm.options` and `log4j2.properties` if you are upgrading from {{ls}} 7.11.x (or earlier) to 7.12 or later.
+
+
+##### Updates to `jvm.options` [_updates_to_jvm_options]
+
+In the `config/jvm.options` file, remove all CMS-related flags:
+
+```shell
+## GC configuration
+-XX:+UseConcMarkSweepGC
+-XX:CMSInitiatingOccupancyFraction=75
+-XX:+UseCMSInitiatingOccupancyOnly
+```
+
+For more information about how to use `jvm.options`, please refer to [JVM settings](/reference/jvm-settings.md).
+
+
+##### Updates to `log4j2.properties` [_updates_to_log4j2_properties]
+
+In the `config/log4j2.properties` file:
+
+* Replace properties that start with `appender.rolling.avoid_pipelined_filter.*` with:
+
+ ```shell
+ appender.rolling.avoid_pipelined_filter.type = PipelineRoutingFilter
+ ```
+
+* Replace properties that start with `appender.json_rolling.avoid_pipelined_filter.*` with:
+
+ ```shell
+ appender.json_rolling.avoid_pipelined_filter.type = PipelineRoutingFilter
+ ```
+
+* Replace properties that start with `appender.routing.*` with:
+
+ ```shell
+ appender.routing.type = PipelineRouting
+ appender.routing.name = pipeline_routing_appender
+ appender.routing.pipeline.type = RollingFile
+ appender.routing.pipeline.name = appender-${ctx:pipeline.id}
+ appender.routing.pipeline.fileName = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.log
+ appender.routing.pipeline.filePattern = ${sys:ls.logs}/pipeline_${ctx:pipeline.id}.%i.log.gz
+ appender.routing.pipeline.layout.type = PatternLayout
+ appender.routing.pipeline.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n
+ appender.routing.pipeline.policy.type = SizeBasedTriggeringPolicy
+ appender.routing.pipeline.policy.size = 100MB
+ appender.routing.pipeline.strategy.type = DefaultRolloverStrategy
+ appender.routing.pipeline.strategy.max = 30
+ ```
+
+
+
+
+
+
diff --git a/docs/reference/glob-support.md b/docs/reference/glob-support.md
new file mode 100644
index 000000000..f47507224
--- /dev/null
+++ b/docs/reference/glob-support.md
@@ -0,0 +1,44 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/glob-support.html
+---
+
+# Glob Pattern Support [glob-support]
+
+Logstash supports the following patterns wherever glob patterns are allowed:
+
+**`*`**
+: Match any file. You can also use an `*` to restrict other values in the glob. For example, `*conf` matches all files that end in `conf`. `*apache*` matches any files with `apache` in the name. This pattern does not match hidden files (dot files) on Unix-like operating systems. To match dot files, use a pattern like `{*,.*}`.
+
+**`**`**
+: Match directories recursively.
+
+**`?`**
+: Match any one character.
+
+**`[set]`**
+: Match any one character in a set. For example, `[a-z]`. Also supports set negation (`[^a-z]`).
+
+**`{p,q}`**
+: Match either literal `p` or literal `q`. The matching literal can be more than one character, and you can specify more than two literals. This pattern is equivalent to using alternation with the vertical bar in regular expressions (`foo|bar`).
+
+**`\`**
+: Escape the next metacharacter. This means that you cannot use a backslash in Windows as part of a glob. The pattern `c:\foo*` will not work, so use `foo*` instead.
+
+
+## Example Patterns [example-glob-patterns]
+
+Here are some common examples of glob patterns:
+
+`"/path/to/*.conf"`
+: Matches config files ending in `.conf` in the specified path.
+
+`"/var/log/*.log"`
+: Matches log files ending in `.log` in the specified path.
+
+`"/var/log/**/*.log"`
+: Matches log files ending in `.log` in subdirectories under the specified path.
+
+`"/path/to/logs/{app1,app2,app3}/data.log"`
+: Matches app log files in the `app1`, `app2`, and `app3` subdirectories under the specified path.
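+
+As a sketch, a glob like the ones above can be used anywhere a plugin option accepts one, for example in the `path` option of the file input:
+
+```js
+input {
+  file {
+    path => "/var/log/**/*.log"
+  }
+}
+```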
+
diff --git a/docs/reference/how-logstash-works.md b/docs/reference/how-logstash-works.md
new file mode 100644
index 000000000..091b22ce0
--- /dev/null
+++ b/docs/reference/how-logstash-works.md
@@ -0,0 +1,59 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/pipeline.html
+---
+
+# How Logstash Works [pipeline]
+
+The Logstash event processing pipeline has three stages: inputs → filters → outputs. Inputs generate events, filters modify them, and outputs ship them elsewhere. Inputs and outputs support codecs that enable you to encode or decode the data as it enters or exits the pipeline without having to use a separate filter.
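+
+For example, a minimal pipeline configuration that exercises all three stages, plus a codec on an output, might look like this sketch (the {{es}} `hosts` value is a placeholder):
+
+```text
+input {
+  stdin { }
+}
+
+filter {
+  grok {
+    match => { "message" => "%{COMBINEDAPACHELOG}" }
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => ["https://localhost:9200"]
+  }
+  stdout {
+    codec => rubydebug
+  }
+}
+```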
+
+
+## Inputs [_inputs]
+
+You use inputs to get data into Logstash. Some of the more commonly-used inputs are:
+
+* **file**: reads from a file on the filesystem, much like the UNIX command `tail -0F`
+* **syslog**: listens on the well-known port 514 for syslog messages and parses according to the RFC3164 format
+* **redis**: reads from a redis server, using both redis channels and redis lists. Redis is often used as a "broker" in a centralized Logstash installation, which queues Logstash events from remote Logstash "shippers".
+* **beats**: processes events sent by [Beats](https://www.elastic.co/downloads/beats).
+
+For more information about the available inputs, see [Input Plugins](/reference/input-plugins.md).
+
+
+## Filters [_filters]
+
+Filters are intermediary processing devices in the Logstash pipeline. You can combine filters with conditionals to perform an action on an event if it meets certain criteria. Some useful filters include:
+
+* **grok**: parse and structure arbitrary text. Grok is currently the best way in Logstash to parse unstructured log data into something structured and queryable. With 120 patterns built into Logstash, it’s more than likely you’ll find one that meets your needs!
+* **mutate**: perform general transformations on event fields. You can rename, remove, replace, and modify fields in your events.
+* **drop**: drop an event completely, for example, *debug* events.
+* **clone**: make a copy of an event, possibly adding or removing fields.
+* **geoip**: add information about geographical location of IP addresses (also displays amazing charts in Kibana!)
+
+For more information about the available filters, see [Filter Plugins](/reference/filter-plugins.md).
+
+
+## Outputs [_outputs]
+
+Outputs are the final phase of the Logstash pipeline. An event can pass through multiple outputs, but once all output processing is complete, the event has finished its execution. Some commonly used outputs include:
+
+* **elasticsearch**: send event data to Elasticsearch. If you’re planning to save your data in an efficient, convenient, and easily queryable format… Elasticsearch is the way to go. Period. Yes, we’re biased :)
+* **file**: write event data to a file on disk.
+* **graphite**: send event data to graphite, a popular open source tool for storing and graphing metrics. [http://graphite.readthedocs.io/en/latest/](http://graphite.readthedocs.io/en/latest/)
+* **statsd**: send event data to statsd, a service that "listens for statistics, like counters and timers, sent over UDP and sends aggregates to one or more pluggable backend services". If you’re already using statsd, this could be useful for you!
+
+For more information about the available outputs, see [Output Plugins](/reference/output-plugins.md).
+
+
+## Codecs [_codecs]
+
+Codecs are basically stream filters that can operate as part of an input or output. Codecs enable you to easily separate the transport of your messages from the serialization process. Popular codecs include `json`, `msgpack`, and `plain` (text).
+
+* **json**: encode or decode data in the JSON format.
+* **multiline**: merge multiple-line text events such as java exception and stacktrace messages into a single event.
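+
+For example, a codec is declared inline on the input or output that uses it. A minimal sketch attaching the `multiline` codec to the `stdin` input, using the codec’s `pattern` and `what` options to fold indented continuation lines (such as stack traces) into the previous event:
+
+```text
+input {
+  stdin {
+    codec => multiline {
+      pattern => "^\s"
+      what => "previous"
+    }
+  }
+}
+```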
+
+For more information about the available codecs, see [Codec Plugins](/reference/codec-plugins.md).
+
+
+
+
diff --git a/docs/reference/index.md b/docs/reference/index.md
new file mode 100644
index 000000000..e18931d78
--- /dev/null
+++ b/docs/reference/index.md
@@ -0,0 +1,8 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/index.html
+ - https://www.elastic.co/guide/en/logstash/current/introduction.html
+ - https://www.elastic.co/guide/en/serverless/current/elasticsearch-ingest-data-through-logstash.html
+---
+
+# Logstash
\ No newline at end of file
diff --git a/docs/reference/input-plugins.md b/docs/reference/input-plugins.md
new file mode 100644
index 000000000..b34e609f3
--- /dev/null
+++ b/docs/reference/input-plugins.md
@@ -0,0 +1,129 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/input-plugins.html
+---
+
+# Input plugins [input-plugins]
+
+An input plugin enables a specific source of events to be read by Logstash.
+
+The following input plugins are available. For a list of Elastic supported plugins, please consult the [Support Matrix](https://www.elastic.co/support/matrix#show_logstash_plugins).
+
+| | | |
+| --- | --- | --- |
+| Plugin | Description | Github repository |
+| [azure_event_hubs](/reference/plugins-inputs-azure_event_hubs.md) | Receives events from Azure Event Hubs | [azure_event_hubs](https://github.com/logstash-plugins/logstash-input-azure_event_hubs) |
+| [beats](/reference/plugins-inputs-beats.md) | Receives events from the Elastic Beats framework | [logstash-input-beats](https://github.com/logstash-plugins/logstash-input-beats) |
+| [cloudwatch](/reference/plugins-inputs-cloudwatch.md) | Pulls events from the Amazon Web Services CloudWatch API | [logstash-input-cloudwatch](https://github.com/logstash-plugins/logstash-input-cloudwatch) |
+| [couchdb_changes](/reference/plugins-inputs-couchdb_changes.md) | Streams events from CouchDB’s `_changes` URI | [logstash-input-couchdb_changes](https://github.com/logstash-plugins/logstash-input-couchdb_changes) |
+| [dead_letter_queue](/reference/plugins-inputs-dead_letter_queue.md) | Reads events from Logstash’s dead letter queue | [logstash-input-dead_letter_queue](https://github.com/logstash-plugins/logstash-input-dead_letter_queue) |
+| [elastic_agent](/reference/plugins-inputs-elastic_agent.md) | Receives events from the Elastic Agent framework | [logstash-input-beats](https://github.com/logstash-plugins/logstash-input-beats) (shared) |
+| [elastic_serverless_forwarder](/reference/plugins-inputs-elastic_serverless_forwarder.md) | Accepts events from Elastic Serverless Forwarder | [logstash-input-elastic_serverless_forwarder](https://github.com/logstash-plugins/logstash-input-elastic_serverless_forwarder) |
+| [elasticsearch](/reference/plugins-inputs-elasticsearch.md) | Reads query results from an Elasticsearch cluster | [logstash-input-elasticsearch](https://github.com/logstash-plugins/logstash-input-elasticsearch) |
+| [exec](/reference/plugins-inputs-exec.md) | Captures the output of a shell command as an event | [logstash-input-exec](https://github.com/logstash-plugins/logstash-input-exec) |
+| [file](/reference/plugins-inputs-file.md) | Streams events from files | [logstash-input-file](https://github.com/logstash-plugins/logstash-input-file) |
+| [ganglia](/reference/plugins-inputs-ganglia.md) | Reads Ganglia packets over UDP | [logstash-input-ganglia](https://github.com/logstash-plugins/logstash-input-ganglia) |
+| [gelf](/reference/plugins-inputs-gelf.md) | Reads GELF-format messages from Graylog2 as events | [logstash-input-gelf](https://github.com/logstash-plugins/logstash-input-gelf) |
+| [generator](/reference/plugins-inputs-generator.md) | Generates random log events for test purposes | [logstash-input-generator](https://github.com/logstash-plugins/logstash-input-generator) |
+| [github](/reference/plugins-inputs-github.md) | Reads events from a GitHub webhook | [logstash-input-github](https://github.com/logstash-plugins/logstash-input-github) |
+| [google_cloud_storage](/reference/plugins-inputs-google_cloud_storage.md) | Extract events from files in a Google Cloud Storage bucket | [logstash-input-google_cloud_storage](https://github.com/logstash-plugins/logstash-input-google_cloud_storage) |
+| [google_pubsub](/reference/plugins-inputs-google_pubsub.md) | Consume events from a Google Cloud PubSub service | [logstash-input-google_pubsub](https://github.com/logstash-plugins/logstash-input-google_pubsub) |
+| [graphite](/reference/plugins-inputs-graphite.md) | Reads metrics from the `graphite` tool | [logstash-input-graphite](https://github.com/logstash-plugins/logstash-input-graphite) |
+| [heartbeat](/reference/plugins-inputs-heartbeat.md) | Generates heartbeat events for testing | [logstash-input-heartbeat](https://github.com/logstash-plugins/logstash-input-heartbeat) |
+| [http](/reference/plugins-inputs-http.md) | Receives events over HTTP or HTTPS | [logstash-input-http](https://github.com/logstash-plugins/logstash-input-http) |
+| [http_poller](/reference/plugins-inputs-http_poller.md) | Decodes the output of an HTTP API into events | [logstash-input-http_poller](https://github.com/logstash-plugins/logstash-input-http_poller) |
+| [imap](/reference/plugins-inputs-imap.md) | Reads mail from an IMAP server | [logstash-input-imap](https://github.com/logstash-plugins/logstash-input-imap) |
+| [irc](/reference/plugins-inputs-irc.md) | Reads events from an IRC server | [logstash-input-irc](https://github.com/logstash-plugins/logstash-input-irc) |
+| [java_generator](/reference/plugins-inputs-java_generator.md) | Generates synthetic log events | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/inputs/Generator.java) |
+| [java_stdin](/reference/plugins-inputs-java_stdin.md) | Reads events from standard input | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/inputs/Stdin.java) |
+| [jdbc](/reference/plugins-inputs-jdbc.md) | Creates events from JDBC data | [logstash-integration-jdbc](https://github.com/logstash-plugins/logstash-integration-jdbc) |
+| [jms](/reference/plugins-inputs-jms.md) | Reads events from a Jms Broker | [logstash-input-jms](https://github.com/logstash-plugins/logstash-input-jms) |
+| [jmx](/reference/plugins-inputs-jmx.md) | Retrieves metrics from remote Java applications over JMX | [logstash-input-jmx](https://github.com/logstash-plugins/logstash-input-jmx) |
+| [kafka](/reference/plugins-inputs-kafka.md) | Reads events from a Kafka topic | [logstash-integration-kafka](https://github.com/logstash-plugins/logstash-integration-kafka) |
+| [kinesis](/reference/plugins-inputs-kinesis.md) | Receives events through an AWS Kinesis stream | [logstash-input-kinesis](https://github.com/logstash-plugins/logstash-input-kinesis) |
+| [logstash](/reference/plugins-inputs-logstash.md) | Reads from {{ls}} output of another {{ls}} instance | [logstash-integration-logstash](https://github.com/logstash-plugins/logstash-integration-logstash) |
+| [log4j](/reference/plugins-inputs-log4j.md) | Reads events over a TCP socket from a Log4j `SocketAppender` object | [logstash-input-log4j](https://github.com/logstash-plugins/logstash-input-log4j) |
+| [lumberjack](/reference/plugins-inputs-lumberjack.md) | Receives events using the Lumberjack protocol | [logstash-input-lumberjack](https://github.com/logstash-plugins/logstash-input-lumberjack) |
+| [meetup](/reference/plugins-inputs-meetup.md) | Captures the output of command line tools as an event | [logstash-input-meetup](https://github.com/logstash-plugins/logstash-input-meetup) |
+| [pipe](/reference/plugins-inputs-pipe.md) | Streams events from a long-running command pipe | [logstash-input-pipe](https://github.com/logstash-plugins/logstash-input-pipe) |
+| [puppet_facter](/reference/plugins-inputs-puppet_facter.md) | Receives facts from a Puppet server | [logstash-input-puppet_facter](https://github.com/logstash-plugins/logstash-input-puppet_facter) |
+| [rabbitmq](/reference/plugins-inputs-rabbitmq.md) | Pulls events from a RabbitMQ exchange | [logstash-integration-rabbitmq](https://github.com/logstash-plugins/logstash-integration-rabbitmq) |
+| [redis](/reference/plugins-inputs-redis.md) | Reads events from a Redis instance | [logstash-input-redis](https://github.com/logstash-plugins/logstash-input-redis) |
+| [relp](/reference/plugins-inputs-relp.md) | Receives RELP events over a TCP socket | [logstash-input-relp](https://github.com/logstash-plugins/logstash-input-relp) |
+| [rss](/reference/plugins-inputs-rss.md) | Captures the output of command line tools as an event | [logstash-input-rss](https://github.com/logstash-plugins/logstash-input-rss) |
+| [s3](/reference/plugins-inputs-s3.md) | Streams events from files in an S3 bucket | [logstash-input-s3](https://github.com/logstash-plugins/logstash-input-s3) |
+| [s3-sns-sqs](/reference/plugins-inputs-s3-sns-sqs.md) | Reads logs from AWS S3 buckets using SQS | [logstash-input-s3-sns-sqs](https://github.com/cherweg/logstash-input-s3-sns-sqs) |
+| [salesforce](/reference/plugins-inputs-salesforce.md) | Creates events based on a Salesforce SOQL query | [logstash-input-salesforce](https://github.com/logstash-plugins/logstash-input-salesforce) |
+| [snmp](/reference/plugins-inputs-snmp.md) | Polls network devices using Simple Network Management Protocol (SNMP) | [logstash-integration-snmp](https://github.com/logstash-plugins/logstash-integration-snmp) |
+| [snmptrap](/reference/plugins-inputs-snmptrap.md) | Creates events based on SNMP trap messages | [logstash-integration-snmp](https://github.com/logstash-plugins/logstash-integration-snmp) |
+| [sqlite](/reference/plugins-inputs-sqlite.md) | Creates events based on rows in an SQLite database | [logstash-input-sqlite](https://github.com/logstash-plugins/logstash-input-sqlite) |
+| [sqs](/reference/plugins-inputs-sqs.md) | Pulls events from an Amazon Web Services Simple Queue Service queue | [logstash-input-sqs](https://github.com/logstash-plugins/logstash-input-sqs) |
+| [stdin](/reference/plugins-inputs-stdin.md) | Reads events from standard input | [logstash-input-stdin](https://github.com/logstash-plugins/logstash-input-stdin) |
+| [stomp](/reference/plugins-inputs-stomp.md) | Creates events received with the STOMP protocol | [logstash-input-stomp](https://github.com/logstash-plugins/logstash-input-stomp) |
+| [syslog](/reference/plugins-inputs-syslog.md) | Reads syslog messages as events | [logstash-input-syslog](https://github.com/logstash-plugins/logstash-input-syslog) |
+| [tcp](/reference/plugins-inputs-tcp.md) | Reads events from a TCP socket | [logstash-input-tcp](https://github.com/logstash-plugins/logstash-input-tcp) |
+| [twitter](/reference/plugins-inputs-twitter.md) | Reads events from the Twitter Streaming API | [logstash-input-twitter](https://github.com/logstash-plugins/logstash-input-twitter) |
+| [udp](/reference/plugins-inputs-udp.md) | Reads events over UDP | [logstash-input-udp](https://github.com/logstash-plugins/logstash-input-udp) |
+| [unix](/reference/plugins-inputs-unix.md) | Reads events over a UNIX socket | [logstash-input-unix](https://github.com/logstash-plugins/logstash-input-unix) |
+| [varnishlog](/reference/plugins-inputs-varnishlog.md) | Reads from the `varnish` cache shared memory log | [logstash-input-varnishlog](https://github.com/logstash-plugins/logstash-input-varnishlog) |
+| [websocket](/reference/plugins-inputs-websocket.md) | Reads events from a websocket | [logstash-input-websocket](https://github.com/logstash-plugins/logstash-input-websocket) |
+| [wmi](/reference/plugins-inputs-wmi.md) | Creates events based on the results of a WMI query | [logstash-input-wmi](https://github.com/logstash-plugins/logstash-input-wmi) |
+| [xmpp](/reference/plugins-inputs-xmpp.md) | Receives events over the XMPP/Jabber protocol | [logstash-input-xmpp](https://github.com/logstash-plugins/logstash-input-xmpp) |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/reference/installing-logstash.md b/docs/reference/installing-logstash.md
new file mode 100644
index 000000000..321f47971
--- /dev/null
+++ b/docs/reference/installing-logstash.md
@@ -0,0 +1,63 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/installing-logstash.html
+---
+
+# Installing Logstash [installing-logstash]
+
+
+## Installing from a Downloaded Binary [installing-binary]
+
+The {{ls}} binaries are available from [https://www.elastic.co/downloads](https://www.elastic.co/downloads/logstash). Download the Logstash installation file for your host environment—TAR.GZ, DEB, ZIP, or RPM.
+
+Unpack the file. Do not install Logstash into a directory path that contains colon (:) characters.
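+
+For example, on Linux you might unpack the TAR.GZ archive like this (the archive name is illustrative; it depends on the version and platform you downloaded):
+
+```sh
+tar -xzf logstash-9.0.0-linux-x86_64.tar.gz
+cd logstash-9.0.0
+```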
+
+::::{note}
+These packages are free to use under the Elastic license. They contain open source and free commercial features and access to paid commercial features. [Start a 30-day trial](docs-content://deploy-manage/license/manage-your-license-in-self-managed-cluster.md) to try out all of the paid commercial features. See the [Subscriptions](https://www.elastic.co/subscriptions) page for information about Elastic license levels.
+
+Alternatively, you can download an `oss` package, which contains only features that are available under the Apache 2.0 license.
+
+::::
+
+
+On supported Linux operating systems, you can use a package manager to install Logstash.
+
+
+## Installing from Package Repositories [package-repositories]
+
+We also have repositories available for APT and YUM based distributions. Note that we only provide binary packages, but no source packages, as the packages are created as part of the Logstash build.
+
+We have split the Logstash package repositories by version into separate URLs to avoid accidental upgrades across major versions. For all 9.x.y releases, use 9.x as the version number.
+
+We use the PGP key [D88E42B4](https://pgp.mit.edu/pks/lookup?op=vindex&search=0xD27D666CD88E42B4), Elastic’s Signing Key, with fingerprint
+
+```
+4609 5ACC 8548 582C 1A26 99A9 D27D 666C D88E 42B4
+```
+to sign all our packages. It is available from [https://pgp.mit.edu](https://pgp.mit.edu).
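+
+As an illustration, on Debian-based systems the key is commonly downloaded from `https://artifacts.elastic.co/GPG-KEY-elasticsearch` and imported along these lines (the keyring path is one common choice, not a requirement):
+
+```sh
+wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearmor -o /usr/share/keyrings/elastic-keyring.gpg
+```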
+
+::::{note}
+When installing from a package repository (or from the DEB or RPM installation file), you will need to run Logstash as a service. Please refer to [Running Logstash as a Service](/reference/running-logstash.md) for more information.
+
+For testing purposes, you may still run Logstash from the command line, but you may need to define the default setting options (described in [Logstash Directory Layout](/reference/dir-layout.md)) manually. Please refer to [Running Logstash from the Command Line](/reference/running-logstash-command-line.md) for more information.
+
+::::
+
+
+
+### APT [_apt]
+
+Version 9.0.0 of Logstash has not yet been released.
+
+
+### YUM [_yum]
+
+Version 9.0.0 of Logstash has not yet been released.
+
+
+### Docker [_docker]
+
+Images for running Logstash as a Docker container are available from the Elastic Docker registry.
+
+See [Running Logstash on Docker](/reference/docker.md) for details on how to configure and run Logstash Docker containers.
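+
+For example, once a 9.0.0 image is published, pulling it from the Elastic registry would look something like this (the tag shown is illustrative):
+
+```sh
+docker pull docker.elastic.co/logstash/logstash:9.0.0
+```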
+
diff --git a/docs/reference/integration-plugins.md b/docs/reference/integration-plugins.md
new file mode 100644
index 000000000..fad516416
--- /dev/null
+++ b/docs/reference/integration-plugins.md
@@ -0,0 +1,27 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugin-integrations.html
+---
+
+# Integration plugins [plugin-integrations]
+
+Integration plugins combine related plugins—inputs, outputs, and sometimes filters and codecs—into one package.
+
+| | | |
+| --- | --- | --- |
+| Integration Plugin | Description | Github repository |
+| [aws](/reference/plugins-integrations-aws.md) | Plugins for use with Amazon Web Services (AWS). | [logstash-integration-aws](https://github.com/logstash-plugins/logstash-integration-aws) |
+| [elastic_enterprise_search (deprecated)](/reference/plugins-integrations-elastic_enterprise_search.md) | [deprecated at {{stack}} version 9.0.0 and plugin version 3.0.1] Plugins for use with Elastic Enterprise Search. | [logstash-integration-elastic_enterprise_search](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search) |
+| [jdbc](/reference/plugins-integrations-jdbc.md) | Plugins for use with databases that provide JDBC drivers. | [logstash-integration-jdbc](https://github.com/logstash-plugins/logstash-integration-jdbc) |
+| [kafka](/reference/plugins-integrations-kafka.md) | Plugins for use with the Kafka distributed streaming platform. | [logstash-integration-kafka](https://github.com/logstash-plugins/logstash-integration-kafka) |
+| [logstash](/reference/plugins-integrations-logstash.md) | Plugins to enable {{ls}}-to-{{ls}} communication. | [logstash-integration-logstash](https://github.com/logstash-plugins/logstash-integration-logstash) |
+| [rabbitmq](/reference/plugins-integrations-rabbitmq.md) | Plugins for processing events to or from a RabbitMQ broker. | [logstash-integration-rabbitmq](https://github.com/logstash-plugins/logstash-integration-rabbitmq) |
+| [snmp](/reference/plugins-integrations-snmp.md) | Plugins for polling devices using Simple Network Management Protocol (SNMP) or creating events from SNMPtrap messages. | [logstash-integration-snmp](https://github.com/logstash-plugins/logstash-integration-snmp) |
+
+
+
+
+
+
+
+
diff --git a/docs/reference/jvm-settings.md b/docs/reference/jvm-settings.md
new file mode 100644
index 000000000..d1543f416
--- /dev/null
+++ b/docs/reference/jvm-settings.md
@@ -0,0 +1,152 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/jvm-settings.html
+---
+
+# JVM settings [jvm-settings]
+
+Configure JVM settings in the `jvm.options` [settings file](/reference/config-setting-files.md#settings-files). JVM settings can also be set via the [`LS_JAVA_OPTS`](#ls-java-opts) environment variable.
+
+This file contains a line-delimited list of JVM arguments following a special syntax:
+
+* lines consisting of whitespace only are ignored
+* lines beginning with `#` are treated as comments and are ignored
+
+ ```text
+ # this is a comment
+ ```
+
+* lines beginning with a `-` are treated as a JVM option that applies regardless of the JVM version
+
+ ```text
+ -Xmx2g
+ ```
+
+* lines beginning with a number followed by a `:` followed by a `-` are treated as a JVM option that applies only if the version of the JVM matches the number
+
+ ```text
+ 8:-Xmx2g
+ ```
+
+* lines beginning with a number followed by a `-` followed by a `:` are treated as a JVM option that applies only if the version of the JVM is greater than or equal to the number
+
+ ```text
+ 8-:-Xmx2g
+ ```
+
+* lines beginning with a number followed by a `-` followed by a number followed by a `:` are treated as a JVM option that applies only if the version of the JVM falls in the inclusive range of the two numbers
+
+ ```text
+ 8-9:-Xmx2g
+ ```
+
+* all other lines are rejected
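+
+Putting these rules together, a small `jvm.options` fragment might look like this sketch (the options shown are illustrative):
+
+```text
+# GC and heap settings
+-Xms4g
+-Xmx4g
+
+# applies only when running on JVM 11 or newer
+11-:-XX:+UseG1GC
+```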
+
+## Setting the memory size [memory-size]
+
+The memory of the JVM executing {{ls}} can be divided into two zones: heap and off-heap memory. The heap refers to the Java heap, which contains all the Java objects created by {{ls}} during its operation. See [Setting the JVM heap size](#heap-size) for a description of how to size it. Memory that is not part of the heap is called off-heap. It consists of memory that can be used and controlled by {{ls}}, generally thread stacks, direct memory, and memory-mapped pages. See [Setting the off-heap size](#off-heap-size) for comprehensive descriptions. The off-heap space also contains memory used by the JVM itself for the data structures needed to run the virtual machine. This memory can’t be controlled by {{ls}}, and its settings are rarely customized.
+
+### Setting the JVM heap size [heap-size]
+
+Here are some tips for adjusting the JVM heap size:
+
+* The recommended heap size for typical ingestion scenarios should be no less than 4GB and no more than 8GB.
+* CPU utilization can increase unnecessarily if the heap size is too low, resulting in the JVM constantly garbage collecting. You can check for this issue by doubling the heap size to see if performance improves.
+* Do not increase the heap size past the amount of physical memory. Some memory must be left to run the OS and other processes. As a general guideline for most installations, don’t exceed 50-75% of physical memory. The more memory you have, the higher percentage you can use.
+* Set the minimum (Xms) and maximum (Xmx) heap allocation size to the same value to prevent the heap from resizing at runtime, which is a very costly process.
+* You can make more accurate measurements of the JVM heap by using either the `jmap` command line utility distributed with Java or by using VisualVM. For more info, see [Profiling the heap](/reference/tuning-logstash.md#profiling-the-heap).
+
+
+### Setting the off-heap size [off-heap-size]
+
+The operating system, persistent queue mmap pages, direct memory, and other processes require memory in addition to the memory allocated to the heap.
+
+Internal JVM data structures, thread stacks, memory mapped files, and direct memory for input/output (IO) operations are all part of the off-heap JVM memory. Memory mapped files are not part of the {{ls}} process’s off-heap memory, but they consume RAM when paging files from disk. These mapped files speed up access to Persistent Queue pages, a performance improvement (or trade-off) that reduces expensive disk operations such as read, write, and seek. Some network I/O operations also resort to in-process direct memory usage to avoid, for example, copying buffers between network sockets. Input plugins such as Elastic Agent, Beats, TCP, and HTTP inputs use direct memory. The thread stack zone contains the list of stack frames for each Java thread created by the JVM; each frame keeps the local arguments passed during method calls. See [Setting the JVM stack size](#stacks-size) if the size needs to be adapted to your processing needs.
+
+Plugins, depending on their type (input, filter, or output), have different thread models. Every input plugin runs in its own thread and can potentially spawn others. For example, each JDBC input plugin launches a scheduler thread. Netty-based plugins, such as the TCP, Beats, or HTTP inputs, spawn a thread pool with 2 * number_of_cores threads. Output plugins may also start helper threads, such as a connection management thread for each {{es}} output instance. Each pipeline also has its own thread responsible for managing the pipeline lifecycle.
+
+To summarize, there are three categories of memory usage. Two can be limited by the JVM, while the third relies on available free memory:
+
+| Memory Type | Configured using | Used by |
+| --- | --- | --- |
+| JVM Heap | -Xmx | any normal object allocation |
+| JVM direct memory | -XX:MaxDirectMemorySize | beats, tcp and http inputs |
+| Native memory | N/A | Persistent Queue Pages, Thread Stacks |
+
+Keep these memory requirements in mind as you calculate your ideal memory allocation.
+
+
+### Buffer Allocation types [off-heap-buffers-allocation]
+
+Input plugins such as {{agent}}, {{beats}}, TCP, and HTTP allocate buffers in Java heap memory to read events from the network. Heap memory is the preferred allocation method, as it facilitates debugging memory usage problems (such as leaks and Out of Memory errors) through the analysis of heap dumps.
+
+Before version 9.0.0, {{ls}} defaulted to direct memory instead of heap for this purpose. To re-enable the previous behavior, {{ls}} provides a `pipeline.buffer.type` setting in [logstash.yml](/reference/logstash-settings-file.md) that lets you control where to allocate memory buffers for plugins that use them.
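+
+For example, to switch buffer allocation back to direct memory, the relevant `logstash.yml` entry is a single line:
+
+```yaml
+pipeline.buffer.type: direct
+```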
+
+Performance should not be noticeably affected if you switch between `direct` and `heap`. While copying bytes from OS buffers to direct memory buffers is faster, {{ls}} Event objects produced by these plugins are allocated on the Java Heap, incurring the cost of copying from direct memory to heap memory, regardless of the setting.
+
+
+### Memory sizing [memory-size-calculation]
+
+Total JVM memory allocation must be estimated and is controlled indirectly using Java heap and direct memory settings. By default, a JVM’s off-heap direct memory limit is the same as the heap size. Check out [beats input memory usage](/reference/plugins-inputs-beats.md#plugins-inputs-beats-memory). Consider setting `-XX:MaxDirectMemorySize` to half of the heap size or any value that can accommodate the load you expect these plugins to handle.
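+
+As a sketch, for a 4 GB heap with direct memory capped at half the heap, the corresponding `jvm.options` entries could be:
+
+```text
+-Xms4g
+-Xmx4g
+-XX:MaxDirectMemorySize=2g
+```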
+
+As you make your capacity calculations, keep in mind that the JVM can’t consume the total amount of the host’s memory available, as the Operating System and other processes will require memory too.
+
+For a {{ls}} instance with persistent queue (PQ) enabled on multiple pipelines, we could estimate memory consumption using:
+
+```text
+pipelines number * (pipeline threads * stack size + 2 * PQ page size) + direct memory + Java heap
+```
+
+::::{note}
+Each Persistent Queue requires that at least head and tail pages are present and accessible in memory. The default page size is 64 MB so each PQ requires at least 128 MB of heap memory, which can be a significant source of memory consumption per pipeline. Note that the size of memory mapped file can’t be limited with an upper bound.
+::::
+
+
+::::{note}
+Stack size is a setting that depends on the JVM used, and it can be customized with the `-Xss` setting.
+::::
+
+
+::::{note}
+By default, the direct memory space is as large as the Java heap, but it can be customized with the `-XX:MaxDirectMemorySize` setting.
+::::
+
+
+**Example**
+
+Consider a {{ls}} instance running 10 pipelines with simple input and output plugins that don’t start additional threads. Each pipeline has 1 pipeline thread, 1 input plugin thread, and 12 workers, summing up to 14 threads. Keep in mind that, by default, the JVM allocates direct memory equal to the memory allocated for the Java heap.
+
+The calculation results in:
+
+* native memory: 1.4 GB [derived from 10 * (14 * 1 MB + 128 MB)]
+* direct memory: 4 GB
+* Java heap: 4 GB
+
+
+
+## Setting the JVM stack size [stacks-size]
+
+Large configurations may require additional JVM stack memory. If you see a stack overflow error, try increasing the JVM stack size. Add an entry similar to this one in the `jvm.options` [settings file](/reference/config-setting-files.md#settings-files):
+
+```sh
+-Xss4M
+```
+
+Note that the default stack size is different per platform and per OS flavor. You can find out what the default is by running:
+
+```sh
+java -XX:+PrintFlagsFinal -version | grep ThreadStackSize
+```
+
+Depending on the default stack size, start by multiplying by 4x, then 8x, and then 16x until the overflow error resolves.
+
+
+## Using `LS_JAVA_OPTS` [ls-java-opts]
+
+The `LS_JAVA_OPTS` environment variable can also be used to override JVM settings in the `jvm.options` [settings file](/reference/config-setting-files.md#settings-files). The content of this variable is additive to options configured in the `jvm.options` file, and will override any settings that exist in both places.
+
+For example, to launch a {{ls}} instance with a different locale:
+
+```sh
+LS_JAVA_OPTS="-Duser.country=DE -Duser.language=de" bin/logstash -e 'input { stdin { codec => json } }'
+```
diff --git a/docs/reference/keystore.md b/docs/reference/keystore.md
new file mode 100644
index 000000000..9aae6fd74
--- /dev/null
+++ b/docs/reference/keystore.md
@@ -0,0 +1,159 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/keystore.html
+---
+
+# Secrets keystore for secure settings [keystore]
+
+When you configure Logstash, you might need to specify sensitive settings or configuration, such as passwords. Rather than relying on file system permissions to protect these values, you can use the Logstash keystore to securely store secret values for use in configuration settings.
+
+After adding a key and its secret value to the keystore, you can use the key in place of the secret value when you configure sensitive settings.
+
+The syntax for referencing keys is identical to the syntax for [environment variables](/reference/environment-variables.md):
+
+```txt
+${KEY}
+```
+
+Where KEY is the name of the key.
+
+**Example**
+
+Imagine that the keystore contains a key called `ES_PWD` with the value `yourelasticsearchpassword`.
+
+In configuration files, use:
+
+```shell
+output { elasticsearch { ... password => "${ES_PWD}" } }
+```
+
+In `logstash.yml`, use:
+
+```shell
+xpack.management.elasticsearch.password: ${ES_PWD}
+```
+
+Notice that the Logstash keystore differs from the Elasticsearch keystore. Whereas the Elasticsearch keystore lets you store `elasticsearch.yml` values by name, the Logstash keystore lets you specify arbitrary names that you can reference in the Logstash configuration.
+
+::::{note}
+Some configuration fields have no secret meaning, so not every field can leverage the secret store for variable substitution. The plugin `id` field is one example.
+::::
+
+
+::::{note}
+Referencing keystore data from `pipelines.yml` or the command line (`-e`) is not currently supported.
+::::
+
+
+::::{note}
+Referencing keystore data from [centralized pipeline management](/reference/logstash-centralized-pipeline-management.md) requires each Logstash deployment to have a local copy of the keystore.
+::::
+
+
+::::{note}
+The {{ls}} keystore needs to be protected, but the {{ls}} user must have access to the file. While most things in {{ls}} can be protected with `chown -R root:root `, the keystore itself must be accessible from the {{ls}} user. Use `chown logstash:root && chmod 0600 `.
+::::
+
+
+When Logstash parses the settings (`logstash.yml`) or configuration (`/etc/logstash/conf.d/*.conf`), it resolves keys from the keystore before resolving environment variables.
+
+
+## Keystore password [keystore-password]
+
+You can protect access to the Logstash keystore by storing a password in an environment variable called `LOGSTASH_KEYSTORE_PASS`. If you create the Logstash keystore after setting this variable, the keystore will be password protected. This means that the environment variable needs to be accessible to the running instance of Logstash. This environment variable must also be correctly set for any users who need to issue keystore commands (add, list, remove, etc.).
+
+Using a keystore password is recommended, but optional. The data will be encrypted even if you do not set a password. However, it is highly recommended to configure the keystore password and grant restrictive permissions to any files that may contain the environment variable value. If you choose not to set a password, then you can skip the rest of this section.
+
+For example:
+
+```sh
+set +o history
+export LOGSTASH_KEYSTORE_PASS=mypassword
+set -o history
+bin/logstash-keystore create
+```
+
+This setup requires the user running Logstash to have the environment variable `LOGSTASH_KEYSTORE_PASS=mypassword` defined. If the environment variable is not defined, Logstash cannot access the keystore.
+
+When you run Logstash from an RPM or DEB package installation, the environment variables are sourced from `/etc/sysconfig/logstash`.
+
+::::{note}
+You might need to create `/etc/sysconfig/logstash`. This file should be owned by `root` with `600` permissions. The expected format of `/etc/sysconfig/logstash` is `ENVIRONMENT_VARIABLE=VALUE`, with one entry per line.
+::::
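+
+For example, a minimal `/etc/sysconfig/logstash` that supplies only the keystore password contains a single line:
+
+```sh
+LOGSTASH_KEYSTORE_PASS=mypassword
+```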
+
+
+For other distributions, such as Docker or ZIP, see the documentation for your runtime environment (Windows, Docker, etc) to learn how to set the environment variable for the user that runs Logstash. Ensure that the environment variable (and thus the password) is only accessible to that user.
+
+
+## Keystore location [keystore-location]
+
+The keystore must be located in the Logstash `path.settings` directory. This is the same directory that contains the `logstash.yml` file. When performing any operation against the keystore, it is recommended to set `path.settings` for the keystore command. For example, to create a keystore on a RPM/DEB installation:
+
+```sh
+set +o history
+export LOGSTASH_KEYSTORE_PASS=mypassword
+set -o history
+sudo -E /usr/share/logstash/bin/logstash-keystore --path.settings /etc/logstash create
+```
+
+See [Logstash Directory Layout](/reference/dir-layout.md) for more about the default directory locations.
+
+::::{note}
+You will see a warning if `path.settings` does not point to the same directory that contains the `logstash.yml`.
+::::
+
+
+
+## Create or overwrite a keystore [creating-keystore]
+
+The `create` command creates a new keystore or overwrites an existing keystore:
+
+```sh
+bin/logstash-keystore create
+```
+
+This command creates the keystore in the directory defined by the `path.settings` setting.
+
+::::{important}
+If a keystore already exists, the `create` command can overwrite it (after a Y/N prompt). Selecting `Y` clears all keys and secrets that were previously stored.
+::::
+
+
+::::{tip}
+Set a [keystore password](#keystore-password) when you create the keystore.
+::::
+
+
+
+## Add keys [add-keys-to-keystore]
+
+To store sensitive values, such as authentication credentials for Elasticsearch, use the `add` command:
+
+```sh
+bin/logstash-keystore add ES_USER ES_PWD
+```
+
+When prompted, enter a value for each key.
+
+::::{note}
+Key values are limited to ASCII characters, including digits, letters, and a few special symbols.
+::::
+
+
+
+## List keys [list-settings]
+
+To list the keys defined in the keystore, use:
+
+```sh
+bin/logstash-keystore list
+```
+
+
+## Remove keys [remove-settings]
+
+To remove keys from the keystore, use:
+
+```sh
+bin/logstash-keystore remove ES_USER ES_PWD
+```
diff --git a/docs/reference/logging.md b/docs/reference/logging.md
new file mode 100644
index 000000000..90df52d04
--- /dev/null
+++ b/docs/reference/logging.md
@@ -0,0 +1,227 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logging.html
+---
+
+# Logging [logging]
+
+Logstash emits internal logs during its operation, which are placed in `LS_HOME/logs` (or `/var/log/logstash` for DEB/RPM). The default logging level is `INFO`. Logstash’s logging framework is based on [Log4j 2 framework](http://logging.apache.org/log4j/2.x/), and much of its functionality is exposed directly to users.
+
+You can configure logging for a particular subsystem, module, or plugin.
+
+When you need to debug problems, particularly problems with plugins, consider increasing the logging level to `DEBUG` to get more verbose messages. For example, if you are debugging issues with Elasticsearch Output, you can increase log levels just for that component. This approach reduces noise from excessive logging and helps you focus on the problem area.
+
+You can configure logging using the `log4j2.properties` file or the Logstash API.
+
+* **`log4j2.properties` file.** Changes made through the `log4j2.properties` file require you to restart Logstash for the changes to take effect. Changes **persist** through subsequent restarts.
+* **Logging API.** Changes made through the Logging API are effective immediately without a restart. The changes **do not persist** after Logstash is restarted.
+
+## Log4j2 configuration [log4j2]
+
+Logstash ships with a `log4j2.properties` file with out-of-the-box settings, including logging to console. You can modify this file to change the rotation policy, type, and other [log4j2 configuration](https://logging.apache.org/log4j/2.x/manual/configuration.html#Loggers).
+
+You must restart Logstash to apply any changes that you make to this file. Changes to `log4j2.properties` persist after Logstash is restarted.
+
+Here’s an example using `outputs.elasticsearch`:
+
+```yaml
+logger.elasticsearchoutput.name = logstash.outputs.elasticsearch
+logger.elasticsearchoutput.level = debug
+```
+
+The previous example defines a name and level for the logger `logstash.outputs.elasticsearch`. The logger is usually identified by a Java class name, such as `org.logstash.dissect.Dissector`. It can also be a partial package path, as in `org.logstash.dissect`. For Ruby classes, like `LogStash::Outputs::Elasticsearch`, the logger name is obtained by lowercasing the full class name and replacing double colons with a single dot.
+
+::::{note}
+Consider using the default log4j configuration that is shipped with {{ls}}, as it is configured to work well for most deployments. The next section describes how the rolling strategy works in case you need to make adjustments.
+::::
+
+
+### Rollover settings [rollover]
+
+The `log4j2.properties` file has three appenders for writing to log files: one for plain text, one in JSON format, and one that splits log lines on a per-pipeline basis when the `pipeline.separate_logs` setting is enabled.
+
+These appenders define:
+
+* **triggering policies** that determine *if* a rollover should be performed, and
+* **rollover strategy** that defines *how* the rollover should be done.
+
+By default, two triggering policies are defined—time and size.
+
+* The **time** policy creates one file per day.
+* The **size** policy forces the creation of a new file after the file size surpasses 100 MB.
+
+The default strategy also performs file rollovers based on a **maximum number of files**. When the limit of 30 files has been reached, the first (oldest) file is removed to create space for the new file. Subsequent files are renumbered accordingly.
+
+Each file has a date, and files older than 7 days (default) are removed during rollover.
+
+```text
+appender.rolling.type = RollingFile <1>
+appender.rolling.name = plain_rolling
+appender.rolling.fileName = ${sys:ls.logs}/logstash-plain.log <2>
+appender.rolling.filePattern = ${sys:ls.logs}/logstash-plain-%d{yyyy-MM-dd}-%i.log.gz <3>
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c]%notEmpty{[%X{pipeline.id}]}%notEmpty{[%X{plugin.id}]} %m%n
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <5>
+appender.rolling.policies.size.size = 100MB
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.max = 30 <6>
+appender.rolling.strategy.action.type = Delete <7>
+appender.rolling.strategy.action.basepath = ${sys:ls.logs}
+appender.rolling.strategy.action.condition.type = IfFileName
+appender.rolling.strategy.action.condition.glob = logstash-plain-* <8>
+appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
+appender.rolling.strategy.action.condition.nested_condition.age = 7D <9>
+```
+
+1. The appender type, which rolls older log files.
+2. Name of the current log file.
+3. Format of the rolled file names: in this case, a date followed by an incremental number, up to 30 files by default.
+4. Time policy to trigger a rollover at the end of the day.
+5. Size policy to trigger a rollover once the plain text file reaches the size of 100 MB.
+6. Rollover strategy defines a maximum of 30 files.
+7. Action to execute during the rollover.
+8. The file set to consider by the action.
+9. Condition to execute the rollover action: older than 7 days.
+
+
+The rollover action can also enforce a disk usage limit, deleting older files to match the requested condition, as in this example:
+
+```text
+appender.rolling.type = RollingFile
+...
+appender.rolling.strategy.action.condition.glob = pipeline_${ctx:pipeline.id}.*.log.gz
+appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
+appender.rolling.strategy.action.condition.nested_condition.exceeds = 5MB <1>
+```
+
+1. Deletes files if total accumulated compressed file size is over 5MB.
+
+
+
+
+## Logging APIs [_logging_apis]
+
+For temporary logging changes, modifying the `log4j2.properties` file and restarting Logstash leads to unnecessary downtime. Instead, you can dynamically update logging levels through the logging API. These settings are effective immediately and do not need a restart.
+
+::::{note}
+By default, the logging API attempts to bind to `tcp:9600`. If this port is already in use by another Logstash instance, you need to launch Logstash with the `--api.http.port` flag specified to bind to a different port. See [Command-Line Flags](/reference/running-logstash-command-line.md#command-line-flags) for more information.
+::::
+
+
+### Retrieve list of logging configurations [_retrieve_list_of_logging_configurations]
+
+To retrieve a list of logging subsystems available at runtime, send a `GET` request to `_node/logging`:
+
+```js
+curl -XGET 'localhost:9600/_node/logging?pretty'
+```
+
+Example response:
+
+```js
+{
+...
+ "loggers" : {
+ "logstash.agent" : "INFO",
+ "logstash.api.service" : "INFO",
+ "logstash.basepipeline" : "INFO",
+ "logstash.codecs.plain" : "INFO",
+ "logstash.codecs.rubydebug" : "INFO",
+ "logstash.filters.grok" : "INFO",
+ "logstash.inputs.beats" : "INFO",
+ "logstash.instrument.periodicpoller.jvm" : "INFO",
+ "logstash.instrument.periodicpoller.os" : "INFO",
+ "logstash.instrument.periodicpoller.persistentqueue" : "INFO",
+ "logstash.outputs.stdout" : "INFO",
+ "logstash.pipeline" : "INFO",
+ "logstash.plugins.registry" : "INFO",
+ "logstash.runner" : "INFO",
+ "logstash.shutdownwatcher" : "INFO",
+ "org.logstash.Event" : "INFO",
+ "slowlog.logstash.codecs.plain" : "TRACE",
+ "slowlog.logstash.codecs.rubydebug" : "TRACE",
+ "slowlog.logstash.filters.grok" : "TRACE",
+ "slowlog.logstash.inputs.beats" : "TRACE",
+ "slowlog.logstash.outputs.stdout" : "TRACE"
+ }
+}
+```
+
+
+### Update logging levels [_update_logging_levels]
+
+Prepend the name of the subsystem, module, or plugin with `logger.`.
+
+Here is an example using `outputs.elasticsearch`:
+
+```js
+curl -XPUT 'localhost:9600/_node/logging?pretty' -H 'Content-Type: application/json' -d'
+{
+ "logger.logstash.outputs.elasticsearch" : "DEBUG"
+}
+'
+```
+
+While this setting is in effect, Logstash emits DEBUG-level logs for *all* the Elasticsearch outputs specified in your configuration. Please note this new setting is transient and will not survive a restart.
+
+::::{note}
+If you want logging changes to persist after a restart, add them to `log4j2.properties` instead.
+::::
+
+
+
+### Reset dynamic logging levels [_reset_dynamic_logging_levels]
+
+To reset any logging levels that may have been dynamically changed via the logging API, send a `PUT` request to `_node/logging/reset`. All logging levels will revert to the values specified in the `log4j2.properties` file.
+
+```js
+curl -XPUT 'localhost:9600/_node/logging/reset?pretty'
+```
+
+
+
+## Log file location [_log_file_location]
+
+You can specify the log file location using the `--path.logs` setting.
+
+
+## Slowlog [_slowlog]
+
+Slowlog for Logstash adds the ability to log when a specific event takes an abnormal amount of time to make its way through the pipeline. Just like the normal application log, you can find slowlogs in your `--path.logs` directory. Slowlog is configured in the `logstash.yml` settings file with the following options:
+
+```yaml
+slowlog.threshold.warn (default: -1)
+slowlog.threshold.info (default: -1)
+slowlog.threshold.debug (default: -1)
+slowlog.threshold.trace (default: -1)
+```
+
+Slowlog is disabled by default. The default threshold values are set to `-1nanos` to represent an infinite threshold. No slowlog will be invoked.
+
+### Enable slowlog [_enable_slowlog]
+
+The `slowlog.threshold` fields use a time-value format which enables a wide range of trigger intervals. You can specify ranges using the following time units: `nanos` (nanoseconds), `micros` (microseconds), `ms` (milliseconds), `s` (second), `m` (minute), `h` (hour), `d` (day).
+
+Slowlog becomes more sensitive and logs more events as you raise the log level.
+
+Example:
+
+```yaml
+slowlog.threshold.warn: 2s
+slowlog.threshold.info: 1s
+slowlog.threshold.debug: 500ms
+slowlog.threshold.trace: 100ms
+```
+
+In this example:
+
+* If the log level is set to `warn`, the log shows events that took longer than 2s to process.
+* If the log level is set to `info`, the log shows events that took longer than 1s to process.
+* If the log level is set to `debug`, the log shows events that took longer than 500ms to process.
+* If the log level is set to `trace`, the log shows events that took longer than 100ms to process.
+
+The logs include the full event and filter configuration that are responsible for the slowness.
diff --git a/docs/reference/logstash-centralized-pipeline-management.md b/docs/reference/logstash-centralized-pipeline-management.md
new file mode 100644
index 000000000..a445eca4b
--- /dev/null
+++ b/docs/reference/logstash-centralized-pipeline-management.md
@@ -0,0 +1,78 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
+---
+
+# Centralized Pipeline Management [logstash-centralized-pipeline-management]
+
+The pipeline management feature centralizes the creation and management of Logstash configuration pipelines in {{kib}}.
+
+::::{note}
+Centralized pipeline management is a subscription feature. If you want to try the full set of features, you can activate a free 30-day trial. To view the status of your license, start a trial, or install a new license, open the {{kib}} main menu and click **Stack Management > License Management**. For more information, see [https://www.elastic.co/subscriptions](https://www.elastic.co/subscriptions) and [License Management](docs-content://deploy-manage/license/manage-your-license-in-self-managed-cluster.md).
+::::
+
+
+You can control multiple Logstash instances from the pipeline management UI in {{kib}}. You can add, edit, and delete pipeline configurations. On the Logstash side, you simply need to enable configuration management and register Logstash to use the centrally managed pipeline configurations.
+
+::::{important}
+After you configure {{ls}} to use centralized pipeline management, you can no longer specify local pipeline configurations. The `pipelines.yml` file and settings such as `path.config` and `config.string` are inactive when centralized pipeline management is enabled.
+::::
+
+
+## Manage pipelines [_manage_pipelines]
+
+Before using the pipeline management UI, you must:
+
+* [Configure centralized pipeline management](/reference/configuring-centralized-pipelines.md).
+* If {{kib}} is protected with basic authentication, make sure your {{kib}} user has the `logstash_admin` role as well as the `logstash_writer` role that you created when you [configured Logstash to use basic authentication](/reference/secure-connection.md). Additionally, in order to view (as read-only) non-centrally-managed pipelines in the pipeline management UI, make sure your {{kib}} user has the `monitoring_user` role as well.
+
+To manage Logstash pipelines in {{kib}}:
+
+1. Open {{kib}} in your browser and go to the Management tab. If you’ve set up configuration management correctly, you’ll see an area for managing Logstash.
+
+ :::{image} ../images/centralized_config.png
+ :alt: centralized config
+ :::
+
+2. Click the **Pipelines** link.
+3. To add a new pipeline, click **Create pipeline** and specify values.
+
+ Pipeline ID
+ : A name that uniquely identifies the pipeline. This is the ID that you used when you [configured centralized pipeline management](/reference/configuring-centralized-pipelines.md) and specified a list of pipeline IDs in the `xpack.management.pipeline.id` setting.
+
+ Description
+ : A description of the pipeline configuration. This information is for your use.
+
+ Pipeline
+ : The pipeline configuration. You can treat the editor in the pipeline management UI like any other editor. You don’t have to worry about whitespace or indentation.
+
+ Pipeline workers
+ : The number of parallel workers used to run the filter and output stages of the pipeline.
+
+ Pipeline batch size
+ : The maximum number of events an individual worker thread collects before executing filters and outputs.
+
+ Pipeline batch delay
+ : Time in milliseconds to wait for each event before sending an undersized batch to pipeline workers.
+
+ Queue type
+ : The internal queueing model for event buffering. Options are **memory** for in-memory queueing, or **persisted** for disk-based acknowledged queueing.
+
+ Queue max bytes
+ : The total capacity of the queue when persistent queues are enabled.
+
+ Queue checkpoint writes
+ : The maximum number of events written before a checkpoint is forced when persistent queues are enabled.
+
+
+### Pipeline behavior [_pipeline_behavior]
+
+* The pipeline configurations and metadata are stored in Elasticsearch. Any changes that you make to a pipeline definition are picked up and loaded automatically by all Logstash instances registered to use the pipeline. The changes are applied immediately. If Logstash is registered to use the pipeline, you do not have to restart Logstash to pick up the changes.
+* The pipeline runs on all Logstash instances that are registered to use the pipeline. {{kib}} saves the new configuration, and Logstash will attempt to load it. There is no validation done at the UI level.
+* You need to check the local Logstash logs for configuration errors. If you’re using the Logstash monitoring feature in {{kib}}, use the Monitoring tab to check the status of your Logstash nodes.
+* You can specify multiple pipeline configurations that run in parallel on the same Logstash node.
+* If you edit and save a pipeline configuration, Logstash reloads the configuration in the background and continues processing events.
+* If you try to delete a pipeline that is running (for example, `apache`) in {{kib}}, Logstash will attempt to stop the pipeline. Logstash waits until all events have been fully processed by the pipeline. Before you delete a pipeline, make sure you understand your data sources. Stopping a pipeline may lead to data loss.
+
+
+
diff --git a/docs/reference/logstash-geoip-database-management.md b/docs/reference/logstash-geoip-database-management.md
new file mode 100644
index 000000000..2d18275df
--- /dev/null
+++ b/docs/reference/logstash-geoip-database-management.md
@@ -0,0 +1,78 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logstash-geoip-database-management.html
+---
+
+# GeoIP Database Management [logstash-geoip-database-management]
+
+Logstash provides a mechanism for provisioning and maintaining GeoIP databases, which plugins can use to ensure that they have access to an always-up-to-date and EULA-compliant database for geo enrichment. This mechanism requires internet access or a network route to an Elastic GeoIP database service.
+
+If the database manager is enabled in `logstash.yml` (as it is by default), a plugin may subscribe to a database, triggering a download if a valid database is not already available. Logstash checks for updates every day. When an updated database is discovered, it is downloaded in the background and made available to the plugins that rely on it.
+
+The GeoIP databases are separately-licensed from MaxMind under the terms of an End User License Agreement, which prohibits a database from being used after an update has been available for more than 30 days. When Logstash cannot reach the database service for 30 days or more to validate that a managed database is up-to-date, that database is deleted and made unavailable to the plugins that subscribed to it.
+
+::::{note}
+GeoIP database management is a licensed feature of Logstash, and is only available in the Elastic-licensed complete distribution of Logstash.
+::::
+
+
+## Database Metrics [logstash-geoip-database-management-metrics]
+
+You can monitor the managed database’s status through the [Node Stats API](https://www.elastic.co/docs/api/doc/logstash/operation/operation-nodestats).
+
+The following request returns a JSON document containing database manager stats, including:
+
+* database status and freshness
+
+ * `geoip_download_manager.database.*.status`
+
+ * `init` : initial CC database status
+ * `up_to_date` : using up-to-date EULA database
+ * `to_be_expired` : 25 days without calling service
+ * `expired` : 30 days without calling service
+
+  * `fail_check_in_days` : number of days that Logstash has failed to reach the service since the last successful check
+
+* info about download successes and failures
+
+ * `geoip_download_manager.download_stats.successes` number of successful checks and downloads
+  * `geoip_download_manager.download_stats.failures` number of failed checks or downloads
+ * `geoip_download_manager.download_stats.status`
+
+        * `updating` : a check or download is in progress
+        * `succeeded` : the last download succeeded
+        * `failed` : the last download failed
+
+
+```shell
+curl -XGET 'localhost:9600/_node/stats/geoip_download_manager?pretty'
+```
+
+Example response:
+
+```json
+{
+ "geoip_download_manager" : {
+ "database" : {
+ "ASN" : {
+ "status" : "up_to_date",
+ "fail_check_in_days" : 0,
+ "last_updated_at": "2021-06-21T16:06:54+02:00"
+ },
+ "City" : {
+ "status" : "up_to_date",
+ "fail_check_in_days" : 0,
+ "last_updated_at": "2021-06-21T16:06:54+02:00"
+ }
+ },
+ "download_stats" : {
+ "successes" : 15,
+ "failures" : 1,
+ "last_checked_at" : "2021-06-21T16:07:03+02:00",
+ "status" : "succeeded"
+ }
+ }
+}
+```
+
+
diff --git a/docs/reference/logstash-monitoring-ui.md b/docs/reference/logstash-monitoring-ui.md
new file mode 100644
index 000000000..2ab6393f5
--- /dev/null
+++ b/docs/reference/logstash-monitoring-ui.md
@@ -0,0 +1,28 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logstash-monitoring-ui.html
+---
+
+# Monitoring UI [logstash-monitoring-ui]
+
+Use the {{stack}} {{monitor-features}} to view metrics and gain insight into how your {{ls}} deployment is running. In the overview dashboard, you can see all events received and sent by Logstash, plus info about memory usage and uptime:
+
+:::{image} ../images/overviewstats.png
+:alt: Logstash monitoring overview dashboard in Kibana
+:::
+
+Then you can drill down to see stats about a specific node:
+
+:::{image} ../images/nodestats.png
+:alt: Logstash monitoring node stats dashboard in Kibana
+:::
+
+::::{note}
+A {{ls}} node is considered unique based on its persistent UUID, which is written to the [`path.data`](/reference/logstash-settings-file.md) directory when the node starts.
+::::
+
+
+Before you can use the monitoring UI, [configure Logstash monitoring](/reference/monitoring-logstash-legacy.md).
+
+For information about using the Monitoring UI, see [{{monitoring}} in {{kib}}](docs-content://deploy-manage/monitor/monitoring-data/visualizing-monitoring-data.md).
+
diff --git a/docs/reference/logstash-pipeline-viewer.md b/docs/reference/logstash-pipeline-viewer.md
new file mode 100644
index 000000000..1b2478522
--- /dev/null
+++ b/docs/reference/logstash-pipeline-viewer.md
@@ -0,0 +1,55 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logstash-pipeline-viewer.html
+---
+
+# Pipeline Viewer UI [logstash-pipeline-viewer]
+
+The pipeline viewer UI offers additional visibility into the behavior and performance of complex Logstash pipeline configurations. You can see and interact with a tree view that illustrates the pipeline topology, data flow, and branching logic.
+
+The pipeline viewer highlights CPU% and event latency in cases where the values are anomalous. This information helps you quickly identify processing that is disproportionately slow.
+
+:::{image} ../images/pipeline-tree.png
+:alt: Pipeline Viewer
+:class: screenshot
+:::
+
+
+## Prerequisites [_prerequisites]
+
+Before using the pipeline viewer:
+
+* [Configure Logstash monitoring](monitoring-logstash.md).
+* Start the Logstash pipeline that you want to monitor.
+
+Logstash begins shipping metrics to the monitoring cluster.
+
+
+## View the pipeline [_view_the_pipeline]
+
+To view the pipeline:
+
+* Kibana → Monitoring → Logstash → Pipelines
+
+Each pipeline is identified by a pipeline ID (`main` by default). For each pipeline, you see the pipeline’s throughput and the number of nodes on which the pipeline is running during the selected time range.
+
+Many elements in the tree are clickable. For example, you can click the plugin name to expand the detail view.
+
+:::{image} ../images/pipeline-input-detail.png
+:alt: Pipeline Input Detail
+:class: screenshot
+:::
+
+Click the arrow beside a branch name to collapse or expand it.
+
+
+## Notes and best practices [_notes_and_best_practices]
+
+**Use semantic IDs.** Specify semantic IDs when you configure the stages in your Logstash pipeline. Otherwise, Logstash generates them for you. Semantic IDs help you identify configurations that are causing bottlenecks. For example, you may have several grok filters running in your pipeline. If you have specified semantic IDs, you can tell at a glance which filters are slow. Semantic IDs, such as `apacheParsingGrok` and `cloudwatchGrok`, point you to the grok filters that are causing bottlenecks.
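+
+For example, you might assign a semantic ID to a grok filter directly in the pipeline configuration. This is a minimal, hypothetical sketch; the ID value and the match pattern are illustrative only:
+
+```json
+filter {
+  grok {
+    # this id is shown in the pipeline viewer instead of a generated identifier
+    id => "apacheParsingGrok"
+    match => { "message" => "%{COMBINEDAPACHELOG}" }
+  }
+}
+```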
+
+**Outliers.** Values and stats that are anomalously slow or otherwise out of line are highlighted. This doesn’t necessarily indicate a problem, but it highlights potential bottlenecks so that you can find them quickly.
+
+Some plugins are slower than others due to the nature of the work they do. For instance, you may find that a grok filter that uses a complicated regexp runs a lot slower than a mutate filter that simply adds a field. The grok filter might be highlighted in this case, though it may not be possible to further optimize its work.
+
+**Versioning.** Version information is available from the dropdown list beside the pipeline ID. Logstash generates a new version each time you modify a pipeline, and stores multiple versions of the pipeline stats. Use this information to see how changes over time affect throughput and other metrics. Logstash does not store multiple versions of the pipeline configurations.
+
diff --git a/docs/reference/logstash-settings-file.md b/docs/reference/logstash-settings-file.md
new file mode 100644
index 000000000..31050d549
--- /dev/null
+++ b/docs/reference/logstash-settings-file.md
@@ -0,0 +1,96 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/logstash-settings-file.html
+---
+
+# logstash.yml [logstash-settings-file]
+
+You can set options in the Logstash settings file, `logstash.yml`, to control Logstash execution. For example, you can specify pipeline settings, the location of configuration files, logging options, and other settings. Most of the settings in the `logstash.yml` file are also available as [command-line flags](/reference/running-logstash-command-line.md#command-line-flags) when you run Logstash. Any flags that you set at the command line override the corresponding settings in the `logstash.yml` file.
+
+The `logstash.yml` file is written in [YAML](http://yaml.org/). Its location varies by platform (see [Logstash Directory Layout](/reference/dir-layout.md)). You can specify settings in hierarchical form or use flat keys. For example, to use hierarchical form to set the pipeline batch size and batch delay, you specify:
+
+```yaml
+pipeline:
+ batch:
+ size: 125
+ delay: 50
+```
+
+To express the same values as flat keys, you specify:
+
+```yaml
+pipeline.batch.size: 125
+pipeline.batch.delay: 50
+```
+
+The `logstash.yml` file also supports bash-style interpolation of environment variables and keystore secrets in setting values.
+
+```yaml
+pipeline:
+ batch:
+ size: ${BATCH_SIZE}
+ delay: ${BATCH_DELAY:50}
+node:
+ name: "node_${LS_NODE_NAME}"
+path:
+ queue: "/tmp/${QUEUE_DIR:queue}"
+```
+
+Note that the `${VAR_NAME:default_value}` notation is supported, setting a default batch delay of `50` and a default `path.queue` of `/tmp/queue` in the above example.
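+
+For example, you might export the referenced variables in the shell before starting {{ls}}. This is an illustrative sketch only; the variable names match the example above, and any unset variable falls back to the default shown there:
+
+```shell
+export BATCH_SIZE=250
+export LS_NODE_NAME="ingest-01"
+bin/logstash
+```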
+
+The `logstash.yml` file includes these settings.
+
+| Setting | Description | Default value |
+| --- | --- | --- |
+| `node.name` | A descriptive name for the node. | Machine’s hostname |
+| `path.data` | The directory that Logstash and its plugins use for any persistent needs. | `LOGSTASH_HOME/data` |
+| `pipeline.id` | The ID of the pipeline. | `main` |
+| `pipeline.workers` | The number of workers that will, in parallel, execute the filter and output stages of the pipeline. This setting uses the [`java.lang.Runtime.getRuntime.availableProcessors`](https://docs.oracle.com/javase/7/docs/api/java/lang/Runtime.html#availableProcessors()) value as a default if not overridden by `pipeline.workers` in `pipelines.yml` or `pipeline.workers` from `logstash.yml`. If you have modified this setting and see that events are backing up, or that the CPU is not saturated, consider increasing this number to better utilize machine processing power. | Number of the host’s CPU cores |
+| `pipeline.batch.size` | The maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. Larger batch sizes are generally more efficient, but come at the cost of increased memory overhead. You may need to increase JVM heap space in the `jvm.options` config file. See [Logstash Configuration Files](/reference/config-setting-files.md) for more info. | `125` |
+| `pipeline.batch.delay` | When creating pipeline event batches, how long in milliseconds to wait for each event before dispatching an undersized batch to pipeline workers. | `50` |
+| `pipeline.unsafe_shutdown` | When set to `true`, forces Logstash to exit during shutdown even if there are still inflight events in memory. By default, Logstash will refuse to quit until all received events have been pushed to the outputs. Enabling this option can lead to data loss during shutdown. | `false` |
+| `pipeline.plugin_classloaders` | (Beta) Load Java plugins in independent classloaders to isolate their dependencies. | `false` |
+| `pipeline.ordered` | Set the pipeline event ordering. Valid options are: * `auto`. Automatically enables ordering if the `pipeline.workers` setting is `1`, and disables otherwise. * `true`. Enforces ordering on the pipeline and prevents Logstash from starting if there are multiple workers. * `false`. Disables the processing required to preserve order. Ordering will not be guaranteed, but you save the processing cost of preserving order. | `auto` |
+| `pipeline.ecs_compatibility` | Sets the pipeline’s default value for `ecs_compatibility`, a setting that is available to plugins that implement an ECS compatibility mode for use with the Elastic Common Schema. Possible values are: * `disabled` * `v1` * `v8` This option allows the [early opt-in (or preemptive opt-out) of ECS compatibility](/reference/ecs-ls.md) modes in plugins, which is scheduled to be on-by-default in a future major release of {{ls}}. Values other than `disabled` are currently considered BETA, and may produce unintended consequences when upgrading {{ls}}. | `disabled` |
+| `path.config` | The path to the Logstash config for the main pipeline. If you specify a directory or wildcard, config files are read from the directory in alphabetical order. | Platform-specific. See [Logstash Directory Layout](/reference/dir-layout.md). |
+| `config.string` | A string that contains the pipeline configuration to use for the main pipeline. Use the same syntax as the config file. | *N/A* |
+| `config.test_and_exit` | When set to `true`, checks that the configuration is valid and then exits. Note that grok patterns are not checked for correctness with this setting. Logstash can read multiple config files from a directory. If you combine this setting with `log.level: debug`, Logstash will log the combined config file, annotating each config block with the source file it came from. | `false` |
+| `config.reload.automatic` | When set to `true`, periodically checks if the configuration has changed and reloads the configuration whenever it is changed. This can also be triggered manually through the SIGHUP signal. | `false` |
+| `config.reload.interval` | How often in seconds Logstash checks the config files for changes. Note that the unit qualifier (`s`) is required. | `3s` |
+| `config.debug` | When set to `true`, shows the fully compiled configuration as a debug log message. You must also set `log.level: debug`. WARNING: The log message will include any *password* options passed to plugin configs as plaintext, and may result in plaintext passwords appearing in your logs! | `false` |
+| `config.support_escapes` | When set to `true`, quoted strings will process the following escape sequences: `\n` becomes a literal newline (ASCII 10). `\r` becomes a literal carriage return (ASCII 13). `\t` becomes a literal tab (ASCII 9). `\\` becomes a literal backslash `\`. `\"` becomes a literal double quotation mark. `\'` becomes a literal quotation mark. | `false` |
+| `config.field_reference.escape_style` | Provides a way to reference fields that contain [field reference special characters](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html#formal-grammar-escape-sequences) `[` and `]`. ::::{note} This feature is in **technical preview** and may change in the future. :::: Current options are: * `percent`: URI-style `%`+`HH` hexadecimal encoding of UTF-8 bytes (`[` → `%5B`; `]` → `%5D`) * `ampersand`: HTML-style `&#`+`DD`+`;` encoding of decimal Unicode code-points (`[` → `&#91;`; `]` → `&#93;`) * `none`: field names containing special characters *cannot* be referenced. | `none` |
+| `queue.type` | The internal queuing model to use for event buffering. Specify `memory` for legacy in-memory based queuing, or `persisted` for disk-based ACKed queueing ([persistent queues](/reference/persistent-queues.md)). | `memory` |
+| `path.queue` | The directory path where the data files will be stored when persistent queues are enabled (`queue.type: persisted`). | `path.data/queue` |
+| `queue.page_capacity` | The size of the page data files used when persistent queues are enabled (`queue.type: persisted`). The queue data consists of append-only data files separated into pages. | 64mb |
+| `queue.max_events` | The maximum number of unread events in the queue when persistent queues are enabled (`queue.type: persisted`). | 0 (unlimited) |
+| `queue.max_bytes` | The total capacity of the queue (`queue.type: persisted`) in number of bytes. Make sure the capacity of your disk drive is greater than the value you specify here. If both `queue.max_events` and `queue.max_bytes` are specified, Logstash uses whichever criteria is reached first. | 1024mb (1g) |
+| `queue.checkpoint.acks` | The maximum number of ACKed events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). Specify `queue.checkpoint.acks: 0` to set this value to unlimited. | 1024 |
+| `queue.checkpoint.writes` | The maximum number of written events before forcing a checkpoint when persistent queues are enabled (`queue.type: persisted`). Specify `queue.checkpoint.writes: 0` to set this value to unlimited. | 1024 |
+| `queue.checkpoint.retry` | When enabled, Logstash will retry four times per attempted checkpoint write for any checkpoint writes that fail. Any subsequent errors are not retried. This is a workaround for failed checkpoint writes that have been seen only on the Windows platform or on filesystems with non-standard behavior such as SANs, and it is not recommended except in those specific circumstances. (`queue.type: persisted`) | `true` |
+| `queue.drain` | When enabled, Logstash waits until the persistent queue (`queue.type: persisted`) is drained before shutting down. | `false` |
+| `dead_letter_queue.enable` | Flag to instruct Logstash to enable the DLQ feature supported by plugins. | `false` |
+| `dead_letter_queue.max_bytes` | The maximum size of each dead letter queue. Entries will be dropped if they would increase the size of the dead letter queue beyond this setting. | `1024mb` |
+| `dead_letter_queue.storage_policy` | Defines the action to take when the dead_letter_queue.max_bytes setting is reached: `drop_newer` stops accepting new values that would push the file size over the limit, and `drop_older` removes the oldest events to make space for new ones. | `drop_newer` |
+| `path.dead_letter_queue` | The directory path where the data files will be stored for the dead-letter queue. | `path.data/dead_letter_queue` |
+| `api.enabled` | The HTTP API is enabled by default. It can be disabled, but features that rely on it will not work as intended. | `true` |
+| `api.environment` | The API returns the provided string as a part of its response. Setting your environment may help to disambiguate between similarly-named nodes in production vs test environments. | `production` |
+| `api.http.host` | The bind address for the HTTP API endpoint. By default, the {{ls}} HTTP API binds only to the local loopback interface. When configured securely (`api.ssl.enabled: true` and `api.auth.type: basic`), the HTTP API binds to *all* available interfaces. | `"127.0.0.1"` |
+| `api.http.port` | The bind port for the HTTP API endpoint. | `9600-9700` |
+| `api.ssl.enabled` | Set to `true` to enable SSL on the HTTP API. Doing so requires both `api.ssl.keystore.path` and `api.ssl.keystore.password` to be set. | `false` |
+| `api.ssl.keystore.path` | The path to a valid JKS or PKCS12 keystore for use in securing the {{ls}} API. The keystore must be password-protected, and must contain a single certificate chain and a private key. This setting is ignored unless `api.ssl.enabled` is set to `true`. | *N/A* |
+| `api.ssl.keystore.password` | The password to the keystore provided with `api.ssl.keystore.path`. This setting is ignored unless `api.ssl.enabled` is set to `true`. | *N/A* |
+| `api.ssl.supported_protocols` | List of allowed SSL/TLS versions to use when establishing a secure connection. The availability of protocols depends on the JVM version. Certain protocols are disabled by default and need to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. Possible values are: * `TLSv1` * `TLSv1.1` * `TLSv1.2` * `TLSv1.3` | *N/A* |
+| `api.auth.type` | Set to `basic` to require HTTP Basic auth on the API using the credentials supplied with `api.auth.basic.username` and `api.auth.basic.password`. | `none` |
+| `api.auth.basic.username` | The username to require for HTTP Basic auth. Ignored unless `api.auth.type` is set to `basic`. | *N/A* |
+| `api.auth.basic.password` | The password to require for HTTP Basic auth. Ignored unless `api.auth.type` is set to `basic`. It should meet the default password policy, which requires a non-empty string of at least 8 characters that includes a digit, an upper case letter, and a lower case letter. The default password policy can be customized with the following options: * Set `api.auth.basic.password_policy.include.digit` to `REQUIRED` (default) to accept only passwords that include at least one digit, or to `OPTIONAL` to drop the requirement. * Set `api.auth.basic.password_policy.include.upper` to `REQUIRED` (default) to accept only passwords that include at least one upper case letter, or to `OPTIONAL` to drop the requirement. * Set `api.auth.basic.password_policy.include.lower` to `REQUIRED` (default) to accept only passwords that include at least one lower case letter, or to `OPTIONAL` to drop the requirement. * Set `api.auth.basic.password_policy.include.symbol` to `REQUIRED` to accept only passwords that include at least one special character, or to `OPTIONAL` (default) to drop the requirement. * Set `api.auth.basic.password_policy.length.minimum` to a value from 9 to 1024 if you want to require more than the 8-character default for passwords. | *N/A* |
+| `api.auth.basic.password_policy.mode` | Raises either a `WARN` or an `ERROR` message when password requirements are not met. Ignored unless `api.auth.type` is set to `basic`. | `WARN` |
+| `log.level` | The log level. Valid options are: * `fatal` * `error` * `warn` * `info` * `debug` * `trace` | `info` |
+| `log.format` | The log format. Set to `json` to log in JSON format, or `plain` to use `Object#inspect`. | `plain` |
+| `log.format.json.fix_duplicate_message_fields` | When the log format is `json` avoid collision of field names in log lines. | `true` |
+| `path.logs` | The directory where Logstash will write its log to. | `LOGSTASH_HOME/logs` |
+| `pipeline.separate_logs` | This is a boolean setting to enable separation of logs per pipeline in different log files. If enabled, Logstash creates a different log file for each pipeline, using the `pipeline.id` as the name of the file. The destination directory is taken from the `path.logs` setting. When many pipelines are configured in Logstash, separating the log lines per pipeline can be helpful when you need to troubleshoot what’s happening in a single pipeline without interference from the others. | `false` |
+| `path.plugins` | Where to find custom plugins. You can specify this setting multiple times to include multiple paths. Plugins are expected to be in a specific directory hierarchy: `PATH/logstash/TYPE/NAME.rb` where `TYPE` is `inputs`, `filters`, `outputs`, or `codecs`, and `NAME` is the name of the plugin. | Platform-specific. See [Logstash Directory Layout](/reference/dir-layout.md). |
+| `allow_superuser` | Set to `true` to allow Logstash to run as a superuser, or `false` to block it. | `false` |
+| `pipeline.buffer.type` | Determines where to allocate memory buffers for plugins that use them. Defaults to `heap`, but can be switched to `direct` to instruct Logstash to prefer allocation of buffers in direct memory. | `heap`. Check out [Buffer Allocation types](/reference/jvm-settings.md#off-heap-buffers-allocation) for more info. |
+
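+For example, a minimal `logstash.yml` fragment that secures the HTTP API with TLS and basic authentication might look like the following sketch. The paths, username, and password are placeholders, not recommendations:
+
+```yaml
+api.ssl.enabled: true
+api.ssl.keystore.path: /etc/logstash/api-keystore.p12
+api.ssl.keystore.password: "keystore-password"
+api.auth.type: basic
+api.auth.basic.username: "logstash-api"
+api.auth.basic.password: "S3curePassw0rd"
+```
+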
diff --git a/docs/reference/logstash-to-logstash-communications.md b/docs/reference/logstash-to-logstash-communications.md
new file mode 100644
index 000000000..6caaaee8b
--- /dev/null
+++ b/docs/reference/logstash-to-logstash-communications.md
@@ -0,0 +1,42 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ls-to-ls.html
+---
+
+# Logstash-to-Logstash communications [ls-to-ls]
+
+{{ls}}-to-{{ls}} communication is available if you need to have one {{ls}} instance communicate with another {{ls}} instance. Implementing Logstash-to-Logstash communication can add complexity to your environment, but you may need it if the data path crosses network or firewall boundaries. However, we suggest you don’t implement it unless it is strictly required.
+
+::::{note}
+If you are looking for information on connecting multiple pipelines within one Logstash instance, see [Pipeline-to-pipeline communication](/reference/pipeline-to-pipeline.md).
+::::
+
+
+Logstash-to-Logstash communication can be achieved in one of two ways:
+
+* [Logstash output to Logstash Input](#native-considerations)
+* [Lumberjack output to Beats input](#lumberjack-considerations)
+
+$$$native-considerations$$$**Logstash to Logstash considerations**
+
+This is the preferred method to implement Logstash-to-Logstash. It replaces [Logstash-to-Logstash: HTTP output to HTTP input](/reference/ls-to-ls-http.md) and has these considerations:
+
+* It relies on HTTP as the communication protocol between the Input and Output.
+* It supports multiple hosts, providing high availability by load balancing equally amongst the multiple destination hosts.
+* No connection information is added to events.
+
+Ready to see more configuration details? See [Logstash-to-Logstash: Output to Input](/reference/ls-to-ls-native.md).
+
+$$$lumberjack-considerations$$$**Lumberjack-Beats considerations**
+
+Lumberjack output to Beats input has been our standard approach for {{ls}}-to-{{ls}} communication, but our recommended approach is now [Logstash-to-Logstash: Output to Input](/reference/ls-to-ls-native.md). Before you implement the Lumberjack to Beats configuration, keep these points in mind:
+
+* Lumberjack to Beats provides high availability, but does not provide load balancing. The Lumberjack output plugin allows defining multiple output hosts for high availability, but instead of load-balancing between all output hosts, it falls back to one host on the list in the case of failure.
+* If you need a proxy between the Logstash instances, TCP proxy is the only option.
+* There’s no explicit way to exert back pressure on the Beats input.
+
+Ready to see more configuration details? See [Logstash-to-Logstash: Lumberjack output to Beats input](/reference/ls-to-ls-lumberjack.md).
+
+
+
+
diff --git a/docs/reference/lookup-enrichment.md b/docs/reference/lookup-enrichment.md
new file mode 100644
index 000000000..fcb998ef3
--- /dev/null
+++ b/docs/reference/lookup-enrichment.md
@@ -0,0 +1,236 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/lookup-enrichment.html
+---
+
+# Enriching Data with Lookups [lookup-enrichment]
+
+These plugins can help you enrich data with additional info, such as GeoIP and user agent info:
+
+* [dns filter](#dns-def)
+* [elasticsearch filter](#es-def)
+* [geoip filter](#geoip-def)
+* [http filter](#http-def)
+* [jdbc_static filter](#jdbc-static-def)
+* [jdbc_streaming filter](#jdbc-stream-def)
+* [memcached filter](#memcached-def)
+* [translate filter](#translate-def)
+* [useragent filter](#useragent-def)
+
+
+## Lookup plugins [lookup-plugins]
+
+$$$dns-def$$$dns filter
+: The [dns filter plugin](/reference/plugins-filters-dns.md) performs a standard or reverse DNS lookup.
+
+ The following config performs a reverse lookup on the address in the `source_host` field and replaces it with the domain name:
+
+ ```json
+ filter {
+ dns {
+ reverse => [ "source_host" ]
+ action => "replace"
+ }
+ }
+ ```
+
+
+$$$es-def$$$elasticsearch filter
+: The [elasticsearch filter](/reference/plugins-filters-elasticsearch.md) copies fields from previous log events in Elasticsearch to current events.
+
+ The following config shows a complete example of how this filter might be used. Whenever Logstash receives an "end" event, it uses this Elasticsearch filter to find the matching "start" event based on some operation identifier. Then it copies the `@timestamp` field from the "start" event into a new field on the "end" event. Finally, using a combination of the date filter and the ruby filter, the code in the example calculates the time duration in hours between the two events.
+
+ ```json
+ if [type] == "end" {
+ elasticsearch {
+ hosts => ["es-server"]
+ query => "type:start AND operation:%{[opid]}"
+ fields => { "@timestamp" => "started" }
+ }
+ date {
+ match => ["[started]", "ISO8601"]
+ target => "[started]"
+ }
+ ruby {
+ code => 'event.set("duration_hrs", (event.get("@timestamp") - event.get("started")) / 3600) rescue nil'
+ }
+ }
+ ```
+
+
+$$$geoip-def$$$geoip filter
+: The [geoip filter](/reference/plugins-filters-geoip.md) adds geographical information about the location of IP addresses. For example:
+
+ ```json
+ filter {
+ geoip {
+ source => "clientip"
+ }
+ }
+ ```
+
+    After the geoip filter is applied, the event is enriched with geoip fields, such as the IP address’s country, region, city, and geographic coordinates.
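+
+    As a rough illustration only (the exact field names depend on the database, the plugin version, and its `ecs_compatibility` setting), the enriched portion of the event might look like this:
+
+    ```json
+    "geoip" => {
+        "ip" => "203.0.113.10",
+        "country_name" => "United States",
+        "region_name" => "California",
+        "city_name" => "San Francisco",
+        "location" => {
+            "lat" => 37.77,
+            "lon" => -122.41
+        }
+    }
+    ```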
+
+
+$$$http-def$$$http filter
+: The [http filter](/reference/plugins-filters-http.md) integrates with external web services/REST APIs, and enables lookup enrichment against any HTTP service or endpoint. This plugin is well suited for many enrichment use cases, such as social APIs, sentiment APIs, security feed APIs, and business service APIs.
+
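+    For example, the following hypothetical sketch queries an external REST endpoint with a value from the event and stores the response body in a new field. The URL, query parameter, and field names are illustrative only:
+
+    ```json
+    filter {
+      http {
+        url => "https://enrichment.example.com/lookup"
+        query => { "ip" => "%{[source][ip]}" }
+        target_body => "[enrichment]"
+      }
+    }
+    ```
+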
+$$$jdbc-static-def$$$jdbc_static filter
+: The [jdbc_static filter](/reference/plugins-filters-jdbc_static.md) enriches events with data pre-loaded from a remote database.
+
+ The following example fetches data from a remote database, caches it in a local database, and uses lookups to enrich events with data cached in the local database.
+
+ ```json
+ filter {
+ jdbc_static {
+ loaders => [ <1>
+ {
+ id => "remote-servers"
+ query => "select ip, descr from ref.local_ips order by ip"
+ local_table => "servers"
+ },
+ {
+ id => "remote-users"
+ query => "select firstname, lastname, userid from ref.local_users order by userid"
+ local_table => "users"
+ }
+ ]
+ local_db_objects => [ <2>
+ {
+ name => "servers"
+ index_columns => ["ip"]
+ columns => [
+ ["ip", "varchar(15)"],
+ ["descr", "varchar(255)"]
+ ]
+ },
+ {
+ name => "users"
+ index_columns => ["userid"]
+ columns => [
+ ["firstname", "varchar(255)"],
+ ["lastname", "varchar(255)"],
+ ["userid", "int"]
+ ]
+ }
+ ]
+ local_lookups => [ <3>
+ {
+ id => "local-servers"
+ query => "select descr as description from servers WHERE ip = :ip"
+ parameters => {ip => "[from_ip]"}
+ target => "server"
+ },
+ {
+ id => "local-users"
+ query => "select firstname, lastname from users WHERE userid = :id"
+ parameters => {id => "[loggedin_userid]"}
+ target => "user" <4>
+ }
+ ]
+ # using add_field here to add & rename values to the event root
+ add_field => { server_name => "%{[server][0][description]}" }
+ add_field => { user_firstname => "%{[user][0][firstname]}" } <5>
+ add_field => { user_lastname => "%{[user][0][lastname]}" }
+ remove_field => ["server", "user"]
+ jdbc_user => "logstash"
+ jdbc_password => "example"
+ jdbc_driver_class => "org.postgresql.Driver"
+ jdbc_driver_library => "/tmp/logstash/vendor/postgresql-42.1.4.jar"
+ jdbc_connection_string => "jdbc:postgresql://remotedb:5432/ls_test_2"
+ }
+ }
+ ```
+
+ 1. Queries an external database to fetch the dataset that will be cached locally.
+ 2. Defines the columns, types, and indexes used to build the local database structure. The column names and types should match the external database.
+ 3. Performs lookup queries on the local database to enrich the events.
+ 4. Specifies the event field that will store the looked-up data. If the lookup returns multiple columns, the data is stored as a JSON object within the field.
+ 5. Takes data from the JSON object and stores it in top-level event fields for easier analysis in Kibana.
+
+
+$$$jdbc-stream-def$$$jdbc_streaming filter
+: The [jdbc_streaming filter](/reference/plugins-filters-jdbc_streaming.md) enriches events with database data.
+
+ The following example executes a SQL query and stores the result set in a field called `country_details`:
+
+ ```json
+ filter {
+ jdbc_streaming {
+ jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+ jdbc_driver_class => "com.mysql.jdbc.Driver"
+ jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
+ jdbc_user => "me"
+ jdbc_password => "secret"
+ statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+ parameters => { "code" => "country_code"}
+ target => "country_details"
+ }
+ }
+ ```
+
+
+$$$memcached-def$$$memcached filter
+: The [memcached filter](/reference/plugins-filters-memcached.md) enables key/value lookup enrichment against a Memcached object caching system. It supports both read (GET) and write (SET) operations. It is a notable addition for security analytics use cases.
+
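+    As a minimal, hypothetical sketch (the memcached host, key pattern, and field names are illustrative only), the following config reads a value keyed by the event’s source IP from memcached and writes it to a new field:
+
+    ```json
+    filter {
+      memcached {
+        hosts => ["localhost:11211"]
+        get => { "threat-intel/%{[source][ip]}" => "[threat][indicator]" }
+      }
+    }
+    ```
+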
+$$$translate-def$$$translate filter
+: The [translate filter](/reference/plugins-filters-translate.md) replaces field contents based on replacement values specified in a hash or file. Currently supports these file types: YAML, JSON, and CSV.
+
+ The following example takes the value of the `response_code` field, translates it to a description based on the values specified in the dictionary, and then removes the `response_code` field from the event:
+
+ ```json
+ filter {
+ translate {
+ field => "response_code"
+ destination => "http_response"
+ dictionary => {
+ "200" => "OK"
+ "403" => "Forbidden"
+ "404" => "Not Found"
+ "408" => "Request Timeout"
+ }
+ remove_field => "response_code"
+ }
+ }
+ ```
+
+
+$$$useragent-def$$$useragent filter
+: The [useragent filter](/reference/plugins-filters-useragent.md) parses user agent strings into fields.
+
+ The following example takes the user agent string in the `agent` field, parses it into user agent fields, and adds the user agent fields to a new field called `user_agent`. It also removes the original `agent` field:
+
+ ```json
+ filter {
+ useragent {
+ source => "agent"
+ target => "user_agent"
+ remove_field => "agent"
+ }
+ }
+ ```
+
+ After the filter is applied, the event will be enriched with user agent fields. For example:
+
+ ```json
+ "user_agent": {
+ "os": "Mac OS X 10.12",
+ "major": "50",
+ "minor": "0",
+ "os_minor": "12",
+ "os_major": "10",
+ "name": "Firefox",
+ "os_name": "Mac OS X",
+ "device": "Other"
+ }
+ ```
+
+
diff --git a/docs/reference/ls-to-ls-http.md b/docs/reference/ls-to-ls-http.md
new file mode 100644
index 000000000..f6ebc1e42
--- /dev/null
+++ b/docs/reference/ls-to-ls-http.md
@@ -0,0 +1,137 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ls-to-ls-http.html
+---
+
+# Logstash-to-Logstash: HTTP output to HTTP input [ls-to-ls-http]
+
+HTTP output to HTTP input is an alternative to the Lumberjack output to Beats input approach for Logstash-to-Logstash communication. This approach relies on the use of [http output](/reference/plugins-outputs-http.md) to [http input](/reference/plugins-inputs-http.md) plugins.
+
+::::{note}
+{{ls}}-to-{{ls}} using HTTP input/output plugins is now being deprecated in favor of [Logstash-to-Logstash: Output to Input](/reference/ls-to-ls-native.md).
+::::
+
+
+## Configuration overview [overview-http-http]
+
+To use the HTTP protocol to connect two Logstash instances:
+
+1. Configure the downstream (server) Logstash to use HTTP input
+2. Configure the upstream (client) Logstash to use HTTP output
+3. Secure the communication between HTTP input and HTTP output
+
+### Configure the downstream Logstash to use HTTP input [configure-downstream-logstash-http-input]
+
+Configure the HTTP input on the downstream (receiving) Logstash to receive connections. The minimum configuration requires these options:
+
+* `port` - To set a custom port.
+* `additional_codecs` - To set `application/json` to be `json_lines`.
+
+```json
+input {
+ http {
+ port => 8080
+ additional_codecs => { "application/json" => "json_lines" }
+ }
+}
+```
+
+
+### Configure the upstream Logstash to use HTTP output [configure-upstream-logstash-http-output]
+
+In order to obtain the best performance when sending data from one Logstash to another, the data needs to be batched and compressed. As such, the upstream Logstash (the sending Logstash) needs to be configured with these options:
+
+* `url` - The receiving Logstash.
+* `http_method` - Set to `post`.
+* `retry_non_idempotent` - Set to `true`, in order to retry failed events.
+* `format` - Set to `json_batch` to batch the data.
+* `http_compression` - Set to `true` to ensure the data is compressed.
+
+```json
+output {
+ http {
+    url => '<protocol>://<downstream-logstash-host>:<port>'
+ http_method => post
+ retry_non_idempotent => true
+ format => json_batch
+ http_compression => true
+ }
+}
+```
+
+
+### Secure Logstash to Logstash [securing-logstash-to-logstash-http]
+
+It is important that you secure the communication between Logstash instances. Use SSL/TLS mutual authentication in order to ensure that the upstream Logstash instance sends encrypted data to a trusted downstream Logstash instance, and vice versa.
+
+1. Create a certificate authority (CA) in order to sign the certificates that you plan to use between Logstash instances. Creating a correct SSL/TLS infrastructure is outside the scope of this document.
+
+ ::::{tip}
+ We recommend you use the [elasticsearch-certutil](elasticsearch://reference/elasticsearch/command-line-tools/certutil.md) tool to generate your certificates.
+ ::::
+
+2. Configure the downstream (receiving) Logstash to use SSL. Add these settings to the HTTP Input configuration:
+
+    * `ssl`: When set to `true`, it enables Logstash’s use of SSL/TLS.
+ * `ssl_key`: Specifies the key that Logstash uses to authenticate with the client.
+ * `ssl_certificate`: Specifies the certificate that Logstash uses to authenticate with the client.
+ * `ssl_certificate_authorities`: Configures Logstash to trust any certificates signed by the specified CA.
+ * `ssl_verify_mode`: Specifies whether Logstash server verifies the client certificate against the CA.
+
+ For example:
+
+ ```json
+ input {
+ http {
+ ...
+
+ ssl => true
+ ssl_key => "server.key.pk8"
+ ssl_certificate => "server.crt"
+ ssl_certificate_authorities => "ca.crt"
+ ssl_verify_mode => force_peer
+ }
+ }
+ ```
+
+3. Configure the upstream (sending) Logstash to use SSL. Add these settings to the HTTP output configuration:
+
+ * `cacert`: Configures the Logstash client to trust any certificates signed by the specified CA.
+ * `client_key`: Specifies the key the Logstash client uses to authenticate with the Logstash server.
+ * `client_cert`: Specifies the certificate that the Logstash client uses to authenticate to the Logstash server.
+
+ For example:
+
+ ```json
+ output {
+ http {
+ ...
+
+ cacert => "ca.crt"
+ client_key => "client.key.pk8"
+ client_cert => "client.crt"
+ }
+ }
+ ```
+
+4. If you would like an additional authentication step, you can also use basic user/password authentication in both Logstash instances:
+
+ * `user`: Sets the username to use for authentication.
+ * `password`: Sets the password to use for authentication.
+
+ For example, you would need to add the following to both Logstash instances:
+
+ ```json
+ ...
+ http {
+ ...
+
+ user => "your-user"
+ password => "your-secret"
+ }
+ ...
+ ```
+
+
+
+
diff --git a/docs/static/ls-ls-lumberjack.asciidoc b/docs/reference/ls-to-ls-lumberjack.md
similarity index 61%
rename from docs/static/ls-ls-lumberjack.asciidoc
rename to docs/reference/ls-to-ls-lumberjack.md
index 75bb735b1..f223ea6d8 100644
--- a/docs/static/ls-ls-lumberjack.asciidoc
+++ b/docs/reference/ls-to-ls-lumberjack.md
@@ -1,34 +1,39 @@
-[[ls-to-ls-lumberjack]]
-=== Logstash-to-Logstash: Lumberjack output to Beats input
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ls-to-ls-lumberjack.html
+---
-You can set up communication between two Logstash machines by connecting the Lumberjack output to the Beats input.
+# Logstash-to-Logstash: Lumberjack output to Beats input [ls-to-ls-lumberjack]
-Logstash-to-Logstash using Lumberjack and Beats has been our standard approach for {ls}-to-{ls}, and may still be the best option for more robust use cases.
+You can set up communication between two Logstash machines by connecting the Lumberjack output to the Beats input.
-NOTE: Check out these <> before you implement Logstash-to-Logstash using Lumberjack and Beats.
+Logstash-to-Logstash using Lumberjack and Beats has been our standard approach for {{ls}}-to-{{ls}}, and may still be the best option for more robust use cases.
-==== Configuration overview
+::::{note}
+Check out these [considerations](/reference/logstash-to-logstash-communications.md#lumberjack-considerations) before you implement Logstash-to-Logstash using Lumberjack and Beats.
+::::
+
+
+## Configuration overview [_configuration_overview]
Use the Lumberjack protocol to connect two Logstash machines.
-. Generate a trusted SSL certificate (required by the lumberjack protocol).
-. Copy the SSL certificate to the upstream Logstash machine.
-. Copy the SSL certificate and key to the downstream Logstash machine.
-. Set the upstream Logstash machine to use the Lumberjack output to send data.
-. Set the downstream Logstash machine to listen for incoming Lumberjack connections through the Beats input.
-. Test it.
+1. Generate a trusted SSL certificate (required by the lumberjack protocol).
+2. Copy the SSL certificate to the upstream Logstash machine.
+3. Copy the SSL certificate and key to the downstream Logstash machine.
+4. Set the upstream Logstash machine to use the Lumberjack output to send data.
+5. Set the downstream Logstash machine to listen for incoming Lumberjack connections through the Beats input.
+6. Test it.
-[[generate-self-signed-cert]]
-===== Generate a self-signed SSL certificate and key
+### Generate a self-signed SSL certificate and key [generate-self-signed-cert]
Use the `openssl req` command to generate a self-signed certificate and key. The `openssl req` command is available with some operating systems. You may need to install the openssl command line program for others.
Run the following command:
-[source,shell]
-----
+```shell
openssl req -x509 -batch -nodes -newkey rsa:2048 -keyout lumberjack.key -out lumberjack.cert -subj /CN=localhost
-----
+```
where:
@@ -36,55 +41,50 @@ where:
* `lumberjack.cert` is the name of the SSL certificate to be created
* `localhost` is the name of the upstream Logstash computer
-
This command produces output similar to the following:
-[source,shell]
-----
+```shell
Generating a 2048 bit RSA private key
.................................+++
....................+++
writing new private key to 'lumberjack.key'
-----
+```
-[[copy-cert-key]]
-===== Copy the SSL certificate and key
+
+### Copy the SSL certificate and key [copy-cert-key]
Copy the SSL certificate to the upstream Logstash machine.
Copy the SSL certificate and key to the downstream Logstash machine.
-[[save-cert-ls1]]
-===== Start the upstream Logstash instance
+
+### Start the upstream Logstash instance [save-cert-ls1]
Start Logstash and generate test events:
-[source,shell]
-----
+```shell
bin/logstash -e 'input { generator { count => 5 } } output { lumberjack { codec => json hosts => "mydownstreamhost" ssl_certificate => "lumberjack.cert" port => 5000 } }'
-----
+```
This sample command sends five events to mydownstreamhost:5000 using the SSL certificate provided.
-[[save-cert-ls2]]
-===== Start the downstream Logstash instance
+
+### Start the downstream Logstash instance [save-cert-ls2]
Start the downstream instance of Logstash:
-[source,shell]
-----
+```shell
bin/logstash -e 'input { beats { codec => json port => 5000 ssl_enabled => true ssl_certificate => "lumberjack.cert" ssl_key => "lumberjack.key"} }'
-----
+```
This sample command sets port 5000 to listen for incoming Beats input.
-[[test-ls-to-ls]]
-===== Verify the communication
+
+### Verify the communication [test-ls-to-ls]
Watch the downstream Logstash machine for the incoming events. You should see five incrementing events similar to the following:
-[source,shell]
-----
+```shell
{
"@timestamp" => 2018-02-07T12:16:39.415Z,
"sequence" => 0
@@ -95,5 +95,9 @@ Watch the downstream Logstash machine for the incoming events. You should see fi
"@version" => "1",
"host" => "ls1.semicomplete.com"
}
-----
+```
+
If you see all five events with consistent fields and formatting, incrementing by one, then your configuration is correct.
+
+
+
diff --git a/docs/reference/ls-to-ls-native.md b/docs/reference/ls-to-ls-native.md
new file mode 100644
index 000000000..31c065469
--- /dev/null
+++ b/docs/reference/ls-to-ls-native.md
@@ -0,0 +1,133 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ls-to-ls-native.html
+---
+
+# Logstash-to-Logstash: Output to Input [ls-to-ls-native]
+
+The Logstash output to Logstash input is the default approach for Logstash-to-Logstash communication.
+
+::::{note}
+Check out these [considerations](/reference/logstash-to-logstash-communications.md#native-considerations) before you implement {{ls}}-to-{{ls}}.
+::::
+
+
+## Configuration overview [overview-ls-ls]
+
+To connect two Logstash instances:
+
+1. Configure the downstream (server) Logstash to use Logstash input
+2. Configure the upstream (client) Logstash to use Logstash output
+3. Secure the communication between Logstash input and Logstash output
+
+### Configure the downstream Logstash to use Logstash input [configure-downstream-logstash-input]
+
+Configure the Logstash input on the downstream (receiving) Logstash to receive connections. The minimum configuration requires this option:
+
+* `port` - To set a custom port. The default is 9800 if none is provided.
+
+```json
+input {
+ logstash {
+ port => 9800
+ }
+}
+```
+
+
+### Configure the upstream Logstash to use Logstash output [configure-upstream-logstash-output]
+
+In order to obtain the best performance when sending data from one Logstash to another, the data is batched and compressed. As such, the upstream Logstash (the sending Logstash) only needs to be concerned about configuring the receiving endpoint with these options:
+
+* `hosts` - One or more host and port pairs for the receiving {{ls}} instance(s). If no port is specified, 9800 is used.
+
+::::{note}
+{{ls}} load balances batched events to *all* of its configured downstream hosts. Any failures caused by network issues, back pressure, or other conditions will result in the downstream host being isolated from load balancing for at least 60 seconds.
+::::
+
+
+```json
+output {
+ logstash {
+ hosts => ["10.0.0.123", "10.0.1.123:9800"]
+ }
+}
+```
+
+
+### Secure Logstash to Logstash [securing-logstash-to-logstash]
+
+It is important that you secure the communication between Logstash instances. Use SSL/TLS mutual authentication in order to ensure that the upstream Logstash instance sends encrypted data to a trusted downstream Logstash instance, and vice versa.
+
+1. Create a certificate authority (CA) in order to sign the certificates that you plan to use between Logstash instances. Creating a correct SSL/TLS infrastructure is outside the scope of this document.
+
+ ::::{tip}
+ We recommend you use the [elasticsearch-certutil](elasticsearch://reference/elasticsearch/command-line-tools/certutil.md) tool to generate your certificates.
+ ::::
+
+2. Configure the downstream (receiving) Logstash to use SSL. Add these settings to the Logstash input configuration:
+
+    * `ssl_enabled`: When set to `true`, it enables Logstash’s use of SSL/TLS.
+ * `ssl_key`: Specifies the key that Logstash uses to authenticate with the client.
+ * `ssl_certificate`: Specifies the certificate that Logstash uses to authenticate with the client.
+ * `ssl_certificate_authorities`: Configures Logstash to trust any certificates signed by the specified CA.
+ * `ssl_client_authentication`: Specifies whether Logstash server verifies the client certificate against the CA.
+
+ For example:
+
+ ```json
+ input {
+ logstash {
+ ...
+
+ ssl_enabled => true
+ ssl_key => "server.pkcs8.key"
+ ssl_certificate => "server.crt"
+ ssl_certificate_authorities => "ca.crt"
+ ssl_client_authentication => required
+ }
+ }
+ ```
+
+3. Configure the upstream (sending) Logstash to use SSL. Add these settings to the Logstash output configuration:
+
+ * `ssl_key`: Specifies the key the Logstash client uses to authenticate with the Logstash server.
+ * `ssl_certificate`: Specifies the certificate that the Logstash client uses to authenticate to the Logstash server.
+ * `ssl_certificate_authorities`: Configures the Logstash client to trust any certificates signed by the specified CA.
+
+ For example:
+
+ ```json
+ output {
+ logstash {
+ ...
+
+ ssl_enabled => true
+ ssl_key => "client.pkcs8.key"
+ ssl_certificate => "client.crt"
+ ssl_certificate_authorities => "ca.crt"
+ }
+ }
+ ```
+
+4. If you would like an additional authentication step, you can also use basic user/password authentication in both Logstash instances:
+
+ * `username`: Sets the username to use for authentication.
+ * `password`: Sets the password to use for authentication.
+
+ For example, you would need to add the following to both Logstash instances:
+
+ ```json
+ ...
+ logstash {
+ ...
+
+ username => "your-user"
+ password => "your-secret"
+ }
+ ...
+ ```
+
+
+
+
diff --git a/docs/reference/managing-geoip-databases.md b/docs/reference/managing-geoip-databases.md
new file mode 100644
index 000000000..0073eeda1
--- /dev/null
+++ b/docs/reference/managing-geoip-databases.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/geoip-database-management.html
+---
+
+# Managing GeoIP databases [geoip-database-management]
+
+Logstash provides GeoIP database management features to make it easier for you to use plugins that require an up-to-date database to enrich events with geographic data.
+
+* [Feature Overview](/reference/logstash-geoip-database-management.md)
+* [Configuration Guide](/reference/configuring-geoip-database-management.md)
+
+
+
diff --git a/docs/reference/managing-logstash.md b/docs/reference/managing-logstash.md
new file mode 100644
index 000000000..9f2fdd1fb
--- /dev/null
+++ b/docs/reference/managing-logstash.md
@@ -0,0 +1,13 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/config-management.html
+---
+
+# Managing Logstash [config-management]
+
+Logstash provides configuration management features to make it easier for you to manage updates to your configuration over time.
+
+The topics in this section describe Logstash configuration management features only. For information about other config management tools, such as Puppet and Chef, see the documentation for those projects.
+
+
+
diff --git a/docs/reference/memory-queue.md b/docs/reference/memory-queue.md
new file mode 100644
index 000000000..f1fc4cf98
--- /dev/null
+++ b/docs/reference/memory-queue.md
@@ -0,0 +1,65 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/memory-queue.html
+---
+
+# Memory queue [memory-queue]
+
+By default, Logstash uses in-memory bounded queues between pipeline stages (inputs → pipeline workers) to buffer events. If Logstash experiences a temporary machine failure, the contents of the memory queue will be lost. Temporary machine failures are scenarios where Logstash or its host machine is terminated abnormally but is capable of being restarted.
+
+## Benefits of memory queues [mem-queue-benefits]
+
+The memory queue might be a good choice if you value throughput over data resiliency.
+
+* Easier configuration
+* Easier management and administration
+* Faster throughput
+
+
+## Limitations of memory queues [mem-queue-limitations]
+
+* Can lose data in abnormal termination
+* Don’t handle sudden bursts of data well, where extra capacity is needed for {{ls}} to catch up
+
+::::{tip}
+Consider using [persistent queues](/reference/persistent-queues.md) to avoid these limitations.
+::::
+
+
+
+## Memory queue size [sizing-mem-queue]
+
+Memory queue size is not configured directly. Instead, it depends on how you have Logstash tuned.
+
+Its upper bound is defined by `pipeline.workers` (default: number of CPUs) times the `pipeline.batch.size` (default: 125) events. This value, called the "inflight count," determines the maximum number of events that can be held in each memory queue.
+
+Doubling the number of workers OR doubling the batch size will effectively double the memory queue’s capacity (and memory usage). Doubling both will *quadruple* the capacity (and usage).
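+
+For example, under these assumed settings (illustrative numbers only), the upper bound works out as follows:
+
+```yaml
+# 8 workers * 250 events per batch = an upper bound of 2000 in-flight events per memory queue
+pipeline.workers: 8
+pipeline.batch.size: 250
+```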
+
+::::{important}
+Each pipeline has its own queue.
+::::
+
+
+See [Tuning and profiling logstash pipeline performance](/reference/tuning-logstash.md) for more info on the effects of adjusting `pipeline.batch.size` and `pipeline.workers`.
+
+If you need to absorb bursts of traffic, consider using [persistent queues](/reference/persistent-queues.md) instead. Persistent queues are bound to allocated capacity on disk.
+
+### Settings that affect queue size [mq-settings]
+
+These values can be configured in `logstash.yml` and `pipelines.yml`.
+
+`pipeline.batch.size`
+: Number of events to retrieve from inputs before sending to the filter and output stages. The default is 125.
+
+`pipeline.workers`
+: Number of workers that will, in parallel, execute the filter and output stages of the pipeline. This value defaults to the number of the host’s CPU cores.
+
+
+
+## Back pressure [backpressure-mem-queue]
+
+When the queue is full, Logstash puts back pressure on the inputs to stall data flowing into Logstash. This mechanism helps Logstash control the rate of data flow at the input stage without overwhelming outputs like Elasticsearch.
+
+Each input handles back pressure independently.
+
+
diff --git a/docs/reference/monitoring-internal-collection-legacy.md b/docs/reference/monitoring-internal-collection-legacy.md
new file mode 100644
index 000000000..4ed0523f1
--- /dev/null
+++ b/docs/reference/monitoring-internal-collection-legacy.md
@@ -0,0 +1,276 @@
+---
+navigation_title: "Legacy collection (deprecated)"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-internal-collection-legacy.html
+---
+
+# Collect {{ls}} monitoring data using legacy collectors [monitoring-internal-collection-legacy]
+
+
+::::{warning}
+Deprecated in 7.9.0.
+::::
+
+
+::::{note}
+Starting from version 9.0, legacy internal collection is behind a feature flag and is turned off by default. Set `xpack.monitoring.allow_legacy_collection` to `true` to allow access to the feature.
+::::
+
+
+Using [{{agent}} for monitoring](/reference/monitoring-logstash-with-elastic-agent.md) is a better alternative for most {{ls}} deployments.
+
+## Components for legacy collection [_components_for_legacy_collection]
+
+Monitoring {{ls}} with legacy collection uses these components:
+
+* [Collectors](#logstash-monitoring-collectors-legacy)
+* [Output](#logstash-monitoring-output-legacy)
+
+These pieces live outside of the default Logstash pipeline in a dedicated monitoring pipeline. This configuration ensures that all data and processing has a minimal impact on ordinary Logstash processing. Existing Logstash features, such as the [`elasticsearch` output](/reference/plugins-outputs-elasticsearch.md), can be reused to benefit from its retry policies.
+
+::::{note}
+The `elasticsearch` output that is used for monitoring {{ls}} is configured exclusively through settings found in `logstash.yml`. It is not configured by using anything from the Logstash configurations that might also be using their own separate `elasticsearch` outputs.
+::::
+
+
+The production {{es}} cluster should be configured to receive {{ls}} monitoring data. This configuration enables the production {{es}} cluster to add metadata (for example, its cluster UUID) to the Logstash monitoring data and then route it to the monitoring clusters. For more information about typical monitoring architectures, see [How monitoring works](docs-content://deploy-manage/monitor/stack-monitoring.md) in the [Elasticsearch Reference](docs-content://get-started/index.md).
+
+
+#### Collectors [logstash-monitoring-collectors-legacy]
+
+Collectors, as their name implies, collect things. In monitoring for Logstash, collectors are just [Inputs](/reference/how-logstash-works.md) in the same way that ordinary Logstash configurations provide inputs.
+
+Like monitoring for {{es}}, each collector can create zero or more monitoring documents. As it is currently implemented, each Logstash node runs two types of collectors: one for node stats and one for pipeline stats.
+
+| Collector | Data Types | Description |
+| --- | --- | --- |
+| Node Stats | `logstash_stats` | Gathers details about the running node, such as memory utilization and CPU usage (for example, `GET /_stats`). This runs on every Logstash node with monitoring enabled. One common failure is that Logstash directories are copied with their `path.data` directory included (`./data` by default), which copies the persistent UUID of the Logstash node along with it. As a result, it generally appears that one or more Logstash nodes are failing to collect monitoring data, when in fact they are all really misreporting as the *same* Logstash node. Re-use `path.data` directories only when upgrading Logstash, such that upgraded nodes replace the previous versions. |
+| Pipeline Stats | `logstash_state` | Gathers details about the node’s running pipelines, which powers the Monitoring Pipeline UI. |
+
+Per collection interval, which defaults to 10 seconds (`10s`), each collector is run. The failure of an individual collector does not impact any other collector. Each collector, as an ordinary Logstash input, creates a separate Logstash event in its isolated monitoring pipeline. The Logstash output then sends the data.
+
+The collection interval can be configured dynamically and you can also disable data collection. For more information about the configuration options for the collectors, see [Monitoring Settings](#monitoring-settings-legacy).
+
+::::{warning}
+Unlike {{es}} and {{kib}} monitoring, there is no `xpack.monitoring.collection.enabled` setting on Logstash. You must use the `xpack.monitoring.enabled` setting to enable and disable data collection.
+::::
+
+
+If gaps exist in the monitoring charts in {{kib}}, it is typically because either a collector failed or the monitoring cluster did not receive the data (for example, it was being restarted). In the event that a collector fails, a logged error should exist on the node that attempted to perform the collection.
+
+
+### Output [logstash-monitoring-output-legacy]
+
+Like all Logstash pipelines, the purpose of the dedicated monitoring pipeline is to send events to outputs. In the case of monitoring for Logstash, the output is always an `elasticsearch` output. However, unlike ordinary Logstash pipelines, the output is configured within the `logstash.yml` settings file via the `xpack.monitoring.elasticsearch.*` settings.
+
+Other than its unique manner of configuration, this `elasticsearch` output behaves like all `elasticsearch` outputs, including its ability to pause data collection when issues exist with the output.
+
+::::{important}
+It is critical that all Logstash nodes share the same setup. Otherwise, monitoring data might be routed in different ways or to different places.
+::::
+
+
+
+#### Default Configuration [logstash-monitoring-default-legacy]
+
+If a Logstash node does not explicitly define a monitoring output setting, the following default configuration is used:
+
+```yaml
+xpack.monitoring.elasticsearch.hosts: [ "http://localhost:9200" ]
+```
+
+All data produced by monitoring for Logstash is indexed in the monitoring cluster by using the `.monitoring-logstash` template, which is managed by the [exporters](docs-content://deploy-manage/monitor/stack-monitoring/es-monitoring-exporters.md) within {{es}}.
+
+If you are working with a cluster that has {{security}} enabled, extra steps are necessary to properly configure Logstash. For more information, see [*Monitoring {{ls}} (legacy)*](/reference/monitoring-logstash-legacy.md).
+
+::::{important}
+When discussing security relative to the `elasticsearch` output, it is critical to remember that all users are managed on the production cluster, which is identified in the `xpack.monitoring.elasticsearch.hosts` setting. This is particularly important to remember when you move from development environments to production environments, where you often have dedicated monitoring clusters.
+::::
+
+
+For more information about the configuration options for the output, see [Monitoring Settings](#monitoring-settings-legacy).
+
+
+## Configure {{ls}} monitoring with legacy collectors [configure-internal-collectors-legacy]
+
+
+To monitor Logstash nodes:
+
+1. Specify where to send monitoring data. This cluster is often referred to as the *production cluster*. For examples of typical monitoring architectures, see [How monitoring works](docs-content://deploy-manage/monitor/stack-monitoring.md).
+
+ ::::{important}
+ To visualize Logstash as part of the Elastic Stack (as shown in Step 6), send metrics to your *production* cluster. Sending metrics to a dedicated monitoring cluster will show the Logstash metrics under the *monitoring* cluster.
+ ::::
+
+2. Verify that the `xpack.monitoring.allow_legacy_collection` and `xpack.monitoring.collection.enabled` settings are `true` on the production cluster. If either setting is `false`, the collection of monitoring data is disabled in {{es}} and data from all other sources is ignored.
+3. Configure your Logstash nodes to send metrics by setting `xpack.monitoring.enabled` to `true` and specifying the destination {{es}} node(s) as `xpack.monitoring.elasticsearch.hosts` in `logstash.yml`. If {{security-features}} are enabled, you also need to specify the credentials for the [built-in `logstash_system` user](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md). For more information about these settings, see [Monitoring Settings](#monitoring-settings-legacy).
+
+ ```yaml
+ xpack.monitoring.enabled: true
+ xpack.monitoring.elasticsearch.hosts: ["http://es-prod-node-1:9200", "http://es-prod-node-2:9200"] <1>
+ xpack.monitoring.elasticsearch.username: "logstash_system"
+ xpack.monitoring.elasticsearch.password: "changeme"
+ ```
+
+ 1. If SSL/TLS is enabled on the production cluster, you must connect through HTTPS. As of v5.2.1, you can specify multiple Elasticsearch hosts as an array as well as specifying a single host as a string. If multiple URLs are specified, Logstash can round-robin requests to these production nodes.
+
+4. If SSL/TLS is enabled on the production {{es}} cluster, specify the trusted CA certificates that will be used to verify the identity of the nodes in the cluster.
+
+ To add a CA certificate to a Logstash node’s trusted certificates, you can specify the location of the PEM encoded certificate with the `certificate_authority` setting:
+
+ ```yaml
+ xpack.monitoring.elasticsearch.ssl.certificate_authority: /path/to/ca.crt
+ ```
+
+ To add a CA without having it loaded on disk, you can specify a hex-encoded SHA 256 fingerprint of the DER-formatted CA with the `ca_trusted_fingerprint` setting:
+
+ ```yaml
+ xpack.monitoring.elasticsearch.ssl.ca_trusted_fingerprint: 2cfe62e474fb381cc7773c84044c28c9785ac5d1940325f942a3d736508de640
+ ```
+
+ ::::{note}
+ A self-secured Elasticsearch cluster will provide the fingerprint of its CA to the console during setup.
+
+ You can also get the SHA256 fingerprint of an Elasticsearch’s CA using the `openssl` command-line utility on the Elasticsearch host:
+
+ ```shell
+ openssl x509 -fingerprint -sha256 -in $ES_HOME/config/certs/http_ca.crt
+ ```
+
+ ::::
+
+
+ Alternatively, you can configure trusted certificates using a truststore (a Java Keystore file that contains the certificates):
+
+ ```yaml
+ xpack.monitoring.elasticsearch.ssl.truststore.path: /path/to/file
+ xpack.monitoring.elasticsearch.ssl.truststore.password: password
+ ```
+
+ Also, optionally, you can set up client certificate using a keystore (a Java Keystore file that contains the certificate) or using a certificate and key file pair:
+
+ ```yaml
+ xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
+ xpack.monitoring.elasticsearch.ssl.keystore.password: password
+ ```
+
+ ```yaml
+ xpack.monitoring.elasticsearch.ssl.certificate: /path/to/certificate
+ xpack.monitoring.elasticsearch.ssl.key: /path/to/key
+ ```
+
+ Set sniffing to `true` to enable discovery of other nodes of the {{es}} cluster. It defaults to `false`.
+
+ ```yaml
+ xpack.monitoring.elasticsearch.sniffing: false
+ ```
+
+5. Restart your Logstash nodes.
+6. To verify your monitoring configuration, point your web browser at your {{kib}} host, and select **Stack Monitoring** from the side navigation. If this is an initial setup, select **set up with self monitoring** and click **Turn on monitoring**. Metrics reported from your Logstash nodes should be visible in the Logstash section. When security is enabled, to view the monitoring dashboards you must log in to {{kib}} as a user who has the `kibana_user` and `monitoring_user` roles.
+
+ :::{image} ../images/monitoring-ui.png
+ :alt: Monitoring
+ :::
+
+
+
+## Monitoring settings for legacy collection [monitoring-settings-legacy]
+
+
+You can set the following `xpack.monitoring` settings in `logstash.yml` to control how monitoring data is collected from your Logstash nodes. However, the defaults work best in most circumstances. For more information about configuring Logstash, see [logstash.yml](/reference/logstash-settings-file.md).
+
+### General monitoring settings [monitoring-general-settings-legacy]
+
+`xpack.monitoring.enabled`
+: Monitoring is disabled by default. Set to `true` to enable {{xpack}} monitoring.
+
+`xpack.monitoring.elasticsearch.hosts`
+: The {{es}} instances that you want to ship your Logstash metrics to. This might be the same {{es}} instance specified in the `outputs` section in your Logstash configuration, or a different one. This is **not** the URL of your dedicated monitoring cluster. Even if you are using a dedicated monitoring cluster, the Logstash metrics must be routed through your production cluster. You can specify a single host as a string, or specify multiple hosts as an array. Defaults to `http://localhost:9200`.
+
+::::{note}
+If your Elasticsearch cluster is configured with dedicated master-eligible nodes, Logstash metrics should *not* be routed to these nodes, as doing so can create resource contention and impact the stability of the Elasticsearch cluster. Therefore, do not include such nodes in `xpack.monitoring.elasticsearch.hosts`.
+::::
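+
+As an illustration only (the node URLs are placeholders), the setting accepts either a single string or an array:
+
+```yaml
+# Either a single host as a string:
+# xpack.monitoring.elasticsearch.hosts: "http://es-prod-node-1:9200"
+# ... or multiple hosts as an array:
+xpack.monitoring.elasticsearch.hosts: ["http://es-prod-node-1:9200", "http://es-prod-node-2:9200"]
+```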
+
+
+`xpack.monitoring.elasticsearch.proxy`
+: The monitoring {{es}} instance and monitored Logstash can be separated by a proxy. To enable Logstash to connect to a proxied {{es}}, set this value to the URI of the intermediate proxy using the standard URI format, for example `http://192.168.1.1`. An empty string is treated as if the proxy were not set.
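+
+A minimal sketch of this setting in `logstash.yml` (the proxy address is illustrative):
+
+```yaml
+# Illustrative proxy URI; replace with the address of your intermediate proxy
+xpack.monitoring.elasticsearch.proxy: "http://192.168.1.1"
+```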
+
+`xpack.monitoring.elasticsearch.username` and `xpack.monitoring.elasticsearch.password`
+: If your {{es}} is protected with basic authentication, these settings provide the username and password that the Logstash instance uses to authenticate for shipping monitoring data.
+
+
+### Monitoring collection settings [monitoring-collection-settings-legacy]
+
+`xpack.monitoring.collection.interval`
+: Controls how often data samples are collected and shipped on the Logstash side. Defaults to `10s`. If you modify the collection interval, set the `xpack.monitoring.min_interval_seconds` option in `kibana.yml` to the same value.
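+
+For example, a sketch of keeping the two files in sync when the interval is changed (the `30s` value is illustrative):
+
+```yaml
+# In logstash.yml:
+xpack.monitoring.collection.interval: 30s
+
+# In kibana.yml, keep the minimum interval in sync (value in seconds):
+xpack.monitoring.min_interval_seconds: 30
+```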
+
+
+### Monitoring TLS/SSL settings [monitoring-ssl-settings-legacy]
+
+You can configure the following Transport Layer Security (TLS) or Secure Sockets Layer (SSL) settings. For more information, see [Configuring credentials for {{ls}} monitoring](/reference/secure-connection.md#ls-monitoring-user).
+
+`xpack.monitoring.elasticsearch.ssl.ca_trusted_fingerprint`
+: Optional setting that enables you to specify the hex-encoded SHA-256 fingerprint of the certificate authority for your {{es}} instance.
+
+::::{note}
+A self-secured Elasticsearch cluster will provide the fingerprint of its CA to the console during setup.
+
+You can also get the SHA256 fingerprint of an Elasticsearch’s CA using the `openssl` command-line utility on the Elasticsearch host:
+
+```shell
+openssl x509 -fingerprint -sha256 -in $ES_HOME/config/certs/http_ca.crt
+```
+
+::::
+
+
+`xpack.monitoring.elasticsearch.ssl.certificate_authority`
+: Optional setting that enables you to specify a path to the `.pem` file for the certificate authority for your {{es}} instance.
+
+`xpack.monitoring.elasticsearch.ssl.truststore.path`
+: Optional setting that provides the path to the Java keystore (JKS) to validate the server’s certificate.
+
+`xpack.monitoring.elasticsearch.ssl.truststore.password`
+: Optional setting that provides the password to the truststore.
+
+`xpack.monitoring.elasticsearch.ssl.keystore.path`
+: Optional setting that provides the path to the Java keystore (JKS) that contains the client’s certificate.
+
+`xpack.monitoring.elasticsearch.ssl.keystore.password`
+: Optional setting that provides the password to the keystore.
+
+`xpack.monitoring.elasticsearch.ssl.certificate`
+: Optional setting that provides the path to an SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if `xpack.monitoring.elasticsearch.ssl.key` is set.
+::::
+
+
+`xpack.monitoring.elasticsearch.ssl.key`
+: Optional setting that provides the path to an OpenSSL-style RSA private key that corresponds to the `xpack.monitoring.elasticsearch.ssl.certificate`.
+
+::::{note}
+This setting can be used only if `xpack.monitoring.elasticsearch.ssl.certificate` is set.
+::::
+
+
+`xpack.monitoring.elasticsearch.ssl.verification_mode`
+: Option to validate the server’s certificate. Defaults to `full`. To disable, set to `none`. Disabling this severely compromises security.
+
+`xpack.monitoring.elasticsearch.ssl.cipher_suites`
+: Optional setting that provides the list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### Additional settings [monitoring-additional-settings-legacy]
+
+`xpack.monitoring.elasticsearch.cloud_id`
+: If you’re using {{es}} in {{ecloud}}, you should specify the identifier here. This setting is an alternative to `xpack.monitoring.elasticsearch.hosts`; if `cloud_id` is configured, `xpack.monitoring.elasticsearch.hosts` should not be used. The `cloud_id` identifies the {{es}} instance that you want to ship your Logstash metrics to. This might be the same {{es}} instance specified in the `outputs` section in your Logstash configuration, or a different one.
+
+`xpack.monitoring.elasticsearch.cloud_auth`
+: If you’re using {{es}} in {{ecloud}}, you can set your auth credentials here. This setting is an alternative to both `xpack.monitoring.elasticsearch.username` and `xpack.monitoring.elasticsearch.password`. If `cloud_auth` is configured, those settings should not be used.
+
+`xpack.monitoring.elasticsearch.api_key`
+: Authenticate using an Elasticsearch API key. Note that this option also requires using SSL.
+
+The API key format is `id:api_key`, where `id` and `api_key` are the values returned by the Elasticsearch [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
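+
+As a sketch only (the Cloud ID, credentials, and API key values are placeholders), these settings might look like this in `logstash.yml`:
+
+```yaml
+# Option 1: ship metrics to Elastic Cloud (placeholders)
+xpack.monitoring.elasticsearch.cloud_id: "<deployment-name>:<cloud-id>"
+xpack.monitoring.elasticsearch.cloud_auth: "logstash_system:<password>"
+
+# Option 2: authenticate with an API key instead (placeholder; requires SSL)
+# xpack.monitoring.elasticsearch.api_key: "<id>:<api_key>"
+```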
diff --git a/docs/reference/monitoring-logstash-legacy.md b/docs/reference/monitoring-logstash-legacy.md
new file mode 100644
index 000000000..a2bcd351f
--- /dev/null
+++ b/docs/reference/monitoring-logstash-legacy.md
@@ -0,0 +1,23 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/configuring-logstash.html
+---
+
+# Monitoring Logstash (Legacy) [configuring-logstash]
+
+Use the {{stack}} {{monitor-features}} to gain insight into the health of {{ls}} instances running in your environment. For an introduction to monitoring your Elastic stack, see [Monitoring a cluster](docs-content://deploy-manage/monitor.md) in the [Elasticsearch Reference](docs-content://get-started/index.md). Then, make sure that monitoring is enabled on your {{es}} cluster.
+
+These options for collecting {{ls}} metrics for stack monitoring have been available for a while:
+
+* [{{metricbeat}} collection](/reference/monitoring-with-metricbeat.md). Metricbeat collects monitoring data from your {{ls}} instance and sends it directly to your monitoring cluster. The benefit of Metricbeat collection is that the monitoring agent remains active even if the {{ls}} instance does not.
+* [Legacy collection (deprecated)](/reference/monitoring-internal-collection-legacy.md). Legacy collectors send monitoring data to your production cluster.
+
+For more features, dependability, and easier management, consider using:
+
+* [{{agent}} collection for Stack Monitoring](/reference/monitoring-with-elastic-agent.md). {{agent}} collects monitoring data from your {{ls}} instance, sends it directly to your monitoring cluster, and shows the data in {{ls}} Dashboards. The benefit of {{agent}} collection is that the monitoring agent remains active even if the {{ls}} instance does not, and you can manage all your monitoring agents from a central location in {{fleet}}.
+
+
+
+
+
+
diff --git a/docs/reference/monitoring-logstash-with-elastic-agent.md b/docs/reference/monitoring-logstash-with-elastic-agent.md
new file mode 100644
index 000000000..fa8d7c129
--- /dev/null
+++ b/docs/reference/monitoring-logstash-with-elastic-agent.md
@@ -0,0 +1,20 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-with-ea.html
+---
+
+# Monitoring Logstash with Elastic Agent [monitoring-with-ea]
+
+You can use {{agent}} to collect data about {{ls}} and ship it to the monitoring cluster. When you use {{agent}} collection, the monitoring agent remains active even if the {{ls}} instance does not. Plus you have the option to manage all of your monitoring agents from a central location in {{fleet}}.
+
+{{agent}} gives you a single, unified way to add monitoring for logs, metrics, and other types of data to a host. Each agent has a single policy you can update to add integrations for new data sources, security protections, and more.
+
+You can use {{agent}} to collect {{ls}} monitoring data on:
+
+* [{{ecloud}} or self-managed dashboards](/reference/dashboard-monitoring-with-elastic-agent.md). {{agent}} collects monitoring data from your {{ls}} instance, sends it directly to your monitoring cluster, and shows the data in {{ls}} dashboards. {{ls}} dashboards include an extended range of metrics, including plugin drilldowns, and plugin specific dashboards for the dissect filter, the grok filter, and the elasticsearch output.
+* [{{ecloud}} dashboards (serverless)](/reference/serverless-monitoring-with-elastic-agent.md). {{agent}} collects monitoring data from your {{ls}} instance, sends it to [Elastic serverless](docs-content://deploy-manage/deploy/elastic-cloud/serverless.md), and shows the data in {{ls}} dashboards in [Elastic Observability](docs-content://solutions/observability.md). {{ls}} dashboards include an extended range of metrics, including plugin drilldowns, and plugin specific dashboards for the dissect filter, the grok filter, and the elasticsearch output.
+* [{{stack}} monitoring](/reference/monitoring-with-elastic-agent.md). Use the Elastic Stack monitoring features to gain insight into the health of {{ls}} instances running in your environment.
+
+
+
+
diff --git a/docs/reference/monitoring-logstash.md b/docs/reference/monitoring-logstash.md
new file mode 100644
index 000000000..79c9309d0
--- /dev/null
+++ b/docs/reference/monitoring-logstash.md
@@ -0,0 +1,97 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
+---
+# Monitoring Logstash with APIs
+
+When you run Logstash, it automatically captures runtime metrics that you can use to monitor the health and performance of your Logstash deployment.
+
+The metrics collected by Logstash include:
+
+* Logstash node info, like pipeline settings, OS info, and JVM info.
+* Plugin info, including a list of installed plugins.
+* Node stats, like JVM stats, process stats, event-related stats, and pipeline runtime stats.
+* Hot threads.
+
+You can use monitoring APIs provided by Logstash to retrieve these metrics. These APIs are available by default without requiring any extra configuration.
+
+Alternatively, you can [configure Elastic Stack monitoring features](monitoring-logstash-legacy.md) to send
+data to a monitoring cluster.
+
+## APIs for monitoring Logstash [monitoring]
+
+Logstash provides monitoring APIs for retrieving runtime information about Logstash:
+
+* [Node info API](https://www.elastic.co/docs/api/doc/logstash/group/endpoint-node-info)
+* [Plugins info API](https://www.elastic.co/docs/api/doc/logstash/group/endpoint-plugin-info)
+* [Node stats API](https://www.elastic.co/docs/api/doc/logstash/group/endpoint-node-stats)
+* [Hot threads API](https://www.elastic.co/docs/api/doc/logstash/group/endpoint-hot-threads)
+* [Health report API](https://www.elastic.co/docs/api/doc/logstash/group/endpoint-health)
+
+You can use the root resource to retrieve general information about the Logstash instance, including
+the host and version.
+
+```shell
+curl -XGET 'localhost:9600/?pretty'
+```
+
+Example response:
+
+```json
+{
+ "host": "skywalker",
+ "version": "{logstash_version}",
+ "http_address": "127.0.0.1:9600"
+}
+```
+
+:::{note}
+By default, the monitoring API attempts to bind to `tcp:9600`.
+If this port is already in use by another Logstash instance, launch Logstash with the `--api.http.port` flag to bind to a different port, as shown in the example that follows this note. For more information, go to [](running-logstash-command-line.md#command-line-flags).
+:::
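+
+For example, a minimal sketch that binds the API to an alternative port (the port number and pipeline file are illustrative):
+
+```shell
+bin/logstash -f my-pipeline.conf --api.http.port 9601
+```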
+
+## Securing the Logstash API [monitoring-api-security]
+
+The Logstash monitoring APIs are not secured by default, but you can configure Logstash to secure them in one of several ways to meet your organization's needs.
+
+You can enable SSL for the Logstash API by setting `api.ssl.enabled: true` in the `logstash.yml`, and providing the relevant keystore settings `api.ssl.keystore.path` and `api.ssl.keystore.password`:
+
+```yaml
+api.ssl.enabled: true
+api.ssl.keystore.path: /path/to/keystore.jks
+api.ssl.keystore.password: "s3cUr3p4$$w0rd"
+```
+
+The keystore must be in either jks or p12 format, and must contain both a certificate and a private key.
+Connecting clients receive this certificate, allowing them to authenticate the Logstash endpoint.
+
+You can also require HTTP Basic authentication by setting `api.auth.type: basic` in the `logstash.yml`, and providing the relevant credentials `api.auth.basic.username` and `api.auth.basic.password`:
+
+```yaml
+api.auth.type: basic
+api.auth.basic.username: "logstash"
+api.auth.basic.password: "s3cUreP4$$w0rD"
+```
+
+:::{note}
+Using the [secure keystore](keystore.md) or environment variable replacement is encouraged for password-type fields to avoid storing them in plain text.
+For example, specifying the value `"${HTTP_PASS}"` resolves to the value stored in the [secure keystore's](keystore.md) `HTTP_PASS` variable if present, or to the same variable from the [environment](environment-variables.md), as shown in the sketch after this note.
+:::
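+
+A minimal sketch, assuming an `HTTP_PASS` entry already exists in the keystore or the environment:
+
+```yaml
+api.auth.type: basic
+api.auth.basic.username: "logstash"
+api.auth.basic.password: "${HTTP_PASS}"
+```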
+
+## Common options [monitoring-common-options]
+
+The following options can be applied to all of the Logstash monitoring APIs.
+
+### Pretty results
+
+When you append `?pretty=true` to any request, the JSON returned is pretty formatted (use it for debugging only!).
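+
+For example, requesting pretty-printed output from the node stats API listed above:
+
+```shell
+curl -XGET 'localhost:9600/_node/stats?pretty=true'
+```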
+
+### Human-readable output
+
+:::{note}
+The `human` option is supported for the hot threads API only.
+When you specify `human=true`, the results are returned in plain text instead of JSON format.
+The default is `false`.
+:::
+
+Statistics are returned in a format suitable for humans (for example, `"exists_time": "1h"` or `"size": "1kb"`) and for computers (for example, `"exists_time_in_millis": 3600000` or `"size_in_bytes": 1024`). The human-readable values can be turned off by adding `?human=false` to the query string. This makes sense when the stats results are being consumed by a monitoring tool, rather than intended for human consumption. The default for the `human` flag is `false`.
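+
+For example, a request to the hot threads API with human-readable output (illustrative only):
+
+```shell
+curl -XGET 'localhost:9600/_node/hot_threads?human=true'
+```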
diff --git a/docs/reference/monitoring-troubleshooting.md b/docs/reference/monitoring-troubleshooting.md
new file mode 100644
index 000000000..9ebe5d7f6
--- /dev/null
+++ b/docs/reference/monitoring-troubleshooting.md
@@ -0,0 +1,30 @@
+---
+navigation_title: "Troubleshooting"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-troubleshooting.html
+---
+
+# Troubleshooting monitoring in Logstash [monitoring-troubleshooting]
+
+
+
+## Logstash Monitoring Not Working After Upgrade [_logstash_monitoring_not_working_after_upgrade]
+
+When upgrading from older versions, the built-in `logstash_system` user is disabled for security reasons. To resume monitoring:
+
+1. Change the `logstash_system` password:
+
+ ```console
+ PUT _security/user/logstash_system/_password
+ {
+ "password": "newpassword"
+ }
+ ```
+
+2. Re-enable the `logstash_system` user:
+
+ ```console
+ PUT _security/user/logstash_system/_enable
+ ```
+
+
diff --git a/docs/reference/monitoring-with-elastic-agent.md b/docs/reference/monitoring-with-elastic-agent.md
new file mode 100644
index 000000000..1745d74f7
--- /dev/null
+++ b/docs/reference/monitoring-with-elastic-agent.md
@@ -0,0 +1,146 @@
+---
+navigation_title: "Collect monitoring data for stack monitoring"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-with-elastic-agent.html
+---
+
+# Collect {{ls}} monitoring data for stack monitoring [monitoring-with-elastic-agent]
+
+
+{{agent}} collects monitoring data from your {{ls}} instance and sends it directly to your monitoring cluster. With {{agent}} collection the monitoring agent remains active even if the {{ls}} instance does not.
+
+You can enroll {{agent}} in [{{fleet}}](docs-content://reference/ingestion-tools/fleet/install-fleet-managed-elastic-agent.md) for management from a central location, or you can run [{{agent}} standalone](docs-content://reference/ingestion-tools/fleet/install-standalone-elastic-agent.md).
+
+**Prerequisites**
+
+Complete these steps as you prepare to collect and ship monitoring data for stack monitoring:
+
+::::{dropdown} Set up {{es}} monitoring
+:name: set-up-monitoring-ea
+
+To bind {{ls}} metrics to an {{es}} cluster, set up [{{es}} monitoring](docs-content://deploy-manage/monitor/stack-monitoring.md). If you would like to create a dedicated monitoring cluster (optional), check out [{{es}} monitoring documentation](docs-content://deploy-manage/monitor/stack-monitoring/elasticsearch-monitoring-self-managed.md).
+
+::::
+
+
+::::{dropdown} Disable default collection of {{ls}} monitoring metrics
+:name: disable-default-include-ea
+
+Set `monitoring.enabled` to `false` in logstash.yml to disable default collection:
+
+```yaml
+monitoring.enabled: false
+```
+
+::::
+
+
+::::{dropdown} Specify the target cluster_uuid (optional)
+:name: define-cluster__uuid-ea
+
+To bind the metrics of {{ls}} to a specific cluster, optionally define the `monitoring.cluster_uuid` in the configuration file (logstash.yml):
+
+```yaml
+monitoring.cluster_uuid: PRODUCTION_ES_CLUSTER_UUID
+```
+
+::::
+
+
+::::{dropdown} Create a monitoring user (standalone agent only)
+:name: create-user-ea
+
+Create a user on the production cluster that has the `remote_monitoring_collector` [built-in role](elasticsearch://reference/elasticsearch/roles.md).
+
+::::
+
+
+
+## Install and configure {{agent}} [install-and-configure-mon]
+
+When you have completed the prerequisites, install and configure {{agent}} to monitor host logs and metrics. We’ll walk you through the process in these steps:
+
+* [Add the {{agent}} {{ls}} integration](#add-logstash-integration-ea)
+* [Install and run an {{agent}} on your machine](#add-agent-to-fleet-ea)
+* [View assets](#view-assets)
+* [Monitor {{ls}} logs and metrics (Stack Monitoring)](#view-data-stack)
+
+Check out [Installing {{agent}}](docs-content://reference/ingestion-tools/fleet/install-elastic-agents.md) in the *Fleet and Elastic Agent Guide* for more info.
+
+
+### Add the {{agent}} {{ls}} integration [add-logstash-integration-ea]
+
+1. Go to the {{kib}} home page, and click **Add integrations**.
+
+ :::{image} ../images/kibana-home.png
+ :alt: {{kib}} home page
+ :class: screenshot
+ :::
+
+2. In the query bar, search for **{{ls}}** and select the integration to see more details about it.
+3. Click **Add {{ls}}**.
+4. Configure the integration name and optionally add a description.
+5. Configure the integration to collect logs.
+
+ * Make sure that **Logs** is turned on if you want to collect logs from your {{ls}} instance, ensuring that the required settings are correctly configured:
+ * Under **Logs**, modify the log paths to match your {{ls}} environment.
+
+6. Configure the integration to collect metrics
+
+ * Make sure that **Metrics (Stack Monitoring)** is turned on, and **Metrics (Technical Preview)** is turned off, if you want to collect metrics from your {{ls}} instance
+ * Under **Metrics (Stack Monitoring)**, make sure the hosts setting points to your {{ls}} host URLs. By default, the integration collects {{ls}} monitoring metrics from `localhost:9600`. If that host and port number are not correct, update the `hosts` setting. If you configured {{ls}} to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://localhost:9600`.
+
+7. Choose where to add the integration policy. Click **New hosts** to add it to new agent policy or **Existing hosts** to add it to an existing agent policy.
+8. In the popup, click **Add {{agent}} to your hosts** to open the **Add agent** flyout.
+
+ ::::{tip}
+ If you accidentally close the popup, go to **{{fleet}} > Agents**, then click **Add agent** to access the flyout.
+ ::::
+
+
+
+### Install and run an {{agent}} on your machine [add-agent-to-fleet-ea]
+
+The **Add agent** flyout has two options: **Enroll in {{fleet}}** and **Run standalone**. Enrolling agents in {{fleet}} (default) provides a centralized management tool in {{kib}}, reducing management overhead.
+
+:::::::{tab-set}
+
+::::::{tab-item} Fleet-managed
+1. When the **Add Agent flyout** appears, stay on the **Enroll in fleet** tab.
+2. Skip the **Select enrollment token** step. The enrollment token you need is already selected.
+
+ ::::{note}
+ The enrollment token is specific to the {{agent}} policy that you just created. When you run the command to enroll the agent in {{fleet}}, you will pass in the enrollment token.
+ ::::
+
+3. Download, install, and enroll the {{agent}} on your host by selecting your host operating system and following the **Install {{agent}} on your host** step.
+
+It takes about a minute for {{agent}} to enroll in {{fleet}}, download the configuration specified in the policy you just created, and start collecting data.
+::::::
+
+::::::{tab-item} Run standalone
+1. When the **Add Agent flyout** appears, navigate to the **Run standalone** tab.
+2. Configure the agent. Follow the instructions in **Install Elastic Agent on your host**.
+3. After unpacking the binary, replace the `elastic-agent.yml` file with that supplied in the Add Agent flyout on the "Run standalone" tab, replacing the values of `ES_USERNAME` and `ES_PASSWORD` appropriately.
+4. Run `sudo ./elastic-agent install`
+::::::
+
+:::::::
+
+## View assets [view-assets]
+
+After you have confirmed enrollment and data is coming in, click **View assets** to access dashboards related to the {{ls}} integration.
+
+For the traditional Stack Monitoring UI, the dashboards marked **[Logs {{ls}}]** are used to visualize the logs produced by your {{ls}} instances, while those marked **[Metrics {{ls}}]** are the technical preview metrics dashboards. The latter are populated with data only if you selected the **Metrics (Technical Preview)** checkbox.
+
+:::{image} ../images/integration-assets-dashboards.png
+:alt: Integration assets
+:class: screenshot
+:::
+
+A number of dashboards are included to view {{ls}} as a whole, along with dashboards that let you drill down into how {{ls}} is performing on a per-node, per-pipeline, and per-plugin basis.
+
+
+### Monitor {{ls}} logs and metrics (Stack Monitoring) [view-data-stack]
+
+[View the monitoring data in {{kib}}](docs-content://deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md), and navigate to the [monitoring UI](/reference/logstash-monitoring-ui.md).
diff --git a/docs/reference/monitoring-with-metricbeat.md b/docs/reference/monitoring-with-metricbeat.md
new file mode 100644
index 000000000..613a63938
--- /dev/null
+++ b/docs/reference/monitoring-with-metricbeat.md
@@ -0,0 +1,156 @@
+---
+navigation_title: "{{metricbeat}} collection"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html
+---
+
+# Collect {{ls}} monitoring data with {{metricbeat}} [monitoring-with-metricbeat]
+
+
+You can use {{metricbeat}} to collect data about {{ls}} and ship it to the monitoring cluster. The benefit of Metricbeat collection is that the monitoring agent remains active even if the {{ls}} instance does not.
+
+This step requires [{{es}} with {{metricbeat}} monitoring setup](docs-content://deploy-manage/monitor/stack-monitoring/collecting-monitoring-data-with-metricbeat.md).
+
+To collect and ship monitoring data:
+
+1. [Disable default collection of monitoring metrics](#disable-default)
+2. [Specify the target `cluster_uuid`](#define-cluster__uuid)
+3. [Install and configure {{metricbeat}} to collect monitoring data](#configure-metricbeat)
+
+Want to use {{agent}} instead? Refer to [Collect monitoring data for stack monitoring](/reference/monitoring-with-elastic-agent.md).
+
+
+## Disable default collection of {{ls}} monitoring metrics [disable-default]
+
+Set `monitoring.enabled` to `false` in logstash.yml to disable the default collection of monitoring metrics:
+
+```yaml
+monitoring.enabled: false
+```
+
+
+## Determine target Elasticsearch cluster [define-cluster__uuid]
+
+Determine which Elasticsearch cluster {{ls}} binds its metrics to in the Stack Monitoring UI by specifying the `cluster_uuid`. When pipelines contain [{{es}} output plugins](/reference/plugins-outputs-elasticsearch.md), the `cluster_uuid` is calculated automatically, and the metrics should be bound without any additional settings.
+
+To override the automatic value, or if your pipeline does not contain any [{{es}} output plugins](/reference/plugins-outputs-elasticsearch.md), you can bind the {{ls}} metrics to a specific cluster by defining the target cluster in the `monitoring.cluster_uuid` setting in the configuration file (logstash.yml):
+
+```yaml
+monitoring.cluster_uuid: PRODUCTION_ES_CLUSTER_UUID
+```
+
+Refer to the [{{es}} cluster stats API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-cluster-stats) to find out how to get your cluster’s `cluster_uuid`.
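+
+For example, a sketch that retrieves only the `cluster_uuid` from the production cluster (the host is a placeholder, and credentials may be required):
+
+```shell
+curl -XGET 'http://localhost:9200/_cluster/stats?filter_path=cluster_uuid&pretty'
+```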
+
+
+## Install and configure {{metricbeat}} [configure-metricbeat]
+
+1. [Install {{metricbeat}}](beats://reference/metricbeat/metricbeat-installation-configuration.md) on the same server as {{ls}}.
+2. Enable the `logstash-xpack` module in {{metricbeat}}.
+
+ To enable the default configuration in the {{metricbeat}} `modules.d` directory, run:
+
+ **deb or rpm:**
+
+ ```sh
+ metricbeat modules enable logstash-xpack
+ ```
+
+ **linux or mac:**
+
+ ```sh
+ ./metricbeat modules enable logstash-xpack
+ ```
+
+ **win:**
+
+ ```sh
+ PS > .\metricbeat.exe modules enable logstash-xpack
+ ```
+
+ For more information, see [Specify which modules to run](beats://reference/metricbeat/configuration-metricbeat.md) and [beat module](beats://reference/metricbeat/metricbeat-module-beat.md).
+
+3. Configure the `logstash-xpack` module in {{metricbeat}}.
+
+ The `modules.d/logstash-xpack.yml` file contains these settings:
+
+ ```yaml
+ - module: logstash
+ metricsets:
+ - node
+ - node_stats
+ period: 10s
+ hosts: ["localhost:9600"]
+ #username: "user"
+ #password: "secret"
+ xpack.enabled: true
+ ```
+
+ Set the `hosts`, `username`, and `password` to authenticate with {{ls}}. For other module settings, it’s recommended that you accept the defaults.
+
+ By default, the module collects {{ls}} monitoring data from `localhost:9600`.
+
+ To monitor multiple {{ls}} instances, specify a list of hosts, for example:
+
+ ```yaml
+ hosts: ["http://localhost:9601","http://localhost:9602","http://localhost:9603"]
+ ```
+
+ **Elastic security.** The Elastic {{security-features}} are enabled by default. You must provide a user ID and password so that {{metricbeat}} can collect metrics successfully:
+
+ 1. Create a user on the production cluster that has the `remote_monitoring_collector` [built-in role](elasticsearch://reference/elasticsearch/roles.md).
+ 2. Add the `username` and `password` settings to the module configuration file (`logstash-xpack.yml`).
+
+4. Optional: Disable the system module in the {{metricbeat}}.
+
+ By default, the [system module](beats://reference/metricbeat/metricbeat-module-system.md) is enabled. The information it collects, however, is not shown on the **Stack Monitoring** page in {{kib}}. Unless you want to use that information for other purposes, run the following command:
+
+ ```sh
+ metricbeat modules disable system
+ ```
+
+5. Identify where to send the monitoring data.
+
+ ::::{tip}
+ In production environments, we strongly recommend using a separate cluster (referred to as the *monitoring cluster*) to store the data. Using a separate monitoring cluster prevents production cluster outages from impacting your ability to access your monitoring data. It also prevents monitoring activities from impacting the performance of your production cluster.
+ ::::
+
+
+ For example, specify the {{es}} output information in the {{metricbeat}} configuration file (`metricbeat.yml`):
+
+ ```yaml
+ output.elasticsearch:
+ # Array of hosts to connect to.
+ hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1>
+
+ # Optional protocol and basic auth credentials.
+ #protocol: "https"
+ #username: "elastic"
+ #password: "changeme"
+ ```
+
+ 1. In this example, the data is stored on a monitoring cluster with nodes `es-mon-1` and `es-mon-2`.
+
+
+ If you configured the monitoring cluster to use encrypted communications, you must access it via HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200`.
+
+ ::::{important}
+ The {{es}} {{monitor-features}} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one ingest node.
+ ::::
+
+
+ **Elastic security.** The Elastic {{security-features}} are enabled by default. You must provide a user ID and password so that {{metricbeat}} can send metrics successfully:
+
+ 1. Create a user on the monitoring cluster that has the `remote_monitoring_agent` [built-in role](elasticsearch://reference/elasticsearch/roles.md). Alternatively, use the `remote_monitoring_user` [built-in user](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md).
+
+ ::::{tip}
+ If you’re using index lifecycle management, the remote monitoring user requires additional privileges to create and read indices. For more information, see `<>`.
+ ::::
+
+ 2. Add the `username` and `password` settings to the {{es}} output information in the {{metricbeat}} configuration file.
+
+ For more information about these configuration options, see [Configure the {{es}} output](beats://reference/metricbeat/elasticsearch-output.md).
+
+6. [Start {{metricbeat}}](beats://reference/metricbeat/metricbeat-starting.md) to begin collecting monitoring data.
+7. [View the monitoring data in {{kib}}](docs-content://deploy-manage/monitor/stack-monitoring/kibana-monitoring-data.md).
+
+Your monitoring setup is complete.
diff --git a/docs/reference/multiline.md b/docs/reference/multiline.md
new file mode 100644
index 000000000..7df03c59e
--- /dev/null
+++ b/docs/reference/multiline.md
@@ -0,0 +1,113 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/multiline.html
+---
+
+# Managing Multiline Events [multiline]
+
+Several use cases generate events that span multiple lines of text. In order to correctly handle these multiline events, Logstash needs to know how to tell which lines are part of a single event.
+
+Multiline event processing is complex and relies on proper event ordering. The best way to guarantee ordered log processing is to implement the processing as early in the pipeline as possible.
+
+The [multiline](/reference/plugins-codecs-multiline.md) codec is the preferred tool for handling multiline events in the Logstash pipeline. The multiline codec merges lines from a single input using a simple set of rules.
+
+::::{important}
+If you are using a Logstash input plugin that supports multiple hosts, such as the [beats](/reference/plugins-inputs-beats.md) input plugin, you should not use the [multiline](/reference/plugins-codecs-multiline.md) codec to handle multiline events. Doing so may result in the mixing of streams and corrupted event data. In this situation, you need to handle multiline events before sending the event data to Logstash.
+::::
+
+
+The most important aspects of configuring the multiline codec are the following:
+
+* The `pattern` option specifies a regular expression. Lines that match the specified regular expression are considered either continuations of a previous line or the start of a new multiline event. You can use [grok](/reference/plugins-filters-grok.md) regular expression templates with this configuration option.
+* The `what` option takes two values: `previous` or `next`. The `previous` value specifies that lines that match the value in the `pattern` option are part of the previous line. The `next` value specifies that lines that match the value in the `pattern` option are part of the following line.
+* The `negate` option applies the multiline codec to lines that *do not* match the regular expression specified in the `pattern` option.
+
+See the full documentation for the [multiline](/reference/plugins-codecs-multiline.md) codec plugin for more information on configuration options.
+
+## Examples of Multiline Codec Configuration [_examples_of_multiline_codec_configuration]
+
+The examples in this section cover the following use cases:
+
+* Combining a Java stack trace into a single event
+* Combining C-style line continuations into a single event
+* Combining multiple lines from time-stamped events
+
+### Java Stack Traces [_java_stack_traces]
+
+Java stack traces consist of multiple lines, with each line after the initial line beginning with whitespace, as in this example:
+
+```java
+Exception in thread "main" java.lang.NullPointerException
+ at com.example.myproject.Book.getTitle(Book.java:16)
+ at com.example.myproject.Author.getBookTitles(Author.java:25)
+ at com.example.myproject.Bootstrap.main(Bootstrap.java:14)
+```
+
+To consolidate these lines into a single event in Logstash, use the following configuration for the multiline codec:
+
+```json
+input {
+ stdin {
+ codec => multiline {
+ pattern => "^\s"
+ what => "previous"
+ }
+ }
+}
+```
+
+This configuration merges any line that begins with whitespace up to the previous line.
+
+
+### Line Continuations [_line_continuations]
+
+Several programming languages use the `\` character at the end of a line to denote that the line continues, as in this example:
+
+```c
+printf ("%10.10ld \t %10.10ld \t %s\
+ %f", w, x, y, z );
+```
+
+To consolidate these lines into a single event in Logstash, use the following configuration for the multiline codec:
+
+```json
+input {
+ stdin {
+ codec => multiline {
+ pattern => "\\$"
+ what => "next"
+ }
+ }
+}
+```
+
+This configuration merges any line that ends with the `\` character with the following line.
+
+
+### Timestamps [_timestamps]
+
+Activity logs from services such as Elasticsearch typically begin with a timestamp, followed by information on the specific activity, as in this example:
+
+```shell
+[2015-08-24 11:49:14,389][INFO ][env ] [Letha] using [1] data paths, mounts [[/
+(/dev/disk1)]], net usable_space [34.5gb], net total_space [118.9gb], types [hfs]
+```
+
+To consolidate these lines into a single event in Logstash, use the following configuration for the multiline codec:
+
+```json
+input {
+ file {
+ path => "/var/log/someapp.log"
+ codec => multiline {
+ pattern => "^%{TIMESTAMP_ISO8601} "
+ negate => true
+ what => previous
+ }
+ }
+}
+```
+
+This configuration uses the `negate` option to specify that any line that does not begin with a timestamp belongs to the previous line.
+
+
+
diff --git a/docs/reference/multiple-input-output-plugins.md b/docs/reference/multiple-input-output-plugins.md
new file mode 100644
index 000000000..43024afaa
--- /dev/null
+++ b/docs/reference/multiple-input-output-plugins.md
@@ -0,0 +1,174 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/multiple-input-output-plugins.html
+---
+
+# Stitching Together Multiple Input and Output Plugins [multiple-input-output-plugins]
+
+The information you need to manage often comes from several disparate sources, and use cases can require multiple destinations for your data. Your Logstash pipeline can use multiple input and output plugins to handle these requirements.
+
+In this section, you create a Logstash pipeline that takes input from a Twitter feed and the Filebeat client, then sends the information to an Elasticsearch cluster and also writes it directly to a file.
+
+
+## Reading from a Twitter Feed [twitter-configuration]
+
+To add a Twitter feed, you use the [`twitter`](/reference/plugins-inputs-twitter.md) input plugin. To configure the plugin, you need several pieces of information:
+
+* A *consumer key*, which uniquely identifies your Twitter app.
+* A *consumer secret*, which serves as the password for your Twitter app.
+* One or more *keywords* to search in the incoming feed. The example shows using "cloud" as a keyword, but you can use whatever you want.
+* An *oauth token*, which identifies the Twitter account using this app.
+* An *oauth token secret*, which serves as the password of the Twitter account.
+
+Visit [https://dev.twitter.com/apps](https://dev.twitter.com/apps) to set up a Twitter account and generate your consumer key and secret, as well as your access token and secret. See the docs for the [`twitter`](/reference/plugins-inputs-twitter.md) input plugin if you’re not sure how to generate these keys.
+
+Like you did earlier when you worked on [Parsing Logs with Logstash](/reference/advanced-pipeline.md), create a config file (called `second-pipeline.conf`) that contains the skeleton of a configuration pipeline. If you want, you can reuse the file you created earlier, but make sure you pass in the correct config file name when you run Logstash.
+
+Add the following lines to the `input` section of the `second-pipeline.conf` file, substituting your values for the placeholder values shown here:
+
+```json
+ twitter {
+ consumer_key => "enter_your_consumer_key_here"
+ consumer_secret => "enter_your_secret_here"
+ keywords => ["cloud"]
+ oauth_token => "enter_your_access_token_here"
+ oauth_token_secret => "enter_your_access_token_secret_here"
+ }
+```
+
+
+## Configuring Filebeat to Send Log Lines to Logstash [configuring-lsf]
+
+As you learned earlier in [Configuring Filebeat to Send Log Lines to Logstash](/reference/advanced-pipeline.md#configuring-filebeat), the [Filebeat](https://github.com/elastic/beats/tree/main/filebeat) client is a lightweight, resource-friendly tool that collects logs from files on the server and forwards these logs to your Logstash instance for processing.
+
+After installing Filebeat, you need to configure it. Open the `filebeat.yml` file located in your Filebeat installation directory, and replace the contents with the following lines. Make sure `paths` points to your syslog:
+
+```shell
+filebeat.inputs:
+- type: log
+ paths:
+ - /var/log/*.log <1>
+ fields:
+ type: syslog <2>
+output.logstash:
+ hosts: ["localhost:5044"]
+```
+
+1. Absolute path to the file or files that Filebeat processes.
+2. Adds a field called `type` with the value `syslog` to the event.
+
+
+Save your changes.
+
+To keep the configuration simple, you won’t specify TLS/SSL settings as you would in a real world scenario.
+
+Configure your Logstash instance to use the Filebeat input plugin by adding the following lines to the `input` section of the `second-pipeline.conf` file:
+
+```json
+ beats {
+ port => "5044"
+ }
+```
+
+
+## Writing Logstash Data to a File [logstash-file-output]
+
+You can configure your Logstash pipeline to write data directly to a file with the [`file`](/reference/plugins-outputs-file.md) output plugin.
+
+Configure your Logstash instance to use the `file` output plugin by adding the following lines to the `output` section of the `second-pipeline.conf` file:
+
+```json
+ file {
+ path => "/path/to/target/file"
+ }
+```
+
+
+## Writing to Multiple Elasticsearch Nodes [multiple-es-nodes]
+
+Writing to multiple Elasticsearch nodes lightens the resource demands on a given Elasticsearch node, as well as providing redundant points of entry into the cluster when a particular node is unavailable.
+
+To configure your Logstash instance to write to multiple Elasticsearch nodes, edit the `output` section of the `second-pipeline.conf` file to read:
+
+```json
+output {
+ elasticsearch {
+ hosts => ["IP Address 1:port1", "IP Address 2:port2", "IP Address 3"]
+ }
+}
+```
+
+Use the IP addresses of three non-master nodes in your Elasticsearch cluster in the `hosts` line. When the `hosts` parameter lists multiple IP addresses, Logstash load-balances requests across the list of addresses. Also note that the default port for Elasticsearch is `9200` and can be omitted in the configuration above.
+
+
+### Testing the Pipeline [testing-second-pipeline]
+
+At this point, your `second-pipeline.conf` file looks like this:
+
+```json
+input {
+ twitter {
+ consumer_key => "enter_your_consumer_key_here"
+ consumer_secret => "enter_your_secret_here"
+ keywords => ["cloud"]
+ oauth_token => "enter_your_access_token_here"
+ oauth_token_secret => "enter_your_access_token_secret_here"
+ }
+ beats {
+ port => "5044"
+ }
+}
+output {
+ elasticsearch {
+ hosts => ["IP Address 1:port1", "IP Address 2:port2", "IP Address 3"]
+ }
+ file {
+ path => "/path/to/target/file"
+ }
+}
+```
+
+Logstash is consuming data from the Twitter feed you configured, receiving data from Filebeat, and indexing this information to three nodes in an Elasticsearch cluster as well as writing to a file.
+
+At the data source machine, run Filebeat with the following command:
+
+```shell
+sudo ./filebeat -e -c filebeat.yml -d "publish"
+```
+
+Filebeat will attempt to connect on port 5044. Until Logstash starts with an active Beats plugin, there won’t be any answer on that port, so any messages you see regarding failure to connect on that port are normal for now.
+
+To verify your configuration, run the following command:
+
+```shell
+bin/logstash -f second-pipeline.conf --config.test_and_exit
+```
+
+The `--config.test_and_exit` option parses your configuration file and reports any errors. When the configuration file passes the configuration test, start Logstash with the following command:
+
+```shell
+bin/logstash -f second-pipeline.conf
+```
+
+Use the `grep` utility to search in the target file to verify that information is present:
+
+```shell
+grep syslog /path/to/target/file
+```
+
+Run an Elasticsearch query to find the same information in the Elasticsearch cluster:
+
+```shell
+curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=fields.type:syslog'
+```
+
+Replace $DATE with the current date, in YYYY.MM.DD format.
+
+To see data from the Twitter feed, try this query:
+
+```shell
+curl -XGET 'http://localhost:9200/logstash-$DATE/_search?pretty&q=client:iphone'
+```
+
+Again, remember to replace $DATE with the current date, in YYYY.MM.DD format.
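+
+For example, assuming the current date is 2025.03.10 (illustrative only), the Twitter query would look like:
+
+```shell
+curl -XGET 'http://localhost:9200/logstash-2025.03.10/_search?pretty&q=client:iphone'
+```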
+
diff --git a/docs/static/multiple-pipelines.asciidoc b/docs/reference/multiple-pipelines.md
similarity index 53%
rename from docs/static/multiple-pipelines.asciidoc
rename to docs/reference/multiple-pipelines.md
index 59abeb408..8ec037296 100644
--- a/docs/static/multiple-pipelines.asciidoc
+++ b/docs/reference/multiple-pipelines.md
@@ -1,31 +1,33 @@
-[[multiple-pipelines]]
-=== Multiple Pipelines
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/multiple-pipelines.html
+---
-If you need to run more than one pipeline in the same process, Logstash provides a way to do this through a configuration file called `pipelines.yml`.
-This file must be placed in the `path.settings` folder and follows this structure:
+# Multiple Pipelines [multiple-pipelines]
-[source,yaml]
--------------------------------------------------------------------------------
+If you need to run more than one pipeline in the same process, Logstash provides a way to do this through a configuration file called `pipelines.yml`. This file must be placed in the `path.settings` folder and follows this structure:
+
+```yaml
- pipeline.id: my-pipeline_1
path.config: "/etc/path/to/p1.config"
pipeline.workers: 3
- pipeline.id: my-other-pipeline
path.config: "/etc/different/path/p2.cfg"
queue.type: persisted
--------------------------------------------------------------------------------
+```
-This file is formatted in YAML and contains a list of dictionaries, where each dictionary describes a pipeline, and each key/value pair specifies a setting for that pipeline. The example shows two different pipelines described by their IDs and configuration paths. For the first pipeline, the value of `pipeline.workers` is set to 3, while in the other, the persistent queue feature is enabled.
-The value of a setting that is not explicitly set in the `pipelines.yml` file will fall back to the default specified in the `logstash.yml` <>.
+This file is formatted in YAML and contains a list of dictionaries, where each dictionary describes a pipeline, and each key/value pair specifies a setting for that pipeline. The example shows two different pipelines described by their IDs and configuration paths. For the first pipeline, the value of `pipeline.workers` is set to 3, while in the other, the persistent queue feature is enabled. The value of a setting that is not explicitly set in the `pipelines.yml` file will fall back to the default specified in the `logstash.yml` [settings file](/reference/logstash-settings-file.md).
When you start Logstash without arguments, it will read the `pipelines.yml` file and instantiate all pipelines specified in the file. On the other hand, when you use `-e` or `-f`, Logstash ignores the `pipelines.yml` file and logs a warning about it.
-[[multiple-pipeline-usage]]
-==== Usage Considerations
+## Usage Considerations [multiple-pipeline-usage]
-Using multiple pipelines is especially useful if your current configuration has event flows that don't share the same inputs/filters and outputs and are being separated from each other using tags and conditionals.
+Using multiple pipelines is especially useful if your current configuration has event flows that don’t share the same inputs/filters and outputs and are being separated from each other using tags and conditionals.
-Having multiple pipelines in a single instance also allows these event flows to have different performance and durability parameters (for example, different settings for pipeline workers and persistent queues). This separation means that a blocked output in one pipeline won't exert backpressure in the other.
+Having multiple pipelines in a single instance also allows these event flows to have different performance and durability parameters (for example, different settings for pipeline workers and persistent queues). This separation means that a blocked output in one pipeline won’t exert backpressure in the other.
+
+That said, it’s important to take into account resource competition between the pipelines, given that the default values are tuned for a single pipeline. So, for example, consider reducing the number of pipeline workers used by each pipeline, because each pipeline will use 1 worker per CPU core by default.
+
+Persistent queues and dead letter queues are isolated per pipeline, with their locations namespaced by the `pipeline.id` value.
-That said, it's important to take into account resource competition between the pipelines, given that the default values are tuned for a single pipeline. So, for example, consider reducing the number of pipeline workers used by each pipeline, because each pipeline will use 1 worker per CPU core by default.
-Persistent queues and dead letter queues are isolated per pipeline, with their locations namespaced by the `pipeline.id` value.
\ No newline at end of file
diff --git a/docs/reference/offline-plugins.md b/docs/reference/offline-plugins.md
new file mode 100644
index 000000000..1bbce3f6f
--- /dev/null
+++ b/docs/reference/offline-plugins.md
@@ -0,0 +1,78 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/offline-plugins.html
+---
+
+# Offline Plugin Management [offline-plugins]
+
+The Logstash [plugin manager](/reference/working-with-plugins.md) provides support for preparing offline plugin packs that you can use to install Logstash plugins on systems that don’t have Internet access.
+
+This procedure requires a staging machine running Logstash that has access to a public or [private Rubygems](/reference/private-rubygem.md) server. The staging machine downloads and packages all the files and dependencies required for offline installation.
+
+
+## Building Offline Plugin Packs [building-offline-packs]
+
+An *offline plugin pack* is a compressed file that contains all the plugins your offline Logstash installation requires, along with the dependencies for those plugins.
+
+To build an offline plugin pack:
+
+1. Make sure all the plugins that you want to package are installed on the staging server and that the staging server can access the Internet.
+2. Run the `bin/logstash-plugin prepare-offline-pack` subcommand to package the plugins and dependencies:
+
+ ```shell
+ bin/logstash-plugin prepare-offline-pack --output OUTPUT --overwrite [PLUGINS]
+ ```
+
+ where:
+
+ * `OUTPUT` specifies the zip file where the compressed plugin pack will be written. The default file is `/LOGSTASH_HOME/logstash-offline-plugins-9.0.0.zip`. If you are using 5.2.x and 5.3.0, this location should be a zip file whose contents will be overwritten.
+ * `[PLUGINS]` specifies one or more plugins that you want to include in the pack.
+ * `--overwrite` specifies if you want to override an existing file at the location
+
+
+Examples:
+
+```sh
+bin/logstash-plugin prepare-offline-pack logstash-input-beats <1>
+bin/logstash-plugin prepare-offline-pack logstash-filter-* <2>
+bin/logstash-plugin prepare-offline-pack logstash-filter-* logstash-input-beats <3>
+```
+
+1. Packages the Beats input plugin and any dependencies.
+2. Uses a wildcard to package all filter plugins and any dependencies.
+3. Packages all filter plugins, the Beats input plugin, and any dependencies.
+
+
+::::{note}
+Downloading all dependencies for the specified plugins may take some time, depending on the plugins listed.
+::::
+
+
+
+## Installing Offline Plugin Packs [installing-offline-packs]
+
+To install an offline plugin pack:
+
+1. Move the compressed bundle to the machine where you want to install the plugins.
+2. Run the `bin/logstash-plugin install` subcommand and pass in the file URI of the offline plugin pack.
+
+    For example, on Windows:
+
+    ```sh
+    bin/logstash-plugin install file:///c:/path/to/logstash-offline-plugins-9.0.0.zip
+    ```
+
+    On Linux and macOS:
+
+    ```sh
+    bin/logstash-plugin install file:///path/to/logstash-offline-plugins-9.0.0.zip
+    ```
+
+ This command expects a file URI, so make sure you use forward slashes and specify the full path to the pack.
+
+
+
+## Updating Offline Plugins [updating-offline-packs]
+
+To update offline plugins, you update the plugins on the staging server and then use the same process that you followed to build and install the plugin pack:
+
+1. On the staging server, run the `bin/logstash-plugin update` subcommand to update the plugins. See [Updating plugins](/reference/working-with-plugins.md#updating-plugins).
+2. Create a new version of the plugin pack. See [Building Offline Plugin Packs](#building-offline-packs).
+3. Install the new version of the plugin pack. See [Installing Offline Plugin Packs](#installing-offline-packs).
+
diff --git a/docs/reference/output-plugins.md b/docs/reference/output-plugins.md
new file mode 100644
index 000000000..546c08637
--- /dev/null
+++ b/docs/reference/output-plugins.md
@@ -0,0 +1,133 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/output-plugins.html
+---
+
+# Output plugins [output-plugins]
+
+An output plugin sends event data to a particular destination. Outputs are the final stage in the event pipeline.
+
+The following output plugins are available. For a list of Elastic supported plugins, please consult the [Support Matrix](https://www.elastic.co/support/matrix#show_logstash_plugins).
+
+| Plugin | Description | Github repository |
+| --- | --- | --- |
+| [app_search (deprecated)](/reference/plugins-outputs-elastic_app_search.md) | [deprecated at {{stack}} version 9.0.0 and plugin version 3.0.1] Sends events to Elastic App Search | [logstash-integration-elastic_enterprise_search](https://github.com/logstash-plugins/logstash-output-elastic_app_search) |
+| [boundary](/reference/plugins-outputs-boundary.md) | Sends annotations to Boundary based on Logstash events | [logstash-output-boundary](https://github.com/logstash-plugins/logstash-output-boundary) |
+| [circonus](/reference/plugins-outputs-circonus.md) | Sends annotations to Circonus based on Logstash events | [logstash-output-circonus](https://github.com/logstash-plugins/logstash-output-circonus) |
+| [cloudwatch](/reference/plugins-outputs-cloudwatch.md) | Aggregates and sends metric data to AWS CloudWatch | [logstash-output-cloudwatch](https://github.com/logstash-plugins/logstash-output-cloudwatch) |
+| [csv](/reference/plugins-outputs-csv.md) | Writes events to disk in a delimited format | [logstash-output-csv](https://github.com/logstash-plugins/logstash-output-csv) |
+| [datadog](/reference/plugins-outputs-datadog.md) | Sends events to DataDogHQ based on Logstash events | [logstash-output-datadog](https://github.com/logstash-plugins/logstash-output-datadog) |
+| [datadog_metrics](/reference/plugins-outputs-datadog_metrics.md) | Sends metrics to DataDogHQ based on Logstash events | [logstash-output-datadog_metrics](https://github.com/logstash-plugins/logstash-output-datadog_metrics) |
+| [dynatrace](/reference/plugins-outputs-dynatrace.md) | Sends events to Dynatrace based on Logstash events | [logstash-output-dynatrace](https://github.com/dynatrace-oss/logstash-output-dynatrace) |
+| [elastic_app_search (deprecated)](/reference/plugins-outputs-elastic_app_search.md) | [deprecated at {{stack}} version 9.0.0 and plugin version 3.0.1] Sends events to the [Elastic App Search](https://www.elastic.co/app-search/) solution | [logstash-integration-elastic_enterprise_search](https://github.com/logstash-plugins/logstash-output-elastic_app_search) |
+| [elastic_workplace_search](/reference/plugins-outputs-elastic_workplace_search.md) | Sends events to the [Elastic Workplace Search](https://www.elastic.co/enterprise-search) solution | [logstash-integration-elastic_enterprise_search](https://github.com/logstash-plugins/logstash-output-elastic_app_search) |
+| [elasticsearch](/reference/plugins-outputs-elasticsearch.md) | Stores logs in Elasticsearch | [logstash-output-elasticsearch](https://github.com/logstash-plugins/logstash-output-elasticsearch) |
+| [email](/reference/plugins-outputs-email.md) | Sends email to a specified address when output is received | [logstash-output-email](https://github.com/logstash-plugins/logstash-output-email) |
+| [exec](/reference/plugins-outputs-exec.md) | Runs a command for a matching event | [logstash-output-exec](https://github.com/logstash-plugins/logstash-output-exec) |
+| [file](/reference/plugins-outputs-file.md) | Writes events to files on disk | [logstash-output-file](https://github.com/logstash-plugins/logstash-output-file) |
+| [ganglia](/reference/plugins-outputs-ganglia.md) | Writes metrics to Ganglia’s `gmond` | [logstash-output-ganglia](https://github.com/logstash-plugins/logstash-output-ganglia) |
+| [gelf](/reference/plugins-outputs-gelf.md) | Generates GELF formatted output for Graylog2 | [logstash-output-gelf](https://github.com/logstash-plugins/logstash-output-gelf) |
+| [google_bigquery](/reference/plugins-outputs-google_bigquery.md) | Writes events to Google BigQuery | [logstash-output-google_bigquery](https://github.com/logstash-plugins/logstash-output-google_bigquery) |
+| [google_cloud_storage](/reference/plugins-outputs-google_cloud_storage.md) | Uploads log events to Google Cloud Storage | [logstash-output-google_cloud_storage](https://github.com/logstash-plugins/logstash-output-google_cloud_storage) |
+| [google_pubsub](/reference/plugins-outputs-google_pubsub.md) | Uploads log events to Google Cloud Pubsub | [logstash-output-google_pubsub](https://github.com/logstash-plugins/logstash-output-google_pubsub) |
+| [graphite](/reference/plugins-outputs-graphite.md) | Writes metrics to Graphite | [logstash-output-graphite](https://github.com/logstash-plugins/logstash-output-graphite) |
+| [graphtastic](/reference/plugins-outputs-graphtastic.md) | Sends metric data on Windows | [logstash-output-graphtastic](https://github.com/logstash-plugins/logstash-output-graphtastic) |
+| [http](/reference/plugins-outputs-http.md) | Sends events to a generic HTTP or HTTPS endpoint | [logstash-output-http](https://github.com/logstash-plugins/logstash-output-http) |
+| [influxdb](/reference/plugins-outputs-influxdb.md) | Writes metrics to InfluxDB | [logstash-output-influxdb](https://github.com/logstash-plugins/logstash-output-influxdb) |
+| [irc](/reference/plugins-outputs-irc.md) | Writes events to IRC | [logstash-output-irc](https://github.com/logstash-plugins/logstash-output-irc) |
+| [java_stdout](/reference/plugins-outputs-java_stdout.md) | Prints events to the STDOUT of the shell | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/outputs/Stdout.java) |
+| [juggernaut](/reference/plugins-outputs-juggernaut.md) | Pushes messages to the Juggernaut websockets server | [logstash-output-juggernaut](https://github.com/logstash-plugins/logstash-output-juggernaut) |
+| [kafka](/reference/plugins-outputs-kafka.md) | Writes events to a Kafka topic | [logstash-integration-kafka](https://github.com/logstash-plugins/logstash-integration-kafka) |
+| [librato](/reference/plugins-outputs-librato.md) | Sends metrics, annotations, and alerts to Librato based on Logstash events | [logstash-output-librato](https://github.com/logstash-plugins/logstash-output-librato) |
+| [loggly](/reference/plugins-outputs-loggly.md) | Ships logs to Loggly | [logstash-output-loggly](https://github.com/logstash-plugins/logstash-output-loggly) |
+| [logstash](/reference/plugins-outputs-logstash.md) | Ships data to {{ls}} input on another {{ls}} instance | [logstash-integration-logstash](https://github.com/logstash-plugins/logstash-integration-logstash) |
+| [lumberjack](/reference/plugins-outputs-lumberjack.md) | Sends events using the `lumberjack` protocol | [logstash-output-lumberjack](https://github.com/logstash-plugins/logstash-output-lumberjack) |
+| [metriccatcher](/reference/plugins-outputs-metriccatcher.md) | Writes metrics to MetricCatcher | [logstash-output-metriccatcher](https://github.com/logstash-plugins/logstash-output-metriccatcher) |
+| [mongodb](/reference/plugins-outputs-mongodb.md) | Writes events to MongoDB | [logstash-output-mongodb](https://github.com/logstash-plugins/logstash-output-mongodb) |
+| [nagios](/reference/plugins-outputs-nagios.md) | Sends passive check results to Nagios | [logstash-output-nagios](https://github.com/logstash-plugins/logstash-output-nagios) |
+| [nagios_nsca](/reference/plugins-outputs-nagios_nsca.md) | Sends passive check results to Nagios using the NSCA protocol | [logstash-output-nagios_nsca](https://github.com/logstash-plugins/logstash-output-nagios_nsca) |
+| [opentsdb](/reference/plugins-outputs-opentsdb.md) | Writes metrics to OpenTSDB | [logstash-output-opentsdb](https://github.com/logstash-plugins/logstash-output-opentsdb) |
+| [pagerduty](/reference/plugins-outputs-pagerduty.md) | Sends notifications based on preconfigured services and escalation policies | [logstash-output-pagerduty](https://github.com/logstash-plugins/logstash-output-pagerduty) |
+| [pipe](/reference/plugins-outputs-pipe.md) | Pipes events to another program’s standard input | [logstash-output-pipe](https://github.com/logstash-plugins/logstash-output-pipe) |
+| [rabbitmq](/reference/plugins-outputs-rabbitmq.md) | Pushes events to a RabbitMQ exchange | [logstash-integration-rabbitmq](https://github.com/logstash-plugins/logstash-integration-rabbitmq) |
+| [redis](/reference/plugins-outputs-redis.md) | Sends events to a Redis queue using the `RPUSH` command | [logstash-output-redis](https://github.com/logstash-plugins/logstash-output-redis) |
+| [redmine](/reference/plugins-outputs-redmine.md) | Creates tickets using the Redmine API | [logstash-output-redmine](https://github.com/logstash-plugins/logstash-output-redmine) |
+| [riak](/reference/plugins-outputs-riak.md) | Writes events to the Riak distributed key/value store | [logstash-output-riak](https://github.com/logstash-plugins/logstash-output-riak) |
+| [riemann](/reference/plugins-outputs-riemann.md) | Sends metrics to Riemann | [logstash-output-riemann](https://github.com/logstash-plugins/logstash-output-riemann) |
+| [s3](/reference/plugins-outputs-s3.md) | Sends Logstash events to the Amazon Simple Storage Service | [logstash-output-s3](https://github.com/logstash-plugins/logstash-output-s3) |
+| [sink](/reference/plugins-outputs-sink.md) | Discards any events received | [core plugin](https://github.com/elastic/logstash/blob/master/logstash-core/src/main/java/org/logstash/plugins/outputs/Sink.java) |
+| [sns](/reference/plugins-outputs-sns.md) | Sends events to Amazon’s Simple Notification Service | [logstash-output-sns](https://github.com/logstash-plugins/logstash-output-sns) |
+| [solr_http](/reference/plugins-outputs-solr_http.md) | Stores and indexes logs in Solr | [logstash-output-solr_http](https://github.com/logstash-plugins/logstash-output-solr_http) |
+| [sqs](/reference/plugins-outputs-sqs.md) | Pushes events to an Amazon Web Services Simple Queue Service queue | [logstash-output-sqs](https://github.com/logstash-plugins/logstash-output-sqs) |
+| [statsd](/reference/plugins-outputs-statsd.md) | Sends metrics using the `statsd` network daemon | [logstash-output-statsd](https://github.com/logstash-plugins/logstash-output-statsd) |
+| [stdout](/reference/plugins-outputs-stdout.md) | Prints events to the standard output | [logstash-output-stdout](https://github.com/logstash-plugins/logstash-output-stdout) |
+| [stomp](/reference/plugins-outputs-stomp.md) | Writes events using the STOMP protocol | [logstash-output-stomp](https://github.com/logstash-plugins/logstash-output-stomp) |
+| [syslog](/reference/plugins-outputs-syslog.md) | Sends events to a `syslog` server | [logstash-output-syslog](https://github.com/logstash-plugins/logstash-output-syslog) |
+| [tcp](/reference/plugins-outputs-tcp.md) | Writes events over a TCP socket | [logstash-output-tcp](https://github.com/logstash-plugins/logstash-output-tcp) |
+| [timber](/reference/plugins-outputs-timber.md) | Sends events to the Timber.io logging service | [logstash-output-timber](https://github.com/logstash-plugins/logstash-output-timber) |
+| [udp](/reference/plugins-outputs-udp.md) | Sends events over UDP | [logstash-output-udp](https://github.com/logstash-plugins/logstash-output-udp) |
+| [webhdfs](/reference/plugins-outputs-webhdfs.md) | Sends Logstash events to HDFS using the `webhdfs` REST API | [logstash-output-webhdfs](https://github.com/logstash-plugins/logstash-output-webhdfs) |
+| [websocket](/reference/plugins-outputs-websocket.md) | Publishes messages to a websocket | [logstash-output-websocket](https://github.com/logstash-plugins/logstash-output-websocket) |
+| [workplace_search (deprecated)](/reference/plugins-outputs-elastic_workplace_search.md) | [deprecated at {{stack}} version 9.0.0 and plugin version 3.0.1] Sends events to Elastic Workplace Search | [logstash-integration-elastic_enterprise_search](https://github.com/logstash-plugins/logstash-output-elastic_app_search) |
+| [xmpp](/reference/plugins-outputs-xmpp.md) | Posts events over XMPP | [logstash-output-xmpp](https://github.com/logstash-plugins/logstash-output-xmpp) |
+| [zabbix](/reference/plugins-outputs-zabbix.md) | Sends events to a Zabbix server | [logstash-output-zabbix](https://github.com/logstash-plugins/logstash-output-zabbix) |
+
+
diff --git a/docs/reference/performance-troubleshooting.md b/docs/reference/performance-troubleshooting.md
new file mode 100644
index 000000000..bbb7416a7
--- /dev/null
+++ b/docs/reference/performance-troubleshooting.md
@@ -0,0 +1,56 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/performance-troubleshooting.html
+---
+
+# Performance troubleshooting [performance-troubleshooting]
+
+You can use these troubleshooting tips to quickly diagnose and resolve Logstash performance problems. Advanced knowledge of pipeline internals is not required to understand this guide. However, the [pipeline documentation](/reference/how-logstash-works.md) is recommended reading if you want to go beyond these tips.
+
+You may be tempted to jump ahead and change settings like `pipeline.workers` (`-w`) as a first attempt to improve performance. In our experience, changing this setting makes it more difficult to troubleshoot performance problems because you increase the number of variables in play. Instead, make one change at a time and measure the results. Starting at the end of this list is a sure-fire way to create a confusing situation.
+
+
+## Performance checklist [_performance_checklist]
+
+1. **Check the performance of input sources and output destinations:**
+
+    * Logstash is only as fast as the services it connects to: it can only consume and produce data as fast as its input and output destinations can.
+
+2. **Check system statistics:**
+
+ * CPU
+
+ * Note whether the CPU is being heavily used. On Linux/Unix, you can run `top -H` to see process statistics broken out by thread, as well as total CPU statistics.
+ * If CPU usage is high, skip forward to the section about checking the JVM heap and then read the section about tuning Logstash worker settings.
+
+ * Memory
+
+ * Be aware of the fact that Logstash runs on the Java VM. This means that Logstash will always use the maximum amount of memory you allocate to it.
+ * Look for other applications that use large amounts of memory and may be causing Logstash to swap to disk. This can happen if the total memory used by applications exceeds physical memory.
+
+ * I/O Utilization
+
+ * Monitor disk I/O to check for disk saturation.
+
+ * Disk saturation can happen if you’re using Logstash plugins (such as the file output) that may saturate your storage.
+ * Disk saturation can also happen if you’re encountering a lot of errors that force Logstash to generate large error logs.
+ * On Linux, you can use iostat, dstat, or something similar to monitor disk I/O.
+
+ * Monitor network I/O for network saturation.
+
+ * Network saturation can happen if you’re using inputs/outputs that perform a lot of network operations.
+ * On Linux, you can use a tool like dstat or iftop to monitor your network.
+
+3. **Check the JVM heap:**
+
+ * The recommended heap size for typical ingestion scenarios should be no less than 4GB and no more than 8GB.
+ * CPU utilization can increase unnecessarily if the heap size is too low, resulting in the JVM constantly garbage collecting. You can check for this issue by doubling the heap size to see if performance improves.
+ * Do not increase the heap size past the amount of physical memory. Some memory must be left to run the OS and other processes. As a general guideline for most installations, don’t exceed 50-75% of physical memory. The more memory you have, the higher percentage you can use.
+    * Set the minimum (`Xms`) and maximum (`Xmx`) heap allocation size to the same value to prevent the heap from resizing at runtime, which is a very costly process (see the sketch below).
+ * You can make more accurate measurements of the JVM heap by using either the `jmap` command line utility distributed with Java or by using VisualVM. For more info, see [Profiling the heap](/reference/tuning-logstash.md#profiling-the-heap).
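+
+    For example, assuming a 4GB heap suits your workload, you might pin both values in `config/jvm.options` (a minimal sketch; adjust the size for your environment):
+
+    ```txt
+    # config/jvm.options -- set min and max heap to the same value
+    -Xms4g
+    -Xmx4g
+    ```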
+
+4. **Tune Logstash pipeline settings:**
+
+ * Continue on to [Tuning and profiling logstash pipeline performance](/reference/tuning-logstash.md) to learn about tuning individual pipelines.
+
+
diff --git a/docs/reference/performance-tuning.md b/docs/reference/performance-tuning.md
new file mode 100644
index 000000000..87fdd73cf
--- /dev/null
+++ b/docs/reference/performance-tuning.md
@@ -0,0 +1,14 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/performance-tuning.html
+---
+
+# Performance tuning [performance-tuning]
+
+This section includes the following information about tuning Logstash performance:
+
+* [Performance troubleshooting](/reference/performance-troubleshooting.md)
+* [Tuning and profiling logstash pipeline performance](/reference/tuning-logstash.md)
+
+
+
diff --git a/docs/reference/persistent-queues.md b/docs/reference/persistent-queues.md
new file mode 100644
index 000000000..e5a83d8b5
--- /dev/null
+++ b/docs/reference/persistent-queues.md
@@ -0,0 +1,368 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/persistent-queues.html
+---
+
+# Persistent queues (PQ) [persistent-queues]
+
+A {{ls}} persistent queue helps protect against data loss during abnormal termination by storing the in-flight message queue to disk.
+
+## Benefits of persistent queues [persistent-queues-benefits]
+
+A persistent queue (PQ):
+
+* Helps protect against message loss during a normal shutdown and when Logstash is terminated abnormally. If Logstash is restarted while events are in-flight, Logstash attempts to deliver messages stored in the persistent queue until delivery succeeds at least once.
+* Can absorb bursts of events without needing an external buffering mechanism like Redis or Apache Kafka.
+
+::::{note}
+Persistent queues are disabled by default. To enable them, check out [Configuring persistent queues](#configuring-persistent-queues).
+::::
+
+
+
+## Limitations of persistent queues [persistent-queues-limitations]
+
+Persistent queues do not solve these problems:
+
+* Input plugins that do not use a request-response protocol cannot be protected from data loss. Inputs such as `tcp`, `udp`, and `zeromq` push+pull, and many others do not have a mechanism to acknowledge receipt to the sender. (Plugins such as `beats` and `http`, which **do** have an acknowledgement capability, are well protected by this queue.)
+* Data may be lost if an abnormal shutdown occurs before the checkpoint file has been committed.
+* A persistent queue does not handle permanent machine failures such as disk corruption, disk failure, and machine loss. The data persisted to disk is not replicated.
+
+::::{tip}
+Use the local filesystem for data integrity and performance. Network File System (NFS) is not supported.
+::::
+
+
+
+## Configuring persistent queues [configuring-persistent-queues]
+
+To configure persistent queues, specify options in the Logstash [settings file](/reference/logstash-settings-file.md). Settings are applied to every pipeline.
+
+When you set values for capacity and sizing settings, remember that the value you set is applied *per pipeline* rather than a total to be shared among all pipelines.
+
+::::{tip}
+If you want to define values for a specific pipeline, use [`pipelines.yml`](/reference/multiple-pipelines.md).
+::::
+
+
+`queue.type`
+: Specify `persisted` to enable persistent queues. By default, persistent queues are disabled (default: `queue.type: memory`).
+
+`path.queue`
+: The directory path where the data files will be stored. By default, the files are stored in `path.data/queue`.
+
+`queue.page_capacity`
+: The queue data consists of append-only files called "pages." This value sets the maximum size of a queue page in bytes. The default size of 64mb is a good value for most users, and changing this value is unlikely to have performance benefits. If you change the page capacity of an existing queue, the new size applies only to the new page.
+
+`queue.drain`
+: Specify `true` if you want Logstash to wait until the persistent queue is drained before shutting down. The amount of time it takes to drain the queue depends on the number of events that have accumulated in the queue. Therefore, you should avoid using this setting unless the queue, even when full, is relatively small and can be drained quickly.
+
+`queue.max_events`
+: The maximum number of events not yet read by the pipeline worker. The default is 0 (unlimited). We use this setting for internal testing. Users generally shouldn’t be changing this value.
+
+`queue.max_bytes`
+: The total capacity of *each queue* in number of bytes. Unless overridden in `pipelines.yml` or central management, each persistent queue will be sized at the value of `queue.max_bytes` specified in `logstash.yml`. The default is 1024mb (1gb).
+
+ ::::{note}
+ Be sure that your disk has sufficient capacity to handle the cumulative total of `queue.max_bytes` across all persistent queues. The total of `queue.max_bytes` for *all* queues should be lower than the capacity of your disk.
+ ::::
+
+
+ ::::{tip}
+ If you are using persistent queues to protect against data loss, but don’t require much buffering, you can set `queue.max_bytes` to a smaller value as long as it is not less than the value of `queue.page_capacity`. A smaller value produces smaller queues and improves queue performance.
+ ::::
+
+
+`queue.checkpoint.acks`
+: Sets the number of acked events before forcing a checkpoint. Default is `1024`. Set to `0` for unlimited.
+
+`queue.checkpoint.writes`
+: Sets the maximum number of written events before a forced checkpoint. Default is `1024`. Set to `0` for unlimited.
+
+ To avoid losing data in the persistent queue, you can set `queue.checkpoint.writes: 1` to force a checkpoint after each event is written. Keep in mind that disk writes have a resource cost. Setting this value to `1` ensures maximum durability, but can severely impact performance. See [Controlling durability](#durability-persistent-queues) to better understand the trade-offs.
+
+
+`queue.checkpoint.interval`
+: Sets the interval in milliseconds when a checkpoint is forced on the head page. Default is `1000`. Set to `0` to eliminate periodic checkpoints.
+
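+Here is a minimal `logstash.yml` sketch that enables persistent queues; the path and size shown are only examples:
+
+```yaml
+queue.type: persisted
+path.queue: /var/lib/logstash/queue   # optional; defaults to path.data/queue
+queue.max_bytes: 4gb
+```
+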
+
+## Configuration notes [pq-config-notes]
+
+Every situation and environment is different, and the "ideal" configuration varies. If you optimize for performance, you may increase your risk of losing data. If you optimize for data protection, you may impact performance.
+
+### Queue size [pq-size]
+
+You can control queue size with the `queue.max_events` and `queue.max_bytes` settings. If both settings are specified, Logstash uses whichever criteria is reached first. See [Handling back pressure](#backpressure-persistent-queue) for behavior when queue limits are reached.
+
+Appropriate sizing for the queue depends on the use-case. As a general guiding principle, consider this formula to size your persistent queue.
+
+```txt
+Bytes Received Per Second = Incoming Events Per Second * Raw Event Byte Size
+Bytes Received Per Hour = Bytes Received per Second * 3600s
+Required Queue Capacity = (Bytes Received Per Hour * Tolerated Hours of Downtime) * Multiplication Factor <1>
+```
+
+1. To start, you can set the `Multiplication Factor` to `1.10`, and then refine it for specific data types as indicated in the tables below.
+
+
+#### Queue size by data type [sizing-by-type]
+
+{{ls}} serializes the events it receives before they are stored in the queue. This process results in added overhead to the event inside {{ls}}. This overhead depends on the type and the size of the `Original Event Size`. As such, the `Multiplication Factor` changes depending on your use case. These tables show examples of overhead by event type and how that affects the multiplication factor.
+
+**Raw string message**
+
+| Plaintext size (bytes) | Serialized {{ls}} event size (bytes) | Overhead (bytes) | Overhead (%) | Multiplication Factor |
+| --- | --- | --- | --- | --- |
+| 11 | 213 | `202` | `1836%` | `19.4` |
+| 1212 | 1416 | `204` | `17%` | `1.17` |
+| 10240 | 10452 | `212` | `2%` | `1.02` |
+
+**JSON document**
+
+| JSON document size (bytes) | Serialized {{ls}} event size (bytes) | Overhead (bytes) | Overhead (%) | Multiplication Factor |
+| --- | --- | --- | --- | --- |
+| 947 | 1133 | `186` | `20%` | `1.20` |
+| 2707 | 3206 | `499` | `18%` | `1.18` |
+| 6751 | 7388 | `637` | `9%` | `1.09` |
+| 58901 | 59693 | `792` | `1%` | `1.01` |
+
+**Example**
+
+Let’s consider a {{ls}} instance that receives 1000 EPS and each event is 1KB, which is roughly 3.6GB every hour. In order to tolerate a downstream component being unavailable for 12h without {{ls}} exerting back-pressure upstream, the persistent queue’s `max_bytes` would have to be set to 3.6 * 12 * 1.10 ≈ 47.5GB, or about 50GB.
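+
+Worked out step by step (a sketch of the same calculation):
+
+```txt
+1000 events/s x 1 KB     = 1 MB/s  ≈ 3.6 GB per hour
+3.6 GB/hour x 12 hours   = 43.2 GB
+43.2 GB x 1.10           ≈ 47.5 GB -> round up to about 50 GB
+```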
+
+
+
+### Smaller queue size [pq-lower-max_bytes]
+
+If you are using persistent queues to protect against data loss, but don’t require much buffering, you can set `queue.max_bytes` to a smaller value. A smaller value may produce smaller queues and improve queue performance.
+
+**Sample configuration**
+
+```yaml
+queue.type: persisted
+queue.max_bytes: 10mb
+```
+
+
+### Fewer checkpoints [pq-fewer-checkpoints]
+
+Setting `queue.checkpoint.writes` and `queue.checkpoint.acks` to `0` may yield maximum performance, but may have potential impact on durability.
+
+In a situation where Logstash is terminated or there is a hardware-level failure, any data that has not been checkpointed is lost. See [Controlling durability](#durability-persistent-queues) to better understand the trade-offs.
+
+
+### PQs and pipeline-to-pipeline communication [pq-pline-pline]
+
+Persistent queues can play an important role in your [pipeline-to-pipeline](/reference/pipeline-to-pipeline.md) configuration.
+
+#### Use case: PQs and output isolator pattern [uc-isolator]
+
+Here is a real world use case described by a Logstash user.
+
+"*In our deployment, we use one pipeline per output, and each pipeline has a large PQ. This configuration allows a single output to stall without blocking the input (and thus all other outputs), until the operator can restore flow to the stalled output and let the queue drain.*"
+
+"*Our real-time outputs must be low-latency, and our bulk outputs must be consistent. We use PQs to protect against stalling the real-time outputs more so than to protect against data loss in the bulk outputs. (Although the protection is nice, too).*"
+
+
+
+
+## Troubleshooting persistent queues [troubleshooting-pqs]
+
+Symptoms of persistent queue problems include {{ls}} or one or more pipelines not starting successfully, accompanied by an error message similar to this one.
+
+```
+message=>"java.io.IOException: Page file size is too small to hold elements"
+```
+
+This error indicates that the head page (the oldest in a directory and the one with lowest page id) has a size < 18 bytes, the size of a page header.
+
+To research and resolve the issue:
+
+1. Identify the queue (or queues) that may be corrupt by checking log files, or running the `pqcheck` utility.
+2. Stop Logstash, and wait for it to shut down.
+3. Run `pqrepair <queue-dir>` for each of the corrupted queues.
+
+### `pqcheck` utility [pqcheck]
+
+Use the `pqcheck` utility to identify which persistent queue (or queues) have been corrupted.
+
+From LOGSTASH_HOME, run:
+
+```txt
+bin/pqcheck <queue-dir>
+```
+
+where `<queue-dir>` is the fully qualified path to the persistent queue location.
+
+The `pqcheck` utility reads through the checkpoint files in the given directory and outputs information about the current state of those files. The utility outputs this information for each checkpoint file:
+
+* Checkpoint file name
+* Whether or not the page file has been fully acknowledged. A fully acknowledged page file indicates that all events have been read and processed.
+* Page file name that the checkpoint file is referencing
+* Size of the page file. A page file with a size of 0 results in the output `NOT FOUND`. In this case, run `pqrepair` against the specified queue directory.
+* Page number
+* First unacknowledged page number (only relevant in the head checkpoint)
+* First unacknowledged event sequence number in the page
+* First event sequence number in the page
+* Number of events in the page
+* Whether or not the page has been fully acknowledged
+
+**Sample with healthy page file**
+
+This sample represents a healthy queue with three page files. In this sample, Logstash is currently writing to `page.2` as referenced by `checkpoint.head`. Logstash is reading from `page.0` as referenced by `checkpoint.0`.
+
+```txt
+ubuntu@bigger:/usr/share/logstash$ bin/pqcheck /var/lib/logstash/queue/main/
+Using bundled JDK: /usr/share/logstash/jdk
+OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
+Checking queue dir: /var/lib/logstash/queue/main
+checkpoint.1, fully-acked: NO, page.1 size: 67108864
+ pageNum=1, firstUnackedPageNum=0, firstUnackedSeqNum=239675, minSeqNum=239675,
+ elementCount=218241, isFullyAcked=no
+checkpoint.head, fully-acked: NO, page.2 size: 67108864
+ pageNum=2, firstUnackedPageNum=0, firstUnackedSeqNum=457916, minSeqNum=457916, elementCount=11805, isFullyAcked=no
+checkpoint.0, fully-acked: NO, page.0 size: 67108864 <1>
+ pageNum=0, firstUnackedPageNum=0, firstUnackedSeqNum=176126, minSeqNum=1,
+ elementCount=239674, isFullyAcked=no <2>
+```
+
+1. Represents `checkpoint.0`, which refers to the page file `page.0`, and has a size of `67108864`.
+2. Continuing for `checkpoint.0`, these lines indicate that the page number is `0`, the first unacknowledged event is number `176126`, there are `239674` events in the page file, the first event in this page file is event number `1`, and the page file has not been fully acknowledged. That is, there are still events left in the page file that need to be ingested.
+
+
+**Sample with corrupted page file**
+
+If Logstash doesn’t start and/or `pqcheck` shows an anomaly, such as `NOT_FOUND` for a page, run `pqrepair` on the queue directory.
+
+```txt
+bin/pqcheck /var/lib/logstash/queue/main/
+Using bundled JDK: /usr/share/logstash/jdk
+OpenJDK 64-Bit Server VM warning: Option UseConcMarkSweepGC was deprecated in version 9.0 and will likely be removed in a future release.
+Checking queue dir: /var/lib/logstash/queue/main
+checkpoint.head, fully-acked: NO, page.2 size: NOT FOUND <1>
+ pageNum=2, firstUnackedPageNum=2, firstUnackedSeqNum=534041, minSeqNum=457916,
+ elementCount=76127, isFullyAcked=no
+```
+
+1. `NOT FOUND` is an indication of a corrupted page file. Run `pqrepair` against the specified queue directory.
+
+
+::::{note}
+If the queue shows `fully-acked: YES` and 0 bytes, you can safely delete the file.
+::::
+
+
+
+### `pqrepair` utility [pqrepair]
+
+The `pqrepair` utility tries to remove corrupt queue segments to bring the queue back into working order. It starts searching from the directory where it is launched and looks for `data/queue/main`.
+
+::::{note}
+The queue may lose some data in this operation.
+::::
+
+
+From LOGSTASH_HOME, run:
+
+```txt
+bin/pqrepair <queue-dir>
+```
+
+where `<queue-dir>` is the fully qualified path to the persistent queue location.
+
+There is no output if the utility runs properly.
+
+The `pqrepair` utility requires write access to the directory. Folder permissions may cause problems when Logstash is run as a service. In this situation, use `sudo`.
+
+```txt
+/usr/share/logstash$ sudo -u logstash bin/pqrepair /var/lib/logstash/queue/main/
+```
+
+After you run `pqrepair`, restart Logstash to verify that the repair operation was successful.
+
+
+### Draining the queue [draining-pqs]
+
+You may encounter situations where you want to drain the queue. Examples include:
+
+* Pausing new ingestion. There may be situations where you want to stop new ingestion, but still keep a backlog of data.
+* PQ repair. You can drain the queue to route to a different PQ while repairing an old one.
+* Data or workflow migration. If you are moving off a disk/hardware and/or migrating to a new data flow, you may want to drain the existing queue.
+
+To drain the persistent queue:
+
+1. In the `logstash.yml` file, set `queue.drain: true`.
+2. Restart Logstash for this setting to take effect.
+3. Shut down Logstash (using CTRL+C or SIGTERM), and wait for the queue to empty.
+
+
+
+## How persistent queues work [persistent-queues-architecture]
+
+The queue sits between the input and filter stages in the same process:
+
+input → queue → filter + output
+
+When an input has events ready to process, it writes them to the queue. When the write to the queue is successful, the input can send an acknowledgement to its data source.
+
+When processing events from the queue, Logstash acknowledges events as completed, within the queue, only after filters and outputs have completed. The queue keeps a record of events that have been processed by the pipeline. An event is recorded as processed (in this document, called "acknowledged" or "ACKed") if, and only if, the event has been processed completely by the Logstash pipeline.
+
+What does acknowledged mean? This means the event has been handled by all configured filters and outputs. For example, if you have only one output, Elasticsearch, an event is ACKed when the Elasticsearch output has successfully sent this event to Elasticsearch.
+
+During a normal shutdown (**CTRL+C** or SIGTERM), Logstash stops reading from the queue and finishes processing the in-flight events being processed by the filters and outputs. Upon restart, Logstash resumes processing the events in the persistent queue as well as accepting new events from inputs.
+
+If Logstash is abnormally terminated, any in-flight events will not have been ACKed and will be reprocessed by filters and outputs when Logstash is restarted. Logstash processes events in batches, so it is possible that for any given batch, some of that batch may have been successfully completed, but not recorded as ACKed, when an abnormal termination occurs.
+
+::::{note}
+If you override the default behavior by setting `queue.drain: true`, Logstash reads from the queue until it is emptied, even after a controlled shutdown.
+::::
+
+
+For more details about the specific behaviors of queue writes and acknowledgement, see [Controlling durability](#durability-persistent-queues).
+
+### Handling back pressure [backpressure-persistent-queue]
+
+When the queue is full, Logstash puts back pressure on the inputs to stall data flowing into Logstash. This mechanism helps Logstash control the rate of data flow at the input stage without overwhelming outputs like Elasticsearch.
+
+Use the `queue.max_bytes` setting to configure the total capacity of the queue on disk. The following example sets the total capacity of the queue to 8gb:
+
+```yaml
+queue.type: persisted
+queue.max_bytes: 8gb
+```
+
+With these settings specified, Logstash buffers events on disk until the size of the queue reaches 8gb. When the queue is full of unACKed events, and the size limit has been reached, Logstash no longer accepts new events.
+
+Each input handles back pressure independently. For example, when the [beats](/reference/plugins-inputs-beats.md) input encounters back pressure, it no longer accepts new connections and waits until the persistent queue has space to accept more events. After the filter and output stages finish processing existing events in the queue and ACKs them, Logstash automatically starts accepting new events.
+
+
+### Controlling durability [durability-persistent-queues]
+
+Durability is a property of storage writes that ensures data will be available after it’s written.
+
+When the persistent queue feature is enabled, Logstash stores events on disk. Logstash commits to disk in a mechanism called *checkpointing*.
+
+The queue itself is a set of pages. There are two kinds of pages: head pages and tail pages. The head page is where new events are written. There is only one head page. When the head page is of a certain size (see `queue.page_capacity`), it becomes a tail page, and a new head page is created. Tail pages are immutable, and the head page is append-only. The queue also records details about itself (pages, acknowledgements, and so on) in a separate file called a checkpoint file.
+
+When recording a checkpoint, Logstash:
+
+* Calls `fsync` on the head page.
+* Atomically writes to disk the current state of the queue.
+
+The process of checkpointing is atomic, which means any update to the file is saved if successful.
+
+::::{important}
+If Logstash is terminated, or if there is a hardware-level failure, any data that is buffered in the persistent queue, but not yet checkpointed, is lost.
+::::
+
+
+You can force Logstash to checkpoint more frequently by setting `queue.checkpoint.writes`. This setting specifies the maximum number of events that may be written to disk before forcing a checkpoint. The default is 1024. To ensure maximum durability and avoid data loss in the persistent queue, you can set `queue.checkpoint.writes: 1` to force a checkpoint after each event is written. Keep in mind that disk writes have a resource cost. Setting this value to `1` can severely impact performance.
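+
+For example, a configuration that favors durability over throughput might look like this (a sketch):
+
+```yaml
+queue.type: persisted
+queue.checkpoint.writes: 1
+```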
+
+
+### Disk garbage collection [garbage-collection]
+
+On disk, the queue is stored as a set of pages where each page is one file. Each page can be at most `queue.page_capacity` in size. Pages are deleted (garbage collected) after all events in that page have been ACKed. If an older page has at least one event that is not yet ACKed, that entire page will remain on disk until all events in that page are successfully processed. Each page containing unprocessed events will count against the `queue.max_bytes` byte size.
+
+
+
diff --git a/docs/static/pipeline-pipeline-config.asciidoc b/docs/reference/pipeline-to-pipeline.md
similarity index 68%
rename from docs/static/pipeline-pipeline-config.asciidoc
rename to docs/reference/pipeline-to-pipeline.md
index 679f35128..a05cd7b9a 100644
--- a/docs/static/pipeline-pipeline-config.asciidoc
+++ b/docs/reference/pipeline-to-pipeline.md
@@ -1,35 +1,37 @@
-[[pipeline-to-pipeline]]
-=== Pipeline-to-pipeline communication
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/pipeline-to-pipeline.html
+---
+
+# Pipeline-to-pipeline communication [pipeline-to-pipeline]
When using the multiple pipeline feature of Logstash, you may want to connect multiple pipelines within the same Logstash instance. This configuration can be useful to isolate the execution of these pipelines, as well as to help break-up the logic of complex pipelines. The `pipeline` input/output enables a number of advanced architectural patterns discussed later in this document.
-If you need to set up communication _between_ Logstash instances, use either {logstash-ref}/ls-to-ls.html[Logstash-to-Logstash] communications, or an intermediary queue, such as Kafka or Redis.
+If you need to set up communication *between* Logstash instances, use either [Logstash-to-Logstash](/reference/logstash-to-logstash-communications.md) communications, or an intermediary queue, such as Kafka or Redis.
-TIP: Persistent queues (PQs) can help keep data moving through pipelines.
-See <> to learn how PQs can enhance your
-pipeline-to-pipeline communication strategy.
+::::{tip}
+Persistent queues (PQs) can help keep data moving through pipelines. See [PQs and pipeline-to-pipeline communication](/reference/persistent-queues.md#pq-pline-pline) to learn how PQs can enhance your pipeline-to-pipeline communication strategy.
+::::
-[[pipeline-to-pipeline-overview]]
-==== Configuration overview
+
+## Configuration overview [pipeline-to-pipeline-overview]
Use the `pipeline` input and `pipeline` output to connect two pipelines running within the same Logstash instance. These inputs use a client-server approach, where the `pipeline` input registers a virtual address that a `pipeline` output can connect to.
-. Create a 'downstream' pipeline that listens for events on a virtual address.
-. Create an 'upstream' pipeline that produces events, sending them through a `pipeline` output to one or more virtual addresses.
+1. Create a *downstream* pipeline that listens for events on a virtual address.
+2. Create an *upstream* pipeline that produces events, sending them through a `pipeline` output to one or more virtual addresses.
Here is a simple example of this configuration.
-[source,yaml]
-----
+```yaml
# config/pipelines.yml
- pipeline.id: upstream
config.string: input { stdin {} } output { pipeline { send_to => [myVirtualAddress] } }
- pipeline.id: downstream
config.string: input { pipeline { address => myVirtualAddress } }
-----
+```
-[[how-pipeline-to-pipeline-works]]
-===== How it works
+### How it works [how-pipeline-to-pipeline-works]
The `pipeline` input acts as a virtual server listening on a single virtual address in the local process. Only `pipeline` outputs running on the same local Logstash can send events to this address. Pipeline `outputs` can send events to a list of virtual addresses. A `pipeline` output will be blocked if the downstream pipeline is blocked or unavailable.
@@ -37,53 +39,45 @@ When events are sent across pipelines, their data is fully copied. Modifications
The `pipeline` plugin may be the most efficient way to communicate between pipelines, but it still incurs a performance cost. Logstash must duplicate each event in full on the Java heap for each downstream pipeline. Using this feature may affect the heap memory utilization of Logstash.
-[[delivery-guarantees]]
-===== Delivery guarantees
-In its standard configuration the `pipeline` input/output has at-least-once delivery guarantees. The output will be
-blocked if the address is blocked or unavailable.
-By default, the `ensure_delivery` option on the `pipeline` output is set to `true.` If you change the
-`ensure_delivery` flag to `false`, an _unavailable_ downstream pipeline causes the sent message to be discarded. Note
-that a pipeline is considered unavailable only when it is starting up or reloading, not when any of the plugins it
-may contain are blocked. A _blocked_ downstream pipeline blocks the sending output/pipeline regardless of the value of
-the `ensure_delivery` flag. Use `ensure_delivery => false` when you want the ability to temporarily disable a
-downstream pipeline without blocking any upstream pipelines sending to it.
+### Delivery guarantees [delivery-guarantees]
-These delivery guarantees also inform the shutdown behavior of this feature. When performing a pipeline reload, changes
-will be made immediately as the user requests, even if that means removing a downstream pipeline receiving events from
-an upstream pipeline. This will cause the upstream pipeline to block. You must restore the downstream pipeline to
-cleanly shut down Logstash. You may issue a force kill, but inflight events may be lost unless the persistent queue is
-enabled for that pipeline.
+In its standard configuration the `pipeline` input/output has at-least-once delivery guarantees. The output will be blocked if the address is blocked or unavailable.
-[[avoid-cycles]]
-===== Avoid cycles
+By default, the `ensure_delivery` option on the `pipeline` output is set to `true`. If you change the `ensure_delivery` flag to `false`, an *unavailable* downstream pipeline causes the sent message to be discarded. Note that a pipeline is considered unavailable only when it is starting up or reloading, not when any of the plugins it may contain are blocked. A *blocked* downstream pipeline blocks the sending output/pipeline regardless of the value of the `ensure_delivery` flag. Use `ensure_delivery => false` when you want the ability to temporarily disable a downstream pipeline without blocking any upstream pipelines sending to it.
-When you connect pipelines, keep the data flowing in one direction. Looping data or connecting the pipelines into a cyclic graph can cause problems. Logstash waits for each pipeline's work to complete before shutting down. Pipeline loops can prevent Logstash from shutting down cleanly.
+These delivery guarantees also inform the shutdown behavior of this feature. When performing a pipeline reload, changes will be made immediately as the user requests, even if that means removing a downstream pipeline receiving events from an upstream pipeline. This will cause the upstream pipeline to block. You must restore the downstream pipeline to cleanly shut down Logstash. You may issue a force kill, but inflight events may be lost unless the persistent queue is enabled for that pipeline.
-[[architectural-patterns]]
-==== Architectural patterns
+
+### Avoid cycles [avoid-cycles]
+
+When you connect pipelines, keep the data flowing in one direction. Looping data or connecting the pipelines into a cyclic graph can cause problems. Logstash waits for each pipeline’s work to complete before shutting down. Pipeline loops can prevent Logstash from shutting down cleanly.
+
+
+
+## Architectural patterns [architectural-patterns]
You can use the `pipeline` input and output to better organize code, streamline control flow, and isolate the performance of complex configurations. There are infinite ways to connect pipelines. The ones presented here offer some ideas.
-* <>
-* <>
-* <>
-* <>
+* [The distributor pattern](#distributor-pattern)
+* [The output isolator pattern](#output-isolator-pattern)
+* [The forked path pattern](#forked-path-pattern)
+* [The collector pattern](#collector-pattern)
-NOTE: These examples use `config.string` to illustrate the flows.
-You can also use configuration files for pipeline-to-pipeline communication.
+::::{note}
+These examples use `config.string` to illustrate the flows. You can also use configuration files for pipeline-to-pipeline communication.
+::::
-[[distributor-pattern]]
-===== The distributor pattern
-You can use the distributor pattern in situations where there are multiple types of data coming through a single input, each with its own complex set of processing rules. With the distributor pattern one pipeline is used to route data to other pipelines based on type. Each type is routed to a pipeline with only the logic for handling that type. In this way each type's logic can be isolated.
+### The distributor pattern [distributor-pattern]
+
+You can use the distributor pattern in situations where there are multiple types of data coming through a single input, each with its own complex set of processing rules. With the distributor pattern one pipeline is used to route data to other pipelines based on type. Each type is routed to a pipeline with only the logic for handling that type. In this way each type’s logic can be isolated.
As an example, in many organizations a single beats input may be used to receive traffic from a variety of sources, each with its own processing logic. A common way to deal with this type of data is to have a number of `if` conditions separating the traffic and processing each type differently. This approach can quickly get messy when configs are long and complex.
Here is an example distributor pattern configuration.
-[source,yaml]
-----
+```yaml
# config/pipelines.yml
- pipeline.id: beats-server
config.string: |
@@ -119,23 +113,22 @@ Here is an example distributor pattern configuration.
config.string: |
input { pipeline { address => fallback } }
output { elasticsearch { hosts => [es_cluster_b_host] } }
-----
+```
Notice how following the flow of data is simple because each pipeline only works on a single specific task.
-[[output-isolator-pattern]]
-===== The output isolator pattern
-You can use the output isolator pattern to prevent Logstash from becoming blocked if one of multiple outputs experiences a temporary failure. Logstash, by default, is blocked when any single output is down. This behavior is important in guaranteeing at-least-once delivery of data.
+### The output isolator pattern [output-isolator-pattern]
-For example, a server might be configured to send log data to both Elasticsearch and an HTTP endpoint. The HTTP endpoint might be frequently unavailable due to regular service or other reasons. In this scenario, data would be paused from sending to Elasticsearch any time the HTTP endpoint is down.
+You can use the output isolator pattern to prevent Logstash from becoming blocked if one of multiple outputs experiences a temporary failure. Logstash, by default, is blocked when any single output is down. This behavior is important in guaranteeing at-least-once delivery of data.
+
+For example, a server might be configured to send log data to both Elasticsearch and an HTTP endpoint. The HTTP endpoint might be frequently unavailable due to regular service or other reasons. In this scenario, data would be paused from sending to Elasticsearch any time the HTTP endpoint is down.
Using the output isolator pattern and persistent queues, we can continue sending to Elasticsearch, even when one output is down.
-Here is an example of this scenario using the output isolator pattern.
+Here is an example of this scenario using the output isolator pattern.
-[source,yaml]
-----
+```yaml
# config/pipelines.yml
- pipeline.id: intake
config.string: |
@@ -151,23 +144,22 @@ Here is an example of this scenario using the output isolator pattern.
config.string: |
input { pipeline { address => http } }
output { http { } }
-----
+```
In this architecture, each output has its own queue with its own tuning and settings. Note that this approach uses up to twice as much disk space and incurs three times as much serialization/deserialization cost as a single pipeline.
If any of the persistent queues of the downstream pipelines (in the example above, `buffered-es` and `buffered-http`) become full, both outputs will stop.
-[[forked-path-pattern]]
-===== The forked path pattern
+
+### The forked path pattern [forked-path-pattern]
You can use the forked path pattern for situations where a single event must be processed more than once according to different sets of rules. Before the `pipeline` input and output were available, this need was commonly addressed through creative use of the `clone` filter and `if/else` rules.
-Let's imagine a use case where we receive data and index the full event in our own systems, but publish a redacted version of the data to a partner's S3 bucket. We might use the output isolator pattern described above to decouple our writes to either system. The distinguishing feature of the forked path pattern is the existence of additional rules in the downstream pipelines.
+Let’s imagine a use case where we receive data and index the full event in our own systems, but publish a redacted version of the data to a partner’s S3 bucket. We might use the output isolator pattern described above to decouple our writes to either system. The distinguishing feature of the forked path pattern is the existence of additional rules in the downstream pipelines.
Here is an example of the forked path configuration.
-[source,yaml]
-----
+```yaml
# config/pipelines.yml
- pipeline.id: intake
queue.type: persisted
@@ -189,17 +181,16 @@ Here is an example of the forked path configuration.
mutate { remove_field => 'sensitive-data' }
}
output { s3 { } } # Output to partner's bucket
-----
+```
-[[collector-pattern]]
-===== The collector pattern
+
+### The collector pattern [collector-pattern]
You can use the collector pattern when you want to define a common set of outputs and pre-output filters that many disparate pipelines might use. This pattern is the opposite of the distributor pattern. In this pattern many pipelines flow in to a single pipeline where they share outputs and processing. This pattern simplifies configuration at the cost of reducing isolation, since all data is sent through a single pipeline.
Here is an example of the collector pattern.
-[source,yaml]
-----
+```yaml
# config/pipelines.yml
- pipeline.id: beats
config.string: |
@@ -218,5 +209,7 @@ Here is an example of the collector pattern.
mutate { remove_field => 'sensitive-data' }
}
output { elasticsearch { } }
-----
+```
+
+
diff --git a/docs/reference/plugin-concepts.md b/docs/reference/plugin-concepts.md
new file mode 100644
index 000000000..15ad2e61b
--- /dev/null
+++ b/docs/reference/plugin-concepts.md
@@ -0,0 +1,26 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugin-concepts.html
+---
+
+# Cross-plugin concepts and features [plugin-concepts]
+
+This section describes concepts, features, and behaviours that apply to multiple plugins.
+
+## Space-delimited URIs in list-type params [space-delimited-uris-in-list-params]
+
+List-type URI parameters will automatically expand strings that contain multiple whitespace-delimited URIs into separate entries. This behaviour enables the expansion of an arbitrary list of URIs from a single environment variable or keystore variable.
+
+These plugins and options support this functionality:
+
+* [Elasticsearch input plugin - `hosts`](/reference/plugins-inputs-elasticsearch.md#plugins-inputs-elasticsearch-hosts)
+* [Elasticsearch output plugin - `hosts`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-hosts)
+* [Elasticsearch filter plugin - `hosts`](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-hosts)
+
+You can use this functionality to define an environment variable with multiple whitespace-delimited URIs and use it for the options above.
+
+**Example**
+
+```
+ES_HOSTS="es1.example.com es2.example.com:9201 es3.example.com:9201"
+```
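+
+A pipeline can then reference that single variable, and it expands into three separate hosts entries (a sketch using the Elasticsearch output):
+
+```ruby
+output {
+  elasticsearch {
+    # "${ES_HOSTS}" expands to the three whitespace-delimited URIs defined above
+    hosts => ["${ES_HOSTS}"]
+  }
+}
+```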
diff --git a/docs/reference/plugin-generator.md b/docs/reference/plugin-generator.md
new file mode 100644
index 000000000..9361c1052
--- /dev/null
+++ b/docs/reference/plugin-generator.md
@@ -0,0 +1,18 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugin-generator.html
+---
+
+# Generating plugins [plugin-generator]
+
+You can create your own Logstash plugin in seconds! The generate subcommand of `bin/logstash-plugin` creates the foundation for a new Logstash plugin with templatized files. It creates the correct directory structure, gemspec files, and dependencies so you can start adding custom code to process data with Logstash.
+
+**Example Usage**
+
+```sh
+bin/logstash-plugin generate --type input --name xkcd --path ~/ws/elastic/plugins
+```
+
+* `--type`: Type of plugin - input, filter, output, or codec
+* `--name`: Name for the new plugin
+* `--path`: Directory path where the new plugin structure will be created. If you don’t specify a directory, the plugin is created in the current directory.
diff --git a/docs/reference/plugins-codecs-avro.md b/docs/reference/plugins-codecs-avro.md
new file mode 100644
index 000000000..7d73f163e
--- /dev/null
+++ b/docs/reference/plugins-codecs-avro.md
@@ -0,0 +1,148 @@
+---
+navigation_title: "avro"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-avro.html
+---
+
+# Avro codec plugin [plugins-codecs-avro]
+
+
+* Plugin version: v3.4.1
+* Released on: 2023-10-16
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.4.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-avro-index.md).
+
+## Getting help [_getting_help_172]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-avro). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_171]
+
+Read serialized Avro records as Logstash events.
+
+This plugin is used to serialize Logstash events as Avro datums, as well as to deserialize Avro datums into Logstash events.
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-codecs-avro-ecs_metadata]
+
+The plugin behaves the same regardless of ECS compatibility, except that when ECS compatibility is enabled it also adds the original message to `[event][original]`.
+
+
+## Encoding [_encoding]
+
+This codec is for serializing individual Logstash events as Avro datums that are Avro binary blobs. It does not encode Logstash events into an Avro file.
+
+
+## Decoding [_decoding]
+
+This codec is for deserializing individual Avro records. It is not for reading Avro files. Avro files have a unique format that must be handled upon input.
+
+::::{admonition} Partial deserialization
+:class: note
+
+The Avro format is known to support partial deserialization of arbitrary fields when provided with a schema containing a subset of the schema that was used to serialize the data. This codec **doesn’t support partial deserialization of arbitrary fields**. Partial deserialization *might* work only when providing a schema which contains the first `N` fields of the schema used to serialize the data (and in the same order).
+
+::::
+
+
+
+## Usage [_usage_6]
+
+Example usage with Kafka input.
+
+```ruby
+input {
+ kafka {
+ codec => avro {
+ schema_uri => "/tmp/schema.avsc"
+ }
+ }
+}
+filter {
+ ...
+}
+output {
+ ...
+}
+```
+
+
+## Avro Codec Configuration Options [plugins-codecs-avro-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-codecs-avro-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`encoding`](#plugins-codecs-avro-encoding) | [string](/reference/configuration-file-structure.md#string), one of `["binary", "base64"]` | No |
+| [`schema_uri`](#plugins-codecs-avro-schema_uri) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`tag_on_failure`](#plugins-codecs-avro-tag_on_failure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-codecs-avro-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `ecs_compatibility` [plugins-codecs-avro-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: Avro data added at root level
+ * `v1`,`v8`: Elastic Common Schema compliant behavior (`[event][original]` is also added)
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `encoding` [plugins-codecs-avro-encoding]
+
+* Value can be any of: `binary`, `base64`
+* Default value is `base64`
+
+Set encoding for Avro’s payload. Use `base64` (default) to indicate that this codec sends or expects to receive base64-encoded bytes.
+
+Set this option to `binary` to indicate that this codec sends or expects to receive binary Avro data.
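+
+For example, a minimal sketch of a Kafka output producing binary Avro (the schema path is illustrative):
+
+```ruby
+output {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+      encoding   => "binary"
+    }
+  }
+}
+```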
+
+
+### `schema_uri` [plugins-codecs-avro-schema_uri]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The path to fetch the schema from. This can be an *http* or *file* scheme URI, for example:
+
+* http - `http://example.com/schema.avsc`
+* file - `/path/to/schema.avsc`
+
+
+### `tag_on_failure` [plugins-codecs-avro-tag_on_failure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Tag events with `_avroparsefailure` when decoding fails.
+
+
+### `target` [plugins-codecs-avro-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* This setting is only relevant when decoding data into an event.
+
+Define the target field for placing the values. If this setting is not set, the Avro data will be stored at the root (top level) of the event.
+
+**Example**
+
+```ruby
+input {
+ kafka {
+ codec => avro {
+ schema_uri => "/tmp/schema.avsc"
+ target => "[document]"
+ }
+ }
+}
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-cef.md b/docs/reference/plugins-codecs-cef.md
new file mode 100644
index 000000000..43cfd6f9c
--- /dev/null
+++ b/docs/reference/plugins-codecs-cef.md
@@ -0,0 +1,524 @@
+---
+navigation_title: "cef"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-cef.html
+---
+
+# Cef codec plugin [plugins-codecs-cef]
+
+
+* Plugin version: v6.2.8
+* Released on: 2024-10-22
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-cef/blob/v6.2.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-cef-index.md).
+
+## Getting help [_getting_help_173]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-cef). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_172]
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF). It is based on [Implementing ArcSight CEF Revision 25, September 2017](https://www.microfocus.com/documentation/arcsight/arcsight-smartconnectors/pdfdoc/common-event-format-v25/common-event-format-v25.pdf).
+
+If this codec receives a payload from an input that is not a valid CEF message, then it produces an event with the payload as the *message* field and a *_cefparsefailure* tag.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [_compatibility_with_the_elastic_common_schema_ecs_3]
+
+This plugin can be used to decode CEF events *into* the Elastic Common Schema, or to encode ECS-compatible events into CEF. It can also be used *without* ECS, encoding and decoding events using only CEF-defined field names and keys.
+
+The ECS Compatibility mode for a specific plugin instance can be controlled by setting [`ecs_compatibility`](#plugins-codecs-cef-ecs_compatibility) when defining the codec:
+
+```ruby
+ input {
+ tcp {
+ # ...
+ codec => cef {
+ ecs_compatibility => v1
+ }
+ }
+ }
+```
+
+If left unspecified, the value of the `pipeline.ecs_compatibility` setting is used.
+
+### Timestamps and ECS compatibility [_timestamps_and_ecs_compatiblity]
+
+When decoding in ECS Compatibility Mode, timestamp-type fields are parsed and normalized to specific points on the timeline.
+
+Because the CEF format allows ambiguous timestamp formats, some reasonable assumptions are made:
+
+* When the timestamp does not include a year, we assume it happened in the recent past (or *very* near future to accommodate out-of-sync clocks and timezone offsets).
+* When the timestamp does not include UTC-offset information, we use the event’s timezone (`dtz` or `deviceTimeZone` field), or fall through to this plugin’s [`default_timezone`](#plugins-codecs-cef-default_timezone).
+* Localized timestamps are parsed using the provided [`locale`](#plugins-codecs-cef-locale).
+
+
+### Field mapping [plugins-codecs-cef-field-mapping]
+
+The header fields from each CEF payload are expanded to the following fields, depending on whether ECS is enabled.
+
+#### Header field mapping [plugins-codecs-cef-header-field]
+
+| ECS Disabled | ECS Field |
+| --- | --- |
+| `cefVersion` | `[cef][version]` |
+| `deviceVendor` | `[observer][vendor]` |
+| `deviceProduct` | `[observer][product]` |
+| `deviceVersion` | `[observer][version]` |
+| `deviceEventClassId` | `[event][code]` |
+| `name` | `[cef][name]` |
+| `severity` | `[event][severity]` |
+
+When decoding CEF payloads with `ecs_compatibility => disabled`, the abbreviated CEF Keys found in extensions are expanded, and CEF Field Names are inserted at the root level of the event.
+
+When decoding in an ECS Compatibility mode, the ECS Fields are populated from the corresponding CEF Field Names *or* CEF Keys found in the payload’s extensions.
+
+The following is a mapping between these fields.
+
+
+#### Extension field mapping [plugins-codecs-cef-ext-field]
+
+| CEF Field Name (optional CEF Key) | ECS Field |
+| --- | --- |
+| `agentAddress` (`agt`) | `[agent][ip]` |
+| `agentDnsDomain` | `[cef][agent][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *higher* priority. |
+| `agentHostName` (`ahost`) | `[agent][name]` |
+| `agentId` (`aid`) | `[agent][id]` |
+| `agentMacAddress` (`amac`) | `[agent][mac]` |
+| `agentNtDomain` | `[cef][agent][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *lower* priority. |
+| `agentReceiptTime` (`art`) | `[event][created]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `agentTimeZone` (`atz`) | `[cef][agent][timezone]` |
+| `agentTranslatedAddress` | `[cef][agent][nat][ip]` |
+| `agentTranslatedZoneExternalID` | `[cef][agent][translated_zone][external_id]` |
+| `agentTranslatedZoneURI` | `[cef][agent][translated_zone][uri]` |
+| `agentType` (`at`) | `[agent][type]` |
+| `agentVersion` (`av`) | `[agent][version]` |
+| `agentZoneExternalID` | `[cef][agent][zone][external_id]` |
+| `agentZoneURI` | `[cef][agent][zone][uri]` |
+| `applicationProtocol` (`app`) | `[network][protocol]` |
+| `baseEventCount` (`cnt`) | `[cef][base_event_count]` |
+| `bytesIn` (`in`) | `[source][bytes]` |
+| `bytesOut` (`out`) | `[destination][bytes]` |
+| `categoryDeviceType` (`catdt`) | `[cef][device_type]` |
+| `customerExternalID` | `[organization][id]` |
+| `customerURI` | `[organization][name]` |
+| `destinationAddress` (`dst`) | `[destination][ip]` |
+| `destinationDnsDomain` | `[destination][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *higher* priority. |
+| `destinationGeoLatitude` (`dlat`) | `[destination][geo][location][lat]` |
+| `destinationGeoLongitude` (`dlong`) | `[destination][geo][location][lon]` |
+| `destinationHostName` (`dhost`) | `[destination][domain]` |
+| `destinationMacAddress` (`dmac`) | `[destination][mac]` |
+| `destinationNtDomain` (`dntdom`) | `[destination][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *lower* priority. |
+| `destinationPort` (`dpt`) | `[destination][port]` |
+| `destinationProcessId` (`dpid`) | `[destination][process][pid]` |
+| `destinationProcessName` (`dproc`) | `[destination][process][name]` |
+| `destinationServiceName` | `[destination][service][name]` |
+| `destinationTranslatedAddress` | `[destination][nat][ip]` |
+| `destinationTranslatedPort` | `[destination][nat][port]` |
+| `destinationTranslatedZoneExternalID` | `[cef][destination][translated_zone][external_id]` |
+| `destinationTranslatedZoneURI` | `[cef][destination][translated_zone][uri]` |
+| `destinationUserId` (`duid`) | `[destination][user][id]` |
+| `destinationUserName` (`duser`) | `[destination][user][name]` |
+| `destinationUserPrivileges` (`dpriv`) | `[destination][user][group][name]` |
+| `destinationZoneExternalID` | `[cef][destination][zone][external_id]` |
+| `destinationZoneURI` | `[cef][destination][zone][uri]` |
+| `deviceAction` (`act`) | `[event][action]` |
+| `deviceAddress` (`dvc`) | `[observer][ip]` when plugin configured with `device => observer`, or `[host][ip]` when configured with `device => host` |
+| `deviceCustomFloatingPoint1` (`cfp1`) | `[cef][device_custom_floating_point_1][value]` |
+| `deviceCustomFloatingPoint1Label` (`cfp1Label`) | `[cef][device_custom_floating_point_1][label]` |
+| `deviceCustomFloatingPoint2` (`cfp2`) | `[cef][device_custom_floating_point_2][value]` |
+| `deviceCustomFloatingPoint2Label` (`cfp2Label`) | `[cef][device_custom_floating_point_2][label]` |
+| `deviceCustomFloatingPoint3` (`cfp3`) | `[cef][device_custom_floating_point_3][value]` |
+| `deviceCustomFloatingPoint3Label` (`cfp3Label`) | `[cef][device_custom_floating_point_3][label]` |
+| `deviceCustomFloatingPoint4` (`cfp4`) | `[cef][device_custom_floating_point_4][value]` |
+| `deviceCustomFloatingPoint4Label` (`cfp4Label`) | `[cef][device_custom_floating_point_4][label]` |
+| `deviceCustomFloatingPoint5` (`cfp5`) | `[cef][device_custom_floating_point_5][value]` |
+| `deviceCustomFloatingPoint5Label` (`cfp5Label`) | `[cef][device_custom_floating_point_5][label]` |
+| `deviceCustomFloatingPoint6` (`cfp6`) | `[cef][device_custom_floating_point_6][value]` |
+| `deviceCustomFloatingPoint6Label` (`cfp6Label`) | `[cef][device_custom_floating_point_6][label]` |
+| `deviceCustomFloatingPoint7` (`cfp7`) | `[cef][device_custom_floating_point_7][value]` |
+| `deviceCustomFloatingPoint7Label` (`cfp7Label`) | `[cef][device_custom_floating_point_7][label]` |
+| `deviceCustomFloatingPoint8` (`cfp8`) | `[cef][device_custom_floating_point_8][value]` |
+| `deviceCustomFloatingPoint8Label` (`cfp8Label`) | `[cef][device_custom_floating_point_8][label]` |
+| `deviceCustomFloatingPoint9` (`cfp9`) | `[cef][device_custom_floating_point_9][value]` |
+| `deviceCustomFloatingPoint9Label` (`cfp9Label`) | `[cef][device_custom_floating_point_9][label]` |
+| `deviceCustomFloatingPoint10` (`cfp10`) | `[cef][device_custom_floating_point_10][value]` |
+| `deviceCustomFloatingPoint10Label` (`cfp10Label`) | `[cef][device_custom_floating_point_10][label]` |
+| `deviceCustomFloatingPoint11` (`cfp11`) | `[cef][device_custom_floating_point_11][value]` |
+| `deviceCustomFloatingPoint11Label` (`cfp11Label`) | `[cef][device_custom_floating_point_11][label]` |
+| `deviceCustomFloatingPoint12` (`cfp12`) | `[cef][device_custom_floating_point_12][value]` |
+| `deviceCustomFloatingPoint12Label` (`cfp12Label`) | `[cef][device_custom_floating_point_12][label]` |
+| `deviceCustomFloatingPoint13` (`cfp13`) | `[cef][device_custom_floating_point_13][value]` |
+| `deviceCustomFloatingPoint13Label` (`cfp13Label`) | `[cef][device_custom_floating_point_13][label]` |
+| `deviceCustomFloatingPoint14` (`cfp14`) | `[cef][device_custom_floating_point_14][value]` |
+| `deviceCustomFloatingPoint14Label` (`cfp14Label`) | `[cef][device_custom_floating_point_14][label]` |
+| `deviceCustomFloatingPoint15` (`cfp15`) | `[cef][device_custom_floating_point_15][value]` |
+| `deviceCustomFloatingPoint15Label` (`cfp15Label`) | `[cef][device_custom_floating_point_15][label]` |
+| `deviceCustomIPv6Address1` (`c6a1`) | `[cef][device_custom_ipv6_address_1][value]` |
+| `deviceCustomIPv6Address1Label` (`c6a1Label`) | `[cef][device_custom_ipv6_address_1][label]` |
+| `deviceCustomIPv6Address2` (`c6a2`) | `[cef][device_custom_ipv6_address_2][value]` |
+| `deviceCustomIPv6Address2Label` (`c6a2Label`) | `[cef][device_custom_ipv6_address_2][label]` |
+| `deviceCustomIPv6Address3` (`c6a3`) | `[cef][device_custom_ipv6_address_3][value]` |
+| `deviceCustomIPv6Address3Label` (`c6a3Label`) | `[cef][device_custom_ipv6_address_3][label]` |
+| `deviceCustomIPv6Address4` (`c6a4`) | `[cef][device_custom_ipv6_address_4][value]` |
+| `deviceCustomIPv6Address4Label` (`c6a4Label`) | `[cef][device_custom_ipv6_address_4][label]` |
+| `deviceCustomIPv6Address5` (`c6a5`) | `[cef][device_custom_ipv6_address_5][value]` |
+| `deviceCustomIPv6Address5Label` (`c6a5Label`) | `[cef][device_custom_ipv6_address_5][label]` |
+| `deviceCustomIPv6Address6` (`c6a6`) | `[cef][device_custom_ipv6_address_6][value]` |
+| `deviceCustomIPv6Address6Label` (`c6a6Label`) | `[cef][device_custom_ipv6_address_6][label]` |
+| `deviceCustomIPv6Address7` (`c6a7`) | `[cef][device_custom_ipv6_address_7][value]` |
+| `deviceCustomIPv6Address7Label` (`c6a7Label`) | `[cef][device_custom_ipv6_address_7][label]` |
+| `deviceCustomIPv6Address8` (`c6a8`) | `[cef][device_custom_ipv6_address_8][value]` |
+| `deviceCustomIPv6Address8Label` (`c6a8Label`) | `[cef][device_custom_ipv6_address_8][label]` |
+| `deviceCustomIPv6Address9` (`c6a9`) | `[cef][device_custom_ipv6_address_9][value]` |
+| `deviceCustomIPv6Address9Label` (`c6a9Label`) | `[cef][device_custom_ipv6_address_9][label]` |
+| `deviceCustomIPv6Address10` (`c6a10`) | `[cef][device_custom_ipv6_address_10][value]` |
+| `deviceCustomIPv6Address10Label` (`c6a10Label`) | `[cef][device_custom_ipv6_address_10][label]` |
+| `deviceCustomIPv6Address11` (`c6a11`) | `[cef][device_custom_ipv6_address_11][value]` |
+| `deviceCustomIPv6Address11Label` (`c6a11Label`) | `[cef][device_custom_ipv6_address_11][label]` |
+| `deviceCustomIPv6Address12` (`c6a12`) | `[cef][device_custom_ipv6_address_12][value]` |
+| `deviceCustomIPv6Address12Label` (`c6a12Label`) | `[cef][device_custom_ipv6_address_12][label]` |
+| `deviceCustomIPv6Address13` (`c6a13`) | `[cef][device_custom_ipv6_address_13][value]` |
+| `deviceCustomIPv6Address13Label` (`c6a13Label`) | `[cef][device_custom_ipv6_address_13][label]` |
+| `deviceCustomIPv6Address14` (`c6a14`) | `[cef][device_custom_ipv6_address_14][value]` |
+| `deviceCustomIPv6Address14Label` (`c6a14Label`) | `[cef][device_custom_ipv6_address_14][label]` |
+| `deviceCustomIPv6Address15` (`c6a15`) | `[cef][device_custom_ipv6_address_15][value]` |
+| `deviceCustomIPv6Address15Label` (`c6a15Label`) | `[cef][device_custom_ipv6_address_15][label]` |
+| `deviceCustomNumber1` (`cn1`) | `[cef][device_custom_number_1][value]` |
+| `deviceCustomNumber1Label` (`cn1Label`) | `[cef][device_custom_number_1][label]` |
+| `deviceCustomNumber2` (`cn2`) | `[cef][device_custom_number_2][value]` |
+| `deviceCustomNumber2Label` (`cn2Label`) | `[cef][device_custom_number_2][label]` |
+| `deviceCustomNumber3` (`cn3`) | `[cef][device_custom_number_3][value]` |
+| `deviceCustomNumber3Label` (`cn3Label`) | `[cef][device_custom_number_3][label]` |
+| `deviceCustomNumber4` (`cn4`) | `[cef][device_custom_number_4][value]` |
+| `deviceCustomNumber4Label` (`cn4Label`) | `[cef][device_custom_number_4][label]` |
+| `deviceCustomNumber5` (`cn5`) | `[cef][device_custom_number_5][value]` |
+| `deviceCustomNumber5Label` (`cn5Label`) | `[cef][device_custom_number_5][label]` |
+| `deviceCustomNumber6` (`cn6`) | `[cef][device_custom_number_6][value]` |
+| `deviceCustomNumber6Label` (`cn6Label`) | `[cef][device_custom_number_6][label]` |
+| `deviceCustomNumber7` (`cn7`) | `[cef][device_custom_number_7][value]` |
+| `deviceCustomNumber7Label` (`cn7Label`) | `[cef][device_custom_number_7][label]` |
+| `deviceCustomNumber8` (`cn8`) | `[cef][device_custom_number_8][value]` |
+| `deviceCustomNumber8Label` (`cn8Label`) | `[cef][device_custom_number_8][label]` |
+| `deviceCustomNumber9` (`cn9`) | `[cef][device_custom_number_9][value]` |
+| `deviceCustomNumber9Label` (`cn9Label`) | `[cef][device_custom_number_9][label]` |
+| `deviceCustomNumber10` (`cn10`) | `[cef][device_custom_number_10][value]` |
+| `deviceCustomNumber10Label` (`cn10Label`) | `[cef][device_custom_number_10][label]` |
+| `deviceCustomNumber11` (`cn11`) | `[cef][device_custom_number_11][value]` |
+| `deviceCustomNumber11Label` (`cn11Label`) | `[cef][device_custom_number_11][label]` |
+| `deviceCustomNumber12` (`cn12`) | `[cef][device_custom_number_12][value]` |
+| `deviceCustomNumber12Label` (`cn12Label`) | `[cef][device_custom_number_12][label]` |
+| `deviceCustomNumber13` (`cn13`) | `[cef][device_custom_number_13][value]` |
+| `deviceCustomNumber13Label` (`cn13Label`) | `[cef][device_custom_number_13][label]` |
+| `deviceCustomNumber14` (`cn14`) | `[cef][device_custom_number_14][value]` |
+| `deviceCustomNumber14Label` (`cn14Label`) | `[cef][device_custom_number_14][label]` |
+| `deviceCustomNumber15` (`cn15`) | `[cef][device_custom_number_15][value]` |
+| `deviceCustomNumber15Label` (`cn15Label`) | `[cef][device_custom_number_15][label]` |
+| `deviceCustomString1` (`cs1`) | `[cef][device_custom_string_1][value]` |
+| `deviceCustomString1Label` (`cs1Label`) | `[cef][device_custom_string_1][label]` |
+| `deviceCustomString2` (`cs2`) | `[cef][device_custom_string_2][value]` |
+| `deviceCustomString2Label` (`cs2Label`) | `[cef][device_custom_string_2][label]` |
+| `deviceCustomString3` (`cs3`) | `[cef][device_custom_string_3][value]` |
+| `deviceCustomString3Label` (`cs3Label`) | `[cef][device_custom_string_3][label]` |
+| `deviceCustomString4` (`cs4`) | `[cef][device_custom_string_4][value]` |
+| `deviceCustomString4Label` (`cs4Label`) | `[cef][device_custom_string_4][label]` |
+| `deviceCustomString5` (`cs5`) | `[cef][device_custom_string_5][value]` |
+| `deviceCustomString5Label` (`cs5Label`) | `[cef][device_custom_string_5][label]` |
+| `deviceCustomString6` (`cs6`) | `[cef][device_custom_string_6][value]` |
+| `deviceCustomString6Label` (`cs6Label`) | `[cef][device_custom_string_6][label]` |
+| `deviceCustomString7` (`cs7`) | `[cef][device_custom_string_7][value]` |
+| `deviceCustomString7Label` (`cs7Label`) | `[cef][device_custom_string_7][label]` |
+| `deviceCustomString8` (`cs8`) | `[cef][device_custom_string_8][value]` |
+| `deviceCustomString8Label` (`cs8Label`) | `[cef][device_custom_string_8][label]` |
+| `deviceCustomString9` (`cs9`) | `[cef][device_custom_string_9][value]` |
+| `deviceCustomString9Label` (`cs9Label`) | `[cef][device_custom_string_9][label]` |
+| `deviceCustomString10` (`cs10`) | `[cef][device_custom_string_10][value]` |
+| `deviceCustomString10Label` (`cs10Label`) | `[cef][device_custom_string_10][label]` |
+| `deviceCustomString11` (`cs11`) | `[cef][device_custom_string_11][value]` |
+| `deviceCustomString11Label` (`cs11Label`) | `[cef][device_custom_string_11][label]` |
+| `deviceCustomString12` (`cs12`) | `[cef][device_custom_string_12][value]` |
+| `deviceCustomString12Label` (`cs12Label`) | `[cef][device_custom_string_12][label]` |
+| `deviceCustomString13` (`cs13`) | `[cef][device_custom_string_13][value]` |
+| `deviceCustomString13Label` (`cs13Label`) | `[cef][device_custom_string_13][label]` |
+| `deviceCustomString14` (`cs14`) | `[cef][device_custom_string_14][value]` |
+| `deviceCustomString14Label` (`cs14Label`) | `[cef][device_custom_string_14][label]` |
+| `deviceCustomString15` (`cs15`) | `[cef][device_custom_string_15][value]` |
+| `deviceCustomString15Label` (`cs15Label`) | `[cef][device_custom_string_15][label]` |
+| `deviceDirection` | `[network][direction]` |
+| `deviceDnsDomain` | `[observer][registered_domain]` when plugin configured with `device => observer`, or `[host][registered_domain]` when configured with `device => host`. |
+| `deviceEventCategory` (`cat`) | `[cef][category]` |
+| `deviceExternalId` | `[observer][name]` when plugin configured with `device => observer`, or `[host][id]` when configured with `device => host`. |
+| `deviceFacility` | `[log][syslog][facility][code]` |
+| `deviceHostName` (`dvchost`) | `[observer][hostname]` when plugin configured with `device => observer`, or `[host][name]` when configured with `device => host`. |
+| `deviceInboundInterface` | `[observer][ingress][interface][name]` |
+| `deviceMacAddress` (`dvcmac`) | `[observer][mac]` when plugin configured with `device => observer`, or `[host][mac]` when configured with `device => host`. |
+| `deviceNtDomain` | `[cef][nt_domain]` |
+| `deviceOutboundInterface` | `[observer][egress][interface][name]` |
+| `devicePayloadId` | `[cef][payload_id]` |
+| `deviceProcessId` (`dvcpid`) | `[process][pid]` |
+| `deviceProcessName` | `[process][name]` |
+| `deviceReceiptTime` (`rt`) | `@timestamp` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `deviceTimeZone` (`dtz`) | `[event][timezone]` |
+| `deviceTranslatedAddress` | `[host][nat][ip]` |
+| `deviceTranslatedZoneExternalID` | `[cef][translated_zone][external_id]` |
+| `deviceTranslatedZoneURI` | `[cef][translated_zone][uri]` |
+| `deviceVersion` | `[observer][version]` |
+| `deviceZoneExternalID` | `[cef][zone][external_id]` |
+| `deviceZoneURI` | `[cef][zone][uri]` |
+| `endTime` (`end`) | `[event][end]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `eventId` | `[event][id]` |
+| `eventOutcome` (`outcome`) | `[event][outcome]` |
+| `externalId` | `[cef][external_id]` |
+| `fileCreateTime` | `[file][created]` |
+| `fileHash` | `[file][hash]` |
+| `fileId` | `[file][inode]` |
+| `fileModificationTime` | `[file][mtime]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `fileName` (`fname`) | `[file][name]` |
+| `filePath` | `[file][path]` |
+| `filePermission` | `[file][group]` |
+| `fileSize` (`fsize`) | `[file][size]` |
+| `fileType` | `[file][extension]` |
+| `managerReceiptTime` (`mrt`) | `[event][ingested]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `message` (`msg`) | `[message]` |
+| `oldFileCreateTime` | `[cef][old_file][created]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `oldFileHash` | `[cef][old_file][hash]` |
+| `oldFileId` | `[cef][old_file][inode]` |
+| `oldFileModificationTime` | `[cef][old_file][mtime]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `oldFileName` | `[cef][old_file][name]` |
+| `oldFilePath` | `[cef][old_file][path]` |
+| `oldFilePermission` | `[cef][old_file][group]` |
+| `oldFileSize` | `[cef][old_file][size]` |
+| `oldFileType` | `[cef][old_file][extension]` |
+| `rawEvent` | `[event][original]` |
+| `Reason` (`reason`) | `[event][reason]` |
+| `requestClientApplication` | `[user_agent][original]` |
+| `requestContext` | `[http][request][referrer]` |
+| `requestCookies` | `[cef][request][cookies]` |
+| `requestMethod` | `[http][request][method]` |
+| `requestUrl` (`request`) | `[url][original]` |
+| `sourceAddress` (`src`) | `[source][ip]` |
+| `sourceDnsDomain` | `[source][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *higher* priority. |
+| `sourceGeoLatitude` (`slat`) | `[source][geo][location][lat]` |
+| `sourceGeoLongitude` (`slong`) | `[source][geo][location][lon]` |
+| `sourceHostName` (`shost`) | `[source][domain]` |
+| `sourceMacAddress` (`smac`) | `[source][mac]` |
+| `sourceNtDomain` (`sntdom`) | `[source][registered_domain]` Multiple possible CEF fields map to this ECS Field. When decoding, the last entry encountered wins. When encoding, this field has *lower* priority. |
+| `sourcePort` (`spt`) | `[source][port]` |
+| `sourceProcessId` (`spid`) | `[source][process][pid]` |
+| `sourceProcessName` (`sproc`) | `[source][process][name]` |
+| `sourceServiceName` | `[source][service][name]` |
+| `sourceTranslatedAddress` | `[source][nat][ip]` |
+| `sourceTranslatedPort` | `[source][nat][port]` |
+| `sourceTranslatedZoneExternalID` | `[cef][source][translated_zone][external_id]` |
+| `sourceTranslatedZoneURI` | `[cef][source][translated_zone][uri]` |
+| `sourceUserId` (`suid`) | `[source][user][id]` |
+| `sourceUserName` (`suser`) | `[source][user][name]` |
+| `sourceUserPrivileges` (`spriv`) | `[source][user][group][name]` |
+| `sourceZoneExternalID` | `[cef][source][zone][external_id]` |
+| `sourceZoneURI` | `[cef][source][zone][uri]` |
+| `startTime` (`start`) | `[event][start]` This field contains a timestamp. In ECS Compatibility Mode, it is parsed to a specific point in time. |
+| `transportProtocol` (`proto`) | `[network][transport]` |
+| `type` | `[cef][type]` |
+
+
+
+
+## Cef Codec Configuration Options [plugins-codecs-cef-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`default_timezone`](#plugins-codecs-cef-default_timezone) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`delimiter`](#plugins-codecs-cef-delimiter) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`device`](#plugins-codecs-cef-device) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-codecs-cef-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`fields`](#plugins-codecs-cef-fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`locale`](#plugins-codecs-cef-locale) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`name`](#plugins-codecs-cef-name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`product`](#plugins-codecs-cef-product) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`raw_data_field`](#plugins-codecs-cef-raw_data_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`reverse_mapping`](#plugins-codecs-cef-reverse_mapping) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`severity`](#plugins-codecs-cef-severity) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`signature`](#plugins-codecs-cef-signature) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`vendor`](#plugins-codecs-cef-vendor) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`version`](#plugins-codecs-cef-version) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `default_timezone` [plugins-codecs-cef-default_timezone]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * [Timezone names](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) (such as `Europe/Moscow`, `America/Argentina/Buenos_Aires`)
+ * UTC Offsets (such as `-08:00`, `+03:00`)
+
+* The default value is your system time zone
+* This option has no effect when *encoding*.
+
+When parsing timestamp fields in ECS mode and encountering timestamps that do not contain UTC-offset information, the `deviceTimeZone` (`dtz`) field from the CEF payload is used to interpret the given time. If the event does not include timezone information, this `default_timezone` is used instead.
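+
+For example, a sketch of decoding events from devices that report local time without a UTC offset (the port is hypothetical):
+
+```ruby
+input {
+  tcp {
+    port  => 5000
+    codec => cef {
+      ecs_compatibility => v1
+      default_timezone  => "Europe/Moscow"
+    }
+  }
+}
+```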
+
+
+### `delimiter` [plugins-codecs-cef-delimiter]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you’ll want to set this to be that delimiter.
+
+::::{note}
+Byte stream inputs such as TCP require a delimiter to be specified. Otherwise the input can be truncated or incorrectly split.
+::::
+
+
+**Example**
+
+```ruby
+ input {
+ tcp {
+ codec => cef { delimiter => "\r\n" }
+ # ...
+ }
+ }
+```
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+
+### `device` [plugins-codecs-cef-device]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `observer`: indicates that device-specific fields represent the device used to *observe* the event.
+ * `host`: indicates that device-specific fields represent the device on which the event *occurred*.
+
+* The default value for this setting is `observer`.
+* Option has no effect when [`ecs_compatibility => disabled`](#plugins-codecs-cef-ecs_compatibility).
+* Option has no effect when *encoding*
+
+Defines a set of device-specific CEF fields as either representing the device on which an event *occurred*, or merely the device from which the event was *observed*. This causes the relevant fields to be routed to either the `host` or the `observer` top-level groupings.
+
+If the codec handles data from a variety of sources, the ECS recommendation is to use `observer`.
+
+
+### `ecs_compatibility` [plugins-codecs-cef-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: uses CEF-defined field names in the event (e.g., `bytesIn`, `sourceAddress`)
+ * `v1`: supports ECS-compatible event fields (e.g., `[source][bytes]`, `[source][ip]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `fields` [plugins-codecs-cef-fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, a list of fields can be provided to be included in the CEF extensions part as key/value pairs.
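+
+For example, a sketch of an output using this codec (the host, port, and field names are hypothetical):
+
+```ruby
+output {
+  tcp {
+    host  => "siem.example.com"
+    port  => 514
+    codec => cef {
+      vendor  => "Example"
+      product => "MyApp"
+      fields  => ["src_ip", "dst_ip", "action"]
+    }
+  }
+}
+```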
+
+
+### `locale` [plugins-codecs-cef-locale]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * Abbreviated language_COUNTRY format (e.g., `en_GB`, `pt_BR`)
+ * Valid [IETF BCP 47](https://tools.ietf.org/html/bcp47) language tag (e.g., `zh-cmn-Hans-CN`)
+
+* The default value is your system locale
+* Option has no effect when *encoding*
+
+When parsing timestamp fields in ECS mode and encountering timestamps in a localized format, this `locale` is used to interpret locale-specific strings such as month abbreviations.
+
+
+### `name` [plugins-codecs-cef-name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the name field in the CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `product` [plugins-codecs-cef-product]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the device product field in CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `raw_data_field` [plugins-codecs-cef-raw_data_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+Store the raw data in the specified field, for example `[event][original]`. Any existing value in the target field will be overridden.
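+
+A minimal sketch of preserving the raw CEF payload alongside the decoded fields (the input details are hypothetical):
+
+```ruby
+input {
+  tcp {
+    port  => 5000
+    codec => cef {
+      raw_data_field => "[event][original]"
+    }
+  }
+}
+```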
+
+
+### `reverse_mapping` [plugins-codecs-cef-reverse_mapping]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+* Option has no effect when *decoding*
+
+Set to `true` to adhere to the specification and encode using the CEF key names (short names) rather than the full CEF field names.
+
+
+### `severity` [plugins-codecs-cef-severity]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"6"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the severity field in CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated to be an integer in the range from 0 to 10 (inclusive). All invalid values will be mapped to the default of 6.
+
+
+### `signature` [plugins-codecs-cef-signature]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the signature ID field in CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `vendor` [plugins-codecs-cef-vendor]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Elasticsearch"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the device vendor field in CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `version` [plugins-codecs-cef-version]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"1.0"`
+* Option has no effect when *decoding*
+
+When this codec is used in an Output Plugin, this option can be used to specify the value of the device version field in CEF header. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
diff --git a/docs/reference/plugins-codecs-cloudfront.md b/docs/reference/plugins-codecs-cloudfront.md
new file mode 100644
index 000000000..f4e54a922
--- /dev/null
+++ b/docs/reference/plugins-codecs-cloudfront.md
@@ -0,0 +1,47 @@
+---
+navigation_title: "cloudfront"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-cloudfront.html
+---
+
+# Cloudfront codec plugin [plugins-codecs-cloudfront]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-cloudfront-index.md).
+
+## Getting help [_getting_help_174]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_173]
+
+This codec will read CloudFront encoded content.
+
+
+## Cloudfront Codec Configuration Options [plugins-codecs-cloudfront-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-cloudfront-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+
+
+
+### `charset` [plugins-codecs-cloudfront-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and "CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that emits JSON does so in another encoding (nxlog, for example). In weird cases like this, you can set the charset setting to the actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you’ll want to set this to "CP1252".
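+
+A minimal sketch of setting the charset when reading CloudFront logs from S3 (the bucket name is hypothetical):
+
+```ruby
+input {
+  s3 {
+    bucket => "my-cloudfront-logs"
+    codec  => cloudfront { charset => "UTF-8" }
+  }
+}
+```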
+
+
+
diff --git a/docs/reference/plugins-codecs-cloudtrail.md b/docs/reference/plugins-codecs-cloudtrail.md
new file mode 100644
index 000000000..8b12e3cfb
--- /dev/null
+++ b/docs/reference/plugins-codecs-cloudtrail.md
@@ -0,0 +1,41 @@
+---
+navigation_title: "cloudtrail"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-cloudtrail.html
+---
+
+# Cloudtrail codec plugin [plugins-codecs-cloudtrail]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-cloudtrail-index.md).
+
+## Getting help [_getting_help_175]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_174]
+
+This is the base class for Logstash codecs.
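+
+This codec is typically paired with the S3 input to decode CloudTrail log records; a minimal usage sketch (the bucket name is hypothetical):
+
+```ruby
+input {
+  s3 {
+    bucket => "my-cloudtrail-logs"
+    codec  => cloudtrail { }
+  }
+}
+```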
+
+
+## Cloudtrail Codec Configuration Options [plugins-codecs-cloudtrail-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-cloudtrail-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+
+
+
+### `charset` [plugins-codecs-cloudtrail-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+
+
diff --git a/docs/reference/plugins-codecs-collectd.md b/docs/reference/plugins-codecs-collectd.md
new file mode 100644
index 000000000..88517f69e
--- /dev/null
+++ b/docs/reference/plugins-codecs-collectd.md
@@ -0,0 +1,153 @@
+---
+navigation_title: "collectd"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-collectd.html
+---
+
+# Collectd codec plugin [plugins-codecs-collectd]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-collectd-index.md).
+
+## Getting help [_getting_help_176]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-collectd). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_175]
+
+Read events from the collectd binary protocol over the network via UDP. See [https://collectd.org/wiki/index.php/Binary_protocol](https://collectd.org/wiki/index.php/Binary_protocol).
+
+Configuration in your Logstash configuration file can be as simple as:
+
+```ruby
+ input {
+ udp {
+ port => 25826
+ buffer_size => 1452
+ codec => collectd { }
+ }
+ }
+```
+
+A sample `collectd.conf` to send to Logstash might be:
+
+```xml
+ Hostname "host.example.com"
+ LoadPlugin interface
+ LoadPlugin load
+ LoadPlugin memory
+ LoadPlugin network
+ <Plugin interface>
+     Interface "eth0"
+     IgnoreSelected false
+ </Plugin>
+ <Plugin network>
+     Server "10.0.0.1" "25826"
+ </Plugin>
+```
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
+
+
+## Collectd Codec configuration options [plugins-codecs-collectd-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`authfile`](#plugins-codecs-collectd-authfile) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nan_handling`](#plugins-codecs-collectd-nan_handling) | [string](/reference/configuration-file-structure.md#string), one of `["change_value", "warn", "drop"]` | No |
+| [`nan_tag`](#plugins-codecs-collectd-nan_tag) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nan_value`](#plugins-codecs-collectd-nan_value) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`prune_intervals`](#plugins-codecs-collectd-prune_intervals) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`security_level`](#plugins-codecs-collectd-security_level) | [string](/reference/configuration-file-structure.md#string), one of `["None", "Sign", "Encrypt"]` | No |
+| [`target`](#plugins-codecs-collectd-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`typesdb`](#plugins-codecs-collectd-typesdb) | [array](/reference/configuration-file-structure.md#array) | No |
+
+
+
+### `authfile` [plugins-codecs-collectd-authfile]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as the [AuthFile](http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename) in collectd. You only need to set this option if the `security_level` is set to `Sign` or `Encrypt`
+
+
+### `nan_handling` [plugins-codecs-collectd-nan_handling]
+
+* Value can be any of: `change_value`, `warn`, `drop`
+* Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number)
+
+* change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+* warn: Change the `NaN` to the value of the nan_value option, print a warning to the log and add `nan_tag` as a tag
+* drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+
+### `nan_tag` [plugins-codecs-collectd-nan_tag]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found. Set this to an empty string (`''`) if you don’t want to tag.
+
+
+### `nan_value` [plugins-codecs-collectd-nan_value]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`. Change `NaN` to this configured value.
+
+
+### `prune_intervals` [plugins-codecs-collectd-prune_intervals]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+
+### `security_level` [plugins-codecs-collectd-security_level]
+
+* Value can be any of: `None`, `Sign`, `Encrypt`
+* Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the collectd [Network plugin](https://collectd.org/wiki/index.php/Plugin:Network)
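+
+For example, a sketch of requiring signed packets (the authfile path is hypothetical):
+
+```ruby
+input {
+  udp {
+    port        => 25826
+    buffer_size => 1452
+    codec       => collectd {
+      security_level => "Sign"
+      authfile       => "/etc/collectd/auth_file"
+    }
+  }
+}
+```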
+
+
+### `target` [plugins-codecs-collectd-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the decoded values. If this setting is not set, data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ udp {
+ port => 12345
+ codec => collectd {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+### `typesdb` [plugins-codecs-collectd-typesdb]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+File path(s) to the collectd `types.db` to use. The last matching pattern wins if you have identical pattern names in multiple files. If no `types.db` is provided, the included `types.db` will be used (currently 5.4.0).
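+
+A minimal sketch of pointing the codec at a custom `types.db` (the path is hypothetical):
+
+```ruby
+input {
+  udp {
+    port  => 25826
+    codec => collectd {
+      typesdb => ["/usr/share/collectd/types.db"]
+    }
+  }
+}
+```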
+
+
+
diff --git a/docs/reference/plugins-codecs-csv.md b/docs/reference/plugins-codecs-csv.md
new file mode 100644
index 000000000..84fb6580c
--- /dev/null
+++ b/docs/reference/plugins-codecs-csv.md
@@ -0,0 +1,178 @@
+---
+navigation_title: "csv"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-csv.html
+---
+
+# Csv codec plugin [plugins-codecs-csv]
+
+
+* Plugin version: v1.1.0
+* Released on: 2021-07-28
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-csv/blob/v1.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-csv-index.md).
+
+## Installation [_installation_68]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-codec-csv`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_177]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-csv). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_176]
+
+The csv codec takes CSV data, parses it and passes it along.
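+
+A minimal usage sketch (the column names are hypothetical):
+
+```ruby
+input {
+  stdin {
+    codec => csv {
+      columns   => ["name", "age", "city"]
+      separator => ","
+    }
+  }
+}
+```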
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-codecs-csv-ecs]
+
+The plugin behaves the same regardless of ECS compatibility, except giving a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
+
+
+
+## Csv Codec configuration options [plugins-codecs-csv-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`autodetect_column_names`](#plugins-codecs-csv-autodetect_column_names) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`autogenerate_column_names`](#plugins-codecs-csv-autogenerate_column_names) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`charset`](#plugins-codecs-csv-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`columns`](#plugins-codecs-csv-columns) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`convert`](#plugins-codecs-csv-convert) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`ecs_compatibility`](#plugins-codecs-csv-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_headers`](#plugins-codecs-csv-include_headers) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`quote_char`](#plugins-codecs-csv-quote_char) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`separator`](#plugins-codecs-csv-separator) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`skip_empty_columns`](#plugins-codecs-csv-skip_empty_columns) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-codecs-csv-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `autodetect_column_names` [plugins-codecs-csv-autodetect_column_names]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether column names should be auto-detected from the header column or not. Defaults to false.
+
+
+### `autogenerate_column_names` [plugins-codecs-csv-autogenerate_column_names]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Define whether column names should be autogenerated or not. Defaults to true. If set to false, columns not having a header specified will not be parsed.
+
+
+### `charset` [plugins-codecs-csv-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and "CP1252".
+
+
+### `columns` [plugins-codecs-csv-columns]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+**When decoding:** Define a list of column names (in the order they appear in the CSV, as if it were a header line). If `columns` is not configured, or there are not enough columns specified, the default column names are "column1", "column2", etc.
+
+**When encoding:** List of field names to include in the encoded CSV, in the order listed.
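+
+For example, a minimal decoding sketch (the input and column names are illustrative) that names the first three columns instead of `column1`, `column2`, and so on:
+
+```ruby
+  input {
+    stdin {
+      codec => csv {
+        columns => ["timestamp", "status", "bytes"]
+      }
+    }
+  }
+```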
+
+
+### `convert` [plugins-codecs-csv-convert]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Define a set of datatype conversions to be applied to columns. Possible conversions are: `integer`, `float`, `date`, `date_time`, `boolean`
+
+**Example**
+
+```ruby
+ filter {
+ csv {
+ convert => { "column1" => "integer", "column2" => "boolean" }
+ }
+ }
+```
+
+
+### `ecs_compatibility` [plugins-codecs-csv-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: CSV data added at root level
+ * `v1`,`v8`: Elastic Common Schema compliant behavior (`[event][original]` is also added)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `include_headers` [plugins-codecs-csv-include_headers]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When **encoding** in an output plugin, include headers in the encoded CSV once per codec lifecycle (not for every event). Defaults to `false`.
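+
+For example, a minimal encoding sketch (the output path and column names are illustrative) that writes a header line once, followed by one CSV row per event:
+
+```ruby
+  output {
+    file {
+      path => "/tmp/events.csv"              # illustrative path
+      codec => csv {
+        include_headers => true
+        columns => ["timestamp", "status"]
+      }
+    }
+  }
+```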
+
+
+### `quote_char` [plugins-codecs-csv-quote_char]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\""`
+
+Define the character used to quote CSV fields. If this is not specified, the default is a double quote `"`. Optional.
+
+
+### `separator` [plugins-codecs-csv-separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `","`
+
+Define the column separator value. If this is not specified, the default is a comma `,`. Optional.
+
+
+### `skip_empty_columns` [plugins-codecs-csv-skip_empty_columns]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether empty columns should be skipped. Defaults to false. If set to true, columns containing no value will not be included.
+
+
+### `target` [plugins-codecs-csv-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the row values. If this setting is not set, the CSV data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ file {
+ codec => csv {
+ autodetect_column_names => true
+ target => "[document]"
+ }
+ }
+ }
+```
diff --git a/docs/reference/plugins-codecs-dots.md b/docs/reference/plugins-codecs-dots.md
new file mode 100644
index 000000000..c485dbe88
--- /dev/null
+++ b/docs/reference/plugins-codecs-dots.md
@@ -0,0 +1,25 @@
+---
+navigation_title: "dots"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-dots.html
+---
+
+# Dots codec plugin [plugins-codecs-dots]
+
+
+* Plugin version: v3.0.6
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-dots-index.md).
+
+## Getting help [_getting_help_178]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-dots). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_177]
+
+This codec generates a dot (`.`) to represent each event it processes. This is typically used with the `stdout` output to provide feedback on the terminal. It is also used to measure Logstash’s throughput with the `pv` command.
+
+
diff --git a/docs/reference/plugins-codecs-edn.md b/docs/reference/plugins-codecs-edn.md
new file mode 100644
index 000000000..db7c6e701
--- /dev/null
+++ b/docs/reference/plugins-codecs-edn.md
@@ -0,0 +1,56 @@
+---
+navigation_title: "edn"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-edn.html
+---
+
+# Edn codec plugin [plugins-codecs-edn]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-edn-index.md).
+
+## Getting help [_getting_help_179]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-edn). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_178]
+
+Reads and produces EDN format data.
+
+
+## Edn Codec configuration options [plugins-codecs-edn-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`target`](#plugins-codecs-edn-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `target` [plugins-codecs-edn-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* The option is only relevant while decoding.
+
+Define the target field for placing the decoded fields. If this setting is not set, data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ tcp {
+ port => 4242
+ codec => edn {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-edn_lines.md b/docs/reference/plugins-codecs-edn_lines.md
new file mode 100644
index 000000000..84e3ef269
--- /dev/null
+++ b/docs/reference/plugins-codecs-edn_lines.md
@@ -0,0 +1,56 @@
+---
+navigation_title: "edn_lines"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-edn_lines.html
+---
+
+# Edn_lines codec plugin [plugins-codecs-edn_lines]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-edn_lines-index.md).
+
+## Getting help [_getting_help_180]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-edn_lines). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_179]
+
+Reads and produces newline-delimited EDN format data.
+
+
+## Edn_lines Codec configuration options [plugins-codecs-edn_lines-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`target`](#plugins-codecs-edn_lines-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `target` [plugins-codecs-edn_lines-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* The option is only relevant while decoding.
+
+Define the target field for placing the decoded fields. If this setting is not set, data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ tcp {
+ port => 4242
+ codec => edn_lines {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-es_bulk.md b/docs/reference/plugins-codecs-es_bulk.md
new file mode 100644
index 000000000..0d783d3dd
--- /dev/null
+++ b/docs/reference/plugins-codecs-es_bulk.md
@@ -0,0 +1,79 @@
+---
+navigation_title: "es_bulk"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-es_bulk.html
+---
+
+# Es_bulk codec plugin [plugins-codecs-es_bulk]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-08-19
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-es_bulk-index.md).
+
+## Getting help [_getting_help_181]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-es_bulk). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_180]
+
+This codec will decode the [Elasticsearch bulk format](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) into individual events, plus metadata into the `@metadata` field.
+
+Encoding is not supported at this time as the Elasticsearch output submits Logstash events in bulk format.
+
+
+## Codec settings in the `logstash-input-http` plugin [plugins-codecs-es_bulk-codec-settings]
+
+The [input-http](/reference/plugins-inputs-http.md) plugin has two configuration options for codecs: `codec` and `additional_codecs`.
+
+Values in `additional_codecs` are prioritized over those specified in the `codec` option. That is, the default `codec` is applied only if no codec for the request’s content-type is found in the `additional_codecs` setting.
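+
+For example, a minimal sketch (the port and content-type mapping are illustrative) that applies the `es_bulk` codec only to matching requests and falls back to `plain` otherwise:
+
+```ruby
+input {
+  http {
+    port => 8080                                                   # illustrative port
+    codec => "plain"
+    additional_codecs => { "application/x-ndjson" => "es_bulk" }   # illustrative content type
+  }
+}
+```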
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-codecs-es_bulk-ecs_metadata]
+
+When ECS compatibility is disabled, the metadata is stored in the `[@metadata]` field. When ECS is enabled, the metadata is stored in the `[@metadata][codec][es_bulk]` field.
+
+
+## ES Bulk Codec Configuration Options [plugins-codecs-es_bulk-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-codecs-es_bulk-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-codecs-es_bulk-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `ecs_compatibility` [plugins-codecs-es_bulk-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured metadata added at @metadata
+ * `v1`: uses `[@metadata][codec][es_bulk]` fields
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `target` [plugins-codecs-es_bulk-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the values. If this setting is not set, the data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+input {
+ kafka {
+ codec => es_bulk {
+ target => "[document]"
+ }
+ }
+}
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-fluent.md b/docs/reference/plugins-codecs-fluent.md
new file mode 100644
index 000000000..7ed4d6bbf
--- /dev/null
+++ b/docs/reference/plugins-codecs-fluent.md
@@ -0,0 +1,87 @@
+---
+navigation_title: "fluent"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-fluent.html
+---
+
+# Fluent codec plugin [plugins-codecs-fluent]
+
+
+* Plugin version: v3.4.3
+* Released on: 2024-06-25
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.4.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-fluent-index.md).
+
+## Getting help [_getting_help_182]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-fluent). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_181]
+
+This codec handles fluentd’s msgpack schema.
+
+For example, you can receive logs from `fluent-logger-ruby` with:
+
+```ruby
+ input {
+ tcp {
+ codec => fluent
+ port => 4000
+ }
+ }
+```
+
+And from your ruby code in your own application:
+
+```ruby
+ logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000)
+ logger.post("some_tag", { "your" => "data", "here" => "yay!" })
+```
+
+::::{note}
+Fluent uses second-precision for events, so you will not see sub-second precision on events processed by this codec.
+::::
+
+
+
+## Fluent Codec configuration options [plugins-codecs-fluent-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`nanosecond_precision`](#plugins-codecs-fluent-nanosecond_precision) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-codecs-fluent-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `nanosecond_precision` [plugins-codecs-fluent-nanosecond_precision]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enables sub-second level precision while encoding events.
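+
+For example, a minimal encoding sketch (the host and port are illustrative, assuming a downstream fluentd forward listener) that keeps sub-second timestamps:
+
+```ruby
+  output {
+    tcp {
+      host => "127.0.0.1"
+      port => 24224                   # illustrative fluentd forward port
+      codec => fluent {
+        nanosecond_precision => true
+      }
+    }
+  }
+```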
+
+
+### `target` [plugins-codecs-fluent-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the decoded values. If this setting is not set, data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `logs` field:
+
+```ruby
+ input {
+ tcp {
+ codec => fluent {
+ target => "[logs]"
+ }
+ port => 4000
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-graphite.md b/docs/reference/plugins-codecs-graphite.md
new file mode 100644
index 000000000..9221c63e9
--- /dev/null
+++ b/docs/reference/plugins-codecs-graphite.md
@@ -0,0 +1,93 @@
+---
+navigation_title: "graphite"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-graphite.html
+---
+
+# Graphite codec plugin [plugins-codecs-graphite]
+
+
+* Plugin version: v3.0.6
+* Released on: 2021-08-12
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-graphite-index.md).
+
+## Getting help [_getting_help_183]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-graphite). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_182]
+
+This codec will encode and decode Graphite formatted lines.
+
+
+## Graphite Codec Configuration Options [plugins-codecs-graphite-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`exclude_metrics`](#plugins-codecs-graphite-exclude_metrics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`fields_are_metrics`](#plugins-codecs-graphite-fields_are_metrics) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_metrics`](#plugins-codecs-graphite-include_metrics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`metrics`](#plugins-codecs-graphite-metrics) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`metrics_format`](#plugins-codecs-graphite-metrics_format) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `exclude_metrics` [plugins-codecs-graphite-exclude_metrics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names. By default, unresolved `%{{field}}` strings are excluded.
+
+
+### `fields_are_metrics` [plugins-codecs-graphite-fields_are_metrics]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Indicates that the event @fields should be treated as metrics and will be sent as-is to Graphite.
+
+
+### `include_metrics` [plugins-codecs-graphite-include_metrics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[".*"]`
+
+Include only regex-matched metric names.
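+
+For example, a minimal encoding sketch (the Carbon endpoint and metric pattern are illustrative) that treats event fields as metrics and forwards only the ones matching `include_metrics`:
+
+```ruby
+  output {
+    tcp {
+      host => "graphite.example.com"      # illustrative Carbon endpoint
+      port => 2003
+      codec => graphite {
+        fields_are_metrics => true
+        include_metrics => ["^uptime_.*"]
+      }
+    }
+  }
+```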
+
+
+### `metrics` [plugins-codecs-graphite-metrics]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value. Example:
+
+```ruby
+ [ "%{host}/uptime", "%{uptime_1m}" ]
+```
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `metrics_format` [plugins-codecs-graphite-metrics_format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be replaced with the name of the actual metric. This supports dynamic strings like `%{{host}}`.
+
+```ruby
+ metrics_format => "%{host}.foo.bar.*.sum"
+```
+
+::::{note}
+If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-codecs-gzip_lines.md b/docs/reference/plugins-codecs-gzip_lines.md
new file mode 100644
index 000000000..971827bf3
--- /dev/null
+++ b/docs/reference/plugins-codecs-gzip_lines.md
@@ -0,0 +1,51 @@
+---
+navigation_title: "gzip_lines"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-gzip_lines.html
+---
+
+# Gzip_lines codec plugin [plugins-codecs-gzip_lines]
+
+
+* Plugin version: v3.0.4
+* Released on: 2019-07-23
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-gzip_lines-index.md).
+
+## Installation [_installation_69]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-codec-gzip_lines`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_184]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-gzip_lines). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_183]
+
+This codec will read gzip-encoded content.
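+
+For example, a minimal sketch (the port is illustrative), assuming the upstream sender streams gzip-compressed, newline-delimited data over TCP:
+
+```ruby
+input {
+  tcp {
+    port => 5000                 # illustrative port receiving gzip-compressed line data
+    codec => gzip_lines
+  }
+}
+```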
+
+
+## Gzip_lines Codec Configuration Options [plugins-codecs-gzip_lines-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-gzip_lines-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+
+
+
+### `charset` [plugins-codecs-gzip_lines-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and "CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that emits JSON does so in another encoding (nxlog, for example). In weird cases like this, you can set the charset setting to the actual encoding of the text and logstash will convert it for you.
+
+For nxlog users, you’ll want to set this to "CP1252".
+
+
+
diff --git a/docs/reference/plugins-codecs-java_line.md b/docs/reference/plugins-codecs-java_line.md
new file mode 100644
index 000000000..66371fce6
--- /dev/null
+++ b/docs/reference/plugins-codecs-java_line.md
@@ -0,0 +1,60 @@
+---
+navigation_title: "java_line"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-java_line.html
+---
+
+# Java_line codec plugin [plugins-codecs-java_line]
+
+
+**{{ls}} Core Plugin.** The java_line codec plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_186]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_185]
+
+Encodes and decodes line-oriented text data.
+
+Decoding behavior: All text data between specified delimiters will be decoded as distinct events.
+
+Encoding behavior: Each event will be emitted with the specified trailing delimiter.
+
+
+## Java_line Codec Configuration Options [plugins-codecs-java_line-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-java_line-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`delimiter`](#plugins-codecs-java_line-delimiter) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`format`](#plugins-codecs-java_line-format) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-java_line-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used by this input. Examples include `UTF-8` and `cp1252`. This setting is useful if your inputs are in `Latin-1` (aka `cp1252`) or in character sets other than `UTF-8`.
+
+
+### `delimiter` [plugins-codecs-java_line-delimiter]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is the system-dependent line separator ("\n" for UNIX systems; "\r\n" for Microsoft Windows)
+
+Specifies the delimiter that indicates end-of-line.
+
+
+### `format` [plugins-codecs-java_line-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the desired text format for encoding in [`sprintf`](/reference/event-dependent-configuration.md#sprintf) format.
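+
+For example, a minimal encoding sketch (the field names are illustrative) that renders each event as a single formatted line on stdout:
+
+```ruby
+output {
+  java_stdout {
+    codec => java_line {
+      format => "%{host} %{message}"
+    }
+  }
+}
+```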
+
+
+
diff --git a/docs/reference/plugins-codecs-java_plain.md b/docs/reference/plugins-codecs-java_plain.md
new file mode 100644
index 000000000..11faa6fc1
--- /dev/null
+++ b/docs/reference/plugins-codecs-java_plain.md
@@ -0,0 +1,47 @@
+---
+navigation_title: "java_plain"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-java_plain.html
+---
+
+# Java_plain codec plugin [plugins-codecs-java_plain]
+
+
+**{{ls}} Core Plugin.** The java_plain codec plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_187]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_186]
+
+The `java_plain` codec is for text data with no delimiters between events. It is useful mainly for inputs and outputs that already have a defined framing in their transport protocol such as ZeroMQ, RabbitMQ, Redis, etc.
+
+
+## Java_plain Codec Configuration Options [plugins-codecs-java_plain-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-java_plain-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`format`](#plugins-codecs-java_plain-format) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-java_plain-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8` and `cp1252`. This setting is useful if your data is in a character set other than `UTF-8`.
+
+
+### `format` [plugins-codecs-java_plain-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the desired text format for encoding in [`sprintf`](/reference/event-dependent-configuration.md#sprintf) format.
+
+
+
diff --git a/docs/reference/plugins-codecs-jdots.md b/docs/reference/plugins-codecs-jdots.md
new file mode 100644
index 000000000..04cd6f240
--- /dev/null
+++ b/docs/reference/plugins-codecs-jdots.md
@@ -0,0 +1,25 @@
+---
+navigation_title: "jdots"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-jdots.html
+---
+
+# Jdots codec plugin [plugins-codecs-jdots]
+
+
+**{{ls}} Core Plugin.** The jdots codec plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_185]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_184]
+
+This codec renders each processed event as a dot (`.`). It is typically used with the `java_stdout` output to provide approximate event throughput. It is especially useful when combined with `pv` and `wc -c` as follows:
+
+```bash
+ bin/logstash -f /path/to/config/with/jdots/codec | pv | wc -c
+```
+
+
diff --git a/docs/reference/plugins-codecs-json.md b/docs/reference/plugins-codecs-json.md
new file mode 100644
index 000000000..c9f02c024
--- /dev/null
+++ b/docs/reference/plugins-codecs-json.md
@@ -0,0 +1,91 @@
+---
+navigation_title: "json"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-json.html
+---
+
+# Json codec plugin [plugins-codecs-json]
+
+
+* Plugin version: v3.1.1
+* Released on: 2022-10-03
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-json/blob/v3.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-json-index.md).
+
+## Getting help [_getting_help_188]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-json). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_187]
+
+This codec may be used to decode (via inputs) and encode (via outputs) full JSON messages. If the data being sent is a JSON array at its root, multiple events will be created (one per element).
+
+If you are streaming JSON messages delimited by *\n*, see the `json_lines` codec.
+
+Encoding will result in a compact JSON representation (no line terminators or indentation).
+
+If this codec receives a payload from an input that is not valid JSON, then it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON failure, the payload will be stored in the `message` field.
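+
+For example, a minimal sketch of isolating such events downstream (the routing choice is illustrative):
+
+```ruby
+filter {
+  if "_jsonparsefailure" in [tags] {
+    # Handle events whose payload could not be parsed as JSON,
+    # for example by dropping them or routing them elsewhere.
+    drop { }
+  }
+}
+```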
+
+
+## Json Codec Configuration Options [plugins-codecs-json-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-json-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`ecs_compatibility`](#plugins-codecs-json-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-codecs-json-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-json-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and "CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that emits JSON does so in another encoding (nxlog, for example). In weird cases like this, you can set the `charset` setting to the actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you may want to set this to "CP1252".
+
+
+### `ecs_compatibility` [plugins-codecs-json-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: JSON document data added at root level
+ * `v1`,`v8`: Elastic Common Schema compliant behavior (warns when `target` isn’t set)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `target` [plugins-codecs-json-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the parsed data. If this setting is not set, the JSON data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ http {
+ codec => json {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-json_lines.md b/docs/reference/plugins-codecs-json_lines.md
new file mode 100644
index 000000000..35b579afe
--- /dev/null
+++ b/docs/reference/plugins-codecs-json_lines.md
@@ -0,0 +1,103 @@
+---
+navigation_title: "json_lines"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-json_lines.html
+---
+
+# Json_lines codec plugin [plugins-codecs-json_lines]
+
+
+* Plugin version: v3.2.2
+* Released on: 2024-09-06
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.2.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-json_lines-index.md).
+
+## Getting help [_getting_help_189]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-json_lines). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_188]
+
+This codec will decode streamed JSON that is newline delimited. Encoding will emit a single JSON string ending in the configured `delimiter`.
+
+::::{note}
+Do not use this codec if your source input is line-oriented JSON, for example, redis or file inputs. Rather, use the `json` codec. This codec expects to receive a stream (string) of newline-terminated lines. The file input produces a line string without a newline, so this codec cannot work with line-oriented inputs.
+::::
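+
+For example, a minimal sketch (the port is illustrative) of the typical pairing with a streaming input:
+
+```ruby
+input {
+  tcp {
+    port => 5000                 # illustrative port
+    codec => json_lines
+  }
+}
+```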
+
+
+## Json_lines Codec Configuration Options [plugins-codecs-json_lines-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-json_lines-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`decode_size_limit_bytes`](#plugins-codecs-json_lines-decode_size_limit_bytes) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`delimiter`](#plugins-codecs-json_lines-delimiter) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-codecs-json_lines-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-codecs-json_lines-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-json_lines-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include `UTF-8` and `CP1252`.
+
+JSON requires valid `UTF-8` strings, but in some cases software that emits JSON does so in another encoding (nxlog, for example). In cases like this, you can set the `charset` setting to the actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you’ll want to set this to `CP1252`.
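+
+As a sketch of the nxlog case above (the tcp port is only illustrative):
+
+```ruby
+input {
+  tcp {
+    port => 3515
+    codec => json_lines {
+      charset => "CP1252"
+    }
+  }
+}
+```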
+
+
+### `decode_size_limit_bytes` [plugins-codecs-json_lines-decode_size_limit_bytes]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is 512 MB
+
+Maximum number of bytes for a single line before processing stops.
+
+
+### `delimiter` [plugins-codecs-json_lines-delimiter]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\n"`
+
+Change the delimiter that separates lines.
+
+
+### `ecs_compatibility` [plugins-codecs-json_lines-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+ * `v1`, `v8`: Elastic Common Schema compliant behavior (warns when `target` isn’t set)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `target` [plugins-codecs-json_lines-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the parsed data. If this setting is not set, the JSON data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ http {
+ codec => json_lines {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-line.md b/docs/reference/plugins-codecs-line.md
new file mode 100644
index 000000000..383c5ebd8
--- /dev/null
+++ b/docs/reference/plugins-codecs-line.md
@@ -0,0 +1,75 @@
+---
+navigation_title: "line"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-line.html
+---
+
+# Line codec plugin [plugins-codecs-line]
+
+
+* Plugin version: v3.1.1
+* Released on: 2021-07-15
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-line/blob/v3.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-line-index.md).
+
+## Getting help [_getting_help_190]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-line). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_189]
+
+Reads line-oriented text data.
+
+Decoding behavior
+: Only whole line events are emitted.
+
+Encoding behavior
+: Each event is emitted with a trailing newline.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-codecs-line-ecs]
+
+This plugin is compatible with the [Elastic Common Schema (ECS)](ecs://reference/index.md). No additional configuration is required.
+
+
+## Line codec configuration options [plugins-codecs-line-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-line-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`delimiter`](#plugins-codecs-line-delimiter) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`format`](#plugins-codecs-line-format) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-line-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8` and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (aka `cp1252`) or in a character set other than `UTF-8`.
+
+This only affects "plain" format logs since JSON is already `UTF-8`.
+
+
+### `delimiter` [plugins-codecs-line-delimiter]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\n"`
+
+Change the delimiter that separates lines.
+
+
+### `format` [plugins-codecs-line-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the desired text format for encoding.
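+
+For example, a sketch of a file output that emits one formatted line per event (the path and field names are only illustrative):
+
+```ruby
+output {
+  file {
+    path => "/tmp/apps.log"
+    codec => line {
+      format => "%{host} %{message}"
+    }
+  }
+}
+```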
+
+
+
diff --git a/docs/reference/plugins-codecs-msgpack.md b/docs/reference/plugins-codecs-msgpack.md
new file mode 100644
index 000000000..98e7e772d
--- /dev/null
+++ b/docs/reference/plugins-codecs-msgpack.md
@@ -0,0 +1,62 @@
+---
+navigation_title: "msgpack"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-msgpack.html
+---
+
+# Msgpack codec plugin [plugins-codecs-msgpack]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-08-09
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-msgpack-index.md).
+
+## Getting help [_getting_help_191]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-msgpack). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_190]
+
+This codec reads and produces MessagePack encoded content.
+
+
+## Msgpack Codec configuration options [plugins-codecs-msgpack-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`format`](#plugins-codecs-msgpack-format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-codecs-msgpack-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `format` [plugins-codecs-msgpack-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+
+### `target` [plugins-codecs-msgpack-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the decoded values. If this setting is not set, data will be stored at the root (top level) of the event.
+
+For example, if you want data to be put under the `document` field:
+
+```ruby
+ input {
+ tcp {
+ port => 4242
+ codec => msgpack {
+ target => "[document]"
+ }
+ }
+ }
+```
+
+
+
diff --git a/docs/reference/plugins-codecs-multiline.md b/docs/reference/plugins-codecs-multiline.md
new file mode 100644
index 000000000..5ed71c1c4
--- /dev/null
+++ b/docs/reference/plugins-codecs-multiline.md
@@ -0,0 +1,225 @@
+---
+navigation_title: "multiline"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-multiline.html
+---
+
+# Multiline codec plugin [plugins-codecs-multiline]
+
+
+* Plugin version: v3.1.2
+* Released on: 2024-04-25
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.1.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-multiline-index.md).
+
+## Getting help [_getting_help_192]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-multiline). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_191]
+
+The multiline codec will collapse multiline messages and merge them into a single event.
+
+::::{important}
+If you are using a Logstash input plugin that supports multiple hosts, such as the [beats input plugin](/reference/plugins-inputs-beats.md), you should not use the multiline codec to handle multiline events. Doing so may result in the mixing of streams and corrupted event data. In this situation, you need to handle multiline events before sending the event data to Logstash.
+::::
+
+
+The original goal of this codec was to allow joining of multiline messages from files into a single event. For example, joining Java exception and stacktrace messages into a single event.
+
+The config looks like this:
+
+```ruby
+ input {
+ stdin {
+ codec => multiline {
+ pattern => "pattern, a regexp"
+ negate => "true" or "false"
+ what => "previous" or "next"
+ }
+ }
+ }
+```
+
+The `pattern` should match what you believe to be an indicator that the field is part of a multi-line event.
+
+The `what` must be `previous` or `next` and indicates the relation to the multi-line event.
+
+The `negate` can be `true` or `false` (defaults to `false`). If `true`, a message not matching the pattern will constitute a match of the multiline filter and the `what` will be applied. (vice-versa is also true)
+
+For example, Java stack traces are multiline and usually have the message starting at the far-left, with each subsequent line indented. Do this:
+
+```ruby
+ input {
+ stdin {
+ codec => multiline {
+ pattern => "^\s"
+ what => "previous"
+ }
+ }
+ }
+```
+
+This says that any line starting with whitespace belongs to the previous line.
+
+Another example is to merge lines not starting with a date up to the previous line.
+
+```ruby
+ input {
+ file {
+ path => "/var/log/someapp.log"
+ codec => multiline {
+ # Grok pattern names are valid! :)
+ pattern => "^%{TIMESTAMP_ISO8601} "
+ negate => true
+ what => "previous"
+ }
+ }
+ }
+```
+
+This says that any line not starting with a timestamp should be merged with the previous line.
+
+One more common example is C line continuations (backslash). Here’s how to do that:
+
+```ruby
+ input {
+ stdin {
+ codec => multiline {
+ pattern => "\\$"
+ what => "next"
+ }
+ }
+ }
+```
+
+This says that any line ending with a backslash should be combined with the following line.
+
+
+## Multiline codec configuration options [plugins-codecs-multiline-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`auto_flush_interval`](#plugins-codecs-multiline-auto_flush_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`charset`](#plugins-codecs-multiline-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`ecs_compatibility`](#plugins-codecs-multiline-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`max_bytes`](#plugins-codecs-multiline-max_bytes) | [bytes](/reference/configuration-file-structure.md#bytes) | No |
+| [`max_lines`](#plugins-codecs-multiline-max_lines) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`multiline_tag`](#plugins-codecs-multiline-multiline_tag) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`negate`](#plugins-codecs-multiline-negate) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`pattern`](#plugins-codecs-multiline-pattern) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`patterns_dir`](#plugins-codecs-multiline-patterns_dir) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`what`](#plugins-codecs-multiline-what) | [string](/reference/configuration-file-structure.md#string), one of `["previous", "next"]` | Yes |
+
+
+
+### `auto_flush_interval` [plugins-codecs-multiline-auto_flush_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The accumulation of multiple lines will be converted to an event when either a matching new line is seen or there has been no new data appended for this many seconds. If unset, no automatic flush is performed. Units: seconds.
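+
+For example, a sketch that flushes a pending multiline event after five seconds without new data (the interval value is only illustrative):
+
+```ruby
+ input {
+   stdin {
+     codec => multiline {
+       pattern => "^\s"
+       what => "previous"
+       auto_flush_interval => 5
+     }
+   }
+ }
+```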
+
+
+### `charset` [plugins-codecs-multiline-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8` and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (aka `cp1252`) or in a character set other than `UTF-8`.
+
+This only affects "plain" format logs since JSON is `UTF-8` already.
+
+
+### `ecs_compatibility` [plugins-codecs-multiline-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: plugin only sets the `message` field
+ * `v1`,`v8`: Elastic Common Schema compliant behavior (`[event][original]` is also added)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `max_bytes` [plugins-codecs-multiline-max_bytes]
+
+* Value type is [bytes](/reference/configuration-file-structure.md#bytes)
+* Default value is `"10 MiB"`
+
+The accumulation of events can make Logstash exit with an out-of-memory error if event boundaries are not correctly defined. This setting makes sure to flush multiline events after reaching a number of bytes; it is used in combination with `max_lines`.
+
+
+### `max_lines` [plugins-codecs-multiline-max_lines]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `500`
+
+The accumulation of events can make Logstash exit with an out-of-memory error if event boundaries are not correctly defined. This setting makes sure to flush multiline events after reaching a number of lines; it is used in combination with `max_bytes`.
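+
+As a sketch, both limits can be set alongside the pattern (the values shown are only illustrative):
+
+```ruby
+ input {
+   file {
+     path => "/var/log/someapp.log"
+     codec => multiline {
+       pattern => "^%{TIMESTAMP_ISO8601} "
+       negate => true
+       what => "previous"
+       max_lines => 1000
+       max_bytes => "20 MiB"
+     }
+   }
+ }
+```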
+
+
+### `multiline_tag` [plugins-codecs-multiline-multiline_tag]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"multiline"`
+
+Tag multiline events with a given tag. This tag will only be added to events that actually have multiple lines in them.
+
+
+### `negate` [plugins-codecs-multiline-negate]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Negate the regexp pattern (*if not matched*).
+
+
+### `pattern` [plugins-codecs-multiline-pattern]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The regular expression to match.
+
+
+### `patterns_dir` [plugins-codecs-multiline-patterns_dir]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don’t necessarily need to define this yourself unless you are adding additional patterns.
+
+Pattern files are plain text with format:
+
+```ruby
+ NAME PATTERN
+```
+
+For example:
+
+```ruby
+ NUMBER \d+
+```
+
+
+### `what` [plugins-codecs-multiline-what]
+
+* This is a required setting.
+* Value can be any of: `previous`, `next`
+* There is no default value for this setting.
+
+If the pattern matched, does the event belong to the next or previous event?
+
+
+
diff --git a/docs/reference/plugins-codecs-netflow.md b/docs/reference/plugins-codecs-netflow.md
new file mode 100644
index 000000000..feb0b1995
--- /dev/null
+++ b/docs/reference/plugins-codecs-netflow.md
@@ -0,0 +1,207 @@
+---
+navigation_title: "netflow"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-netflow.html
+---
+
+# Netflow codec plugin [plugins-codecs-netflow]
+
+
+* Plugin version: v4.3.2
+* Released on: 2023-12-22
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-netflow/blob/v4.3.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-netflow-index.md).
+
+## Getting help [_getting_help_193]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-netflow). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_192]
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+
+## Supported Netflow/IPFIX exporters [_supported_netflowipfix_exporters]
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters have been seen and tested with the most recent version of the Netflow Codec:
+
+| Netflow exporter | v5 | v9 | IPFIX | Remarks |
+| --- | --- | --- | --- | --- |
+| Barracuda Firewall | | | y | With support for Extended Uniflow |
+| Cisco ACI | | y | | |
+| Cisco ASA | | y | | |
+| Cisco ASR 1k | | | N | Fails because of duplicate fields |
+| Cisco ASR 9k | | y | | |
+| Cisco IOS 12.x | | y | | |
+| Cisco ISR w/ HSL | | N | | Fails because of duplicate fields, see: [https://github.com/logstash-plugins/logstash-codec-netflow/issues/93](https://github.com/logstash-plugins/logstash-codec-netflow/issues/93) |
+| Cisco WLC | | y | | |
+| Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown |
+| fprobe | y | | | |
+| Fortigate FortiOS | | y | | |
+| Huawei Netstream | | y | | |
+| ipt_NETFLOW | y | y | y | |
+| IXIA packet broker | | | y | |
+| Juniper MX | y | | y | SW > 12.3R8. Fails to decode IPFIX from Junos 16.1 due to duplicate field names which we currently don’t support. |
+| Mikrotik | y | | y | [http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow](http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow) |
+| nProbe | y | y | y | L7 DPI fields now also supported |
+| Nokia BRAS | | | y | |
+| OpenBSD pflow | y | N | y | [http://man.openbsd.org/OpenBSD-current/man4/pflow.4](http://man.openbsd.org/OpenBSD-current/man4/pflow.4) |
+| Riverbed | | N | | Not supported due to field ID conflicts. Workaround available in the definitions directory over at Elastiflow [https://github.com/robcowart/elastiflow](https://github.com/robcowart/elastiflow) |
+| Sandvine Procera PacketLogic | | | y | v15.1 |
+| Softflowd | y | y | y | IPFIX supported in [https://github.com/djmdjm/softflowd](https://github.com/djmdjm/softflowd) |
+| Sophos UTM | | | y | |
+| Streamcore Streamgroomer | | y | | |
+| Palo Alto PAN-OS | | y | | |
+| Ubiquiti Edgerouter X | | y | | With MPLS labels |
+| VMware VDS | | | y | Still some unknown fields |
+| YAF | | | y | With silk and applabel, but no DPI plugin support |
+| vIPtela | | | y | |
+
+
+## Usage [_usage_7]
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX:
+
+```ruby
+input {
+ udp {
+ port => 2055
+ codec => netflow
+ }
+}
+```
+
+For high-performance production environments the configuration below will decode up to 15000 flows/sec from a Cisco ASR 9000 router on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+Note that for richer flows from a Cisco ASA firewall this number will be at least 3x lower.
+
+```ruby
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+```
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+```
+# sysctl -w net.core.rmem_max=$((1024*1024*16))
+```
+
+## Netflow Codec Configuration Options [plugins-codecs-netflow-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`cache_save_path`](#plugins-codecs-netflow-cache_save_path) | a valid filesystem path | No |
+| [`cache_ttl`](#plugins-codecs-netflow-cache_ttl) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`include_flowset_id`](#plugins-codecs-netflow-include_flowset_id) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ipfix_definitions`](#plugins-codecs-netflow-ipfix_definitions) | a valid filesystem path | No |
+| [`netflow_definitions`](#plugins-codecs-netflow-netflow_definitions) | a valid filesystem path | No |
+| [`target`](#plugins-codecs-netflow-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`versions`](#plugins-codecs-netflow-versions) | [array](/reference/configuration-file-structure.md#array) | No |
+
+
+
+### `cache_save_path` [plugins-codecs-netflow-cache_save_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This minimizes data loss after Logstash restarts because the codec doesn’t have to wait for the arrival of templates, but can instead reload templates received during previous runs.
+
+Template caches are saved as:
+
+* [path](/reference/configuration-file-structure.md#path)/netflow_templates.cache for Netflow v9 templates.
+* [path](/reference/configuration-file-structure.md#path)/ipfix_templates.cache for IPFIX templates.
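+
+A minimal sketch enabling the template cache (the directory is only illustrative and must be writable by Logstash):
+
+```ruby
+input {
+  udp {
+    port => 2055
+    codec => netflow {
+      cache_save_path => "/var/lib/logstash/netflow"
+    }
+  }
+}
+```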
+
+
+### `cache_ttl` [plugins-codecs-netflow-cache_ttl]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds)
+
+
+### `include_flowset_id` [plugins-codecs-netflow-include_flowset_id]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Only makes sense for IPFIX; v9 already includes this. Setting this to `true` will include the `flowset_id` in events, which allows you to work with sequences, for instance with the aggregate filter.
+
+
+### `ipfix_definitions` [plugins-codecs-netflow-ipfix_definitions]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Override YAML file containing IPFIX field definitions
+
+Very similar to the Netflow version except there is a top level Private Enterprise Number (PEN) key added:
+
+```yaml
+pen:
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+```
+
+There is an implicit PEN 0 for the standard fields.
+
+See [https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml](https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml) for the base set.
+
+
+### `netflow_definitions` [plugins-codecs-netflow-netflow_definitions]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Override YAML file containing Netflow field definitions
+
+Each Netflow field is defined like so:
+
+```yaml
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+```
+
+See [https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml](https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml) for the base set.
+
+
+### `target` [plugins-codecs-netflow-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"netflow"`
+
+Specify into what field you want the Netflow data.
+
+
+### `versions` [plugins-codecs-netflow-versions]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
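+
+For example, a sketch that accepts only Netflow v5 and v9:
+
+```ruby
+input {
+  udp {
+    port => 2055
+    codec => netflow {
+      versions => [5, 9]
+    }
+  }
+}
+```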
+
+
+
diff --git a/docs/reference/plugins-codecs-nmap.md b/docs/reference/plugins-codecs-nmap.md
new file mode 100644
index 000000000..5d3236750
--- /dev/null
+++ b/docs/reference/plugins-codecs-nmap.md
@@ -0,0 +1,80 @@
+---
+navigation_title: "nmap"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-nmap.html
+---
+
+# Nmap codec plugin [plugins-codecs-nmap]
+
+
+* Plugin version: v0.0.22
+* Released on: 2022-11-16
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.22/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-nmap-index.md).
+
+## Installation [_installation_70]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-codec-nmap`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_194]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-nmap). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_193]
+
+This codec is used to parse [nmap](https://nmap.org/) output data which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing. For more information on nmap, see [https://nmap.org/](https://nmap.org/).
+
+This codec can only be used for decoding data.
+
+Event types are listed below:
+
+`nmap_scan_metadata`
+: An object containing top level information about the scan, including how many hosts were up, and how many were down. Useful for the case where you need to check if a DNS based hostname does not resolve, where both those numbers will be zero.
+
+`nmap_host`
+: One event is created per host. The full data covering an individual host, including open ports and traceroute information as a nested structure.
+
+`nmap_port`
+: One event is created per host/port. This duplicates data already in `nmap_host`. This was put in for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
+
+`nmap_traceroute_link`
+: One of these is output per traceroute *connection*, with a `from` and a `to` object describing each hop. Note that traceroute hop data is not always correct due to the fact that each tracing ICMP packet may take a different route. Also very useful for Kibana visualizations.
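+
+As a usage sketch, the codec is typically paired with an input that receives the XML produced by `nmap -oX`, for example an http input (the port is only illustrative):
+
+```ruby
+input {
+  http {
+    port => 8000
+    codec => nmap
+  }
+}
+```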
+
+
+## Nmap Codec Configuration Options [plugins-codecs-nmap-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`emit_hosts`](#plugins-codecs-nmap-emit_hosts) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`emit_ports`](#plugins-codecs-nmap-emit_ports) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`emit_scan_metadata`](#plugins-codecs-nmap-emit_scan_metadata) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`emit_traceroute_links`](#plugins-codecs-nmap-emit_traceroute_links) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+
+
+### `emit_hosts` [plugins-codecs-nmap-emit_hosts]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Emit all host data as a nested document (including ports + traceroutes) with the type *nmap_fullscan*
+
+
+### `emit_ports` [plugins-codecs-nmap-emit_ports]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Emit each port as a separate document with type *nmap_port*
+
+
+### `emit_scan_metadata` [plugins-codecs-nmap-emit_scan_metadata]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Emit scan metadata
+
+
+### `emit_traceroute_links` [plugins-codecs-nmap-emit_traceroute_links]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Emit each hop_tuple of the traceroute with type *nmap_traceroute_link*
+
+
+
diff --git a/docs/reference/plugins-codecs-plain.md b/docs/reference/plugins-codecs-plain.md
new file mode 100644
index 000000000..a6b94c452
--- /dev/null
+++ b/docs/reference/plugins-codecs-plain.md
@@ -0,0 +1,77 @@
+---
+navigation_title: "plain"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-plain.html
+---
+
+# Plain codec plugin [plugins-codecs-plain]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-07-27
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-plain-index.md).
+
+## Getting help [_getting_help_195]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-plain). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_194]
+
+The "plain" codec is for plain text with no delimiting between events.
+
+This is mainly useful on inputs and outputs that already have a defined framing in their transport protocol (such as zeromq, rabbitmq, redis, etc).
+
+
+## Plain codec configuration options [plugins-codecs-plain-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`charset`](#plugins-codecs-plain-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`ecs_compatibility`](#plugins-codecs-plain-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`format`](#plugins-codecs-plain-format) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+
+### `charset` [plugins-codecs-plain-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8` and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (aka `cp1252`) or in a character set other than `UTF-8`.
+
+This only affects "plain" format logs since JSON is already `UTF-8`.
+
+
+### `ecs_compatibility` [plugins-codecs-plain-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: plugin only sets the `message` field
+ * `v1`,`v8`: Elastic Common Schema compliant behavior (`[event][original]` is also added)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `format` [plugins-codecs-plain-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the message you wish to emit for each event. This supports `sprintf` strings.
+
+This setting only affects outputs (encoding of events).
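+
+For instance, a sketch of an stdout output emitting one formatted string per event (the field names are only illustrative):
+
+```ruby
+output {
+  stdout {
+    codec => plain {
+      format => "%{host}: %{message}"
+    }
+  }
+}
+```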
+
+
+
diff --git a/docs/reference/plugins-codecs-protobuf.md b/docs/reference/plugins-codecs-protobuf.md
new file mode 100644
index 000000000..2e1ea3cff
--- /dev/null
+++ b/docs/reference/plugins-codecs-protobuf.md
@@ -0,0 +1,247 @@
+---
+navigation_title: "protobuf"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-protobuf.html
+---
+
+# Protobuf codec plugin [plugins-codecs-protobuf]
+
+
+* Plugin version: v1.3.0
+* Released on: 2023-09-20
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-protobuf/blob/v1.3.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-protobuf-index.md).
+
+## Installation [_installation_71]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-codec-protobuf`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_196]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-protobuf). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_195]
+
+This codec converts protobuf encoded messages into Logstash events and vice versa. It supports protobuf versions 2 and 3.
+
+The plugin requires the protobuf definitions to be compiled to ruby files. For protobuf 2 use the [ruby-protoc compiler](https://github.com/codekitchen/ruby-protocol-buffers). For protobuf 3 use the [official google protobuf compiler](https://developers.google.com/protocol-buffers/docs/reference/ruby-generated).
+
+The following shows a usage example (protobuf v2) for decoding events from a kafka stream:
+
+```ruby
+kafka
+{
+ topic_id => "..."
+ key_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+ value_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+ codec => protobuf
+ {
+ class_name => "Animals::Mammals::Unicorn"
+ class_file => '/path/to/pb_definitions/some_folder/Unicorn.pb.rb'
+ protobuf_root_directory => "/path/to/pb_definitions/"
+ }
+}
+```
+
+Decoder usage example for protobuf v3:
+
+```ruby
+kafka
+{
+ topic_id => "..."
+ key_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+ value_deserializer_class => "org.apache.kafka.common.serialization.ByteArrayDeserializer"
+ codec => protobuf
+ {
+ class_name => "Animals.Mammals.Unicorn"
+ class_file => '/path/to/pb_definitions/some_folder/Unicorn_pb.rb'
+ protobuf_root_directory => "/path/to/pb_definitions/"
+ protobuf_version => 3
+ }
+}
+```
+
+The codec can be used in input and output plugins. When using the codec in the kafka input plugin please set the deserializer classes as shown above. When using the codec in an output plugin:
+
+* make sure to include all the desired fields in the protobuf definition, including timestamp. Remove fields that are not part of the protobuf definition from the event by using the mutate filter. Encoding will fail if the event has fields which are not in the protobuf definition.
+* the `@` symbol is currently not supported in field names when loading the protobuf definitions for encoding. Make sure to call the timestamp field `timestamp` instead of `@timestamp` in the protobuf file. Logstash event fields will be stripped of the leading `@` before conversion.
+* fields with a nil value will automatically be removed from the event. Empty fields will not be removed.
+* it is recommended to set the config option `pb3_encoder_autoconvert_types` to true. Otherwise any type mismatch between your data and the protobuf definition will cause an event to be lost. The auto typeconversion does not alter your data. It just tries to convert obviously identical data into the expected datatype, such as converting integers to floats where floats are expected, or "true" / "false" strings into booleans where booleans are expected.
+* When writing to Kafka: set the serializer class: `value_serializer => "org.apache.kafka.common.serialization.ByteArraySerializer"`
+
+Encoder usage example (protobuf v3):
+
+```ruby
+kafka
+ {
+ codec => protobuf
+ {
+ class_name => "Animals.Mammals.Unicorn"
+ class_file => '/path/to/pb_definitions/some_folder/Unicorn_pb.rb'
+ protobuf_root_directory => "/path/to/pb_definitions/"
+ protobuf_version => 3
+ }
+ value_serializer => "org.apache.kafka.common.serialization.ByteArraySerializer"
+ }
+```
+
+
+## Protobuf Codec Configuration Options [plugins-codecs-protobuf-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`class_name`](#plugins-codecs-protobuf-class_name) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`class_file`](#plugins-codecs-protobuf-class_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`protobuf_root_directory`](#plugins-codecs-protobuf-protobuf_root_directory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_path`](#plugins-codecs-protobuf-include_path) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`protobuf_version`](#plugins-codecs-protobuf-protobuf_version) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`stop_on_error`](#plugins-codecs-protobuf-stop_on_error) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`pb3_encoder_autoconvert_types`](#plugins-codecs-protobuf-pb3_encoder_autoconvert_types) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`pb3_set_oneof_metainfo`](#plugins-codecs-protobuf-pb3_set_oneof_metainfo) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+
+
+### `class_name` [plugins-codecs-protobuf-class_name]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Fully qualified name of the class to decode. Please note that the module delimiter is different depending on the protobuf version. For protobuf v2, use double colons:
+
+```ruby
+class_name => "Animals::Mammals::Unicorn"
+```
+
+For protobuf v3, use single dots:
+
+```ruby
+class_name => "Animals.Mammals.Unicorn"
+```
+
+For protobuf v3, you can copy the class name from the Descriptorpool registrations at the bottom of the generated protobuf ruby file. It contains lines like this:
+
+```ruby
+Animals.Mammals.Unicorn = Google::Protobuf::DescriptorPool.generated_pool.lookup("Animals.Mammals.Unicorn").msgclass
+```
+
+If your class references other definitions, you only have to add the name of the main class here.
+
+
+### `class_file` [plugins-codecs-protobuf-class_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Absolute path to the directory that contains all compiled protobuf files. If the protobuf definitions are spread across multiple folders, this needs to point to the folder containing all those folders.
+
+
+### `protobuf_root_directory` [plugins-codecs-protobuf-protobuf_root_directory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Absolute path to the root directory that contains all referenced/used dependencies of the main class (`class_name`) or any of its dependencies. Must be used in combination with the `class_file` setting, and can not be used in combination with the legacy loading mechanism `include_path`.
+
+Example:
+
+```
+ pb3
+ ├── header
+ │ └── header_pb.rb
+ ├── messageA_pb.rb
+```
+
+In this case `messageA_pb.rb` has an embedded message from `header/header_pb.rb`. If `class_file` is set to `messageA_pb.rb`, and `class_name` to `MessageA`, `protobuf_root_directory` must be set to `/path/to/pb3`, which includes both definitions.
+
+
+### `include_path` [plugins-codecs-protobuf-include_path]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Legacy protobuf definition loading mechanism for backwards compatibility: List of absolute paths to files with protobuf definitions. When using more than one file, make sure to arrange the files in reverse order of dependency so that each class is loaded before it is referred to by another.
+
+Example: a class *Unicorn* referencing another protobuf class *Wings*
+
+```ruby
+module Animal
+ module Mammal
+ class Unicorn
+ set_fully_qualified_name "Animal.Mammal.Unicorn"
+ optional ::Bodypart::Wings, :wings, 1
+ optional :string, :name, 2
+ ...
+```
+
+would be configured as
+
+```ruby
+include_path => ['/path/to/pb_definitions/wings.pb.rb','/path/to/pb_definitions/unicorn.pb.rb']
+```
+
+Please note that protobuf v2 files have the ending `.pb.rb` whereas files compiled for protobuf v3 end in `_pb.rb`.
+
+Cannot be used together with `protobuf_root_directory` or `class_file`.
+
+
+### `protobuf_version` [plugins-codecs-protobuf-protobuf_version]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Protocol buffers version. Valid settings are 2, 3.
+
+
+### `stop_on_error` [plugins-codecs-protobuf-stop_on_error]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Stop entire pipeline when encountering a non decodable message.
+
+
+### `pb3_encoder_autoconvert_types` [plugins-codecs-protobuf-pb3_encoder_autoconvert_types]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Convert data types to match the protobuf definition (if possible). The protobuf encoder library is very strict with regards to data types. Example: an event has an integer field but the protobuf definition expects a float. This would lead to an exception and the event would be lost.
+
+This feature tries to convert the datatypes to the expectations of the protobuf definitions, without modifying the data whatsoever. Examples of conversions it might attempt:
+
+* `"true"` (string) ⇒ `true` (boolean)
+* `17` (int) ⇒ `17.0` (float)
+* `12345` (number) ⇒ `"12345"` (string)
+
+Available only for protobuf version 3.
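+
+A hedged encoder-side sketch (the `kafka` output, topic name, and file paths are assumptions following the earlier examples). With the option set to `false`, type mismatches raise an encoding error instead of being coerced:
+
+```ruby
+output {
+  kafka {
+    topic_id => "pb_topic"                       # assumed topic name
+    codec => protobuf {
+      class_name                    => "Animals.Mammals.Unicorn"
+      class_file                    => "unicorn_pb.rb"
+      protobuf_root_directory       => "/path/to/pb3"
+      protobuf_version              => 3
+      pb3_encoder_autoconvert_types => false
+    }
+  }
+}
+```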
+
+
+### `pb3_set_oneof_metainfo` [plugins-codecs-protobuf-pb3_set_oneof_metainfo]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is false
+
+Add meta information to `[@metadata][pb_oneof]` about which classes were chosen for [oneof](https://developers.google.com/protocol-buffers/docs/proto3#oneof) fields. A new field of name `[@metadata][pb_oneof][FOO]` will be added, where `FOO` is the name of the `oneof` field.
+
+Example values: for the protobuf definition
+
+```ruby
+ oneof :horse_type do
+ optional :unicorn, :message, 2, "UnicornType"
+ optional :pegasus, :message, 3, "PegasusType"
+ end
+```
+
+the field `[@metadata][pb_oneof][horse_type]` will be set to either `pegasus` or `unicorn`. Available only for protobuf version 3.
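+
+A minimal sketch of how that metadata could be used downstream (the `mutate` branch and tag name are only illustrative):
+
+```ruby
+filter {
+  if [@metadata][pb_oneof][horse_type] == "unicorn" {
+    mutate { add_tag => ["unicorn_event"] }      # assumed tag name
+  }
+}
+```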
+
+
+
diff --git a/docs/reference/plugins-codecs-rubydebug.md b/docs/reference/plugins-codecs-rubydebug.md
new file mode 100644
index 000000000..10bbea0e7
--- /dev/null
+++ b/docs/reference/plugins-codecs-rubydebug.md
@@ -0,0 +1,42 @@
+---
+navigation_title: "rubydebug"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-codecs-rubydebug.html
+---
+
+# Rubydebug codec plugin [plugins-codecs-rubydebug]
+
+
+* Plugin version: v3.1.0
+* Released on: 2020-07-08
+* [Changelog](https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/codec-rubydebug-index.md).
+
+## Getting help [_getting_help_197]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-codec-rubydebug). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_196]
+
+The rubydebug codec will output your Logstash event data using the Ruby Amazing Print library.
+
+
+## Rubydebug Codec Configuration Options [plugins-codecs-rubydebug-options]
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`metadata`](#plugins-codecs-rubydebug-metadata) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+
+
+### `metadata` [plugins-codecs-rubydebug-metadata]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Should the event’s metadata be included?
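+
+For example, a stdout output that prints events including their `@metadata` fields might look like this:
+
+```ruby
+output {
+  stdout {
+    codec => rubydebug { metadata => true }
+  }
+}
+```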
+
+
+
diff --git a/docs/reference/plugins-filters-age.md b/docs/reference/plugins-filters-age.md
new file mode 100644
index 000000000..ab41c1923
--- /dev/null
+++ b/docs/reference/plugins-filters-age.md
@@ -0,0 +1,231 @@
+---
+navigation_title: "age"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-age.html
+---
+
+# Age filter plugin [plugins-filters-age]
+
+
+* Plugin version: v1.0.3
+* Released on: 2021-10-29
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-age/blob/v1.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-age-index.md).
+
+## Installation [_installation_54]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-age`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_123]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-age). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_123]
+
+A simple filter for calculating the age of an event.
+
+This filter calculates the age of an event by subtracting the event timestamp from the current timestamp. You can use this plugin with the [`drop` filter plugin](/reference/plugins-filters-drop.md) to drop Logstash events that are older than some threshold.
+
+```ruby
+filter {
+ age {}
+ if [@metadata][age] > 86400 {
+ drop {}
+ }
+}
+```
+
+
+## Age Filter Configuration Options [plugins-filters-age-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-age-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`target`](#plugins-filters-age-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-age-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `target` [plugins-filters-age-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"[@metadata][age]"`
+
+Define the target field for the event age, in seconds.
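+
+For example, to write the age into a regular event field instead of `@metadata` (the field name is only an illustration):
+
+```ruby
+filter {
+  age {
+    target => "event_age_seconds"    # assumed field name; default is [@metadata][age]
+  }
+}
+```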
+
+
+
+## Common options [plugins-filters-age-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-age-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-age-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-age-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-age-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-age-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-age-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-age-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-age-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ age {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ age {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-age-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ age {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ age {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-age-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-age-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 age filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ age {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-age-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-age-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ age {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ age {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-age-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ age {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ age {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-aggregate.md b/docs/reference/plugins-filters-aggregate.md
new file mode 100644
index 000000000..ba7ea8868
--- /dev/null
+++ b/docs/reference/plugins-filters-aggregate.md
@@ -0,0 +1,757 @@
+---
+navigation_title: "aggregate"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-aggregate.html
+---
+
+# Aggregate filter plugin [plugins-filters-aggregate]
+
+
+* Plugin version: v2.10.0
+* Released on: 2021-10-11
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.10.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-aggregate-index.md).
+
+## Getting help [_getting_help_124]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-aggregate). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [plugins-filters-aggregate-description]
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task, and finally push the aggregated information into the final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly; otherwise, events may be processed out of sequence and unexpected results will occur.
+
+
+## Example #1 [plugins-filters-aggregate-example1]
+
+* Given these logs:
+
+```ruby
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+```
+
+* you can aggregate the "sql duration" for the whole task with this configuration:
+
+```ruby
+ filter {
+ grok {
+ match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+ }
+
+ if [logger] == "TASK_START" {
+ aggregate {
+ task_id => "%{taskid}"
+ code => "map['sql_duration'] = 0"
+ map_action => "create"
+ }
+ }
+
+ if [logger] == "SQL" {
+ aggregate {
+ task_id => "%{taskid}"
+ code => "map['sql_duration'] += event.get('duration')"
+ map_action => "update"
+ }
+ }
+
+ if [logger] == "TASK_END" {
+ aggregate {
+ task_id => "%{taskid}"
+ code => "event.set('sql_duration', map['sql_duration'])"
+ map_action => "update"
+ end_of_task => true
+ timeout => 120
+ }
+ }
+ }
+```
+
+* the final event then looks like:
+
+```ruby
+{
+ "message" => "INFO - 12345 - TASK_END - end message",
+ "sql_duration" => 46
+}
+```
+
+The `sql_duration` field is added and contains the sum of all SQL query durations.
+
+
+## Example #2 : no start event [plugins-filters-aggregate-example2]
+
+* If you have the same logs as example #1, but without a start log:
+
+```ruby
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+```
+
+* you can also aggregate the "sql duration" with a slightly different configuration:
+
+```ruby
+ filter {
+ grok {
+ match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+ }
+
+ if [logger] == "SQL" {
+ aggregate {
+ task_id => "%{taskid}"
+ code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+ }
+ }
+
+ if [logger] == "TASK_END" {
+ aggregate {
+ task_id => "%{taskid}"
+ code => "event.set('sql_duration', map['sql_duration'])"
+ end_of_task => true
+ timeout => 120
+ }
+ }
+ }
+```
+
+* the final event is exactly the same as in example #1
+* the key point is the `||=` Ruby operator. It initializes the *sql_duration* map entry to 0 only if this map entry is not already initialized
+
+
+## Example #3 : no end event [plugins-filters-aggregate-example3]
+
+Third use case: You have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by their ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user’s interaction.
+
+In this case, we can enable the option *push_map_as_event_on_timeout* to push the aggregation map as a new event when a timeout occurs. In addition, we can use *timeout_code* to execute code on the populated timeout event. We can also add *timeout_task_id_field* so we can correlate the task_id, which in this case would be the user’s ID.
+
+* Given these logs:
+
+```ruby
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+```
+
+* You can aggregate the amount of clicks the user did like this:
+
+```ruby
+filter {
+ grok {
+ match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+ }
+
+ aggregate {
+ task_id => "%{user_id}"
+ code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+ push_map_as_event_on_timeout => true
+ timeout_task_id_field => "user_id"
+ timeout => 600 # 10 minutes timeout
+ timeout_tags => ['_aggregatetimeout']
+ timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+ }
+}
+```
+
+* After ten minutes, this will yield an event like:
+
+```json
+{
+ "user_id": "12345",
+ "clicks": 3,
+ "several_clicks": true,
+ "tags": [
+ "_aggregatetimeout"
+ ]
+}
+```
+
+
+## Example #4 : no end event and tasks come one after the other [plugins-filters-aggregate-example4]
+
+Fourth use case: like example #3, you have no specific end event, but tasks also come one after the other.
+
+That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on.
+
+In that case, you don’t want to wait for the task timeout to flush the aggregation map.
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events:
+
+```json
+ { "country_name": "France", "town_name": "Paris" }
+ { "country_name": "France", "town_name": "Marseille" }
+ { "country_name": "USA", "town_name": "New-York" }
+```
+
+* And you would like to push these 2 resulting events into Elasticsearch:
+
+```json
+ { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+ { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+```
+
+* You can do that using the `push_previous_map_as_event` aggregate plugin option:
+
+```ruby
+ filter {
+ aggregate {
+ task_id => "%{country_name}"
+ code => "
+ map['country_name'] ||= event.get('country_name')
+ map['towns'] ||= []
+ map['towns'] << {'town_name' => event.get('town_name')}
+ event.cancel()
+ "
+ push_previous_map_as_event => true
+ timeout => 3
+ }
+ }
+```
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3-second timeout expires, the last aggregate map is pushed as a new event
+* Initial events (which are not aggregated) are dropped because they are no longer needed (thanks to `event.cancel()`)
+* Last point: if a field is not present in every event (say the "town_postcode" field), the `||=` operator lets you push the first non-null value into the aggregate map. Example: `map['town_postcode'] ||= event.get('town_postcode')`
+
+
+## Example #5 : no end event and push events as soon as possible [plugins-filters-aggregate-example5]
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user’s interaction.
+
+The user interaction is considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option *push_map_as_event_on_timeout* to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can enable *timeout_code* to execute code on the populated timeout event.
+
+We can also add *timeout_task_id_field* so we can correlate the task_id, which in this case would be the user’s ID.
+
+* Given these logs:
+
+```ruby
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+```
+
+* You can aggregate the amount of clicks the user did like this:
+
+```ruby
+filter {
+ grok {
+ match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+ }
+ aggregate {
+ task_id => "%{user_id}"
+ code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+ push_map_as_event_on_timeout => true
+ timeout_task_id_field => "user_id"
+ timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+ inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
+ timeout_tags => ['_aggregatetimeout']
+ timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+ }
+}
+```
+
+* After five minutes of inactivity or one hour since first event, this will yield an event like:
+
+```json
+{
+ "user_id": "12345",
+ "clicks": 3,
+ "several_clicks": true,
+ "tags": [
+ "_aggregatetimeout"
+ ]
+}
+```
+
+
+## How it works [plugins-filters-aggregate-howitworks]
+
+* the filter needs a "task_id" to correlate events (log lines) belonging to the same task
+* at the start of a task, the filter creates a map attached to the task_id
+* for each event, you can execute code using *event* and *map* (for instance, copy an event field to the map)
+* in the final event, you can execute a last piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, they won’t share the same aggregate map even if they see the same task_id value.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_timestamp_field, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged *_aggregateexception* (see the sketch after this list)
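+
+A hedged sketch of one way to react to such failures downstream (the added field name is an assumption):
+
+```ruby
+filter {
+  if "_aggregateexception" in [tags] {
+    mutate { add_field => { "aggregate_status" => "failed" } }   # assumed field name
+  }
+}
+```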
+
+
+## Use Cases [plugins-filters-aggregate-usecases]
+
+* extract some useful metrics from task logs and push them into the final task event (like in examples #1 and #2)
+* extract error information from any task log line and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list and push this list into the final task event (to get a task profile)
+* extract all HTTP headers logged across several lines and push this list into the final task event (complete HTTP request info)
+* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, and so on)
+* Finally, the task id can be any correlation id matching your needs: a session id, a file path, and so on.
+
+
+## Aggregate Filter Configuration Options [plugins-filters-aggregate-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-aggregate-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`aggregate_maps_path`](#plugins-filters-aggregate-aggregate_maps_path) | [string](/reference/configuration-file-structure.md#string), a valid filesystem path | No |
+| [`code`](#plugins-filters-aggregate-code) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`end_of_task`](#plugins-filters-aggregate-end_of_task) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`inactivity_timeout`](#plugins-filters-aggregate-inactivity_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`map_action`](#plugins-filters-aggregate-map_action) | [string](/reference/configuration-file-structure.md#string), one of `["create", "update", "create_or_update"]` | No |
+| [`push_map_as_event_on_timeout`](#plugins-filters-aggregate-push_map_as_event_on_timeout) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`push_previous_map_as_event`](#plugins-filters-aggregate-push_previous_map_as_event) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`task_id`](#plugins-filters-aggregate-task_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`timeout`](#plugins-filters-aggregate-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeout_code`](#plugins-filters-aggregate-timeout_code) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeout_tags`](#plugins-filters-aggregate-timeout_tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`timeout_task_id_field`](#plugins-filters-aggregate-timeout_task_id_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeout_timestamp_field`](#plugins-filters-aggregate-timeout_timestamp_field) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-aggregate-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `aggregate_maps_path` [plugins-filters-aggregate-aggregate_maps_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The path to file where aggregate maps are stored when Logstash stops and are loaded from when Logstash starts.
+
+If not defined, aggregate maps will not be stored at Logstash stop and will be lost. Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ aggregate_maps_path => "/path/to/.aggregate_maps"
+ }
+ }
+```
+
+
+### `code` [plugins-filters-aggregate-code]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The code to execute to update the aggregated map, using the current event.
+
+Or, conversely, the code to execute to update the event, using the aggregated map.
+
+Available variables are:
+
+`event`: current Logstash event
+
+`map`: aggregated map associated to `task_id`, containing key/value pairs. Data structure is a ruby [Hash](http://ruby-doc.org/core-1.9.1/Hash.html)
+
+`map_meta`: meta information associated with the aggregate map. It allows you to set a custom `timeout` or `inactivity_timeout`, and to read `creation_timestamp`, `lastevent_timestamp` and `task_id`.
+
+`new_event_block`: block used to emit new Logstash events. See the second example on how to use it.
+
+When the option `push_map_as_event_on_timeout` is set to true, setting `map_meta.timeout=0` in the `code` block pushes the aggregated map immediately as a new event.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ code => "map['sql_duration'] += event.get('duration')"
+ }
+ }
+```
+
+To create additional events during code execution and emit them immediately, you can use the `new_event_block.call(event)` function, as in the following example:
+
+```ruby
+ filter {
+ aggregate {
+ code => "
+ data = {:my_sql_duration => map['sql_duration']}
+ generated_event = LogStash::Event.new(data)
+ generated_event.set('my_other_field', 34)
+ new_event_block.call(generated_event)
+ "
+ }
+ }
+```
+
+The parameter of the `new_event_block.call` function must be of type `LogStash::Event`. You can create such an object with the constructor of that class, `LogStash::Event.new()`, which can receive a Ruby [Hash](http://ruby-doc.org/core-1.9.1/Hash.html) to initialize the new event’s fields.
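+
+As an illustrative sketch of `map_meta`, the following flushes the aggregation map immediately when a hypothetical `flush_now` field appears on an event (it assumes `push_map_as_event_on_timeout => true` on the same filter):
+
+```ruby
+  filter {
+    aggregate {
+      task_id => "%{taskid}"
+      code => "
+        map['events'] ||= 0
+        map['events'] += 1
+        map_meta.timeout = 0 if event.get('flush_now')   # assumed flag field
+      "
+      push_map_as_event_on_timeout => true
+    }
+  }
+```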
+
+
+### `end_of_task` [plugins-filters-aggregate-end_of_task]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Tell the filter that task is ended, and therefore, to delete aggregate map after code execution.
+
+
+### `inactivity_timeout` [plugins-filters-aggregate-inactivity_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered expired.
+
+When timeout occurs for a task, its aggregate map is evicted.
+
+If *push_map_as_event_on_timeout* or *push_previous_map_as_event* is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+
+### `map_action` [plugins-filters-aggregate-map_action]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"create_or_update"`
+
+Tell the filter what to do with aggregate map.
+
+`"create"`: create the map, and execute the code only if the map wasn’t created before
+
+`"update"`: don’t create the map, and execute the code only if the map was created before
+
+`"create_or_update"`: create the map if it wasn’t created before, and execute the code in all cases
+
+
+### `push_map_as_event_on_timeout` [plugins-filters-aggregate-push_map_as_event_on_timeout]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event. This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
+
+
+### `push_previous_map_as_event` [plugins-filters-aggregate-push_previous_map_as_event]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next task.
+
+::::{warning}
+This option works only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
+::::
+
+
+
+### `task_id` [plugins-filters-aggregate-task_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The expression defining task ID to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ task_id => "%{type}%{my_task_id}"
+ }
+ }
+```
+
+
+### `timeout` [plugins-filters-aggregate-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered expired.
+
+When timeout occurs for a task, its aggregate map is evicted.
+
+If *push_map_as_event_on_timeout* or *push_previous_map_as_event* is set to true, the task aggregation map is pushed as a new Logstash event.
+
+Timeout can be defined for each "task_id" pattern.
+
+
+### `timeout_code` [plugins-filters-aggregate-timeout_code]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true. The code block has access to the newly generated timeout event, which is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ timeout_code => "event.set('state', 'timeout')"
+ }
+ }
+```
+
+
+### `timeout_tags` [plugins-filters-aggregate-timeout_tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Defines the tags to add when a timeout event is generated and yielded.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ timeout_tags => ["aggregate_timeout"]
+ }
+ }
+```
+
+
+### `timeout_task_id_field` [plugins-filters-aggregate-timeout_task_id_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This option indicates the field of the timeout-generated event where the current "task_id" value will be set. This can help to correlate which tasks have been timed out.
+
+By default, if this option is not set, the task id value won’t be set on the timeout-generated event.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ timeout_task_id_field => "task_id"
+ }
+ }
+```
+
+
+### `timeout_timestamp_field` [plugins-filters-aggregate-timeout_timestamp_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+By default, the timeout is computed using the system time of the machine where Logstash is running.
+
+When this option is set, the timeout is computed using the event timestamp field indicated by this option. It means that when the first event arrives on the aggregate filter and induces a map creation, the map creation time will be equal to this event’s timestamp. Then, each time a new event arrives on the aggregate filter, the event timestamp is compared to the map creation time to check whether the timeout has happened.
+
+This option is particularly useful when processing old logs with the option `push_map_as_event_on_timeout => true`. It lets you generate aggregated events based on timeouts in old logs, where system time is inappropriate.
+
+Warning: for this option to work correctly, it must be set on the first aggregate filter.
+
+Example:
+
+```ruby
+ filter {
+ aggregate {
+ timeout_timestamp_field => "@timestamp"
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-aggregate-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-aggregate-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-aggregate-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-aggregate-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-aggregate-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-aggregate-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-aggregate-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-aggregate-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-aggregate-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ aggregate {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ aggregate {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-aggregate-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ aggregate {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ aggregate {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-aggregate-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-aggregate-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 aggregate filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ aggregate {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-aggregate-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-aggregate-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ aggregate {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ aggregate {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-aggregate-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ aggregate {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ aggregate {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-alter.md b/docs/reference/plugins-filters-alter.md
new file mode 100644
index 000000000..b96f8a20f
--- /dev/null
+++ b/docs/reference/plugins-filters-alter.md
@@ -0,0 +1,283 @@
+---
+navigation_title: "alter"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-alter.html
+---
+
+# Alter filter plugin [plugins-filters-alter]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-alter-index.md).
+
+## Installation [_installation_55]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-alter`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_125]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-alter). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_124]
+
+The alter filter allows you to do general alterations to fields that are not included in the normal mutate filter.
+
+::::{note}
+The functionality provided by this plugin is likely to be merged into the *mutate* filter in future versions.
+::::
+
+
+
+## Alter Filter Configuration Options [plugins-filters-alter-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-alter-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`coalesce`](#plugins-filters-alter-coalesce) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`condrewrite`](#plugins-filters-alter-condrewrite) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`condrewriteother`](#plugins-filters-alter-condrewriteother) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-alter-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `coalesce` [plugins-filters-alter-coalesce]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Sets the value of `field_name` to the first non-null expression among its arguments.
+
+Example:
+
+```ruby
+ filter {
+ alter {
+ coalesce => [
+ "field_name", "value1", "value2", "value3", ...
+ ]
+ }
+ }
+```
+
+
+### `condrewrite` [plugins-filters-alter-condrewrite]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Change the content of the field to the specified value if the actual content is equal to the expected one.
+
+Example:
+
+```ruby
+ filter {
+ alter {
+ condrewrite => [
+ "field_name", "expected_value", "new_value",
+ "field_name2", "expected_value2", "new_value2",
+ ....
+ ]
+ }
+ }
+```
+
+
+### `condrewriteother` [plugins-filters-alter-condrewriteother]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Change the content of the field to the specified value if the content of another field is equal to the expected one.
+
+Example:
+
+```ruby
+ filter {
+ alter {
+ condrewriteother => [
+ "field_name", "expected_value", "field_name_to_change", "value",
+ "field_name2", "expected_value2", "field_name_to_change2", "value2",
+ ....
+ ]
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-alter-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-alter-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-alter-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-alter-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-alter-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-alter-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-alter-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-alter-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-alter-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ alter {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ alter {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-alter-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ alter {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ alter {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-alter-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-alter-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 alter filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ alter {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-alter-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-alter-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ alter {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ alter {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-alter-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ alter {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ alter {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-bytes.md b/docs/reference/plugins-filters-bytes.md
new file mode 100644
index 000000000..6106e3dba
--- /dev/null
+++ b/docs/reference/plugins-filters-bytes.md
@@ -0,0 +1,284 @@
+---
+navigation_title: "bytes"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-bytes.html
+---
+
+# Bytes filter plugin [plugins-filters-bytes]
+
+
+* Plugin version: v1.0.3
+* Released on: 2020-08-18
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-bytes/blob/v1.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-bytes-index.md).
+
+## Installation [_installation_56]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-bytes`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_126]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-bytes). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_125]
+
+Parse string representations of computer storage sizes, such as "123 MB" or "5.6gb", into their numeric value in bytes.
+
+This plugin understands:
+
+* bytes ("B")
+* kilobytes ("KB" or "kB")
+* megabytes ("MB", "mb", or "mB")
+* gigabytes ("GB", "gb", or "gB")
+* terabytes ("TB", "tb", or "tB")
+* petabytes ("PB", "pb", or "pB")
+
+
+## Examples [plugins-filters-bytes-examples]
+
+| Input string | Conversion method | Numeric value in bytes |
+| --- | --- | --- |
+| 40 | `binary` or `metric` | 40 |
+| 40B | `binary` or `metric` | 40 |
+| 40 B | `binary` or `metric` | 40 |
+| 40KB | `binary` | 40960 |
+| 40kB | `binary` | 40960 |
+| 40KB | `metric` | 40000 |
+| 40.5KB | `binary` | 41472 |
+| 40kb | `binary` | 5120 |
+| 40Kb | `binary` | 5120 |
+| 10 MB | `binary` | 10485760 |
+| 10 mB | `binary` | 10485760 |
+| 10 mb | `binary` | 10485760 |
+| 10 Mb | `binary` | 1310720 |
+
+```ruby
+ filter {
+ bytes {
+ source => "my_bytes_string_field"
+ target => "my_bytes_numeric_field"
+ }
+ }
+```
+
+
+## Bytes Filter Configuration Options [plugins-filters-bytes-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-bytes-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`source`](#plugins-filters-bytes-source) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-bytes-target) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`conversion_method`](#plugins-filters-bytes-conversion_method) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`decimal_separator`](#plugins-filters-bytes-decimal_separator) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-bytes-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `source` [plugins-filters-bytes-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `message`
+
+Name of the source field that contains the storage size
+
+
+### `target` [plugins-filters-bytes-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+Name of the target field that will contain the storage size in bytes
+
+
+### `conversion_method` [plugins-filters-bytes-conversion_method]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Value can be any of: `binary`, `metric`
+* Default value is `binary`
+
+Which conversion method to use when converting to bytes. `binary` uses `1K = 1024B`. `metric` uses `1K = 1000B`.
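+
+For example, a sketch that parses vendor-reported sizes using metric units (the field names are assumptions):
+
+```ruby
+  filter {
+    bytes {
+      source => "disk_size"              # assumed field, e.g. "500 GB"
+      target => "disk_size_bytes"
+      conversion_method => "metric"      # 500 GB => 500000000000
+    }
+  }
+```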
+
+
+### `decimal_separator` [plugins-filters-bytes-decimal_separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `.`
+
+Separator, if any, used as the decimal. This value is only used if the plugin cannot guess the decimal separator by looking at the string in the `source` field.
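+
+A minimal sketch for inputs such as `"12,5 GB"` that use a comma as the decimal separator (the field names are assumptions):
+
+```ruby
+  filter {
+    bytes {
+      source => "size_text"
+      target => "size_bytes"
+      decimal_separator => ","
+    }
+  }
+```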
+
+
+
+## Common options [plugins-filters-bytes-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-bytes-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-bytes-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-bytes-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-bytes-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-bytes-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-bytes-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-bytes-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-bytes-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ bytes {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ bytes {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-bytes-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ bytes {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ bytes {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-bytes-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-bytes-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 bytes filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ bytes {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-bytes-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-bytes-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ bytes {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ bytes {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-bytes-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ bytes {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ bytes {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-cidr.md b/docs/reference/plugins-filters-cidr.md
new file mode 100644
index 000000000..79628c67c
--- /dev/null
+++ b/docs/reference/plugins-filters-cidr.md
@@ -0,0 +1,277 @@
+---
+navigation_title: "cidr"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-cidr.html
+---
+
+# Cidr filter plugin [plugins-filters-cidr]
+
+
+* Plugin version: v3.1.3
+* Released on: 2019-09-18
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.1.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-cidr-index.md).
+
+## Getting help [_getting_help_127]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-cidr). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_126]
+
+The CIDR filter is for checking IP addresses in events against a list of network blocks that might contain them. Multiple addresses can be checked against multiple networks; any match succeeds. Upon success, additional tags and/or fields can be added to the event.
+
+
+## Cidr Filter Configuration Options [plugins-filters-cidr-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-cidr-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`address`](#plugins-filters-cidr-address) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`network`](#plugins-filters-cidr-network) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`network_path`](#plugins-filters-cidr-network_path) | a valid filesystem path | No |
+| [`refresh_interval`](#plugins-filters-cidr-refresh_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`separator`](#plugins-filters-cidr-separator) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-cidr-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `address` [plugins-filters-cidr-address]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The IP address(es) to check with. Example:
+
+```ruby
+ filter {
+ cidr {
+ add_tag => [ "testnet" ]
+ address => [ "%{src_ip}", "%{dst_ip}" ]
+ network => [ "192.0.2.0/24" ]
+ }
+ }
+```
+
+
+### `network` [plugins-filters-cidr-network]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The IP network(s) to check against. Example:
+
+```ruby
+ filter {
+ cidr {
+ add_tag => [ "linklocal" ]
+ address => [ "%{clientip}" ]
+ network => [ "169.254.0.0/16", "fe80::/64" ]
+ }
+ }
+```
+
+
+### `network_path` [plugins-filters-cidr-network_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The full path of the external file containing the networks the filter should check with. Networks are separated by a separator character defined in `separator`.
+
+```ruby
+    192.168.1.0/24
+    192.167.0.0/16
+```
+
+::::{note}
+It is an error to specify both `network` and `network_path`.
+::::
+
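+For example, a minimal sketch that reads networks from an external file; the path and field name here are placeholders, not values taken from this document:
+
+```ruby
+    filter {
+      cidr {
+        add_tag      => [ "internal" ]
+        address      => [ "%{clientip}" ]
+        network_path => "/etc/logstash/networks.txt"
+      }
+    }
+```
+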
+
+### `refresh_interval` [plugins-filters-cidr-refresh_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `600`
+
+When using an external file, this setting will indicate how frequently (in seconds) Logstash will check the file for updates.
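+
+For example, to re-check a hypothetical networks file every five minutes instead of the default ten:
+
+```ruby
+    filter {
+      cidr {
+        network_path     => "/etc/logstash/networks.txt"
+        refresh_interval => 300
+      }
+    }
+```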
+
+
+### `separator` [plugins-filters-cidr-separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `\n`
+
+Separator character used for parsing networks from the external file specified by `network_path`. Defaults to the newline (`\n`) character.
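+
+For example, to read a comma-separated list of networks from a hypothetical external file:
+
+```ruby
+    filter {
+      cidr {
+        network_path => "/etc/logstash/networks.csv"
+        separator    => ","
+      }
+    }
+```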
+
+
+
+## Common options [plugins-filters-cidr-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-cidr-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-cidr-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-cidr-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-cidr-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-cidr-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-cidr-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-cidr-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-cidr-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cidr {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ cidr {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-cidr-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cidr {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ cidr {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-cidr-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-cidr-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 cidr filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ cidr {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-cidr-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-cidr-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cidr {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ cidr {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-cidr-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cidr {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ cidr {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-cipher.md b/docs/reference/plugins-filters-cipher.md
new file mode 100644
index 000000000..853010687
--- /dev/null
+++ b/docs/reference/plugins-filters-cipher.md
@@ -0,0 +1,387 @@
+---
+navigation_title: "cipher"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-cipher.html
+---
+
+# Cipher filter plugin [plugins-filters-cipher]
+
+
+* Plugin version: v4.0.3
+* Released on: 2022-06-21
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-cipher/blob/v4.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-cipher-index.md).
+
+## Installation [_installation_57]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-cipher`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_128]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-cipher). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_127]
+
+This filter parses a source field and applies a cipher or decipher before storing the result in the target field.
+
+::::{note}
+Prior to version 4.0.1, this plugin was not thread-safe and could not safely be used with multiple pipeline workers.
+::::
+
+
+
+## Cipher Filter Configuration Options [plugins-filters-cipher-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-cipher-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`algorithm`](#plugins-filters-cipher-algorithm) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`base64`](#plugins-filters-cipher-base64) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`cipher_padding`](#plugins-filters-cipher-cipher_padding) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`iv_random_length`](#plugins-filters-cipher-iv_random_length) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`key`](#plugins-filters-cipher-key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key_pad`](#plugins-filters-cipher-key_pad) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key_size`](#plugins-filters-cipher-key_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_cipher_reuse`](#plugins-filters-cipher-max_cipher_reuse) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`mode`](#plugins-filters-cipher-mode) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`source`](#plugins-filters-cipher-source) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-cipher-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-cipher-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `algorithm` [plugins-filters-cipher-algorithm]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The cipher algorithm to use for encryption and decryption operations.
+
+The list of supported algorithms depends on the versions of Logstash, JRuby, and Java this plugin is running with, and can be obtained by running:
+
+```sh
+ cd $LOGSTASH_HOME # <-- your Logstash distribution root
+ bin/ruby -ropenssl -e 'puts OpenSSL::Cipher.ciphers'
+```
+
+
+### `base64` [plugins-filters-cipher-base64]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+* Unless this option is disabled:
+
+    * When [`mode => encrypt`](#plugins-filters-cipher-mode), the resulting ciphertext will be `base64`-encoded before it is stored.
+    * When [`mode => decrypt`](#plugins-filters-cipher-mode), the source ciphertext will be `base64`-decoded before it is deciphered.
+
+
+
+### `cipher_padding` [plugins-filters-cipher-cipher_padding]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+ * `0`: means `false`
+ * `1`: means `true`
+
+* There is no default value for this setting.
+
+Enables or disables padding in encryption operations.
+
+In encryption operations with block-ciphers, the input plaintext must be an *exact* multiple of the cipher’s block-size unless padding is enabled.
+
+Disabling padding by setting this value to `0` will cause this plugin to fail to encrypt any input plaintext that doesn’t strictly adhere to the [`algorithm`](#plugins-filters-cipher-algorithm)'s block size requirements.
+
+```ruby
+ filter { cipher { cipher_padding => 0 }}
+```
+
+
+### `iv_random_length` [plugins-filters-cipher-iv_random_length]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+In encryption operations, this plugin generates a random Initialization Vector (IV) per encryption operation. This is a standard best-practice to ensure that the resulting ciphertexts cannot be compared to infer equivalence of the source plaintext. This unique IV is then *prepended* to the resulting ciphertext before it is stored, ensuring it is available to any process that needs to decrypt it.
+
+In decryption operations, the IV is assumed to have been prepended to the ciphertext, so this plugin needs to know the length of the IV in order to split the input appropriately.
+
+The size of the IV is generally dependent on which [`algorithm`](#plugins-filters-cipher-algorithm) is used. AES Algorithms generally use a 16-byte IV:
+
+```ruby
+ filter { cipher { iv_random_length => 16 }}
+```
+
+
+### `key` [plugins-filters-cipher-key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The key to use for encryption and decryption operations.
+
+::::{note}
+Please read the [UnlimitedStrengthCrypto topic](https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto) in the [jruby](https://github.com/jruby/jruby) github repo if you see a runtime error that resembles:
+
+`java.security.InvalidKeyException: Illegal key size: possibly you need to install Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE`
+
+::::
+
+
+
+### `key_pad` [plugins-filters-cipher-key_pad]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\u0000"`
+
+The character used to pad the key to the required [`key_size`](#plugins-filters-cipher-key_size).
+
+
+### `key_size` [plugins-filters-cipher-key_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `16`
+
+The cipher’s required key size, which depends on which [`algorithm`](#plugins-filters-cipher-algorithm) you are using. If a [`key`](#plugins-filters-cipher-key) is specified with a shorter value, it will be padded with [`key_pad`](#plugins-filters-cipher-key_pad).
+
+For example, AES-128 requires a 16-character key, and AES-256 requires a 32-character key.
+
+```ruby
+    filter { cipher { key_size => 16 } }
+```
+
+
+### `max_cipher_reuse` [plugins-filters-cipher-max_cipher_reuse]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+If this value is set, the internal Cipher instance will be re-used up to `max_cipher_reuse` times before it is re-created from scratch. This is an option for efficiency where lots of data is being encrypted and decrypted using this filter. This lets the filter avoid creating new Cipher instances over and over for each encrypt/decrypt operation.
+
+This setting is optional. The default is `max_cipher_reuse => 1`, which means the Cipher instance is not re-used.
+
+```ruby
+ filter { cipher { max_cipher_reuse => 1000 }}
+```
+
+
+### `mode` [plugins-filters-cipher-mode]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+ * `encrypt`: encrypts a plaintext value into IV + ciphertext
+ * `decrypt`: decrypts an IV + ciphertext value into plaintext
+
+* There is no default value for this setting.
+
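+For illustration, a minimal sketch that combines these options for encryption; the key and field names below are placeholders, not values taken from this document:
+
+```ruby
+    filter {
+      cipher {
+        mode             => "encrypt"
+        algorithm        => "aes-256-cbc"
+        key              => "32-characters-long-key-goes-here"   # hypothetical 32-character secret
+        key_size         => 32
+        iv_random_length => 16
+        source           => "message"
+        target           => "message_encrypted"
+      }
+    }
+```
+
+A matching decryption configuration would use `mode => decrypt` with the same `key`, `key_size`, and `iv_random_length`, reading from the field that holds the IV + ciphertext.
+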
+
+### `source` [plugins-filters-cipher-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The name of the source field.
+
+* When [`mode => encrypt`](#plugins-filters-cipher-mode), the `source` should be a field containing plaintext
+* When [`mode => decrypt`](#plugins-filters-cipher-mode), the `source` should be a field containing IV + ciphertext
+
+For example, to use the `message` field (the default):
+
+```ruby
+ filter { cipher { source => "message" } }
+```
+
+
+### `target` [plugins-filters-cipher-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The name of the target field to put the result:
+
+* When [`mode => encrypt`](#plugins-filters-cipher-mode), the IV + ciphertext result will be stored in the `target` field
+* When [`mode => decrypt`](#plugins-filters-cipher-mode), the plaintext result will be stored in the `target` field
+
+For example, to place the result into the `crypt` field:
+
+```ruby
+ filter { cipher { target => "crypt" } }
+```
+
+
+
+## Common options [plugins-filters-cipher-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-cipher-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-cipher-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-cipher-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-cipher-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-cipher-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-cipher-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-cipher-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-cipher-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cipher {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ cipher {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-cipher-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cipher {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ cipher {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-cipher-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-cipher-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 cipher filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ cipher {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-cipher-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-cipher-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cipher {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ cipher {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-cipher-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ cipher {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ cipher {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-clone.md b/docs/reference/plugins-filters-clone.md
new file mode 100644
index 000000000..bdcff0b20
--- /dev/null
+++ b/docs/reference/plugins-filters-clone.md
@@ -0,0 +1,317 @@
+---
+navigation_title: "clone"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-clone.html
+---
+
+# Clone filter plugin [plugins-filters-clone]
+
+
+* Plugin version: v4.2.0
+* Released on: 2021-11-10
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-clone/blob/v4.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-clone-index.md).
+
+## Getting help [_getting_help_129]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-clone). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_128]
+
+The clone filter is for duplicating events. A clone will be created for each type in the clone list. The original event is left unchanged and a `type` field is added to the clone. Created events are inserted into the pipeline as normal events and will be processed by the remaining pipeline configuration starting from the filter that generated them (i.e. this plugin).
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [_event_metadata_and_the_elastic_common_schema_ecs]
+
+This plugin adds a tag to a cloned event. By default, the tag is stored in the `type` field. When ECS is enabled, the tag is stored in the `tags` array field.
+
+Here’s how ECS compatibility mode affects output.
+
+| ECS disabled | ECS `v1`, `v8` | Availability | Description |
+| --- | --- | --- | --- |
+| type | tags | *Always* | *a tag of cloned event* |
+
+
+## Clone Filter Configuration Options [plugins-filters-clone-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-clone-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`clones`](#plugins-filters-clone-clones) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`ecs_compatibility`](#plugins-filters-clone-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-clone-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `clones` [plugins-filters-clone-clones]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+* a new clone will be created with a `type` of the given value in this list when ECS is disabled
+* a new clone will be created with a `tags` of the given value in this list when ECS is enabled
+
+Note: setting an empty array will not create any clones. A warning message is logged.
+
+
+### `ecs_compatibility` [plugins-filters-clone-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+ * `v1`, `v8`: uses fields that are compatible with Elastic Common Schema
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the behavior of the [`clones`](#plugins-filters-clone-clones) setting.
+
+Example:
+
+```ruby
+ filter {
+ clone {
+ clones => ["sun", "moon"]
+ }
+ }
+```
+
+ECS disabled
+
+```text
+{
+ "@version" => "1",
+ "sequence" => 0,
+ "message" => "Hello World!",
+ "@timestamp" => 2021-03-24T11:20:36.226Z,
+ "host" => "example.com"
+}
+{
+ "@version" => "1",
+ "sequence" => 0,
+ "message" => "Hello World!",
+ "@timestamp" => 2021-03-24T11:20:36.226Z,
+ "type" => "sun",
+ "host" => "example.com"
+}
+{
+ "@version" => "1",
+ "sequence" => 0,
+ "message" => "Hello World!",
+ "@timestamp" => 2021-03-24T11:20:36.226Z,
+ "type" => "moon",
+ "host" => "example.com"
+}
+```
+
+ECS enabled
+
+```text
+{
+ "sequence" => 0,
+ "@timestamp" => 2021-03-23T20:25:10.042Z,
+ "message" => "Hello World!",
+ "@version" => "1",
+ "host" => "example.com"
+}
+{
+ "tags" => [
+ [0] "sun"
+ ],
+ "sequence" => 0,
+ "@timestamp" => 2021-03-23T20:25:10.042Z,
+ "message" => "Hello World!",
+ "@version" => "1",
+ "host" => "example.com"
+}
+{
+ "tags" => [
+ [0] "moon"
+ ],
+ "sequence" => 0,
+ "@timestamp" => 2021-03-23T20:25:10.042Z,
+ "message" => "Hello World!",
+ "@version" => "1",
+ "host" => "example.com"
+}
+```
+
+
+
+## Common options [plugins-filters-clone-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-clone-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-clone-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-clone-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-clone-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-clone-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-clone-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-clone-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-clone-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ clone {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ clone {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-clone-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ clone {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ clone {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-clone-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-clone-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 clone filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ clone {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-clone-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-clone-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ clone {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ clone {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-clone-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ clone {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ clone {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-csv.md b/docs/reference/plugins-filters-csv.md
new file mode 100644
index 000000000..4322be5f1
--- /dev/null
+++ b/docs/reference/plugins-filters-csv.md
@@ -0,0 +1,345 @@
+---
+navigation_title: "csv"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-csv.html
+---
+
+# Csv filter plugin [plugins-filters-csv]
+
+
+* Plugin version: v3.1.1
+* Released on: 2021-06-08
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-csv-index.md).
+
+## Getting help [_getting_help_130]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-csv). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_129]
+
+The CSV filter takes an event field containing CSV data, parses it, and stores it as individual fields with optionally-specified field names. This filter can parse data with any separator, not just commas.
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-filters-csv-ecs_metadata]
+
+The plugin behaves the same regardless of ECS compatibility, except giving a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
+
+
+
+## Csv Filter Configuration Options [plugins-filters-csv-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-csv-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`autodetect_column_names`](#plugins-filters-csv-autodetect_column_names) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`autogenerate_column_names`](#plugins-filters-csv-autogenerate_column_names) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`columns`](#plugins-filters-csv-columns) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`convert`](#plugins-filters-csv-convert) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`ecs_compatibility`](#plugins-filters-csv-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`quote_char`](#plugins-filters-csv-quote_char) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`separator`](#plugins-filters-csv-separator) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`skip_empty_columns`](#plugins-filters-csv-skip_empty_columns) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`skip_empty_rows`](#plugins-filters-csv-skip_empty_rows) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`skip_header`](#plugins-filters-csv-skip_header) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`source`](#plugins-filters-csv-source) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-csv-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-csv-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `autodetect_column_names` [plugins-filters-csv-autodetect_column_names]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether column names should be auto-detected from the header column or not. Defaults to false.
+
+Logstash pipeline workers must be set to `1` for this option to work.
+
+
+### `autogenerate_column_names` [plugins-filters-csv-autogenerate_column_names]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Define whether column names should be autogenerated or not. Defaults to true. If set to false, columns not having a header specified will not be parsed.
+
+
+### `columns` [plugins-filters-csv-columns]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Define a list of column names (in the order they appear in the CSV, as if it were a header line). If `columns` is not configured, or there are not enough columns specified, the default column names are "column1", "column2", etc. In the case that there are more columns in the data than specified in this column list, extra columns will be auto-numbered: (e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.)
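+
+For example, with hypothetical column names:
+
+```ruby
+    filter {
+      csv {
+        columns => [ "timestamp", "client_ip", "bytes" ]
+      }
+    }
+```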
+
+
+### `convert` [plugins-filters-csv-convert]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Define a set of datatype conversions to be applied to columns. Possible conversions are integer, float, date, date_time, and boolean.
+
+Example:
+
+```ruby
+ filter {
+ csv {
+ convert => {
+ "column1" => "integer"
+ "column2" => "boolean"
+ }
+ }
+ }
+```
+
+
+### `ecs_compatibility` [plugins-filters-csv-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+ * `v1`: uses the value in `target` as field name
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-filters-csv-ecs_metadata) for detailed information.
+
+
+### `quote_char` [plugins-filters-csv-quote_char]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\""`
+
+Define the character used to quote CSV fields. If this is not specified, the default is a double quote `"`. Optional.
+
+
+### `separator` [plugins-filters-csv-separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `","`
+
+Define the column separator value. If this is not specified, the default is a comma `,`. If you want to define a tabulation as a separator, you need to set the value to the actual tab character and not `\t`. Optional.
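+
+For example, to parse semicolon-separated data (an illustrative separator, not the default):
+
+```ruby
+    filter {
+      csv {
+        separator => ";"
+      }
+    }
+```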
+
+
+### `skip_empty_columns` [plugins-filters-csv-skip_empty_columns]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether empty columns should be skipped. Defaults to false. If set to true, columns containing no value will not get set.
+
+
+### `skip_empty_rows` [plugins-filters-csv-skip_empty_rows]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether empty rows could potentially be skipped. Defaults to false. If set to true, rows containing no value will be tagged with `_csvskippedemptyfield`. This tag can be referenced by users if they wish to cancel events using an *if* conditional statement.
+
+
+### `skip_header` [plugins-filters-csv-skip_header]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Define whether the header should be skipped. Defaults to false. If set to true, the header will be skipped. This assumes that the header is not repeated within further rows, as such rows will also be skipped. If `skip_header` is set without `autodetect_column_names`, then `columns` should be set; any row that exactly matches the specified column values will be skipped. If `skip_header` and `autodetect_column_names` are both specified, then `columns` should not be specified; in this case `autodetect_column_names` fills the `columns` setting in the background from the first event seen, and any subsequent row that matches the autodetected values will be skipped.
+
+Logstash pipeline workers must be set to `1` for this option to work.
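+
+For example, a sketch that combines `skip_header` with `autodetect_column_names` (and assumes the pipeline runs with `pipeline.workers: 1`):
+
+```ruby
+    filter {
+      csv {
+        autodetect_column_names => true
+        skip_header             => true
+      }
+    }
+```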
+
+
+### `source` [plugins-filters-csv-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The CSV data in the value of the `source` field will be expanded into a data structure.
+
+
+### `target` [plugins-filters-csv-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define target field for placing the data. Defaults to writing to the root of the event.
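+
+For example, to nest the parsed columns under a hypothetical `csv_data` field instead of the event root:
+
+```ruby
+    filter {
+      csv {
+        source => "message"
+        target => "csv_data"
+      }
+    }
+```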
+
+
+
+## Common options [plugins-filters-csv-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-csv-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-csv-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-csv-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-csv-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-csv-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-csv-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-csv-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-csv-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ csv {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ csv {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-csv-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ csv {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ csv {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-csv-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-csv-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 csv filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ csv {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-csv-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-csv-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ csv {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ csv {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-csv-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ csv {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ csv {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-date.md b/docs/reference/plugins-filters-date.md
new file mode 100644
index 000000000..f32ecbc26
--- /dev/null
+++ b/docs/reference/plugins-filters-date.md
@@ -0,0 +1,423 @@
+---
+navigation_title: "date"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-date.html
+---
+
+# Date filter plugin [plugins-filters-date]
+
+
+* Plugin version: v3.1.15
+* Released on: 2022-06-29
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.15/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-date-index.md).
+
+## Getting help [_getting_help_131]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-date). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_130]
+
+The date filter is used for parsing dates from fields, and then using that date or timestamp as the logstash timestamp for the event.
+
+For example, syslog events usually have timestamps like this:
+
+```ruby
+ "Apr 17 09:32:01"
+```
+
+You would use the date format `MMM dd HH:mm:ss` to parse this.
+
+The date filter is especially important for sorting events and for backfilling old data. If you don’t get the date correct in your event, then searching for it later will likely sort out of order.
+
+In the absence of this filter, logstash will choose a timestamp based on the first time it sees the event (at input time), if the timestamp is not already set in the event. For example, with file input, the timestamp is set to the time of each read.
+
+
+## Date Filter Configuration Options [plugins-filters-date-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-date-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`locale`](#plugins-filters-date-locale) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`match`](#plugins-filters-date-match) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_failure`](#plugins-filters-date-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-filters-date-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timezone`](#plugins-filters-date-timezone) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-date-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `locale` [plugins-filters-date-locale]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX.
+
+The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and weekday names (pattern with `EEE`).
+
+If not specified, the platform default will be used. For a non-English platform default, an English parser will also be used as a fallback mechanism.
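+
+For example, to parse month names written in US English (the field name and pattern here are illustrative):
+
+```ruby
+    filter {
+      date {
+        match  => [ "logdate", "dd MMMM yyyy" ]
+        locale => "en-US"
+      }
+    }
+```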
+
+
+### `match` [plugins-filters-date-match]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An array with field name first, and format patterns following, `[ field, formats... ]`
+
+If your time field has multiple possible formats, you can do this:
+
+```ruby
+ match => [ "logdate", "MMM dd yyyy HH:mm:ss",
+ "MMM d yyyy HH:mm:ss", "ISO8601" ]
+```
+
+The above will match a syslog (rfc3164) or `iso8601` timestamp.
+
+There are a few special exceptions. The following format literals exist to help you save time and ensure correctness of date parsing.
+
+* `ISO8601` - should parse any valid ISO8601 timestamp, such as `2011-04-19T03:44:01.103Z`
+* `UNIX` - will parse **float or int** value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001
+* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000
+* `TAI64N` - will parse tai64n time values
+
+For example, if you have a field `logdate`, with a value that looks like `Aug 13 2010 00:03:44`, you would use this configuration:
+
+```ruby
+ filter {
+ date {
+ match => [ "logdate", "MMM dd yyyy HH:mm:ss" ]
+ }
+ }
+```
+
+If your field is nested in your structure, you can use the nested syntax `[foo][bar]` to match its value. For more information, please refer to [Field references](/reference/event-dependent-configuration.md#logstash-config-field-references)
+
+**More details on the syntax**
+
+The syntax used for parsing date and time text uses letters to indicate the kind of time value (month, minute, etc), and a repetition of letters to indicate the form of that value (2-digit month, full month name, etc).
+
+Here’s what you can use to parse dates and times:
+
+y
+: year
+
+ yyyy
+ : full year number. Example: `2015`.
+
+ yy
+ : two-digit year. Example: `15` for the year 2015.
+
+
+M
+: month of the year
+
+ M
+ : minimal-digit month. Example: `1` for January and `12` for December.
+
+ MM
+ : two-digit month. zero-padded if needed. Example: `01` for January and `12` for December
+
+ MMM
+ : abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language.
+
+ MMMM
+ : full month text, Example: `January`. Note: The language used depends on your locale.
+
+
+d
+: day of the month
+
+ d
+ : minimal-digit day. Example: `1` for the 1st of the month.
+
+ dd
+ : two-digit day, zero-padded if needed. Example: `01` for the 1st of the month.
+
+
+H
+: hour of the day (24-hour clock)
+
+ H
+ : minimal-digit hour. Example: `0` for midnight.
+
+ HH
+ : two-digit hour, zero-padded if needed. Example: `00` for midnight.
+
+
+m
+: minutes of the hour (60 minutes per hour)
+
+ m
+ : minimal-digit minutes. Example: `0`.
+
+ mm
+ : two-digit minutes, zero-padded if needed. Example: `00`.
+
+
+s
+: seconds of the minute (60 seconds per minute)
+
+ s
+ : minimal-digit seconds. Example: `0`.
+
+ ss
+ : two-digit seconds, zero-padded if needed. Example: `00`.
+
+
+S
+: fraction of a second. **Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.**
+
+ S
+ : tenths of a second. Example: `0` for a subsecond value `012`
+
+ SS
+ : hundredths of a second. Example: `01` for a subsecond value `01`
+
+ SSS
+ : thousandths of a second. Example: `012` for a subsecond value `012`
+
+
+Z
+: time zone offset or identity
+
+ Z
+ : Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`.
+
+ ZZ
+ : Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`.
+
+ ZZZ
+ : Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the [Joda.org available time zones page](http://joda-time.sourceforge.net/timezones.html).
+
+
+z
+: time zone names. **Time zone names (*z*) cannot be parsed.**
+
+w
+: week of the year
+
+ w
+ : minimal-digit week. Example: `1`.
+
+ ww
+ : two-digit week, zero-padded if needed. Example: `01`.
+
+
+D
+: day of the year
+
+e
+: day of the week (number)
+
+E
+: day of the week (text)
+
+ E, EE, EEE
+ : Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale.
+
+ EEEE
+ : The full text day of the week. Example: `Monday`, `Tuesday`, … Note: The actual language of this will depend on your locale.
+
+
+For non-formatting syntax, you’ll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time, `2015-01-01T01:12:23`, that little `T` isn’t a valid time format, and you want to say "literally, a T". In that case, your format would be `yyyy-MM-dd'T'HH:mm:ss`.
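+
+As an illustrative sketch (assuming the value lives in a field named `logdate`):
+
+```ruby
+filter {
+  date {
+    # the quoted 'T' is treated as a literal character, not a format letter
+    match => [ "logdate", "yyyy-MM-dd'T'HH:mm:ss" ]
+  }
+}
+```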
+
+Other less common date units, such as era (G), century (C), and am/pm (a), are described in the [joda-time documentation](http://www.joda.org/joda-time/key_format.html).
+
+
+### `tag_on_failure` [plugins-filters-date-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_dateparsefailure"]`
+
+Append values to the `tags` field when there has been no successful match
+
+
+### `target` [plugins-filters-date-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"@timestamp"`
+
+Store the matching timestamp into the given target field. If not provided, it defaults to updating the `@timestamp` field of the event.
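+
+For example, to leave `@timestamp` untouched and store the parsed value in a separate field (the field names here are illustrative):
+
+```ruby
+filter {
+  date {
+    match  => [ "logdate", "MMM dd yyyy HH:mm:ss" ]
+    target => "parsed_timestamp"
+  }
+}
+```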
+
+
+### `timezone` [plugins-filters-date-timezone]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Specify a time zone canonical ID to be used for date parsing. The valid IDs are listed on the [Joda.org available time zones page](http://joda-time.sourceforge.net/timezones.html). This is useful in case the time zone cannot be extracted from the value, and is not the platform default. If this is not specified, the platform default will be used. A canonical ID is preferable because it takes care of daylight saving time for you. For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs. This field can be dynamic and include parts of the event using the `%{{field}}` syntax.
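+
+For example, a sketch that treats timestamps without an explicit offset as Paris local time (the `logdate` field name is illustrative):
+
+```ruby
+filter {
+  date {
+    match    => [ "logdate", "MMM dd yyyy HH:mm:ss" ]
+    timezone => "Europe/Paris"
+    # the value can also be taken from the event, for example "%{[beat][timezone]}"
+  }
+}
+```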
+
+
+
+## Common options [plugins-filters-date-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-date-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-date-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-date-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-date-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-date-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-date-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-date-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-date-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ date {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ date {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-date-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ date {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ date {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-date-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-date-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 date filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ date {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-date-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-date-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ date {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ date {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-date-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ date {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ date {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-de_dot.md b/docs/reference/plugins-filters-de_dot.md
new file mode 100644
index 000000000..2ba849808
--- /dev/null
+++ b/docs/reference/plugins-filters-de_dot.md
@@ -0,0 +1,249 @@
+---
+navigation_title: "de_dot"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-de_dot.html
+---
+
+# De_dot filter plugin [plugins-filters-de_dot]
+
+
+* Plugin version: v1.1.0
+* Released on: 2024-05-27
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-de_dot-index.md).
+
+## Getting help [_getting_help_132]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-de_dot). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_131]
+
+This filter *appears* to rename fields by replacing `.` characters with a different separator. In reality, it’s a somewhat expensive filter that has to copy the source field contents to a new destination field (whose name no longer contains dots), and then remove the corresponding source field.
+
+It should only be used if no other options are available.
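+
+As a rough sketch, a configuration that rewrites a dotted field such as `user.name` (an illustrative name) to `user_name` might look like this:
+
+```ruby
+filter {
+  de_dot {
+    # restrict the filter to known dotted fields; omitting this checks all top-level fields
+    fields => ["user.name"]
+  }
+}
+```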
+
+
+## De_dot Filter Configuration Options [plugins-filters-de_dot-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-de_dot-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`fields`](#plugins-filters-de_dot-fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`nested`](#plugins-filters-de_dot-nested) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`recursive`](#plugins-filters-de_dot-recursive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`separator`](#plugins-filters-de_dot-separator) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-de_dot-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `fields` [plugins-filters-de_dot-fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The `fields` array should contain a list of known fields to act on. If undefined, all top-level fields will be checked. Sub-fields must be manually specified in the array. For example, `["field.suffix","[foo][bar.suffix]"]` will result in a top-level field `field_suffix` and a nested field `[foo][bar_suffix]`.
+
+::::{warning}
+This is an expensive operation.
+::::
+
+
+
+### `nested` [plugins-filters-de_dot-nested]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `nested` is *true*, sub-fields are created instead of replacing dots with a different separator.
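+
+A sketch of the difference, using an illustrative `foo.bar` field: with the default behavior the result is a `foo_bar` field, while with `nested => true` it becomes the nested field `[foo][bar]`.
+
+```ruby
+filter {
+  de_dot {
+    nested => true
+    fields => ["foo.bar"]   # illustrative field name
+  }
+}
+```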
+
+
+### `recursive` [plugins-filters-de_dot-recursive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `recursive` is *true*, then recursively check sub-fields. It is recommended you only use this when setting specific fields, as this is an expensive operation.
+
+
+### `separator` [plugins-filters-de_dot-separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_"`
+
+Replace dots with this value.
+
+
+
+## Common options [plugins-filters-de_dot-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-de_dot-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-de_dot-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-de_dot-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-de_dot-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-de_dot-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-de_dot-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-de_dot-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-de_dot-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ de_dot {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ de_dot {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-de_dot-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ de_dot {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ de_dot {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-de_dot-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-de_dot-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 de_dot filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ de_dot {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-de_dot-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-de_dot-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ de_dot {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ de_dot {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-de_dot-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ de_dot {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ de_dot {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-dissect.md b/docs/reference/plugins-filters-dissect.md
new file mode 100644
index 000000000..2ae84a7c0
--- /dev/null
+++ b/docs/reference/plugins-filters-dissect.md
@@ -0,0 +1,531 @@
+---
+navigation_title: "dissect"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-dissect.html
+---
+
+# Dissect filter plugin [plugins-filters-dissect]
+
+
+* Plugin version: v1.2.5
+* Released on: 2022-02-14
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.2.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-dissect-index.md).
+
+## Getting help [_getting_help_133]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-dissect). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_132]
+
+The Dissect filter plugin tokenizes incoming strings using defined patterns. It extracts unstructured event data into fields using delimiters. This process is called tokenization.
+
+Unlike a regular split operation where one delimiter is applied to the whole string, the Dissect operation applies a set of delimiters to a string value.
+
+::::{note}
+All keys must be found and extracted for tokenization to be successful. If one or more keys cannot be found, an error occurs and the original event is not modified.
+::::
+
+
+### Dissect or Grok? Or both? [_dissect_or_grok_or_both]
+
+Dissect differs from Grok in that it does not use regular expressions and is faster. Dissect works well when data is reliably repeated. Grok is a better choice when the structure of your text varies from line to line.
+
+You can use both Dissect and Grok for a hybrid use case when a section of the line is reliably repeated, but the entire line is not. The Dissect filter can deconstruct the section of the line that is repeated. The Grok filter can process the remaining field values with more regex predictability.
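+
+As a sketch of the hybrid approach (the field names and patterns below are illustrative), dissect peels off the reliably repeated prefix and grok then applies a regular expression only to the variable remainder:
+
+```ruby
+filter {
+  dissect {
+    mapping => {
+      # the timestamp and source portion is always structured the same way
+      "message" => "%{ts} %{+ts} %{+ts} %{src} %{rest}"
+    }
+  }
+  grok {
+    # the remainder varies from line to line, so match it with a regex
+    match => { "rest" => "%{LOGLEVEL:loglevel} %{GREEDYDATA:details}" }
+  }
+}
+```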
+
+
+### Terminology [_terminology]
+
+**dissect pattern** - the set of fields and delimiters describing the textual format. Also known as a dissection. The dissection is described using a set of `%{}` sections: `%{{a}} - %{{b}} - %{{c}}`
+
+**field** - the text from `%{` to `}` inclusive.
+
+**delimiter** - the text between `}` and the next `%{` characters. Any set of characters other than `%{`, `'not }'`, or `}` is a delimiter.
+
+**key** - the text between the `%{` and `}`, exclusive of the `?`, `+`, `&` prefixes and the ordinal suffix.
+
+Examples:
+
+`%{?aaa}` - the key is `aaa`
+
+`%{+bbb/3}` - the key is `bbb`
+
+`%{&ccc}` - the key is `ccc`
+
+::::{note}
+Using the `.` (dot) as `key` will generate fields with `.` in the field name. If you want to get nested fields, use the brackets notation such as `%{[fieldname][subfieldname]}`.
+::::
+
+
+
+### Sample configuration [_sample_configuration_2]
+
+The config might look like this:
+
+```ruby
+ filter {
+ dissect {
+ mapping => {
+ "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+ }
+ }
+ }
+```
+
+When a string is dissected from left to right, text is captured up to the first delimiter. The captured text is stored in the first field. This is repeated for each field/delimiter pair until the last delimiter is reached. Then **the remaining text is stored in the last field**.
+
+
+
+## Notations [_notations]
+
+[Normal field notation](#plugins-filters-dissect-normal)
+
+[Skip field notation](#plugins-filters-dissect-skip)
+
+[Append field notation](#plugins-filters-dissect-append)
+
+[Indirect field notation](#plugins-filters-dissect-indirect)
+
+### Notes and usage guidelines [_notes_and_usage_guidelines]
+
+* For append or indirect fields, the key can refer to a field that already exists in the event before dissection.
+* Use a Skip field if you do not want the indirection key/value stored.
+
+ Example:
+
+ `%{?a}: %{&a}` applied to text `google: 77.98` will build a key/value of `google => 77.98`.
+
+* Append and indirect cannot be combined.
+
+ Examples:
+
+ `%{+&something}` will add a value to the `&something` key (probably not the intended outcome).
+
+ `%{&+something}` will add a value to the `+something` key (again probably unintended).
+
+
+
+### Normal field notation [plugins-filters-dissect-normal]
+
+The found value is added to the Event using the key. A normal field has no prefix or suffix.
+
+Example:
+
+`%{{some_field}}`
+
+
+### Skip field notation [plugins-filters-dissect-skip]
+
+The found value is stored internally, but is not added to the Event. The key, if supplied, is prefixed with a `?`.
+
+Examples:
+
+`%{}` is an empty skip field.
+
+`%{?foo}` is a named skip field.
+
+
+### Append field notation [plugins-filters-dissect-append]
+
+If the value is the first field seen, it is stored. Subsequent fields are appended to another value.
+
+The key is prefixed with a `+`. The final value is stored in the Event using the key.
+
+::::{note}
+The delimiter found before the field is appended with the value. If no delimiter is found before the field, a single space character is used.
+::::
+
+
+Examples:
+
+`%{+some_field}` is an append field.
+
+`%{+some_field/2}` is an append field with an order modifier.
+
+**Order modifiers**
+
+An order modifier, `/digits`, allows one to reorder the append sequence.
+
+Example:
+
+For text `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3`.
+
+**Append fields** without an order modifier will append in declared order.
+
+Example:
+
+For text `1 2 3 go`, this `%{{a}} %{{b}} %{+a}` will build two key/values of `a => 1 3 go, b => 2`
+
+
+### Indirect field notation [plugins-filters-dissect-indirect]
+
+The found value is added to the Event using the found value of another field as the key. The key is prefixed with a `&`.
+
+Examples:
+
+`%{&some_field}` is an indirect field where the key is indirectly sourced from the value of `some_field`.
+
+For text `error: some_error, some_description`, this notation `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
+
+
+
+## Multiple Consecutive Delimiter Handling [_multiple_consecutive_delimiter_handling]
+
+::::{important}
+Multiple found delimiter handling has changed starting with version 1.1.1 of this plugin. Now multiple consecutive delimiters are seen as missing fields by default and not padding. If you are already using Dissect and your source text has fields padded with extra delimiters, you will need to change your config. Please read the section below.
+::::
+
+
+### Empty data between delimiters [_empty_data_between_delimiters]
+
+Given this text as the sample used to create a dissection:
+
+```ruby
+John Smith,Big Oaks,Wood Lane,Hambledown,Canterbury,CB34RY
+```
+
+The created dissection, with 6 fields, is:
+
+```ruby
+%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}
+```
+
+When a line like this is processed:
+
+```ruby
+Jane Doe,4321 Fifth Avenue,,,New York,87432
+```
+
+Dissect will create an event with empty fields for `addr2` and `addr3` like so:
+
+```ruby
+{
+  "name": "Jane Doe",
+  "addr1": "4321 Fifth Avenue",
+  "addr2": "",
+  "addr3": "",
+  "city": "New York",
+  "zip": "87432"
+}
+```
+
+
+### Delimiters used as padding to visually align fields [_delimiters_used_as_padding_to_visually_align_fields]
+
+**Padding to the right hand side**
+
+Given these texts as the samples used to create a dissection:
+
+```ruby
+00000043 ViewReceive machine-321
+f3000a3b Calc machine-123
+```
+
+The dissection, with 3 fields, is:
+
+```ruby
+%{id} %{function->} %{server}
+```
+
+Note, above, the second field has a `->` suffix which tells Dissect to ignore padding to its right. Dissect will create these events:
+
+```ruby
+{
+  "id": "00000043",
+  "function": "ViewReceive",
+  "server": "machine-321"
+}
+{
+  "id": "f3000a3b",
+  "function": "Calc",
+  "server": "machine-123"
+}
+```
+
+::::{important}
+Always add the `->` suffix to the field on the left of the padding.
+::::
+
+
+**Padding to the left hand side (to the human eye)**
+
+Given these texts as the samples used to create a dissection:
+
+```ruby
+00000043 ViewReceive machine-321
+f3000a3b Calc machine-123
+```
+
+The dissection, with 3 fields, is now:
+
+```ruby
+%{id->} %{function} %{server}
+```
+
+Here the `->` suffix moves to the `id` field because Dissect sees the padding as being to the right of the `id` field.
+
+
+
+## Conditional processing [_conditional_processing]
+
+You probably want to use this filter inside an `if` block. This ensures that the event contains a field value with a suitable structure for the dissection.
+
+Example:
+
+```ruby
+filter {
+ if [type] == "syslog" or "syslog" in [tags] {
+ dissect {
+ mapping => {
+ "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+ }
+ }
+ }
+}
+```
+
+
+## Dissect Filter Configuration Options [plugins-filters-dissect-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-dissect-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`convert_datatype`](#plugins-filters-dissect-convert_datatype) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`mapping`](#plugins-filters-dissect-mapping) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`tag_on_failure`](#plugins-filters-dissect-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-dissect-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `convert_datatype` [plugins-filters-dissect-convert_datatype]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+With this setting `int` and `float` datatype conversions can be specified. These will be done after all `mapping` dissections have taken place. Feel free to use this setting on its own without a `mapping` section.
+
+**Example**
+
+```ruby
+filter {
+ dissect {
+ convert_datatype => {
+ "cpu" => "float"
+ "code" => "int"
+ }
+ }
+}
+```
+
+
+### `mapping` [plugins-filters-dissect-mapping]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A hash of dissections of `field => value`
+
+::::{important}
+Don’t use an escaped newline `\n` in the value. It will be interpreted as two characters `\` + `n`. Instead use actual line breaks in the config. Also use single quotes to define the value if it contains double quotes.
+::::
+
+
+A later dissection can be done on values from a previous dissection or they can be independent.
+
+**Example**
+
+```ruby
+filter {
+ dissect {
+ mapping => {
+ # using an actual line break
+ "message" => '"%{field1}" "%{field2}"
+ "%{description}"'
+ "description" => "%{field3} %{field4} %{field5}"
+ }
+ }
+}
+```
+
+This is useful if you want to keep the field `description` but also dissect it further.
+
+
+### `tag_on_failure` [plugins-filters-dissect-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_dissectfailure"]`
+
+Append values to the `tags` field when dissection fails
+
+
+
+## Common options [plugins-filters-dissect-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-dissect-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-dissect-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-dissect-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-dissect-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-dissect-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-dissect-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-dissect-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-dissect-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dissect {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ dissect {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-dissect-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dissect {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ dissect {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-dissect-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-dissect-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 dissect filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ dissect {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-dissect-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-dissect-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ dissect {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ dissect {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-dissect-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dissect {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ dissect {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-dns.md b/docs/reference/plugins-filters-dns.md
new file mode 100644
index 000000000..df8824d6f
--- /dev/null
+++ b/docs/reference/plugins-filters-dns.md
@@ -0,0 +1,347 @@
+---
+navigation_title: "dns"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-dns.html
+---
+
+# Dns filter plugin [plugins-filters-dns]
+
+
+* Plugin version: v3.2.0
+* Released on: 2023-01-26
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-dns-index.md).
+
+## Getting help [_getting_help_134]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-dns). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_133]
+
+The DNS filter performs a lookup (either an A record/CNAME record lookup or a reverse lookup at the PTR record) on the fields specified under the `resolve` or `reverse` arrays, respectively.
+
+The config should look like this:
+
+```ruby
+ filter {
+ dns {
+ reverse => [ "source_host", "field_with_address" ]
+ resolve => [ "field_with_fqdn" ]
+ action => "replace"
+ }
+ }
+```
+
+This filter, like all filters, only processes 1 event at a time, so the use of this plugin can significantly slow down your pipeline’s throughput if you have a high latency network. By way of example, if each DNS lookup takes 2 milliseconds, the maximum throughput you can achieve with a single filter worker is 500 events per second (1000 milliseconds / 2 milliseconds).
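+
+One way to soften this, sketched below rather than tuned for any particular workload, is to enable the plugin’s caches so repeated lookups are served from memory:
+
+```ruby
+filter {
+  dns {
+    resolve           => [ "field_with_fqdn" ]
+    action            => "replace"
+    hit_cache_size    => 4096   # cache successful lookups
+    hit_cache_ttl     => 300    # keep them for 5 minutes
+    failed_cache_size => 1024   # cache failures too, so repeated misses stay cheap
+    failed_cache_ttl  => 60
+  }
+}
+```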
+
+
+## Dns Filter Configuration Options [plugins-filters-dns-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-dns-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`action`](#plugins-filters-dns-action) | [string](/reference/configuration-file-structure.md#string), one of `["append", "replace"]` | No |
+| [`failed_cache_size`](#plugins-filters-dns-failed_cache_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`failed_cache_ttl`](#plugins-filters-dns-failed_cache_ttl) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`hit_cache_size`](#plugins-filters-dns-hit_cache_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`hit_cache_ttl`](#plugins-filters-dns-hit_cache_ttl) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`hostsfile`](#plugins-filters-dns-hostsfile) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`max_retries`](#plugins-filters-dns-max_retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`nameserver`](#plugins-filters-dns-nameserver) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`resolve`](#plugins-filters-dns-resolve) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`reverse`](#plugins-filters-dns-reverse) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_timeout`](#plugins-filters-dns-tag_on_timeout) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`timeout`](#plugins-filters-dns-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-filters-dns-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `action` [plugins-filters-dns-action]
+
+* Value can be any of: `append`, `replace`
+* Default value is `"append"`
+
+Determine what action to do: append or replace the values in the fields specified under `reverse` and `resolve`.
+
+
+### `failed_cache_size` [plugins-filters-dns-failed_cache_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0` (cache disabled)
+
+The size of the cache for failed requests.
+
+
+### `failed_cache_ttl` [plugins-filters-dns-failed_cache_ttl]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How long to cache failed requests, in seconds.
+
+
+### `hit_cache_size` [plugins-filters-dns-hit_cache_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0` (cache disabled)
+
+The size of the cache for successful requests.
+
+
+### `hit_cache_ttl` [plugins-filters-dns-hit_cache_ttl]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+How long to cache successful requests, in seconds.
+
+
+### `hostsfile` [plugins-filters-dns-hostsfile]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]`
+
+
+### `max_retries` [plugins-filters-dns-max_retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+The number of times to retry a failed resolve or reverse lookup.
+
+
+### `nameserver` [plugins-filters-dns-nameserver]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash), and is composed of:
+
+ * a required `address` key, whose value is either a [string](/reference/configuration-file-structure.md#string) or an [array](/reference/configuration-file-structure.md#array), representing one or more nameserver ip addresses
+ * an optional `search` key, whose value is either a [string](/reference/configuration-file-structure.md#string) or an [array](/reference/configuration-file-structure.md#array), representing between one and six search domains (e.g., with search domain `com`, a query for `example` will match DNS entries for `example.com`)
+ * an optional `ndots` key, used in conjunction with `search`, whose value is a [number](/reference/configuration-file-structure.md#number), representing the minimum number of dots in a domain name being resolved that will *prevent* the search domains from being used (default `1`; this option is rarely needed)
+
+* For backward-compatibility, values of [string](/reference/configuration-file-structure.md#string) and [array](/reference/configuration-file-structure.md#array) are also accepted, representing one or more nameserver ip addresses *without* search domains.
+* There is no default value for this setting.
+
+Use custom nameserver(s). For example:
+
+```ruby
+ filter {
+ dns {
+ nameserver => {
+ address => ["8.8.8.8", "8.8.4.4"]
+ search => ["internal.net"]
+ }
+ }
+ }
+```
+
+If `nameserver` is not specified then `/etc/resolv.conf` will be read to configure the resolver using the `nameserver`, `domain`, `search` and `ndots` directives in `/etc/resolv.conf`.
+
+
+### `resolve` [plugins-filters-dns-resolve]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Forward resolve one or more fields.
+
+
+### `reverse` [plugins-filters-dns-reverse]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Reverse resolve one or more fields.
+
+
+### `timeout` [plugins-filters-dns-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0.5`
+
+`resolv` calls will be wrapped in a timeout instance using this value, in seconds.
+
+
+### `tag_on_timeout` [plugins-filters-dns-tag_on_timeout]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Defaults to `["_dnstimeout"]`.
+
+Add tag(s) on DNS lookup time out.
+
+
+
+## Common options [plugins-filters-dns-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-dns-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-dns-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-dns-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-dns-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-dns-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-dns-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-dns-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-dns-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dns {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ dns {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-dns-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dns {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ dns {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-dns-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-dns-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 dns filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ dns {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-dns-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-dns-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ dns {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ dns {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-dns-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ dns {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ dns {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-drop.md b/docs/reference/plugins-filters-drop.md
new file mode 100644
index 000000000..1b06215a9
--- /dev/null
+++ b/docs/reference/plugins-filters-drop.md
@@ -0,0 +1,242 @@
+---
+navigation_title: "drop"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-drop.html
+---
+
+# Drop filter plugin [plugins-filters-drop]
+
+
+* Plugin version: v3.0.5
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-drop-index.md).
+
+## Getting help [_getting_help_135]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-drop). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_134]
+
+Drop filter.
+
+Drops everything that gets to this filter.
+
+This is best used in combination with conditionals, for example:
+
+```ruby
+ filter {
+ if [loglevel] == "debug" {
+ drop { }
+ }
+ }
+```
+
+The above will only pass events to the drop filter if the `loglevel` field is `debug`, causing all matching events to be dropped.
+
+
+## Drop Filter Configuration Options [plugins-filters-drop-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-drop-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`percentage`](#plugins-filters-drop-percentage) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-filters-drop-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `percentage` [plugins-filters-drop-percentage]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100`
+
+Drop a pre-configured percentage of the events.
+
+This is useful if you only want to drop a sample of the matching events rather than all of them.
+
+For example, to drop around 40% of the events that have the field `loglevel` with value `debug`:
+
+```ruby
+filter {
+ if [loglevel] == "debug" {
+ drop {
+ percentage => 40
+ }
+ }
+}
+```
+
+
+## Common options [plugins-filters-drop-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-drop-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-drop-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-drop-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-drop-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-drop-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-drop-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-drop-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-drop-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ drop {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ drop {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-drop-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ drop {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ drop {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-drop-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-drop-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 drop filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ drop {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-drop-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-drop-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ drop {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ drop {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-drop-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ drop {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ drop {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-elapsed.md b/docs/reference/plugins-filters-elapsed.md
new file mode 100644
index 000000000..ba47b9a64
--- /dev/null
+++ b/docs/reference/plugins-filters-elapsed.md
@@ -0,0 +1,326 @@
+---
+navigation_title: "elapsed"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-elapsed.html
+---
+
+# Elapsed filter plugin [plugins-filters-elapsed]
+
+
+* Plugin version: v4.1.0
+* Released on: 2018-07-31
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-elapsed-index.md).
+
+## Installation [_installation_58]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-elapsed`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_136]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-elapsed). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_135]
+
+The elapsed filter tracks a pair of start/end events and uses their timestamps to calculate the elapsed time between them.
+
+The filter has been developed to track the execution time of processes and other long tasks.
+
+The configuration looks like this:
+
+```ruby
+ filter {
+ elapsed {
+ start_tag => "start event tag"
+ end_tag => "end event tag"
+ unique_id_field => "id field name"
+ timeout => seconds
+ new_event_on_match => true/false
+ }
+ }
+```
+
+The events managed by this filter must have some particular properties. The event describing the start of the task (the "start event") must contain a tag equal to `start_tag`. The event describing the end of the task (the "end event") must contain a tag equal to `end_tag`. Both kinds of events need an ID field that uniquely identifies that particular task. The name of this field is stored in `unique_id_field`.
+
+You can use a Grok filter to prepare the events for the elapsed filter. An example configuration might be:
+
+```ruby
+filter {
+  grok {
+    match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?<task_id>.*)" }
+    add_tag => [ "taskStarted" ]
+  }
+
+  grok {
+    match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?<task_id>.*)" }
+    add_tag => [ "taskTerminated" ]
+  }
+
+  elapsed {
+    start_tag => "taskStarted"
+    end_tag => "taskTerminated"
+    unique_id_field => "task_id"
+  }
+}
+```
+
+The elapsed filter collects all the "start events". If two or more "start events" have the same ID, only the first one is recorded and the others are discarded.
+
+When an "end event" matching a previously collected "start event" is received, there is a match. The configuration property `new_event_on_match` tells where to insert the elapsed information: it can be added to the "end event", or a new "match event" can be created. In either case, the event stores the following information:
+
+* the tags `elapsed` and `elapsed_match`
+* the field `elapsed_time` with the difference, in seconds, between the two events timestamps
+* an ID field with the task ID
+* the field `elapsed_timestamp_start` with the timestamp of the start event
+
+If the "end event" does not arrive before "timeout" seconds, the "start event" is discarded and an "expired event" is generated. This event contains:
+
+* the tags `elapsed` and `elapsed_expired_error`
+* a field called `elapsed_time` with the age, in seconds, of the "start event"
+* an ID field with the task ID
+* the field `elapsed_timestamp_start` with the timestamp of the "start event"
+
+
+## Elapsed Filter Configuration Options [plugins-filters-elapsed-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-elapsed-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`end_tag`](#plugins-filters-elapsed-end_tag) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`new_event_on_match`](#plugins-filters-elapsed-new_event_on_match) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`start_tag`](#plugins-filters-elapsed-start_tag) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`timeout`](#plugins-filters-elapsed-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`unique_id_field`](#plugins-filters-elapsed-unique_id_field) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`keep_start_event`](#plugins-filters-elapsed-keep_start_event) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-elapsed-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `end_tag` [plugins-filters-elapsed-end_tag]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the tag identifying the "end event"
+
+
+### `new_event_on_match` [plugins-filters-elapsed-new_event_on_match]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+This property manages what to do when an "end event" matches a "start event". If it’s set to `false` (the default), the elapsed information is added to the "end event"; if it’s set to `true`, a new "match event" is created.
+
+
+### `start_tag` [plugins-filters-elapsed-start_tag]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the tag identifying the "start event"
+
+
+### `timeout` [plugins-filters-elapsed-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1800`
+
+The number of seconds after which an "end event" can be considered lost. The corresponding "start event" is discarded and an "expired event" is generated. The default value is 30 minutes (1800 seconds).
+
+
+### `unique_id_field` [plugins-filters-elapsed-unique_id_field]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the field containing the task ID. This value must uniquely identify the task in the system; otherwise, it’s impossible to match the pair of events.
+
+
+### `keep_start_event` [plugins-filters-elapsed-keep_start_event]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `first`
+
+This property manages what to do when several events matching the start condition are received before the end event for the specified ID. There are two supported values: `first` or `last`. If it’s set to `first` (the default), the first matching start event is used; if it’s set to `last`, the last one is used.
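+
+For example, to use the most recent "start event" when several share the same ID, a configuration might look like this (reusing the tag and field names from the earlier example):
+
+```ruby
+filter {
+  elapsed {
+    start_tag        => "taskStarted"
+    end_tag          => "taskTerminated"
+    unique_id_field  => "task_id"
+    keep_start_event => "last"
+  }
+}
+```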
+
+
+
+## Common options [plugins-filters-elapsed-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-elapsed-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-elapsed-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-elapsed-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-elapsed-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-elapsed-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-elapsed-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-elapsed-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-elapsed-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elapsed {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ elapsed {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-elapsed-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elapsed {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ elapsed {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-elapsed-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-elapsed-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elapsed filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ elapsed {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-elapsed-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-elapsed-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ elapsed {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ elapsed {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-elapsed-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elapsed {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ elapsed {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-elastic_integration.md b/docs/reference/plugins-filters-elastic_integration.md
new file mode 100644
index 000000000..1d3fcd1ab
--- /dev/null
+++ b/docs/reference/plugins-filters-elastic_integration.md
@@ -0,0 +1,674 @@
+---
+navigation_title: "elastic_integration"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-elastic_integration.html
+---
+
+# Elastic Integration filter plugin [plugins-filters-elastic_integration]
+
+
+* Plugin version: v8.17.0
+* Released on: 2025-01-08
+* [Changelog](https://github.com/elastic/logstash-filter-elastic_integration/blob/v8.17.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-elastic_integration-index.md).
+
+## Getting help [_getting_help_137]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-elastic_integration). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+::::{admonition} Elastic Enterprise License
+Use of this plugin requires an active Elastic Enterprise [subscription](https://www.elastic.co/subscriptions).
+
+::::
+
+
+
+## Description [_description_136]
+
+Use this filter to process Elastic integrations powered by {{es}} Ingest Node in {{ls}}.
+
+::::{admonition} Extending Elastic integrations with {ls}
+This plugin can help you take advantage of the extensive, built-in capabilities of [Elastic {{integrations}}](integration-docs://reference/index.md)—such as managing data collection, transformation, and visualization—and then use {{ls}} for additional data processing and output options. For more info about extending Elastic integrations with {{ls}}, check out [Using {{ls}} with Elastic Integrations](/reference/using-logstash-with-elastic-integrations.md).
+
+::::
+
+
+When you configure this filter to point to an {{es}} cluster, it detects which ingest pipeline (if any) should be executed for each event, using an explicitly-defined [`pipeline_name`](#plugins-filters-elastic_integration-pipeline_name) or auto-detecting the event’s data-stream and its default pipeline.
+
+It then loads that pipeline’s definition from {{es}} and runs that pipeline inside Logstash without transmitting the event to {{es}}. Events that are successfully handled by their ingest pipeline will have `[@metadata][target_ingest_pipeline]` set to `_none` so that any downstream {{es}} output in the Logstash pipeline will avoid running the event’s default pipeline *again* in {{es}}.
+
+::::{note}
+Some multi-pipeline configurations such as logstash-to-logstash over http(s) do not maintain the state of `[@metadata]` fields. In these setups, you may need to explicitly configure your downstream pipeline’s {{es}} output with `pipeline => "_none"` to avoid re-running the default pipeline.
+::::
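+
+For instance, a downstream pipeline’s {{es}} output can be pinned explicitly (a minimal sketch; the host is a placeholder):
+
+```ruby
+output {
+  elasticsearch {
+    hosts    => ["https://es-server:9200"]
+    pipeline => "_none"    # skip the data stream's default ingest pipeline
+  }
+}
+```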
+
+
+Events that *fail* ingest pipeline processing will be tagged with `_ingest_pipeline_failure`, and their `[@metadata][_ingest_pipeline_failure]` will be populated with details as a key/value map.
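+
+For example, you could route events that failed ingest pipeline processing to a separate destination for inspection (a minimal sketch; the host and index name are placeholders):
+
+```ruby
+output {
+  if "_ingest_pipeline_failure" in [tags] {
+    elasticsearch {
+      hosts => ["https://es-server:9200"]
+      index => "ingest-pipeline-failures"
+    }
+  }
+}
+```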
+
+### Requirements and upgrade guidance [plugins-filters-elastic_integration-requirements]
+
+* This plugin requires Java 17 minimum with {{ls}} `8.x` versions and Java 21 minimum with {{ls}} `9.x` versions.
+* When you upgrade the {{stack}}, upgrade {{ls}} (or this plugin specifically) *before* you upgrade {{kib}}. (Note that this requirement is a departure from the typical {{stack}} [installation order](docs-content://get-started/installing-elastic-stack.md#install-order-elastic-stack).)
+
+ The {{es}}-{{ls}}-{{kib}} installation order ensures the best experience with {{agent}}-managed pipelines, and embeds functionality from a version of {{es}} Ingest Node that is compatible with the plugin version (`major`.`minor`).
+
+
+
+### Using `filter-elastic_integration` with `output-elasticsearch` [plugins-filters-elastic_integration-es-tips]
+
+Elastic {{integrations}} are designed to work with [data streams](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data-streams) and [ECS-compatible](/reference/plugins-outputs-elasticsearch.md#_compatibility_with_the_elastic_common_schema_ecs) output. Be sure that these features are enabled in the [`output-elasticsearch`](/reference/plugins-outputs-elasticsearch.md) plugin.
+
+* Set [`data-stream`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data_stream) to `true`. (Check out [Data streams](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data-streams) for additional data streams settings.)
+* Set [`ecs-compatibility`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ecs_compatibility) to `v1` or `v8`.
+
+Check out the [`output-elasticsearch` plugin](/reference/plugins-outputs-elasticsearch.md) docs for additional settings.
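+
+A minimal output sketch with both settings applied (the host and API key are placeholders):
+
+```ruby
+output {
+  elasticsearch {
+    hosts             => ["https://es-server:9200"]
+    api_key           => "YOUR_ID:YOUR_API_KEY"
+    data_stream       => "true"
+    ecs_compatibility => "v8"
+  }
+}
+```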
+
+
+
+## Minimum configuration [plugins-filters-elastic_integration-minimum_configuration]
+
+You will need to configure this plugin to connect to {{es}}, and may also need to provide local GeoIp databases.
+
+```ruby
+filter {
+ elastic_integration {
+ cloud_id => "YOUR_CLOUD_ID_HERE"
+ cloud_auth => "YOUR_CLOUD_AUTH_HERE"
+ geoip_database_directory => "/etc/your/geoip-databases"
+ }
+}
+```
+
+Read on for a guide to configuration, or jump to the [complete list of configuration options](#plugins-filters-elastic_integration-options).
+
+
+## Connecting to {{es}} [plugins-filters-elastic_integration-connecting_to_elasticsearch]
+
+This plugin communicates with {{es}} to identify which ingest pipeline should be run for a given event, and to retrieve the ingest pipeline definitions themselves. You must configure this plugin to point to {{es}} using exactly one of:
+
+* A Cloud Id (see [`cloud_id`](#plugins-filters-elastic_integration-cloud_id))
+* A list of one or more host URLs (see [`hosts`](#plugins-filters-elastic_integration-hosts))
+
+Communication will be made securely over SSL unless you explicitly configure this plugin otherwise.
+
+You may need to configure how this plugin establishes trust of the server that responds, and will likely need to configure how this plugin presents its own identity or credentials.
+
+### SSL Trust Configuration [_ssl_trust_configuration]
+
+When communicating over SSL, this plugin fully-validates the proof-of-identity presented by {{es}} using the system trust store. You can provide an *alternate* source of trust with one of:
+
+* A PEM-formatted list of trusted certificate authorities (see [`ssl_certificate_authorities`](#plugins-filters-elastic_integration-ssl_certificate_authorities))
+* A JKS- or PKCS12-formatted Keystore containing trusted certificates (see [`ssl_truststore_path`](#plugins-filters-elastic_integration-ssl_truststore_path))
+
+You can also configure which aspects of the proof-of-identity are verified (see [`ssl_verification_mode`](#plugins-filters-elastic_integration-ssl_verification_mode)).
+
+
+### SSL Identity Configuration [_ssl_identity_configuration]
+
+When communicating over SSL, you can also configure this plugin to present a certificate-based proof-of-identity to the {{es}} cluster it connects to using one of:
+
+* A PKCS8 Certificate/Key pair (see [`ssl_certificate`](#plugins-filters-elastic_integration-ssl_certificate))
+* A JKS- or PKCS12-formatted Keystore (see [`ssl_keystore_path`](#plugins-filters-elastic_integration-ssl_keystore_path))
+
+
+### Request Identity [_request_identity]
+
+You can configure this plugin to present authentication credentials to {{es}} in one of several ways:
+
+* ApiKey: (see [`api_key`](#plugins-filters-elastic_integration-api_key))
+* Cloud Auth: (see [`cloud_auth`](#plugins-filters-elastic_integration-cloud_auth))
+* HTTP Basic Auth: (see [`username`](#plugins-filters-elastic_integration-username) and [`password`](#plugins-filters-elastic_integration-password))
+
+::::{note}
+Your request credentials are only as secure as the connection they are being passed over. They provide neither privacy nor secrecy on their own, and can easily be recovered by an adversary when SSL is disabled.
+::::
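+
+For example, authenticating with an API key over SSL might look like this (the host and key are placeholders):
+
+```ruby
+filter {
+  elastic_integration {
+    hosts   => ["https://127.0.0.1:9200"]
+    api_key => "YOUR_ENCODED_API_KEY_HERE"
+  }
+}
+```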
+
+
+
+
+## Minimum required privileges [plugins-filters-elastic_integration-minimum_required_privileges]
+
+This plugin communicates with Elasticsearch to resolve events into pipeline definitions and needs to be configured with credentials that have appropriate privileges to read from the relevant APIs. At startup, this plugin confirms that the current user has sufficient privileges, including:
+
+| Privilege name | Description |
+| --- | --- |
+| `monitor` | A read-only privilege for cluster operations such as cluster health or state. The plugin requires it when checking the {{es}} license. |
+| `read_pipeline` | Read-only access to get and simulate ingest pipelines. It is required when the plugin reads {{es}} ingest pipeline definitions. |
+| `manage_index_templates` | The privilege for all operations on index templates. It is required when the plugin resolves the default pipeline based on the event’s data stream name. |
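+
+As a sketch, an {{es}} role granting these cluster privileges could use a body like this (the role name and the way you create the role are up to you):
+
+```json
+{
+  "cluster": ["monitor", "read_pipeline", "manage_index_templates"]
+}
+```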
+
+::::{note}
+This plugin cannot determine if an anonymous user has the required privileges when it connects to an {{es}} cluster that has security features disabled or when the user does not provide credentials. The plugin starts in an unsafe mode with a runtime error indicating that API permissions are insufficient, and prevents events from being processed by the ingest pipeline.
+
+To avoid these issues, set up user authentication and ensure that security in {{es}} is enabled (default).
+
+::::
+
+
+
+## Supported Ingest Processors [plugins-filters-elastic_integration-supported_ingest_processors]
+
+This filter can run {{es}} Ingest Node pipelines that are *wholly* comprised of the supported subset of processors. It has access to the Painless and Mustache scripting engines where applicable:
+
+| Source | Processor | Caveats |
+| --- | --- | --- |
+| Ingest Common | `append` | *none* |
+| | `bytes` | *none* |
+| | `communityid` | *none* |
+| | `convert` | *none* |
+| | `csv` | *none* |
+| | `date` | *none* |
+| | `dateindexname` | *none* |
+| | `dissect` | *none* |
+| | `dotexpander` | *none* |
+| | `drop` | *none* |
+| | `fail` | *none* |
+| | `fingerprint` | *none* |
+| | `foreach` | *none* |
+| | `grok` | *none* |
+| | `gsub` | *none* |
+| | `htmlstrip` | *none* |
+| | `join` | *none* |
+| | `json` | *none* |
+| | `keyvalue` | *none* |
+| | `lowercase` | *none* |
+| | `networkdirection` | *none* |
+| | `pipeline` | resolved pipeline *must* be wholly-composed of supported processors |
+| | `registereddomain` | *none* |
+| | `remove` | *none* |
+| | `rename` | *none* |
+| | `reroute` | *none* |
+| | `script` | `lang` must be `painless` (default) |
+| | `set` | *none* |
+| | `sort` | *none* |
+| | `split` | *none* |
+| | `trim` | *none* |
+| | `uppercase` | *none* |
+| | `uri_parts` | *none* |
+| | `urldecode` | *none* |
+| | `user_agent` | side-loading a custom regex file is not supported; the processor will use the default user agent definitions as specified in [Elasticsearch processor definition](elasticsearch://reference/ingestion-tools/enrich-processor/user-agent-processor.md) |
+| Redact | `redact` | *none* |
+| GeoIp | `geoip` | requires MaxMind GeoIP2 databases, which may be provided by Logstash’s Geoip Database Management *OR* configured using [`geoip_database_directory`](#plugins-filters-elastic_integration-geoip_database_directory) |
+
+### Field Mappings [plugins-filters-elastic_integration-field_mappings]
+
+During execution the Ingest pipeline works with a temporary mutable *view* of the Logstash event called an ingest document. This view contains all of the as-structured fields from the event with minimal type conversions.
+
+It also contains additional metadata fields as required by ingest pipeline processors:
+
+* `_version`: a `long`-value integer equivalent to the event’s `@version`, or a sensible default value of `1`.
+* `_ingest.timestamp`: a `ZonedDateTime` equivalent to the event’s `@timestamp` field
+
+After execution completes the event is sanitized to ensure that Logstash-reserved fields have the expected shape, providing sensible defaults for any missing required fields. When an ingest pipeline has set a reserved field to a value that cannot be coerced, the value is made available in an alternate location on the event as described below.
+
+| {{ls}} field | type | value |
+| --- | --- | --- |
+| `@timestamp` | `Timestamp` | First coercible value of the ingest document’s `@timestamp`, `event.created`, `_ingest.timestamp`, or `_now` fields; or the current timestamp. When the ingest document has a value for `@timestamp` that cannot be coerced, it will be available in the event’s `_@timestamp` field. |
+| `@version` | String-encoded integer | First coercible value of the ingest document’s `@version`, or `_version` fields; or the current timestamp. When the ingest document has a value for `@version` that cannot be coerced, it will be available in the event’s `_@version` field. |
+| `@metadata` | key/value map | The ingest document’s `@metadata`; or an empty map. When the ingest document has a value for `@metadata` that cannot be coerced, it will be available in the event’s `_@metadata` field. |
+| `tags` | a String or a list of Strings | The ingest document’s `tags`. When the ingest document has a value for `tags` that cannot be coerced, it will be available in the event’s `_tags` field. |
+
+Additionally, these {{es}} IngestDocument Metadata fields are made available on the resulting event *if-and-only-if* they were set during pipeline execution:
+
+| {{es}} document metadata | {{ls}} field |
+| --- | --- |
+| `_id` | `[@metadata][_ingest_document][id]` |
+| `_index` | `[@metadata][_ingest_document][index]` |
+| `_routing` | `[@metadata][_ingest_document][routing]` |
+| `_version` | `[@metadata][_ingest_document][version]` |
+| `_version_type` | `[@metadata][_ingest_document][version_type]` |
+| `_ingest.timestamp` | `[@metadata][_ingest_document][timestamp]` |
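+
+For example, a downstream {{es}} output could reuse the target index set by the ingest pipeline, when present (a minimal sketch; the host is a placeholder):
+
+```ruby
+output {
+  if [@metadata][_ingest_document][index] {
+    elasticsearch {
+      hosts => ["https://es-server:9200"]
+      index => "%{[@metadata][_ingest_document][index]}"
+    }
+  }
+}
+```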
+
+
+
+## Resolving Pipeline Definitions [plugins-filters-elastic_integration-resolving]
+
+This plugin uses {{es}} to resolve pipeline names into their pipeline definitions. When configured *without* an explicit [`pipeline_name`](#plugins-filters-elastic_integration-pipeline_name), or when a pipeline uses the Reroute Processor, it also uses {{es}} to establish mappings of data stream names to their respective default pipeline names.
+
+It uses hit/miss caches to avoid querying Elasticsearch for every single event. It also works to update these cached mappings *before* they expire. The result is that when {{es}} is responsive this plugin is able to pick up changes quickly without impacting its own performance, and it can survive periods of {{es}} issues without interruption by continuing to use potentially-stale mappings or definitions.
+
+To achieve this, mappings are cached for a maximum of 24 hours, and cached values are reloaded every 1 minute with the following effect:
+
+* when a reloaded mapping is non-empty and is the *same* as its already-cached value, its time-to-live is reset to ensure that subsequent events can continue using the confirmed-unchanged value
+* when a reloaded mapping is non-empty and is *different* from its previously-cached value, the entry is *updated* so that subsequent events will use the new value
+* when a reloaded mapping is newly *empty*, the previous non-empty mapping is *replaced* with a new empty entry so that subsequent events will use the empty value
+* when the reload of a mapping *fails*, this plugin emits a log warning but the existing cache entry is unchanged and gets closer to its expiry.
+
+
+## Elastic Integration Filter Configuration Options [plugins-filters-elastic_integration-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-elastic_integration-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-filters-elastic_integration-api_key) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`cloud_auth`](#plugins-filters-elastic_integration-cloud_auth) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`cloud_id`](#plugins-filters-elastic_integration-cloud_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`geoip_database_directory`](#plugins-filters-elastic_integration-geoip_database_directory) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`hosts`](#plugins-filters-elastic_integration-hosts) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`password`](#plugins-filters-elastic_integration-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`pipeline_name`](#plugins-filters-elastic_integration-pipeline_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_certificate`](#plugins-filters-elastic_integration-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-filters-elastic_integration-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_enabled`](#plugins-filters-elastic_integration-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-filters-elastic_integration-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-filters-elastic_integration-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-filters-elastic_integration-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_key_passphrase`](#plugins-filters-elastic_integration-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-filters-elastic_integration-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_password`](#plugins-filters-elastic_integration-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_verification_mode`](#plugins-filters-elastic_integration-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "certificate", "none"]` | No |
+| [`username`](#plugins-filters-elastic_integration-username) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `api_key` [plugins-filters-elastic_integration-api_key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The encoded form of an API key that is used to authenticate this plugin to {{es}}.
+
+
+### `cloud_auth` [plugins-filters-elastic_integration-cloud_auth]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Cloud authentication string (`<username>:<password>` format) is an alternative for the `username`/`password` pair and can be obtained from the Elastic Cloud web console.
+
+
+### `cloud_id` [plugins-filters-elastic_integration-cloud_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* Cannot be combined with [`ssl_enabled => false`](#plugins-filters-elastic_integration-ssl_enabled).
+
+Cloud Id, from the Elastic Cloud web console.
+
+When connecting with a Cloud Id, communication to {{es}} is secured with SSL.
+
+For more details, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
+
+
+### `geoip_database_directory` [plugins-filters-elastic_integration-geoip_database_directory]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+When running in a Logstash process that has Geoip Database Management enabled, integrations that use the Geoip Processor will use managed MaxMind databases by default. By using managed databases you accept and agree to the [MaxMind EULA](https://www.maxmind.com/en/geolite2/eula).
+
+You may instead configure this plugin with the path to a local directory containing database files.
+
+This plugin will discover all regular files with the `.mmdb` suffix in the provided directory, and make each available by its file name to the GeoIp processors in integration pipelines. It expects the files it finds to be in the MaxMind DB format with one of the following database types:
+
+* `AnonymousIp`
+* `ASN`
+* `City`
+* `Country`
+* `ConnectionType`
+* `Domain`
+* `Enterprise`
+* `Isp`
+
+::::{note}
+Most integrations rely on databases being present named *exactly*:
+
+* `GeoLite2-ASN.mmdb`,
+* `GeoLite2-City.mmdb`, or
+* `GeoLite2-Country.mmdb`
+
+::::
+
+
+
+### `hosts` [plugins-filters-elastic_integration-hosts]
+
+* Value type is a list of [uri](/reference/configuration-file-structure.md#uri)s
+* There is no default value for this setting.
+* Constraints:
+
+ * When any URL contains a protocol component, all URLs must have the same protocol as each other.
+ * `https`-protocol hosts use HTTPS and cannot be combined with [`ssl_enabled => false`](#plugins-filters-elastic_integration-ssl_enabled).
+ * `http`-protocol hosts use unsecured HTTP and cannot be combined with [`ssl_enabled => true`](#plugins-filters-elastic_integration-ssl_enabled).
+ * When any URL omits a port component, the default `9200` is used.
+ * When any URL contains a path component, all URLs must have the same path as each other.
+
+
+A non-empty list of {{es}} hosts to connect to.
+
+Examples:
+
+* `"127.0.0.1"`
+* `["127.0.0.1:9200","127.0.0.2:9200"]`
+* `["http://127.0.0.1"]`
+* `["https://127.0.0.1:9200"]`
+* `["https://127.0.0.1:9200/subpath"]` (If using a proxy on a subpath)
+
+When connecting with a list of hosts, communication to {{es}} is secured with SSL unless configured otherwise.
+
+::::{admonition} Disabling SSL is dangerous
+:class: warning
+
+The security of this plugin relies on SSL to avoid leaking credentials and to avoid running illegitimate ingest pipeline definitions.
+
+There are two ways to disable SSL:
+
+* Provide a list of `http`-protocol hosts
+* Set [`ssl_enabled => false`](#plugins-filters-elastic_integration-ssl_enabled)
+
+::::
+
+
+
+### `password` [plugins-filters-elastic_integration-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when request auth is configured with [`username`](#plugins-filters-elastic_integration-username)
+
+A password when using HTTP Basic Authentication to connect to {{es}}.
+
+
+### `pipeline_name` [plugins-filters-elastic_integration-pipeline_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* When present, the event’s initial pipeline will *not* be auto-detected from the event’s data stream fields.
+* Value may be a [sprintf-style](/reference/event-dependent-configuration.md#sprintf) template; if any referenced fields cannot be resolved, the event will not be routed to an ingest pipeline (see the example below).
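+
+For example, a sprintf-style value can route each event to an ingest pipeline named after one of its fields (the field name here is purely illustrative):
+
+```ruby
+filter {
+  elastic_integration {
+    hosts         => ["https://127.0.0.1:9200"]
+    pipeline_name => "logs-%{[event][dataset]}"
+  }
+}
+```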
+
+
+### `ssl_certificate` [plugins-filters-elastic_integration-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_key`](#plugins-filters-elastic_integration-ssl_key) and [`ssl_key_passphrase`](#plugins-filters-elastic_integration-ssl_key_passphrase) are also required.
+* Cannot be combined with configurations that disable SSL
+
+Path to a PEM-encoded certificate or certificate chain with which to identify this plugin to {{es}}.
+
+
+### `ssl_certificate_authorities` [plugins-filters-elastic_integration-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)s
+* There is no default value for this setting.
+* Cannot be combined with configurations that disable SSL
+* Cannot be combined with [`ssl_verification_mode => none`](#plugins-filters-elastic_integration-ssl_verification_mode).
+
+One or more PEM-formatted files defining certificate authorities.
+
+This setting can be used to *override* the system trust store for verifying the SSL certificate presented by {{es}}.
+
+
+### `ssl_enabled` [plugins-filters-elastic_integration-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Secure SSL communication to {{es}} is enabled unless:
+
+* it is explicitly disabled with `ssl_enabled => false`; OR
+* it is implicitly disabled by providing `http`-protocol [`hosts`](#plugins-filters-elastic_integration-hosts).
+
+Specifying `ssl_enabled => true` can be a helpful redundant safeguard to ensure this plugin cannot be configured to use non-ssl communication.
+
+
+### `ssl_key` [plugins-filters-elastic_integration-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_certificate`](#plugins-filters-elastic_integration-ssl_certificate)
+* Cannot be combined with configurations that disable SSL
+
+A path to a PKCS8-formatted SSL certificate key.
+
+
+### `ssl_keystore_password` [plugins-filters-elastic_integration-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_keystore_path`](#plugins-filters-elastic_integration-ssl_keystore_path)
+* Cannot be combined with configurations that disable SSL
+
+Password for the [`ssl_keystore_path`](#plugins-filters-elastic_integration-ssl_keystore_path).
+
+
+### `ssl_keystore_path` [plugins-filters-elastic_integration-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_keystore_password`](#plugins-filters-elastic_integration-ssl_keystore_password) is also required.
+* Cannot be combined with configurations that disable SSL
+
+A path to a JKS- or PKCS12-formatted keystore with which to identify this plugin to {{es}}.
+
+
+### `ssl_key_passphrase` [plugins-filters-elastic_integration-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_certificate`](#plugins-filters-elastic_integration-ssl_certificate)
+* Cannot be combined with configurations that disable SSL
+
+A password or passphrase of the [`ssl_key`](#plugins-filters-elastic_integration-ssl_key).
+
+
+### `ssl_truststore_path` [plugins-filters-elastic_integration-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_truststore_password`](#plugins-filters-elastic_integration-ssl_truststore_password) is required.
+* Cannot be combined with configurations that disable SSL
+* Cannot be combined with [`ssl_verification_mode => none`](#plugins-filters-elastic_integration-ssl_verification_mode).
+
+A path to JKS- or PKCS12-formatted keystore where trusted certificates are located.
+
+This setting can be used to *override* the system trust store for verifying the SSL certificate presented by {{es}}.
+
+
+### `ssl_truststore_password` [plugins-filters-elastic_integration-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection trust is configured with [`ssl_truststore_path`](#plugins-filters-elastic_integration-ssl_truststore_path)
+* Cannot be combined with configurations that disable SSL
+
+Password for the [`ssl_truststore_path`](#plugins-filters-elastic_integration-ssl_truststore_path).
+
+
+### `ssl_verification_mode` [plugins-filters-elastic_integration-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* Cannot be combined with configurations that disable SSL
+
+Level of verification of the certificate provided by {{es}}.
+
+SSL certificates presented by {{es}} are fully-validated by default.
+
+* Available modes:
+
+ * `none`: performs no validation, implicitly trusting any server that this plugin connects to (insecure)
+ * `certificate`: validates the server-provided certificate is signed by a trusted certificate authority and that the server can prove possession of its associated private key (less secure)
+ * `full` (default): performs the same validations as `certificate` and also verifies that the provided certificate has an identity claim matching the server we are attempting to connect to (most secure)
+
+
+
+### `username` [plugins-filters-elastic_integration-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* When present, [`password`](#plugins-filters-elastic_integration-password) is also required.
+
+A user name when using HTTP Basic Authentication to connect to {{es}}.
+
+
+
+
+
+## Common options [plugins-filters-elastic_integration-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-elastic_integration-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-elastic_integration-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-elastic_integration-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-elastic_integration-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-elastic_integration-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-elastic_integration-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-elastic_integration-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-elastic_integration-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elastic_integration {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ elastic_integration {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-elastic_integration-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elastic_integration {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ elastic_integration {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-elastic_integration-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-elastic_integration-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elastic_integration filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ elastic_integration {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-elastic_integration-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-elastic_integration-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ elastic_integration {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ elastic_integration {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-elastic_integration-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elastic_integration {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ elastic_integration {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-elasticsearch.md b/docs/reference/plugins-filters-elasticsearch.md
new file mode 100644
index 000000000..4bcedfe42
--- /dev/null
+++ b/docs/reference/plugins-filters-elasticsearch.md
@@ -0,0 +1,679 @@
+---
+navigation_title: "elasticsearch"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-elasticsearch.html
+---
+
+# Elasticsearch filter plugin [plugins-filters-elasticsearch]
+
+
+* Plugin version: v4.0.0
+* Released on: 2025-01-10
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v4.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-elasticsearch-index.md).
+
+## Getting help [_getting_help_138]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-elasticsearch). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_137]
+
+Search Elasticsearch for a previous log event and copy some fields from it into the current event. Below are two complete examples of how this filter might be used.
+
+The first example uses the legacy *query* parameter where the user is limited to an Elasticsearch `query_string`. Whenever Logstash receives an "end" event, it uses this elasticsearch filter to find the matching "start" event based on some operation identifier. Then it copies the `@timestamp` field from the "start" event into a new field on the "end" event. Finally, using a combination of the "date" filter and the "ruby" filter, we calculate the time duration in hours between the two events.
+
+```ruby
+if [type] == "end" {
+ elasticsearch {
+ hosts => ["es-server"]
+ query => "type:start AND operation:%{[opid]}"
+ fields => { "@timestamp" => "started" }
+ }
+
+ date {
+ match => ["[started]", "ISO8601"]
+ target => "[started]"
+ }
+
+ ruby {
+ code => "event.set('duration_hrs', (event.get('@timestamp') - event.get('started')) / 3600)"
+ }
+}
+```
+
+The example below reproduces the above example but utilises the query_template. This query_template represents a full Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues the same query as the first example but uses the template shown.
+
+```ruby
+if [type] == "end" {
+ elasticsearch {
+ hosts => ["es-server"]
+ query_template => "template.json"
+ fields => { "@timestamp" => "started" }
+ }
+
+ date {
+ match => ["[started]", "ISO8601"]
+ target => "[started]"
+ }
+
+ ruby {
+ code => "event.set('duration_hrs', (event.get('@timestamp') - event.get('started')) / 3600)"
+ }
+}
+```
+
+template.json:
+
+```json
+{
+ "size": 1,
+ "sort" : [ { "@timestamp" : "desc" } ],
+ "query": {
+ "query_string": {
+ "query": "type:start AND operation:%{[opid]}"
+ }
+ },
+ "_source": ["@timestamp"]
+}
+```
+
+As illustrated above, through the use of *opid*, fields from the Logstash events can be referenced within the template. The template will be populated per event prior to being used to query Elasticsearch.
+
+Notice also that when you use `query_template`, the Logstash attributes `result_size` and `sort` will be ignored. They should be specified directly in the JSON template, as shown in the example above.
+
+
+## Authentication [plugins-filters-elasticsearch-auth]
+
+Authentication to a secure Elasticsearch cluster is possible using *one* of the following options:
+
+* [`user`](#plugins-filters-elasticsearch-user) AND [`password`](#plugins-filters-elasticsearch-password)
+* [`cloud_auth`](#plugins-filters-elasticsearch-cloud_auth)
+* [`api_key`](#plugins-filters-elasticsearch-api_key)
+* [`ssl_keystore_path`](#plugins-filters-elasticsearch-ssl_keystore_path) and/or [`ssl_keystore_password`](#plugins-filters-elasticsearch-ssl_keystore_password)
+
+
+## Authorization [plugins-filters-elasticsearch-autz]
+
+Authorization to a secure Elasticsearch cluster requires `read` permission at index level and `monitoring` permissions at cluster level. The `monitoring` permission at cluster level is necessary to perform periodic connectivity checks.
+
+
+## Elasticsearch Filter Configuration Options [plugins-filters-elasticsearch-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-elasticsearch-common-options) described later.
+
+::::{note}
+As of version `4.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please see the [Elasticsearch Filter Obsolete Configuration Options](#plugins-filters-elasticsearch-obsolete-options) for more details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`aggregation_fields`](#plugins-filters-elasticsearch-aggregation_fields) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`api_key`](#plugins-filters-elasticsearch-api_key) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ca_trusted_fingerprint`](#plugins-filters-elasticsearch-ca_trusted_fingerprint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`cloud_auth`](#plugins-filters-elasticsearch-cloud_auth) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`cloud_id`](#plugins-filters-elasticsearch-cloud_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`docinfo_fields`](#plugins-filters-elasticsearch-docinfo_fields) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`enable_sort`](#plugins-filters-elasticsearch-enable_sort) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`fields`](#plugins-filters-elasticsearch-fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`hosts`](#plugins-filters-elasticsearch-hosts) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`index`](#plugins-filters-elasticsearch-index) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-filters-elasticsearch-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`proxy`](#plugins-filters-elasticsearch-proxy) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`query`](#plugins-filters-elasticsearch-query) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`query_template`](#plugins-filters-elasticsearch-query_template) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`result_size`](#plugins-filters-elasticsearch-result_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_on_failure`](#plugins-filters-elasticsearch-retry_on_failure) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_on_status`](#plugins-filters-elasticsearch-retry_on_status) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`sort`](#plugins-filters-elasticsearch-sort) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_certificate`](#plugins-filters-elasticsearch-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-filters-elasticsearch-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-filters-elasticsearch-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-filters-elasticsearch-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-filters-elasticsearch-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-filters-elasticsearch-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-filters-elasticsearch-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-filters-elasticsearch-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-filters-elasticsearch-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-filters-elasticsearch-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-filters-elasticsearch-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-filters-elasticsearch-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-filters-elasticsearch-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`tag_on_failure`](#plugins-filters-elasticsearch-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`user`](#plugins-filters-elasticsearch-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-elasticsearch-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `aggregation_fields` [plugins-filters-elasticsearch-aggregation_fields]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Hash of aggregation names to copy from the elasticsearch response into Logstash event fields.
+
+Example:
+
+```ruby
+ filter {
+ elasticsearch {
+ aggregation_fields => {
+ "my_agg_name" => "my_ls_field"
+ }
+ }
+ }
+```
+
+
+### `api_key` [plugins-filters-elasticsearch-api_key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Authenticate using Elasticsearch API key. Note that this option also requires enabling the [`ssl_enabled`](#plugins-filters-elasticsearch-ssl_enabled) option.
+
+Format is `id:api_key` where `id` and `api_key` are as returned by the Elasticsearch [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
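+
+For example, a minimal sketch of supplying an API key over TLS (the host and key values are placeholders):
+
+```ruby
+filter {
+  elasticsearch {
+    hosts       => ["https://es.example.com:9200"]               # placeholder host
+    ssl_enabled => true
+    api_key     => "TkpqS2xaUUJ2X0pIcVZz:example-api-key-value"  # "id:api_key" placeholder
+  }
+}
+```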
+
+
+### `ca_trusted_fingerprint` [plugins-filters-elasticsearch-ca_trusted_fingerprint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string), and must contain exactly 64 hexadecimal characters.
+* There is no default value for this setting.
+* Use of this option *requires* Logstash 8.3+
+
+The SHA-256 fingerprint of an SSL Certificate Authority to trust, such as the autogenerated self-signed CA for an Elasticsearch cluster.
+
+
+### `cloud_auth` [plugins-filters-elasticsearch-cloud_auth]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Cloud authentication string (`<username>:<password>` format) is an alternative for the `user`/`password` pair.
+
+For more info, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
+
+
+### `cloud_id` [plugins-filters-elasticsearch-cloud_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Cloud ID, from the Elastic Cloud web console. If set, `hosts` should not be used.
+
+For more info, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
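+
+As a sketch, `cloud_id` and `cloud_auth` are typically used together instead of `hosts`, `user`, and `password` (both values below are placeholders):
+
+```ruby
+filter {
+  elasticsearch {
+    cloud_id   => "my-deployment:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZA=="  # placeholder Cloud ID
+    cloud_auth => "elastic:changeme"                                                # placeholder credentials
+  }
+}
+```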
+
+
+### `docinfo_fields` [plugins-filters-elasticsearch-docinfo_fields]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Hash of docinfo fields to copy from the old event (found via elasticsearch) into the new event.
+
+Example:
+
+```ruby
+ filter {
+ elasticsearch {
+ docinfo_fields => {
+ "_id" => "document_id"
+ "_index" => "document_index"
+ }
+ }
+ }
+```
+
+
+### `enable_sort` [plugins-filters-elasticsearch-enable_sort]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether results should be sorted or not.
+
+
+### `fields` [plugins-filters-elasticsearch-fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `{}`
+
+An array of fields to copy from the old event (found via elasticsearch) into the new event currently being processed.
+
+In the following example, the values of `@timestamp` and `event_id` on the event found via elasticsearch are copied to the current event’s `started` and `start_id` fields, respectively:
+
+```ruby
+fields => {
+ "@timestamp" => "started"
+ "event_id" => "start_id"
+}
+```
+
+
+### `hosts` [plugins-filters-elasticsearch-hosts]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["localhost:9200"]`
+
+List of elasticsearch hosts to use for querying.
+
+
+### `index` [plugins-filters-elasticsearch-index]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Comma-delimited list of index names to search; use `_all` or an empty string to perform the operation on all indices. Field substitution (e.g. `index-name-%{{date_field}}`) is available.
+
+
+### `password` [plugins-filters-elasticsearch-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Basic authentication password.
+
+
+### `proxy` [plugins-filters-elasticsearch-proxy]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+* There is no default value for this setting.
+
+Set the address of a forward HTTP proxy. An empty string is treated as if the proxy was not set, which is useful when using environment variables, e.g. `proxy => '${LS_PROXY:}'`.
+
+
+### `query` [plugins-filters-elasticsearch-query]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Elasticsearch query string. More information is available in the [Elasticsearch query string documentation](elasticsearch://reference/query-languages/query-dsl-query-string-query.md#query-string-syntax). Use either `query` or `query_template`.
+
+
+### `query_template` [plugins-filters-elasticsearch-query_template]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+File path to an elasticsearch query in DSL format. More information is available in the [Elasticsearch query documentation](elasticsearch://reference/query-languages/querydsl.md). Use either `query` or `query_template`.
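+
+For illustration only, a sketch of referencing a template file; the path is hypothetical, and the file would contain an ordinary Elasticsearch query in DSL format:
+
+```ruby
+filter {
+  elasticsearch {
+    hosts          => ["localhost:9200"]
+    query_template => "/etc/logstash/search_template.json"   # hypothetical path to a query DSL file
+  }
+}
+```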
+
+
+### `result_size` [plugins-filters-elasticsearch-result_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+How many results to return.
+
+
+### `retry_on_failure` [plugins-filters-elasticsearch-retry_on_failure]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0` (retries disabled)
+
+How many times to retry an individual failed request.
+
+When enabled, retry requests that result in connection errors or an HTTP status code included in [`retry_on_status`](#plugins-filters-elasticsearch-retry_on_status).
+
+
+### `retry_on_status` [plugins-filters-elasticsearch-retry_on_status]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is an empty list `[]`
+
+Which HTTP status codes to consider for retries (in addition to connection errors) when using [`retry_on_failure`](#plugins-filters-elasticsearch-retry_on_failure).
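+
+As a sketch, retrying transient server-side errors might look like this (the status codes are chosen for illustration):
+
+```ruby
+filter {
+  elasticsearch {
+    retry_on_failure => 3
+    retry_on_status  => [500, 502, 503, 504]
+  }
+}
+```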
+
+
+### `sort` [plugins-filters-elasticsearch-sort]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"@timestamp:desc"`
+
+Comma-delimited list of `<field>:<direction>` pairs that define the sort order.
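+
+For example (field names are illustrative):
+
+```ruby
+filter {
+  elasticsearch {
+    sort => "@timestamp:desc,event_id:asc"
+  }
+}
+```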
+
+
+### `ssl_certificate` [plugins-filters-elasticsearch-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-filters-elasticsearch-ssl_key) is set.
+::::
+
+
+
+### `ssl_certificate_authorities` [plugins-filters-elasticsearch-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem files to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and [`ssl_truststore_path`](#plugins-filters-elasticsearch-ssl_truststore_path) at the same time.
+::::
+
+
+
+### `ssl_cipher_suites` [plugins-filters-elasticsearch-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-filters-elasticsearch-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme is specified in the URLs listed in [`hosts`](#plugins-filters-elasticsearch-hosts) or extracted from the [`cloud_id`](#plugins-filters-elasticsearch-cloud_id). If no explicit protocol is specified, plain HTTP will be used.
+
+
+### `ssl_key` [plugins-filters-elasticsearch-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+OpenSSL-style RSA private key that corresponds to the [`ssl_certificate`](#plugins-filters-elasticsearch-ssl_certificate).
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-filters-elasticsearch-ssl_certificate) is set.
+::::
+
+
+
+### `ssl_keystore_password` [plugins-filters-elasticsearch-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password.
+
+
+### `ssl_keystore_path` [plugins-filters-elasticsearch-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate`](#plugins-filters-elasticsearch-ssl_certificate) at the same time.
+::::
+
+
+
+### `ssl_keystore_type` [plugins-filters-elasticsearch-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-filters-elasticsearch-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-filters-elasticsearch-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password.
+
+
+### `ssl_truststore_path` [plugins-filters-elasticsearch-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-filters-elasticsearch-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-filters-elasticsearch-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-filters-elasticsearch-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the `not_before` and `not_after` dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `tag_on_failure` [plugins-filters-elasticsearch-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_elasticsearch_lookup_failure"]`
+
+Tags the event on failure to look up previous log event information. This can be used in later analysis.
+
+
+### `user` [plugins-filters-elasticsearch-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Basic authentication username.
+
+
+
+## Elasticsearch Filter Obsolete Configuration Options [plugins-filters-elasticsearch-obsolete-options]
+
+::::{warning}
+As of version `4.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| ca_file | [`ssl_certificate_authorities`](#plugins-filters-elasticsearch-ssl_certificate_authorities) |
+| keystore | [`ssl_keystore_path`](#plugins-filters-elasticsearch-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-filters-elasticsearch-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](#plugins-filters-elasticsearch-ssl_enabled) |
+
+
+## Common options [plugins-filters-elasticsearch-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-elasticsearch-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-elasticsearch-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-elasticsearch-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-elasticsearch-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-elasticsearch-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-elasticsearch-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-elasticsearch-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-elasticsearch-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elasticsearch {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ elasticsearch {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-elasticsearch-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elasticsearch {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ elasticsearch {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-elasticsearch-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-elasticsearch-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elasticsearch filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ elasticsearch {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-elasticsearch-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-elasticsearch-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ elasticsearch {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ elasticsearch {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-elasticsearch-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ elasticsearch {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ elasticsearch {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-environment.md b/docs/reference/plugins-filters-environment.md
new file mode 100644
index 000000000..6aa60a7fa
--- /dev/null
+++ b/docs/reference/plugins-filters-environment.md
@@ -0,0 +1,246 @@
+---
+navigation_title: "environment"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-environment.html
+---
+
+# Environment filter plugin [plugins-filters-environment]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-environment-index.md).
+
+## Installation [_installation_59]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-environment`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_139]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-environment). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_138]
+
+This filter stores environment variables as subfields in the `@metadata` field. You can then use these values in other parts of the pipeline.
+
+Adding environment variables is as easy as: `filter { environment { add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } } }`
+
+Accessing stored environment variables is now done through the `@metadata` field:
+
+```
+["@metadata"]["field_name"]
+```
+This would reference field `field_name`, which in the above example references the `ENV_VAR_NAME` environment variable.
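+
+For instance, a minimal sketch that copies an environment variable into `@metadata` and then uses it in a later filter (the variable and field names are hypothetical):
+
+```ruby
+filter {
+  environment {
+    add_metadata_from_env => { "app_env" => "APP_ENV" }    # hypothetical environment variable
+  }
+  mutate {
+    # promote the metadata value into a visible event field
+    add_field => { "deployment_environment" => "%{[@metadata][app_env]}" }
+  }
+}
+```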
+
+::::{important}
+Previous versions of this plugin put the environment variables as fields at the root level of the event. Current versions make use of the `@metadata` field, as outlined. You have to change `add_field_from_env` in the older versions to `add_metadata_from_env` in the newer version.
+::::
+
+
+
+## Environment Filter Configuration Options [plugins-filters-environment-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-environment-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_metadata_from_env`](#plugins-filters-environment-add_metadata_from_env) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-filters-environment-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `add_metadata_from_env` [plugins-filters-environment-add_metadata_from_env]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Specify a hash that maps event field names to the environment variables whose values you want imported into Logstash. For example:
+
+```
+add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+```
+or
+
+```
+add_metadata_from_env => {
+ "field1" => "ENV1"
+ "field2" => "ENV2"
+ # "field_n" => "ENV_n"
+}
+```
+
+
+## Common options [plugins-filters-environment-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-environment-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-environment-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-environment-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-environment-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-environment-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-environment-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-environment-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-environment-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ environment {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ environment {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-environment-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ environment {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ environment {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-environment-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-environment-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 environment filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ environment {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-environment-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-environment-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ environment {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ environment {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-environment-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ environment {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ environment {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-extractnumbers.md b/docs/reference/plugins-filters-extractnumbers.md
new file mode 100644
index 000000000..8ba059d1d
--- /dev/null
+++ b/docs/reference/plugins-filters-extractnumbers.md
@@ -0,0 +1,226 @@
+---
+navigation_title: "extractnumbers"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-extractnumbers.html
+---
+
+# Extractnumbers filter plugin [plugins-filters-extractnumbers]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-extractnumbers-index.md).
+
+## Installation [_installation_60]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-extractnumbers`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_140]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-extractnumbers). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_139]
+
+This filter automatically extracts all numbers found inside a string.
+
+This is useful when you have lines that don’t match a grok pattern or use JSON, but you still need to extract numbers.
+
+Each number is returned in a `@fields.intX` or `@fields.floatX` field, where X indicates the position of the number in the string.
+
+The fields produced by this filter are especially useful in combination with Kibana’s number plotting features.
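+
+A minimal sketch of running the filter against the default `message` field (the sample log line in the comment is made up):
+
+```ruby
+filter {
+  extractnumbers {
+    source => "message"   # e.g. "request took 250 ms and used 1.5 GB"
+                          # 250 and 1.5 would be extracted into the positional int/float fields described above
+  }
+}
+```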
+
+
+## Extractnumbers Filter Configuration Options [plugins-filters-extractnumbers-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-extractnumbers-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`source`](#plugins-filters-extractnumbers-source) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-extractnumbers-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `source` [plugins-filters-extractnumbers-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The source field for the data. By default, this is `message`.
+
+
+
+## Common options [plugins-filters-extractnumbers-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-extractnumbers-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-extractnumbers-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-extractnumbers-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-extractnumbers-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-extractnumbers-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-extractnumbers-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-extractnumbers-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-extractnumbers-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ extractnumbers {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ extractnumbers {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-extractnumbers-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ extractnumbers {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ extractnumbers {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-extractnumbers-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-extractnumbers-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 extractnumbers filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ extractnumbers {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-extractnumbers-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-extractnumbers-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ extractnumbers {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ extractnumbers {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-extractnumbers-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ extractnumbers {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ extractnumbers {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-fingerprint.md b/docs/reference/plugins-filters-fingerprint.md
new file mode 100644
index 000000000..b1c4e16cd
--- /dev/null
+++ b/docs/reference/plugins-filters-fingerprint.md
@@ -0,0 +1,351 @@
+---
+navigation_title: "fingerprint"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-fingerprint.html
+---
+
+# Fingerprint filter plugin [plugins-filters-fingerprint]
+
+
+* Plugin version: v3.4.4
+* Released on: 2024-03-19
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.4.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-fingerprint-index.md).
+
+## Getting help [_getting_help_141]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-fingerprint). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_140]
+
+Create consistent hashes (fingerprints) of one or more fields and store the result in a new field.
+
+You can use this plugin to create consistent document ids when events are inserted into Elasticsearch. This approach means that existing documents can be updated instead of creating new documents.
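+
+As a rough sketch of that pattern (the output settings are placeholders), the fingerprint can be stored in `@metadata` and reused as the Elasticsearch document ID:
+
+```ruby
+filter {
+  fingerprint {
+    source => ["message"]
+    method => "SHA256"
+    target => "[@metadata][fingerprint]"
+  }
+}
+output {
+  elasticsearch {
+    hosts       => ["localhost:9200"]              # placeholder
+    index       => "my-index"                      # placeholder
+    document_id => "%{[@metadata][fingerprint]}"   # reuse the fingerprint as the document ID
+  }
+}
+```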
+
+::::{note}
+When the `method` option is set to `UUID`, the result won’t be a consistent hash but a random [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). To generate UUIDs, prefer the [uuid filter](/reference/plugins-filters-uuid.md).
+::::
+
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-filters-fingerprint-ecs_metadata]
+
+This plugin adds a hash value to event as an identifier. You can configure the `target` option to change the output field.
+
+When ECS compatibility is disabled, the hash value is stored in the `fingerprint` field. When ECS is enabled, the value is stored in the `[event][hash]` field.
+
+Here’s how ECS compatibility mode affects output.
+
+| ECS disabled | ECS v1 | Availability | Description |
+| --- | --- | --- | --- |
+| fingerprint | [event][hash] | *Always* | *a hash value of event* |
+
+
+## Fingerprint Filter Configuration Options [plugins-filters-fingerprint-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-fingerprint-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`base64encode`](#plugins-filters-fingerprint-base64encode) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`concatenate_sources`](#plugins-filters-fingerprint-concatenate_sources) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`concatenate_all_fields`](#plugins-filters-fingerprint-concatenate_all_fields) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-filters-fingerprint-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key`](#plugins-filters-fingerprint-key) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`method`](#plugins-filters-fingerprint-method) | [string](/reference/configuration-file-structure.md#string), one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "MURMUR3_128", "IPV4_NETWORK", "UUID", "PUNCTUATION"]` | Yes |
+| [`source`](#plugins-filters-fingerprint-source) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-filters-fingerprint-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-fingerprint-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `base64encode` [plugins-filters-fingerprint-base64encode]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5` and `MURMUR3_128` fingerprint methods will produce base64 encoded rather than hex encoded strings.
+
+
+### `concatenate_sources` [plugins-filters-fingerprint-concatenate_sources]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true` and `method` isn’t `UUID` or `PUNCTUATION`, the plugin concatenates the names and values of all fields given in the `source` option into one string (like the old checksum filter) before doing the fingerprint computation.
+
+If `false` and multiple source fields are given, the target field will be a single fingerprint of the last source field.
+
+**Example: `concatenate_sources`=false**
+
+This example produces a single fingerprint that is computed from "birthday," the last source field.
+
+```ruby
+fingerprint {
+ source => ["user_id", "siblings", "birthday"]
+}
+```
+
+The output is:
+
+```ruby
+"fingerprint" => "6b6390a4416131f82b6ffb509f6e779e5dd9630f".
+```
+
+**Example: `concatenate_sources`=false with array**
+
+If the last source field is an array, you get an array of fingerprints.
+
+In this example, "siblings" is an array ["big brother", "little sister", "little brother"].
+
+```ruby
+fingerprint {
+ source => ["user_id", "siblings"]
+}
+```
+
+The output is:
+
+```ruby
+ "fingerprint" => [
+ [0] "8a8a9323677f4095fcf0c8c30b091a0133b00641",
+ [1] "2ce11b313402e0e9884e094409f8d9fcf01337c2",
+ [2] "adc0b90f9391a82098c7b99e66a816e9619ad0a7"
+ ],
+```
+
+
+### `concatenate_all_fields` [plugins-filters-fingerprint-concatenate_all_fields]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true` and `method` isn’t `UUID` or `PUNCTUATION`, the plugin concatenates the names and values of all fields of the event into one string (like the old checksum filter) before doing the fingerprint computation. If `false` and at least one source field is given, the target field will be an array with fingerprints of the source fields given.
+
+
+### `ecs_compatibility` [plugins-filters-fingerprint-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured data added at root level
+ * `v1`: uses `[event][hash]` fields that are compatible with Elastic Common Schema
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-filters-fingerprint-ecs_metadata) for detailed information.
+
+
+### `key` [plugins-filters-fingerprint-key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+When used with the `IPV4_NETWORK` method, fill in the subnet prefix length. With other methods, optionally fill in the HMAC key.
+
+
+### `method` [plugins-filters-fingerprint-method]
+
+* This is a required setting.
+* Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `MURMUR3_128`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
+* Default value is `"SHA1"`
+
+The fingerprint method to use.
+
+If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` and a key is set, the corresponding cryptographic hash function and the keyed-hash (HMAC) digest function are used to generate the fingerprint.
+
+If set to `MURMUR3` or `MURMUR3_128` the non-cryptographic MurmurHash function (either the 32-bit or 128-bit implementation, respectively) will be used.
+
+If set to `IPV4_NETWORK` the input data needs to be an IPv4 address and the hash value will be the masked-out address using the number of bits specified in the `key` option. For example, with "1.2.3.4" as the input and `key` set to 16, the hash becomes "1.2.0.0".
+
+If set to `PUNCTUATION`, all non-punctuation characters will be removed from the input string.
+
+If set to `UUID`, a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) will be generated. The result will be random and thus not a consistent hash.
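+
+For example, a sketch of the `IPV4_NETWORK` behavior described above (field names are illustrative):
+
+```ruby
+filter {
+  fingerprint {
+    source => ["[client][ip]"]     # illustrative field holding e.g. "1.2.3.4"
+    method => "IPV4_NETWORK"
+    key    => "16"                 # subnet prefix length; 1.2.3.4 -> 1.2.0.0
+    target => "[client][network]"
+  }
+}
+```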
+
+
+### `source` [plugins-filters-fingerprint-source]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `"message"`
+
+The name(s) of the source field(s) whose contents will be used to create the fingerprint. If an array is given, see the `concatenate_sources` option.
+
+
+### `target` [plugins-filters-fingerprint-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"fingerprint"` when ECS is disabled
+* Default value is `"[event][hash]"` when ECS is enabled
+
+The name of the field where the generated fingerprint will be stored. Any current contents of that field will be overwritten.
+
+
+
+## Common options [plugins-filters-fingerprint-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-fingerprint-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-fingerprint-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-fingerprint-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-fingerprint-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-fingerprint-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-fingerprint-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-fingerprint-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-fingerprint-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ fingerprint {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ fingerprint {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-fingerprint-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ fingerprint {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ fingerprint {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-fingerprint-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-fingerprint-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 fingerprint filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ fingerprint {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-fingerprint-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-fingerprint-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ fingerprint {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ fingerprint {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-fingerprint-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ fingerprint {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ fingerprint {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-geoip.md b/docs/reference/plugins-filters-geoip.md
new file mode 100644
index 000000000..05f6d328a
--- /dev/null
+++ b/docs/reference/plugins-filters-geoip.md
@@ -0,0 +1,508 @@
+---
+navigation_title: "geoip"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-geoip.html
+---
+
+# Geoip filter plugin [plugins-filters-geoip]
+
+
+* Plugin version: v7.3.1
+* Released on: 2024-10-11
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-geoip/blob/v7.3.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-geoip-index.md).
+
+## Getting help [_getting_help_142]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-geoip). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_141]
+
+The GeoIP filter adds information about the geographical location of IP addresses, based on data from the MaxMind GeoLite2 databases.
+
+
+## Supported Databases [_supported_databases]
+
+This plugin is bundled with the [GeoLite2](https://dev.maxmind.com/geoip/geoip2/geolite2) City database out of the box. From MaxMind’s description — "GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+[Commercial databases](https://www.maxmind.com/en/geoip2-databases) from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly from MaxMind’s website and use the `database` option to specify their location. The GeoLite2 databases can be [downloaded from here](https://dev.maxmind.com/geoip/geoip2/geolite2).
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
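+
+For instance, a sketch of pointing the filter at a manually downloaded database (the path is hypothetical; `source` names the event field that holds the IP address):
+
+```ruby
+filter {
+  geoip {
+    source   => "[source][ip]"                            # field containing the IP address
+    database => "/etc/logstash/geoip/GeoLite2-ASN.mmdb"   # hypothetical local path
+  }
+}
+```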
+
+
+## Database License [plugins-filters-geoip-database_license]
+
+[MaxMind](https://www.maxmind.com) changed from releasing the GeoIP database under a Creative Commons (CC) license to a proprietary end-user license agreement (EULA). The MaxMind EULA requires Logstash to update the MaxMind database within 30 days of a database update.
+
+The GeoIP filter plugin can manage the database for users running the Logstash default distribution, or you can manage database updates on your own. The behavior is controlled by the `database` setting and by the auto-update feature. When you use the default `database` setting and the auto-update feature is enabled, Logstash ensures that the plugin is using the latest version of the database. Otherwise, you are responsible for maintaining compliance.
+
+The Logstash open source distribution uses the MaxMind Creative Commons license database by default.
+
+
+## Database Auto-update [plugins-filters-geoip-database_auto]
+
+This plugin bundles Creative Commons (CC) license databases. If the auto-update feature is enabled in `logstash.yml` (as it is by default), Logstash checks for database updates every day. It downloads the latest version and can replace the old database while the plugin is running.
+
+::::{note}
+If the auto-update feature is disabled or the database has never been updated successfully, as in air-gapped environments, Logstash can use CC license databases indefinitely.
+::::
+
+
+After Logstash has switched to a EULA-licensed database, the geoip filter stops enriching events in order to maintain compliance if Logstash fails to check for database updates for 30 days. Events are tagged with the `_geoip_expired_database` tag to facilitate the handling of this situation.
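+
+For example, a sketch of routing such events with a conditional on that tag (the outputs are placeholders):
+
+```ruby
+output {
+  if "_geoip_expired_database" in [tags] {
+    # events that were not enriched because the EULA database expired
+    file { path => "/var/log/logstash/geoip-expired-events.log" }   # placeholder destination
+  } else {
+    elasticsearch { hosts => ["localhost:9200"] }                   # placeholder destination
+  }
+}
+```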
+
+::::{note}
+If the auto-update feature is enabled, Logstash upgrades from the CC database license to the EULA version on the first download.
+::::
+
+
+::::{tip}
+When possible, allow Logstash to access the internet to download databases so that they are always up-to-date.
+::::
+
+
+**Disable the auto-update feature**
+
+If you work in an air-gapped environment and want to disable the database auto-update feature, set the `xpack.geoip.downloader.enabled` value to `false` in `logstash.yml`.
+
+When the auto-update feature is disabled, Logstash uses the Creative Commons (CC) license databases indefinitely, and any previously downloaded version of the EULA databases will be deleted.
+
+
+## Manage your own database updates [plugins-filters-geoip-manage_update]
+
+**Use an HTTP proxy**
+
+If you can’t connect directly to the Elastic GeoIP endpoint, consider setting up an HTTP proxy server. You can then specify the proxy with the `http_proxy` environment variable.
+
+```sh
+export http_proxy="http://PROXY_IP:PROXY_PORT"
+```
+
+**Use a custom endpoint (air-gapped environments)**
+
+If you work in an air-gapped environment and can’t update your databases from the Elastic endpoint, you can download databases from MaxMind and bootstrap the service yourself.
+
+1. Download both `GeoLite2-ASN.mmdb` and `GeoLite2-City.mmdb` database files from the [MaxMind site](http://dev.maxmind.com/geoip/geoip2/geolite2).
+2. Copy both database files to a single directory.
+3. [Download {{es}}](https://www.elastic.co/downloads/elasticsearch).
+4. From your {{es}} directory, run:
+
+ ```sh
+ ./bin/elasticsearch-geoip -s my/database/dir
+ ```
+
+5. Serve the static database files from your directory. For example, you can use Docker to serve the files from an nginx server:
+
+ ```sh
+ docker run -p 8080:80 -v my/database/dir:/usr/share/nginx/html:ro nginx
+ ```
+
+6. Specify the service’s endpoint URL using the `xpack.geoip.download.endpoint=http://localhost:8080/overview.json` setting in `logstash.yml`.
+
+Logstash gets automatic updates from this service.
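+
+Assuming the nginx container from the previous step is reachable at `localhost:8080`, the corresponding `logstash.yml` entry would be:
+
+```yaml
+# logstash.yml
+# Point the GeoIP database updater at the locally served databases.
+xpack.geoip.download.endpoint: "http://localhost:8080/overview.json"
+```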
+
+
+## Database Metrics [plugins-filters-geoip-metrics]
+
+You can monitor database status through the [Node Stats API](https://www.elastic.co/docs/api/doc/logstash/operation/operation-nodestats).
+
+The following request returns a JSON document containing database manager stats, including:
+
+* database status and freshness
+
+ * `geoip_download_manager.database.*.status`
+
+ * `init` : initial CC database status
+    * `up_to_date` : using an up-to-date EULA database
+    * `to_be_expired` : 25 days without calling the service
+    * `expired` : 30 days without calling the service
+
+  * `fail_check_in_days` : number of days that Logstash has failed to call the service since the last successful check
+
+* info about download successes and failures
+
+  * `geoip_download_manager.download_stats.successes` number of successful checks and downloads
+  * `geoip_download_manager.download_stats.failures` number of failed checks or downloads
+  * `geoip_download_manager.download_stats.status`
+
+    * `updating` : a check or download is currently in progress
+    * `succeeded` : the last download succeeded
+    * `failed` : the last download failed
+
+
+```js
+curl -XGET 'localhost:9600/_node/stats/geoip_download_manager?pretty'
+```
+
+Example response:
+
+```js
+{
+ "geoip_download_manager" : {
+ "database" : {
+ "ASN" : {
+ "status" : "up_to_date",
+ "fail_check_in_days" : 0,
+ "last_updated_at": "2021-06-21T16:06:54+02:00"
+ },
+ "City" : {
+ "status" : "up_to_date",
+ "fail_check_in_days" : 0,
+ "last_updated_at": "2021-06-21T16:06:54+02:00"
+ }
+ },
+ "download_stats" : {
+ "successes" : 15,
+ "failures" : 1,
+ "last_checked_at" : "2021-06-21T16:07:03+02:00",
+ "status" : "succeeded"
+ }
+ }
+}
+```
+
+
+## Field mapping [plugins-filters-geoip-field-mapping]
+
+When this plugin is run with [`ecs_compatibility`](#plugins-filters-geoip-ecs_compatibility) disabled, the MaxMind DB’s fields are added directly to the [`target`](#plugins-filters-geoip-target). When ECS compatibility is enabled, the fields are structured to fit into an ECS shape.
+
+| Database Field Name | ECS Field | Example |
+| --- | --- | --- |
+| `ip` | `[ip]` | `12.34.56.78` |
+| `anonymous` | `[ip_traits][anonymous]` | `false` |
+| `anonymous_vpn` | `[ip_traits][anonymous_vpn]` | `false` |
+| `hosting_provider` | `[ip_traits][hosting_provider]` | `true` |
+| `network` | `[ip_traits][network]` | `12.34.56.78/20` |
+| `public_proxy` | `[ip_traits][public_proxy]` | `true` |
+| `residential_proxy` | `[ip_traits][residential_proxy]` | `false` |
+| `tor_exit_node` | `[ip_traits][tor_exit_node]` | `true` |
+| `city_name` | `[geo][city_name]` | `Seattle` |
+| `country_name` | `[geo][country_name]` | `United States` |
+| `continent_code` | `[geo][continent_code]` | `NA` |
+| `continent_name` | `[geo][continent_name]` | `North America` |
+| `country_code2` | `[geo][country_iso_code]` | `US` |
+| `country_code3` | *N/A* | `US` *maintained for legacy support, but populated with 2-character country code* |
+| `postal_code` | `[geo][postal_code]` | `98106` |
+| `region_name` | `[geo][region_name]` | `Washington` |
+| `region_code` | `[geo][region_code]` | `WA` |
+| `region_iso_code`* | `[geo][region_iso_code]` | `US-WA` |
+| `timezone` | `[geo][timezone]` | `America/Los_Angeles` |
+| `location`* | `[geo][location]` | `{"lat": 47.6062, "lon": -122.3321}` |
+| `latitude` | `[geo][location][lat]` | `47.6062` |
+| `longitude` | `[geo][location][lon]` | `-122.3321` |
+| `domain` | `[domain]` | `example.com` |
+| `asn` | `[as][number]` | `98765` |
+| `as_org` | `[as][organization][name]` | `Elastic, NV` |
+| `isp` | `[mmdb][isp]` | `InterLink Supra LLC` |
+| `dma_code` | `[mmdb][dma_code]` | `819` |
+| `organization` | `[mmdb][organization]` | `Elastic, NV` |
+
+::::{note}
+`*` indicates a composite field, which is only populated if the GeoIP lookup result contains all of its components.
+::::
+
+
+
+## Details [_details_2]
+
+When using a City database, the enrichment is aborted if no latitude/longitude pair is available.
+
+The `location` field combines the latitude and longitude into a structure called [GeoJSON](https://datatracker.ietf.org/doc/html/rfc7946). When you are using a default [`target`](#plugins-filters-geoip-target), the templates provided by the [elasticsearch output](/reference/plugins-outputs-elasticsearch.md) map the field to an [Elasticsearch Geo_point datatype](elasticsearch://reference/elasticsearch/mapping-reference/geo-point.md).
+
+As this field is a `geo_point` *and* it is still valid GeoJSON, you get the awesomeness of Elasticsearch’s geospatial query, facet and filter functions and the flexibility of having GeoJSON for all other applications (like Kibana’s map visualization).
+
+::::{note}
+This product includes GeoLite2 data created by MaxMind, available from [http://www.maxmind.com](http://www.maxmind.com). This database is licensed under [Creative Commons Attribution-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-sa/4.0/).
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy MaxMind GeoLite database and support IPv4 lookups only.
+
+::::
+
+
+
+## Geoip Filter Configuration Options [plugins-filters-geoip-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-geoip-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`cache_size`](#plugins-filters-geoip-cache_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`database`](#plugins-filters-geoip-database) | a valid filesystem path | No |
+| [`default_database_type`](#plugins-filters-geoip-default_database_type) | `City` or `ASN` | No |
+| [`ecs_compatibility`](#plugins-filters-geoip-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`fields`](#plugins-filters-geoip-fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`source`](#plugins-filters-geoip-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`tag_on_failure`](#plugins-filters-geoip-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-filters-geoip-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-geoip-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `cache_size` [plugins-filters-geoip-cache_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that IP addresses are often found adjacent to one another in log files and rarely have a random distribution. The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run. However, if you set it too high, you can use more memory than desired. Since the GeoIP API was upgraded to v2 there is no eviction policy, so once the cache is full no more records can be added. Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason to disable this behavior; the overhead is minimal and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter of the same geoip_type share the same cache. The last declared cache size *wins*. The reason for this is that there would be no benefit to having multiple caches for different instances at different points in the pipeline; that would just increase the number of cache misses and waste memory.
+
+
+### `database` [plugins-filters-geoip-database]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* If not specified, the database defaults to the `GeoLite2 City` database that ships with Logstash.
+
+The path to MaxMind’s database file that Logstash should use. The default database is `GeoLite2-City`. This plugin supports several free databases (`GeoLite2-City`, `GeoLite2-Country`, `GeoLite2-ASN`) and a selection of commercially-licensed databases (`GeoIP2-City`, `GeoIP2-ISP`, `GeoIP2-Country`, `GeoIP2-Domain`, `GeoIP2-Enterprise`, `GeoIP2-Anonymous-IP`).
+
+Database auto-update applies to the default distribution. When `database` points to a user-supplied database path, auto-update is disabled. See [Database License](#plugins-filters-geoip-database_license) for more information.
+
+
+### `default_database_type` [plugins-filters-geoip-default_database_type]
+
+This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The default value is `City`
+* The only acceptable values are `City` and `ASN`
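+
+For example, a minimal sketch that enriches events with ASN data from the bundled database (the source field name is illustrative):
+
+```ruby
+filter {
+  geoip {
+    source                => "client_ip"  # illustrative field holding the IP address
+    default_database_type => "ASN"        # use the bundled GeoLite2-ASN database
+  }
+}
+```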
+
+
+### `fields` [plugins-filters-geoip-fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+An array of geoip fields to be included in the event.
+
+Possible fields depend on the database type. By default, all geoip fields from the relevant database are included in the event.
+
+For a complete list of available fields and how they map to an event’s structure, see [field mapping](#plugins-filters-geoip-field-mapping).
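+
+For example, a sketch that keeps only a few of the City fields (the source field name is illustrative):
+
+```ruby
+filter {
+  geoip {
+    source => "client_ip"
+    fields => ["city_name", "country_name", "location"]
+  }
+}
+```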
+
+
+### `ecs_compatibility` [plugins-filters-geoip-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured geo data added at root level
+ * `v1`, `v8`: use fields that are compatible with Elastic Common Schema. Example: `[client][geo][country_name]`. See [field mapping](#plugins-filters-geoip-field-mapping) for more info.
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`target`](#plugins-filters-geoip-target).
+
+
+### `source` [plugins-filters-geoip-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If this field is an array, only the first value will be used.
+
+
+### `tag_on_failure` [plugins-filters-geoip-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+
+### `target` [plugins-filters-geoip-target]
+
+* This is an optional setting with condition.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-geoip-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `geoip`
+    * ECS Compatibility enabled: If `source` is an `ip` sub-field, e.g. `[client][ip]`, `target` is automatically set to the parent field (in this example, `client`); otherwise, `target` is a required setting
+
+ * `geo` field is nested in `[client][geo]`
+ * ECS compatible values are `client`, `destination`, `host`, `observer`, `server`, `source`
+
+
+Specify the field into which Logstash should store the geoip data. This can be useful, for example, if you have `src_ip` and `dst_ip` fields and would like the GeoIP information of both IPs.
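+
+As a sketch of that use case with ECS compatibility enabled, you could run two geoip filters whose targets are ECS-compatible field names (the `src_ip` and `dst_ip` field names are illustrative):
+
+```ruby
+filter {
+  geoip {
+    source => "src_ip"
+    target => "source"       # geo data is nested under [source][geo]
+  }
+  geoip {
+    source => "dst_ip"
+    target => "destination"  # geo data is nested under [destination][geo]
+  }
+}
+```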
+
+If you save the data to a target field other than `geoip` and want to use the `geo_point` related functions in Elasticsearch, you need to alter the template provided with the Elasticsearch output and configure the output to use the new template.
+
+Even if you don’t use the `geo_point` mapping, the `[target][location]` field is still valid GeoJSON.
+
+
+
+## Common options [plugins-filters-geoip-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-geoip-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-geoip-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-geoip-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-geoip-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-geoip-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-geoip-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-geoip-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-geoip-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ geoip {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ geoip {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-geoip-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ geoip {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ geoip {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-geoip-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-geoip-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 geoip filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ geoip {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-geoip-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-geoip-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ geoip {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ geoip {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-geoip-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ geoip {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ geoip {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-grok.md b/docs/reference/plugins-filters-grok.md
new file mode 100644
index 000000000..68db9bd18
--- /dev/null
+++ b/docs/reference/plugins-filters-grok.md
@@ -0,0 +1,586 @@
+---
+navigation_title: "grok"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html
+---
+
+# Grok filter plugin [plugins-filters-grok]
+
+
+* Plugin version: v4.4.3
+* Released on: 2022-10-28
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-grok/blob/v4.4.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-grok-index.md).
+
+## Getting help [_getting_help_143]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-grok). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_142]
+
+Parse arbitrary text and structure it.
+
+Grok is a great way to parse unstructured log data into something structured and queryable.
+
+This tool is perfect for syslog logs, apache and other webserver logs, mysql logs, and in general, any log format that is generally written for humans and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here: [https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns](https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns). You can add your own trivially. (See the `patterns_dir` setting)
+
+If you need help building patterns to match your logs, you will find the [http://grokdebug.herokuapp.com](http://grokdebug.herokuapp.com) and [http://grokconstructor.appspot.com/](http://grokconstructor.appspot.com/) applications quite useful!
+
+### Grok or Dissect? Or both? [_grok_or_dissect_or_both]
+
+The [`dissect`](/reference/plugins-filters-dissect.md) filter plugin is another way to extract unstructured event data into fields using delimiters.
+
+Dissect differs from Grok in that it does not use regular expressions and is faster. Dissect works well when data is reliably repeated. Grok is a better choice when the structure of your text varies from line to line.
+
+You can use both Dissect and Grok for a hybrid use case when a section of the line is reliably repeated, but the entire line is not. The Dissect filter can deconstruct the section of the line that is repeated. The Grok filter can process the remaining field values with more regex predictability.
+
+
+
+## Grok Basics [_grok_basics]
+
+Grok works by combining text patterns into something that matches your logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`
+
+The `SYNTAX` is the name of the pattern that will match your text. For example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched. For example, `3.44` could be the duration of an event, so you could call it simply `duration`. Further, a string `55.3.244.1` might identify the `client` making a request.
+
+For the above example, your grok filter would look something like this:
+
+```ruby
+%{NUMBER:duration} %{IP:client}
+```
+
+Optionally you can add a data type conversion to your grok pattern. By default all semantics are saved as strings. If you wish to convert a semantic’s data type, for example to change a string to an integer, then suffix it with the target data type. For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an integer. Currently the only supported conversions are `int` and `float`.
+
+With that idea of a syntax and semantic, we can pull out useful fields from a sample log like this fictional http request log:
+
+```ruby
+ 55.3.244.1 GET /index.html 15824 0.043
+```
+
+The pattern for this could be:
+
+```ruby
+ %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+```
+
+For a more realistic example, let’s read these logs from a file:
+
+```ruby
+ input {
+ file {
+ path => "/var/log/http.log"
+ }
+ }
+ filter {
+ grok {
+ match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+ }
+ }
+```
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+
+## Regular Expressions [_regular_expressions]
+
+Grok sits on top of regular expressions, so any regular expressions are valid in grok as well. The regular expression library is Oniguruma, and you can see the full supported regexp syntax [on the Oniguruma site](https://github.com/kkos/oniguruma/blob/master/doc/RE).
+
+
+## Custom Patterns [_custom_patterns]
+
+Sometimes Logstash doesn’t have a pattern you need. For this, you have a few options.
+
+First, you can use the Oniguruma syntax for named capture which will let you match a piece of text and save it as a field:
+
+```ruby
+  (?<field_name>the pattern here)
+```
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character hexadecimal value. I can capture that easily like this:
+
+```ruby
+  (?<queue_id>[0-9A-F]{10,11})
+```
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra` (the file name doesn’t matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+
+```ruby
+ # contents of ./patterns/postfix:
+ POSTFIX_QUEUEID [0-9A-F]{10,11}
+```
+
+Then use the `patterns_dir` setting in this plugin to tell Logstash where your custom patterns directory is. Here’s a full example with a sample log:
+
+```ruby
+ Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+```
+
+```ruby
+ filter {
+ grok {
+ patterns_dir => ["./patterns"]
+ match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+ }
+ }
+```
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the `SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns *inline* in the filter using `pattern_definitions`. This is mostly for convenience and allows the user to define a pattern which can be used just in that filter. Patterns newly defined in `pattern_definitions` will not be available outside of that particular `grok` filter.
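+
+For example, the postfix queue id pattern from the previous section could be defined inline like this:
+
+```ruby
+  filter {
+    grok {
+      pattern_definitions => { "POSTFIX_QUEUEID" => "[0-9A-F]{10,11}" }
+      match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+    }
+  }
+```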
+
+
+## Migrating to Elastic Common Schema (ECS) [plugins-filters-grok-ecs]
+
+To ease migration to the [Elastic Common Schema (ECS)](ecs://reference/index.md), the filter plugin offers a new set of ECS-compliant patterns in addition to the existing patterns. The new ECS pattern definitions capture event field names that are compliant with the schema.
+
+The ECS pattern set has all of the pattern definitions from the legacy set, and is a drop-in replacement. Use the [`ecs_compatibility`](#plugins-filters-grok-ecs_compatibility) setting to switch modes.
+
+New features and enhancements will be added to the ECS-compliant files. The legacy patterns may still receive bug fixes which are backwards compatible.
+
+
+## Grok Filter Configuration Options [plugins-filters-grok-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-grok-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`break_on_match`](#plugins-filters-grok-break_on_match) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-filters-grok-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`keep_empty_captures`](#plugins-filters-grok-keep_empty_captures) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`match`](#plugins-filters-grok-match) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`named_captures_only`](#plugins-filters-grok-named_captures_only) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`overwrite`](#plugins-filters-grok-overwrite) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`pattern_definitions`](#plugins-filters-grok-pattern_definitions) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`patterns_dir`](#plugins-filters-grok-patterns_dir) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`patterns_files_glob`](#plugins-filters-grok-patterns_files_glob) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tag_on_failure`](#plugins-filters-grok-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_timeout`](#plugins-filters-grok-tag_on_timeout) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeout_millis`](#plugins-filters-grok-timeout_millis) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeout_scope`](#plugins-filters-grok-timeout_scope) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-grok-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `break_on_match` [plugins-filters-grok-break_on_match]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Break on first match. The first successful match by grok will result in the filter being finished. If you want grok to try all patterns (maybe you are parsing different things), then set this to false.
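+
+For example, a sketch that tries two patterns against the `message` field and applies every one that matches, rather than stopping at the first:
+
+```ruby
+  filter {
+    grok {
+      break_on_match => false
+      match => {
+        "message" => [
+          "Duration: %{NUMBER:duration}",
+          "Speed: %{NUMBER:speed}"
+        ]
+      }
+    }
+  }
+```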
+
+
+### `ecs_compatibility` [plugins-filters-grok-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: the plugin will load legacy (built-in) pattern definitions
+ * `v1`,`v8`: all patterns provided by the plugin will use ECS compliant captures
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects extracted event field names when a composite pattern (such as `HTTPD_COMMONLOG`) is matched.
+
+
+### `keep_empty_captures` [plugins-filters-grok-keep_empty_captures]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `true`, keep empty captures as event fields.
+
+
+### `match` [plugins-filters-grok-match]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A hash that defines the mapping of *where to look*, and with which patterns.
+
+For example, the following will match an existing value in the `message` field for the given pattern, and if a match is found will add the field `duration` to the event with the captured value:
+
+```ruby
+ filter {
+ grok {
+ match => {
+ "message" => "Duration: %{NUMBER:duration}"
+ }
+ }
+ }
+```
+
+If you need to match multiple patterns against a single field, the value can be an array of patterns:
+
+```ruby
+ filter {
+ grok {
+ match => {
+ "message" => [
+ "Duration: %{NUMBER:duration}",
+ "Speed: %{NUMBER:speed}"
+ ]
+ }
+ }
+ }
+```
+
+To perform matches on multiple fields just use multiple entries in the `match` hash:
+
+```ruby
+ filter {
+ grok {
+ match => {
+ "speed" => "Speed: %{NUMBER:speed}"
+ "duration" => "Duration: %{NUMBER:duration}"
+ }
+ }
+ }
+```
+
+However, if one pattern depends on a field created by a previous pattern, separate these into two separate grok filters:
+
+```ruby
+ filter {
+ grok {
+ match => {
+ "message" => "Hi, the rest of the message is: %{GREEDYDATA:rest}"
+ }
+ }
+ grok {
+ match => {
+ "rest" => "a number %{NUMBER:number}, and a word %{WORD:word}"
+ }
+ }
+ }
+```
+
+
+### `named_captures_only` [plugins-filters-grok-named_captures_only]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+If `true`, only store named captures from grok.
+
+
+### `overwrite` [plugins-filters-grok-overwrite]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can overwrite the `message` field with part of the match like so:
+
+```ruby
+ filter {
+ grok {
+ match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+ overwrite => [ "message" ]
+ }
+ }
+```
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world` will be parsed and `hello world` will overwrite the original message.
+
+If you are using a field reference in `overwrite`, you must use the field reference in the pattern. Example:
+
+```ruby
+ filter {
+ grok {
+ match => { "somefield" => "%{NUMBER} %{GREEDYDATA:[nested][field][test]}" }
+ overwrite => [ "[nested][field][test]" ]
+ }
+ }
+```
+
+
+### `pattern_definitions` [plugins-filters-grok-pattern_definitions]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by the current filter. Patterns matching existing names will override the pre-existing definition. Think of this as inline patterns available just for this definition of grok.
+
+
+### `patterns_dir` [plugins-filters-grok-patterns_dir]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don’t necessarily need to define this yourself unless you are adding additional patterns. You can point to multiple pattern directories using this setting. Note that Grok will read all files in the directory matching `patterns_files_glob` and assume they are pattern files (including any tilde backup files).
+
+```ruby
+ patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+```
+
+Pattern files are plain text with format:
+
+```ruby
+ NAME PATTERN
+```
+
+For example:
+
+```ruby
+ NUMBER \d+
+```
+
+The patterns are loaded when the pipeline is created.
+
+
+### `patterns_files_glob` [plugins-filters-grok-patterns_files_glob]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"*"`
+
+Glob pattern, used to select the pattern files in the directories specified by `patterns_dir`.
+
+
+### `tag_on_failure` [plugins-filters-grok-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no successful match.
+
+
+### `tag_on_timeout` [plugins-filters-grok-tag_on_timeout]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+
+### `target` [plugins-filters-grok-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+Define target namespace for placing matches.
+
+
+### `timeout_millis` [plugins-filters-grok-timeout_millis]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `30000`
+
+Attempt to terminate regexps after this amount of time. This applies per pattern if multiple patterns are applied. This will never time out early, but may take a little longer to time out. The actual timeout is approximate, based on a 250ms quantization. Set to 0 to disable timeouts.
+
+
+### `timeout_scope` [plugins-filters-grok-timeout_scope]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"pattern"`
+* Supported values are `"pattern"` and `"event"`
+
+When multiple patterns are provided to [`match`](#plugins-filters-grok-match), the timeout has historically applied to *each* pattern, incurring overhead for each and every pattern that is attempted. When the grok filter is configured with `timeout_scope => event`, the plugin instead enforces a single timeout across all attempted matches on the event, achieving a similar safeguard against runaway matchers with significantly less overhead.
+
+It’s usually better to scope the timeout for the whole event.
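+
+A minimal sketch of an event-scoped timeout (the patterns are illustrative):
+
+```ruby
+  filter {
+    grok {
+      match => {
+        "message" => [
+          "Duration: %{NUMBER:duration}",
+          "Speed: %{NUMBER:speed}"
+        ]
+      }
+      timeout_scope  => "event"   # one timeout budget for all patterns applied to the event
+      timeout_millis => 30000
+    }
+  }
+```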
+
+
+
+## Common options [plugins-filters-grok-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-grok-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-grok-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-grok-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-grok-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-grok-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-grok-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-grok-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-grok-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ grok {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ grok {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-grok-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ grok {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ grok {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-grok-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-grok-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 grok filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ grok {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-grok-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-grok-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ grok {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ grok {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-grok-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ grok {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ grok {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-http.md b/docs/reference/plugins-filters-http.md
new file mode 100644
index 000000000..75c68e65f
--- /dev/null
+++ b/docs/reference/plugins-filters-http.md
@@ -0,0 +1,620 @@
+---
+navigation_title: "http"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-http.html
+---
+
+# HTTP filter plugin [plugins-filters-http]
+
+
+* Plugin version: v2.0.0
+* Released on: 2024-12-18
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-http/blob/v2.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-http-index.md).
+
+## Getting help [_getting_help_144]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-http). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_143]
+
+The HTTP filter provides integration with external web services/REST APIs.
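+
+As an illustration only (the endpoint URL and field names are hypothetical, not part of the plugin), a filter that queries a REST API and stores the response body on the event might be sketched like this:
+
+```ruby
+filter {
+  http {
+    url         => "https://api.example.com/enrich"   # hypothetical endpoint
+    verb        => "GET"
+    query       => { "ip" => "%{[source][ip]}" }      # query string built from an event field
+    target_body => "[enrichment]"                      # where the response body is stored
+  }
+}
+```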
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-filters-http-ecs]
+
+The plugin includes sensible defaults that change based on [ECS compatibility mode](#plugins-filters-http-ecs_compatibility). When targeting an ECS version, headers are set as `@metadata` and the `target_body` is a required option. See [`target_body`](#plugins-filters-http-target_body), and [`target_headers`](#plugins-filters-http-target_headers).
+
+
+## HTTP Filter Configuration Options [plugins-filters-http-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-http-common-options) described later.
+
+::::{note}
+As of version `2.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [HTTP Filter Obsolete Configuration Options](#plugins-filters-http-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`body`](#plugins-filters-http-body) | String, Array or Hash | No |
+| [`body_format`](#plugins-filters-http-body_format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-filters-http-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`headers`](#plugins-filters-http-headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`query`](#plugins-filters-http-query) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`target_body`](#plugins-filters-http-target_body) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target_headers`](#plugins-filters-http-target_headers) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`url`](#plugins-filters-http-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`verb`](#plugins-filters-http-verb) | [string](/reference/configuration-file-structure.md#string) | No |
+
+There are also multiple configuration options related to the HTTP connectivity:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`automatic_retries`](#plugins-filters-http-automatic_retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connect_timeout`](#plugins-filters-http-connect_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`cookies`](#plugins-filters-http-cookies) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`follow_redirects`](#plugins-filters-http-follow_redirects) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`keepalive`](#plugins-filters-http-keepalive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-filters-http-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`pool_max`](#plugins-filters-http-pool_max) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`pool_max_per_route`](#plugins-filters-http-pool_max_per_route) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy`](#plugins-filters-http-proxy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`request_timeout`](#plugins-filters-http-request_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_non_idempotent`](#plugins-filters-http-retry_non_idempotent) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`socket_timeout`](#plugins-filters-http-socket_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-filters-http-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-filters-http-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-filters-http-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-filters-http-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_keystore_password`](#plugins-filters-http-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-filters-http-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-filters-http-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-filters-http-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-filters-http-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-filters-http-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-filters-http-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-filters-http-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`user`](#plugins-filters-http-user) | [string](/reference/configuration-file-structure.md#string) | no |
+| [`validate_after_inactivity`](#plugins-filters-http-validate_after_inactivity) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-filters-http-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `body` [plugins-filters-http-body]
+
+* Value type can be a [string](/reference/configuration-file-structure.md#string), [number](/reference/configuration-file-structure.md#number), [array](/reference/configuration-file-structure.md#array) or [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value
+
+The body of the HTTP request to be sent.
+
+An example that sends `body` as JSON:
+
+```
+http {
+ body => {
+ "key1" => "constant_value"
+ "key2" => "%{[field][reference]}"
+ }
+ body_format => "json"
+}
+```
+
+
+### `body_format` [plugins-filters-http-body_format]
+
+* Value type can be either `"json"` or `"text"`
+* Default value is `"text"`
+
+If set to `"json"` and the [`body`](#plugins-filters-http-body) is a type of [array](/reference/configuration-file-structure.md#array) or [hash](/reference/configuration-file-structure.md#hash), the body will be serialized as JSON. Otherwise it is sent as is.
+
+
+### `ecs_compatibility` [plugins-filters-http-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+  * `disabled`: does not use ECS-compatible field names (for example, response headers target the `headers` field by default)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, headers are added as metadata)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`target_body`](#plugins-filters-http-target_body) and [`target_headers`](#plugins-filters-http-target_headers).
+
+
+### `headers` [plugins-filters-http-headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value
+
+The HTTP headers to be sent in the request. Both the names of the headers and their values can reference values from event fields.
+
+
+### `query` [plugins-filters-http-query]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value
+
+Define the query string parameters (key-value pairs) to be sent in the HTTP request.
+
+
+### `target_body` [plugins-filters-http-target_body]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-http-ecs_compatibility) is enabled:
+
+    * ECS Compatibility disabled: `"[body]"`
+ * ECS Compatibility enabled: no default value, needs to be specified explicitly
+
+
+Define the target field for placing the body of the HTTP response.
+
+
+### `target_headers` [plugins-filters-http-target_headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-http-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"[headers]"`
+ * ECS Compatibility enabled: `"[@metadata][filter][http][response][headers]"`
+
+
+Define the target field for placing the headers of the HTTP response.
+
+
+### `url` [plugins-filters-http-url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+The URL to send the request to. The value can be fetched from event fields.
+
+
+### `verb` [plugins-filters-http-verb]
+
+* Value type can be either `"GET"`, `"HEAD"`, `"PATCH"`, `"DELETE"`, `"POST"`, `"PUT"`
+* Default value is `"GET"`
+
+The verb to be used for the HTTP request.
+
+
+
+## HTTP Filter Connectivity Options [plugins-filters-http-connectivity-options]
+
+### `automatic_retries` [plugins-filters-http-automatic_retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+How many times should the client retry a failing URL. We highly recommend NOT setting this value to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+
+### `connect_timeout` [plugins-filters-http-connect_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10s`
+
+
+### `cookies` [plugins-filters-http-cookies]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies across requests as a normal web browser would. Enabled by default.
+
+
+### `follow_redirects` [plugins-filters-http-follow_redirects]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should redirects be followed? Defaults to `true`
+
+
+### `keepalive` [plugins-filters-http-keepalive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least one with this to fix interactions with broken keepalive implementations.
+
+
+### `password` [plugins-filters-http-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to be used in conjunction with the username for HTTP authentication.
+
+
+### `pool_max` [plugins-filters-http-pool_max]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`
+
+
+### `pool_max_per_route` [plugins-filters-http-pool_max_per_route]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`
+
+
+### `proxy` [plugins-filters-http-proxy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If you’d like to use an HTTP proxy, this setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+
+### `request_timeout` [plugins-filters-http-request_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Timeout (in seconds) for the entire request.
+
+
+### `retry_non_idempotent` [plugins-filters-http-retry_non_idempotent]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+
+### `socket_timeout` [plugins-filters-http-socket_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10s`
+
+
+### `ssl_certificate` [plugins-filters-http-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-filters-http-ssl_key) is set.
+::::
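+
+For example, a sketch of client-certificate authentication (the endpoint URL and file paths are illustrative):
+
+```ruby
+filter {
+  http {
+    url             => "https://secure.example.com/api"
+    ssl_certificate => "/etc/logstash/certs/client.crt"
+    ssl_key         => "/etc/logstash/certs/client.key"
+    ssl_certificate_authorities => ["/etc/logstash/certs/ca.pem"]
+  }
+}
+```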
+
+
+
+### `ssl_certificate_authorities` [plugins-filters-http-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem CA files to validate the server’s certificate.
+
+
+### `ssl_cipher_suites` [plugins-filters-http-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priority. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-filters-http-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable SSL/TLS secured communication. It must be `true` for other `ssl_` options to take effect.
+
+
+### `ssl_key` [plugins-filters-http-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+OpenSSL-style RSA private key that corresponds to the [`ssl_certificate`](#plugins-filters-http-ssl_certificate).
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-filters-http-ssl_certificate) is set.
+::::
+
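+A minimal sketch (endpoint and file paths are hypothetical) combining `ssl_certificate`, `ssl_key`, and `ssl_certificate_authorities` for mutual TLS:
+
+```ruby
+filter {
+  http {
+    url => "https://service.example.com/enrich"                    # hypothetical endpoint
+    ssl_certificate => "/etc/logstash/certs/client.crt"            # hypothetical client certificate
+    ssl_key => "/etc/logstash/certs/client.key"                    # matching private key
+    ssl_certificate_authorities => ["/etc/logstash/certs/ca.pem"]  # CA used to validate the server
+  }
+}
+```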
+
+
+### `ssl_keystore_password` [plugins-filters-http-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password.
+
+
+### `ssl_keystore_path` [plugins-filters-http-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`.
+
+
+### `ssl_keystore_type` [plugins-filters-http-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-filters-http-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
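+A minimal sketch (the endpoint is hypothetical) restricting the connection to TLSv1.3:
+
+```ruby
+filter {
+  http {
+    url => "https://service.example.com/enrich"  # hypothetical endpoint
+    ssl_supported_protocols => ["TLSv1.3"]
+  }
+}
+```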
+
+
+### `ssl_truststore_password` [plugins-filters-http-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password.
+
+
+### `ssl_truststore_path` [plugins-filters-http-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+
+### `ssl_truststore_type` [plugins-filters-http-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-filters-http-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are: `full`, `none`
+* Default value is `full`
+
+Controls the verification of server certificates. The `full` option verifies that the provided certificate is signed by a trusted authority (CA) and also that the server’s hostname (or IP address) matches the names identified within the certificate.
+
+The `none` setting performs no verification of the server’s certificate. This mode disables many of the security benefits of SSL/TLS and should only be used after cautious consideration. It is primarily intended as a temporary diagnostic mechanism when attempting to resolve TLS errors. Using `none` in production environments is strongly discouraged.
+
+
+### `user` [plugins-filters-http-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. If you set this, you must also set the `password` option, as in the sketch below.
+
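+A minimal sketch (endpoint and credentials are hypothetical) setting both options together:
+
+```ruby
+filter {
+  http {
+    url => "https://service.example.com/enrich"  # hypothetical endpoint
+    user => "ls_enrich"                          # hypothetical username
+    password => "${HTTP_FILTER_PASSWORD}"        # e.g. resolved from an environment variable
+  }
+}
+```
+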
+
+### `validate_after_inactivity` [plugins-filters-http-validate_after_inactivity]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `200`
+
+How long to wait before checking for a stale connection to determine if a keepalive request is needed. Consider setting this value lower than the default, possibly to 0, if you get connection errors regularly.
+
+This client is based on Apache Commons. Here’s how the [Apache Commons documentation](https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)) describes this option: "Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool."
+
+
+
+## HTTP Filter Obsolete Configuration Options [plugins-filters-http-obsolete-options]
+
+::::{warning}
+As of version `2.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](#plugins-filters-http-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](#plugins-filters-http-ssl_certificate) |
+| client_key | [`ssl_key`](#plugins-filters-http-ssl_key) |
+| keystore | [`ssl_keystore_path`](#plugins-filters-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-filters-http-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](#plugins-filters-http-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](#plugins-filters-http-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](#plugins-filters-http-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](#plugins-filters-http-ssl_truststore_type) |
+
+
+## Common options [plugins-filters-http-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-http-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-http-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-http-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-http-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-http-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-http-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-http-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-http-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ http {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ http {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-http-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ http {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ http {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-http-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-http-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 http filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ http {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-http-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-http-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ http {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ http {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-http-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ http {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ http {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-i18n.md b/docs/reference/plugins-filters-i18n.md
new file mode 100644
index 000000000..595c5fa2a
--- /dev/null
+++ b/docs/reference/plugins-filters-i18n.md
@@ -0,0 +1,230 @@
+---
+navigation_title: "i18n"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-i18n.html
+---
+
+# I18n filter plugin [plugins-filters-i18n]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-i18n-index.md).
+
+## Installation [_installation_61]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-i18n`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_145]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-i18n). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_144]
+
+The i18n filter allows you to remove special characters from a field.
+
+
+## I18n Filter Configuration Options [plugins-filters-i18n-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-i18n-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`transliterate`](#plugins-filters-i18n-transliterate) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-i18n-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `transliterate` [plugins-filters-i18n-transliterate]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Replaces non-ASCII characters with an ASCII approximation or, if none exists, with a replacement character, which defaults to `?`.
+
+Example:
+
+```ruby
+ filter {
+ i18n {
+ transliterate => ["field1", "field2"]
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-i18n-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-i18n-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-i18n-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-i18n-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-i18n-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-i18n-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-i18n-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-i18n-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-i18n-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ i18n {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ i18n {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-i18n-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ i18n {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ i18n {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-i18n-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-i18n-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 i18n filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ i18n {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-i18n-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-i18n-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ i18n {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ i18n {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-i18n-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ i18n {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ i18n {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-java_uuid.md b/docs/reference/plugins-filters-java_uuid.md
new file mode 100644
index 000000000..c160d445e
--- /dev/null
+++ b/docs/reference/plugins-filters-java_uuid.md
@@ -0,0 +1,246 @@
+---
+navigation_title: "java_uuid"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-java_uuid.html
+---
+
+# Java_uuid filter plugin [plugins-filters-java_uuid]
+
+
+**{{ls}} Core Plugin.** The java_uuid filter plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_146]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_145]
+
+The uuid filter allows you to generate a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) and add it as a field to each processed event.
+
+This is useful if you need to generate a string that’s unique for every event even if the same input is processed multiple times. If you want to generate strings that are identical each time an event with the same content is processed (i.e., a hash), you should use the [fingerprint filter](/reference/plugins-filters-fingerprint.md) instead.
+
+The generated UUIDs follow the version 4 definition in [RFC 4122](https://tools.ietf.org/html/rfc4122) and will be represented in standard hexadecimal string format, e.g. "e08806fe-02af-406c-bbde-8a5ae4475e57".
+
+
+## Java_uuid Filter Configuration Options [plugins-filters-java_uuid-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-java_uuid-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`overwrite`](#plugins-filters-java_uuid-overwrite) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-filters-java_uuid-target) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-filters-java_uuid-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `overwrite` [plugins-filters-java_uuid-overwrite]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Determines if an existing value in the field specified by the `target` option should be overwritten by the filter.
+
+Example:
+
+```ruby
+ filter {
+ java_uuid {
+ target => "uuid"
+ overwrite => true
+ }
+ }
+```
+
+
+### `target` [plugins-filters-java_uuid-target]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Specifies the name of the field in which the generated UUID should be stored.
+
+Example:
+
+```ruby
+ filter {
+ java_uuid {
+ target => "uuid"
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-java_uuid-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-java_uuid-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-java_uuid-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-java_uuid-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-java_uuid-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-java_uuid-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-java_uuid-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-java_uuid-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-java_uuid-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ java_uuid {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ java_uuid {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-java_uuid-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ java_uuid {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ java_uuid {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-java_uuid-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-java_uuid-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 java_uuid filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ java_uuid {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-java_uuid-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-java_uuid-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ java_uuid {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ java_uuid {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-java_uuid-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ java_uuid {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ java_uuid {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-jdbc_static.md b/docs/reference/plugins-filters-jdbc_static.md
new file mode 100644
index 000000000..0655518eb
--- /dev/null
+++ b/docs/reference/plugins-filters-jdbc_static.md
@@ -0,0 +1,672 @@
+---
+navigation_title: "jdbc_static"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-jdbc_static.html
+---
+
+# Jdbc_static filter plugin [plugins-filters-jdbc_static]
+
+
+* A component of the [jdbc integration plugin](/reference/plugins-integrations-jdbc.md)
+* Integration version: v5.5.2
+* Released on: 2024-12-23
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.5.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-jdbc_static-index.md).
+
+## Getting help [_getting_help_147]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-jdbc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_146]
+
+This filter enriches events with data pre-loaded from a remote database.
+
+This filter is best suited for enriching events with reference data that is static or does not change very often, such as environments, users, and products.
+
+This filter works by fetching data from a remote database, caching it in a local, in-memory [Apache Derby](https://db.apache.org/derby/manuals/#docs_10.14) database, and using lookups to enrich events with data cached in the local database. You can set up the filter to load the remote data once (for static data), or you can schedule remote loading to run periodically (for data that needs to be refreshed).
+
+To define the filter, you specify three main sections: local_db_objects, loaders, and lookups.
+
+**local_db_objects**
+: Define the columns, types, and indexes used to build the local database structure. The column names and types should match the external database. Define as many of these objects as needed to build the local database structure.
+
+**loaders**
+: Query the external database to fetch the dataset that will be cached locally. Define as many loaders as needed to fetch the remote data. Each loader should fill a table defined by `local_db_objects`. Make sure the column names and datatypes in the loader SQL statement match the columns defined under `local_db_objects`. Each loader has an independent remote database connection.
+
+**lookups**
+: Perform lookup queries on the local database to enrich the events. Define as many lookups as needed to enrich the event from all lookup tables in one pass. Ideally the SQL statement should only return one row. Any rows are converted to Hash objects and are stored in a target field that is an Array.
+
+ The following example config fetches data from a remote database, caches it in a local database, and uses lookups to enrich events with data cached in the local database.
+
+ ```json
+ filter {
+ jdbc_static {
+ loaders => [ <1>
+ {
+ id => "remote-servers"
+ query => "select ip, descr from ref.local_ips order by ip"
+ local_table => "servers"
+ },
+ {
+ id => "remote-users"
+ query => "select firstname, lastname, userid from ref.local_users order by userid"
+ local_table => "users"
+ }
+ ]
+ local_db_objects => [ <2>
+ {
+ name => "servers"
+ index_columns => ["ip"]
+ columns => [
+ ["ip", "varchar(15)"],
+ ["descr", "varchar(255)"]
+ ]
+ },
+ {
+ name => "users"
+ index_columns => ["userid"]
+ columns => [
+ ["firstname", "varchar(255)"],
+ ["lastname", "varchar(255)"],
+ ["userid", "int"]
+ ]
+ }
+ ]
+ local_lookups => [ <3>
+ {
+ id => "local-servers"
+ query => "SELECT descr as description FROM servers WHERE ip = :ip"
+ parameters => {ip => "[from_ip]"}
+ target => "server"
+ },
+ {
+ id => "local-users"
+ query => "SELECT firstname, lastname FROM users WHERE userid = ? AND country = ?"
+ prepared_parameters => ["[loggedin_userid]", "[user_nation]"] <4>
+ target => "user" <5>
+ default_hash => { <6>
+ firstname => nil
+ lastname => nil
+ }
+ }
+ ]
+ # using add_field here to add & rename values to the event root
+ add_field => { server_name => "%{[server][0][description]}" } <7>
+ add_field => { user_firstname => "%{[user][0][firstname]}" }
+ add_field => { user_lastname => "%{[user][0][lastname]}" }
+ remove_field => ["server", "user"]
+ staging_directory => "/tmp/logstash/jdbc_static/import_data"
+ loader_schedule => "* */2 * * *" <8>
+ jdbc_user => "logstash"
+ jdbc_password => "example"
+ jdbc_driver_class => "org.postgresql.Driver"
+ jdbc_driver_library => "/tmp/logstash/vendor/postgresql-42.1.4.jar"
+ jdbc_connection_string => "jdbc:postgresql://remotedb:5432/ls_test_2"
+ }
+ }
+
+ output {
+ if "_jdbcstaticdefaultsused" in [tags] {
+ # Print all the not found users
+ stdout { }
+ }
+ }
+ ```
+
+ 1. Queries an external database to fetch the dataset that will be cached locally.
+ 2. Defines the columns, types, and indexes used to build the local database structure. The column names and types should match the external database. The order of table definitions is significant and should match that of the loader queries. See [Loader column and local_db_object order dependency](#plugins-filters-jdbc_static-object_order).
+ 3. Performs lookup queries on the local database to enrich the events.
+ 4. Local lookup queries can also use prepared statements where the parameters follow the positional ordering.
+ 5. Specifies the event field that will store the looked-up data. If the lookup returns multiple columns, the data is stored as a JSON object within the field.
+ 6. When the user is not found in the database, an event is created using data from the [`local_lookups`](#plugins-filters-jdbc_static-local_lookups) `default hash` setting, and the event is tagged with the list set in [`tag_on_default_use`](#plugins-filters-jdbc_static-tag_on_default_use).
+ 7. Takes data from the JSON object and stores it in top-level event fields for easier analysis in Kibana.
+ 8. Runs loaders every 2 hours.
+
+
+Here’s a full example:
+
+```json
+input {
+ generator {
+ lines => [
+ '{"from_ip": "10.2.3.20", "app": "foobar", "amount": 32.95}',
+ '{"from_ip": "10.2.3.30", "app": "barfoo", "amount": 82.95}',
+ '{"from_ip": "10.2.3.40", "app": "bazfoo", "amount": 22.95}'
+ ]
+ count => 200
+ }
+}
+
+filter {
+ json {
+ source => "message"
+ }
+
+ jdbc_static {
+ loaders => [
+ {
+ id => "servers"
+ query => "select ip, descr from ref.local_ips order by ip"
+ local_table => "servers"
+ }
+ ]
+ local_db_objects => [
+ {
+ name => "servers"
+ index_columns => ["ip"]
+ columns => [
+ ["ip", "varchar(15)"],
+ ["descr", "varchar(255)"]
+ ]
+ }
+ ]
+ local_lookups => [
+ {
+ query => "select descr as description from servers WHERE ip = :ip"
+ parameters => {ip => "[from_ip]"}
+ target => "server"
+ }
+ ]
+ staging_directory => "/tmp/logstash/jdbc_static/import_data"
+ loader_schedule => "*/30 * * * *"
+ jdbc_user => "logstash"
+ jdbc_password => "logstash??"
+ jdbc_driver_class => "org.postgresql.Driver"
+ jdbc_driver_library => "/Users/guy/tmp/logstash-6.0.0/vendor/postgresql-42.1.4.jar"
+ jdbc_connection_string => "jdbc:postgresql://localhost:5432/ls_test_2"
+ }
+}
+
+output {
+ stdout {
+ codec => rubydebug {metadata => true}
+ }
+}
+```
+
+Assuming the loader fetches the following data from a Postgres database:
+
+```shell
+select * from ref.local_ips order by ip;
+ ip | descr
+-----------+-----------------------
+ 10.2.3.10 | Authentication Server
+ 10.2.3.20 | Payments Server
+ 10.2.3.30 | Events Server
+ 10.2.3.40 | Payroll Server
+ 10.2.3.50 | Uploads Server
+```
+
+The events are enriched with a description of the server based on the value of the IP:
+
+```shell
+{
+ "app" => "bazfoo",
+ "sequence" => 0,
+ "server" => [
+ [0] {
+ "description" => "Payroll Server"
+ }
+ ],
+ "amount" => 22.95,
+ "@timestamp" => 2017-11-30T18:08:15.694Z,
+ "@version" => "1",
+ "host" => "Elastics-MacBook-Pro.local",
+ "message" => "{\"from_ip\": \"10.2.3.40\", \"app\": \"bazfoo\", \"amount\": 22.95}",
+ "from_ip" => "10.2.3.40"
+}
+```
+
+
+## Using this plugin with multiple pipelines [_using_this_plugin_with_multiple_pipelines]
+
+::::{important}
+Logstash uses a single, in-memory Apache Derby instance as the lookup database engine for the entire JVM. Because each plugin instance uses a unique database inside the shared Derby engine, there should be no conflicts with plugins attempting to create and populate the same tables. This is true regardless of whether the plugins are defined in a single pipeline, or multiple pipelines. However, after setting up the filter, you should watch the lookup results and view the logs to verify correct operation.
+
+::::
+
+
+
+## Loader column and local_db_object order dependency [plugins-filters-jdbc_static-object_order]
+
+::::{important}
+For loader performance reasons, the loading mechanism uses a CSV-style file with Derby’s built-in file import procedure to add the remote data to the local database. The retrieved columns are written to the CSV file as-is, and the file import procedure expects a one-to-one correspondence with the order of the columns specified in the `local_db_objects` settings. Please ensure that this order is in place.
+
+::::
+
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-filters-jdbc_static-ecs]
+
+This plugin is compatible with the [Elastic Common Schema (ECS)](ecs://reference/index.md). It behaves the same regardless of ECS compatibility, except giving a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
+
+
+
+## Jdbc_static filter configuration options [plugins-filters-jdbc_static-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-jdbc_static-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`jdbc_connection_string`](#plugins-filters-jdbc_static-jdbc_connection_string) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_driver_class`](#plugins-filters-jdbc_static-jdbc_driver_class) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_driver_library`](#plugins-filters-jdbc_static-jdbc_driver_library) | a valid filesystem path | No |
+| [`jdbc_password`](#plugins-filters-jdbc_static-jdbc_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`jdbc_user`](#plugins-filters-jdbc_static-jdbc_user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tag_on_failure`](#plugins-filters-jdbc_static-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_default_use`](#plugins-filters-jdbc_static-tag_on_default_use) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`staging_directory`](#plugins-filters-jdbc_static-staging_directory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`loader_schedule`](#plugins-filters-jdbc_static-loader_schedule) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`loaders`](#plugins-filters-jdbc_static-loaders) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`local_db_objects`](#plugins-filters-jdbc_static-local_db_objects) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`local_lookups`](#plugins-filters-jdbc_static-local_lookups) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-jdbc_static-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `jdbc_connection_string` [plugins-filters-jdbc_static-jdbc_connection_string]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC connection string.
+
+
+### `jdbc_driver_class` [plugins-filters-jdbc_static-jdbc_driver_class]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+
+::::{note}
+According to [Issue 43](https://github.com/logstash-plugins/logstash-input-jdbc/issues/43), if you are using the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+::::
+
+
+
+### `jdbc_driver_library` [plugins-filters-jdbc_static-jdbc_driver_library]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to the third-party JDBC driver library. Use comma-separated paths in one string if you need multiple libraries.
+
+If the driver library is not provided, the plugin looks for the driver class in the Logstash Java classpath.
+
+
+### `jdbc_password` [plugins-filters-jdbc_static-jdbc_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+JDBC password.
+
+
+### `jdbc_user` [plugins-filters-jdbc_static-jdbc_user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC user.
+
+
+### `tag_on_default_use` [plugins-filters-jdbc_static-tag_on_default_use]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_jdbcstaticdefaultsused"]`
+
+Append values to the `tags` field if no record was found and default values were used.
+
+
+### `tag_on_failure` [plugins-filters-jdbc_static-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_jdbcstaticfailure"]`
+
+Append values to the `tags` field if a SQL error occurred.
+
+
+### `staging_directory` [plugins-filters-jdbc_static-staging_directory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is derived from the Ruby temp directory + plugin_name + "import_data"
+* e.g. `"/tmp/logstash/jdbc_static/import_data"`
+
+The directory used to stage the data for bulk loading; make sure there is sufficient disk space to handle the data you wish to use to enrich events. Previous versions of this plugin did not handle loading datasets of more than several thousand rows well due to an open bug in Apache Derby. This setting introduces an alternative way of loading large recordsets. As each row is received, it is spooled to a file, and that file is then imported into the local database using Derby’s built-in *import table* system procedure.
+
+
+### `loader_schedule` [plugins-filters-jdbc_static-loader_schedule]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+You can schedule remote loading to run periodically according to a specific schedule. This scheduling syntax is powered by [rufus-scheduler](https://github.com/jmettraux/rufus-scheduler). The syntax is cron-like with some extensions specific to Rufus (for example, timezone support). For more about this syntax, see [parsing cronlines and time strings](https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings).
+
+Examples:
+
+| | |
+| --- | --- |
+| `*/30 * * * *` | will execute on the 0th and 30th minute of every hour every day. |
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. |
+| `0 * * * *` | will execute on the 0th minute of every hour every day. |
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. |
+
+Debugging using the Logstash interactive shell:
+
+```shell
+bin/logstash -i irb
+irb(main):001:0> require 'rufus-scheduler'
+=> true
+irb(main):002:0> Rufus::Scheduler.parse('*/10 * * * *')
+=> #
+irb(main):003:0> exit
+```
+
+The object returned by the above call, an instance of `Rufus::Scheduler::CronLine`, shows the seconds, minutes, and so on, of execution.
+
+
+### `loaders` [plugins-filters-jdbc_static-loaders]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The array should contain one or more Hashes. Each Hash is validated according to the table below.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| id | string | No |
+| local_table | string | Yes |
+| query | string | Yes |
+| max_rows | number | No |
+| jdbc_connection_string | string | No |
+| jdbc_driver_class | string | No |
+| jdbc_driver_library | a valid filesystem path | No |
+| jdbc_password | password | No |
+| jdbc_user | string | No |
+
+**Loader Field Descriptions:**
+
+id
+: An optional identifier. This is used to identify the loader that is generating error messages and log lines.
+
+local_table
+: The destination table in the local lookup database that the loader will fill.
+
+query
+: The SQL statement that is executed to fetch the remote records. Use SQL aliases and casts to ensure that the record’s columns and datatype match the table structure in the local database as defined in the `local_db_objects`.
+
+max_rows
+: The default for this setting is 1 million. Because the lookup database is in-memory, it will take up JVM heap space. If the query returns many millions of rows, you should increase the JVM memory given to Logstash or limit the number of rows returned, perhaps to those most frequently found in the event data.
+
+jdbc_connection_string
+: If not set in a loader, this setting defaults to the plugin-level `jdbc_connection_string` setting.
+
+jdbc_driver_class
+: If not set in a loader, this setting defaults to the plugin-level `jdbc_driver_class` setting.
+
+jdbc_driver_library
+: If not set in a loader, this setting defaults to the plugin-level `jdbc_driver_library` setting.
+
+jdbc_password
+: If not set in a loader, this setting defaults to the plugin-level `jdbc_password` setting.
+
+jdbc_user
+: If not set in a loader, this setting defaults to the plugin-level `jdbc_user` setting.
+
+
+### `local_db_objects` [plugins-filters-jdbc_static-local_db_objects]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The array should contain one or more Hashes. Each Hash represents a table schema for the local lookups database. Each Hash is validated according to the table below.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| name | string | Yes |
+| columns | array | Yes |
+| index_columns | number | No |
+| preserve_existing | boolean | No |
+
+**Local_db_objects Field Descriptions:**
+
+name
+: The name of the table to be created in the database.
+
+columns
+: An array of column specifications. Each column specification is an array of exactly two elements, for example `["ip", "varchar(15)"]`. The first element is the column name string. The second element is a string that is an [Apache Derby SQL type](https://db.apache.org/derby/docs/10.14/ref/crefsqlj31068.html). The string content is checked when the local lookup tables are built, not when the settings are validated. Therefore, any misspelled SQL type strings result in errors.
+
+index_columns
+: An array of strings. Each string must be defined in the `columns` setting. The index name will be generated internally. Unique or sorted indexes are not supported.
+
+preserve_existing
+: This setting, when `true`, checks whether the table already exists in the local lookup database. If you have multiple pipelines running in the same instance of Logstash, and more than one pipeline is using this plugin, then you must read the important multiple pipeline notice at the top of the page.
+
+
+### `local_lookups` [plugins-filters-jdbc_static-local_lookups]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+The array should contain one or more Hashes. Each Hash represents a lookup enrichment. Each Hash is validated according to the table below.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| id | string | No |
+| query | string | Yes |
+| parameters | hash | Yes |
+| target | string | No |
+| default_hash | hash | No |
+| tag_on_failure | string | No |
+| tag_on_default_use | string | No |
+
+**Local_lookups Field Descriptions:**
+
+id
+: An optional identifier. This is used to identify the lookup that is generating error messages and log lines. If you omit this setting then a default id is used instead.
+
+query
+: A SQL SELECT statement that is executed to achieve the lookup. To use parameters, use named parameter syntax, for example `"SELECT * FROM MYTABLE WHERE ID = :id"`. Alternatively, the `?` sign can be used as a prepared statement parameter, in which case the `prepared_parameters` array is used to populate the values.
+
+parameters
+: A key/value Hash or dictionary. The key (LHS) is the text that is substituted for in the SQL statement `SELECT * FROM sensors WHERE reference = :p1`. The value (RHS) is the field name in your event. The plugin reads the value from this key out of the event and substitutes that value into the statement, for example, `parameters => { "p1" => "ref" }`. Quoting is automatic - you do not need to put quotes in the statement. Only use the field interpolation syntax on the RHS if you need to add a prefix/suffix or join two event field values together to build the substitution value. For example, imagine an IOT message that has an id and a location, and you have a table of sensors that have a column of `id-loc_id`. In this case your parameter hash would look like this: `parameters => { "p1" => "%{[id]}-%{[loc_id]}" }`.
+
+prepared_parameters
+: An Array whose positions correspond to the positions of the `?` placeholders in the query. The values in the array follow the same semantics as `parameters`. If `prepared_parameters` is set, the filter uses a JDBC prepared statement to query the local database. Prepared statements provide two benefits: better performance, because the DBMS does not need to parse and compile the SQL expression for every call, and better security, because prepared statements avoid SQL-injection attacks based on query string concatenation.
+
+target
+: An optional name for the field that will receive the looked-up data. If you omit this setting then the `id` setting (or the default id) is used. The looked-up data, an array of results converted to Hashes, is never added to the root of the event. If you want to do this, you should use the `add_field` setting. This means that you are in full control of how the fields/values are put in the root of the event, for example, `add_field => { user_firstname => "%{[user][0][firstname]}" }` - where `[user]` is the target field, `[0]` is the first result in the array, and `[firstname]` is the key in the result hash.
+
+default_hash
+: An optional hash that will be put in the target field array when the lookup returns no results. Use this setting if you need to ensure that later references in other parts of the config actually refer to something.
+
+tag_on_failure
+: An optional string that overrides the plugin-level setting. This is useful when defining multiple lookups.
+
+tag_on_default_use
+: An optional string that overrides the plugin-level setting. This is useful when defining multiple lookups.
+
+
+
+## Common options [plugins-filters-jdbc_static-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-jdbc_static-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-jdbc_static-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-jdbc_static-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-jdbc_static-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-jdbc_static-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-jdbc_static-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-jdbc_static-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-jdbc_static-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_static {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ jdbc_static {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-jdbc_static-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_static {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ jdbc_static {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-jdbc_static-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-jdbc_static-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 jdbc_static filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ jdbc_static {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-jdbc_static-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-jdbc_static-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ jdbc_static {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ jdbc_static {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-jdbc_static-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_static {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ jdbc_static {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-jdbc_streaming.md b/docs/reference/plugins-filters-jdbc_streaming.md
new file mode 100644
index 000000000..a869ee9f0
--- /dev/null
+++ b/docs/reference/plugins-filters-jdbc_streaming.md
@@ -0,0 +1,466 @@
+---
+navigation_title: "jdbc_streaming"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-jdbc_streaming.html
+---
+
+# Jdbc_streaming filter plugin [plugins-filters-jdbc_streaming]
+
+
+* A component of the [jdbc integration plugin](/reference/plugins-integrations-jdbc.md)
+* Integration version: v5.5.2
+* Released on: 2024-12-23
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.5.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-jdbc_streaming-index.md).
+
+## Getting help [_getting_help_148]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-jdbc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_147]
+
+This filter executes a SQL query and stores the result set in the field specified as `target`. It will cache the results locally in an LRU cache with expiry.
+
+For example, you can load a row based on an id in the event.
+
+```ruby
+filter {
+ jdbc_streaming {
+ jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+ jdbc_driver_class => "com.mysql.jdbc.Driver"
+ jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
+ jdbc_user => "me"
+ jdbc_password => "secret"
+ statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+ parameters => { "code" => "country_code"}
+ target => "country_details"
+ }
+}
+```
+
+
+## Prepared Statements [plugins-filters-jdbc_streaming-prepared_statements]
+
+Using server side prepared statements can speed up execution times as the server optimises the query plan and execution.
+
+::::{note}
+Not all JDBC accessible technologies will support prepared statements.
+::::
+
+
+With the introduction of Prepared Statement support comes a different code execution path and some new settings. Most of the existing settings are still useful but there are several new settings for Prepared Statements to read up on.
+
+Use the boolean setting `use_prepared_statements` to enable this execution mode.
+
+Use the `prepared_statement_name` setting to specify a name for the prepared statement. This name identifies the prepared statement locally and remotely, and it should be unique in your config and on the database.
+
+Use the `prepared_statement_bind_values` array setting to specify the bind values. Typically, these values are indirectly extracted from your event, i.e. the string in the array refers to a field name in your event. You can also use constant values like numbers or strings, but ensure that any string constants (e.g. a locale constant of "en" or "de") are not also event field names. It is a good idea to use the bracketed field reference syntax for fields and normal strings for constants, e.g. `prepared_statement_bind_values => ["[src_ip]", "tokyo"],`.
+
+There are three possible parameter schemes: interpolated values, field references, and constants. Use interpolation when you are prefixing, suffixing, or concatenating field values to create a value that exists in your database, e.g. `%{{username}}@%{{domain}}` → `"alice@example.org"`, `%{{distance}}km` → `"42km"`. Use field references for exact field values, e.g. `"[srcip]"` → `"192.168.1.2"`. Use constants when a database column holds values that slice or categorise a number of similar records, e.g. language translations.
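+
+As an illustration, here is a minimal sketch of mixing the three schemes in one `prepared_statement_bind_values` array (the table, column, and event field names are hypothetical):
+
+```ruby
+filter {
+  jdbc_streaming {
+    # driver and connection settings omitted for brevity
+    statement => "select * from user_lookup where email = ? and ip = ? and locale = ?"
+    use_prepared_statements => true
+    prepared_statement_name => "lookup_user_by_email_ip_locale"
+    # 1) "%{username}@%{domain}" - interpolation, built from two event fields
+    # 2) "[src_ip]"              - field reference, the exact value of the src_ip field
+    # 3) "en"                    - constant, the same value for every event
+    prepared_statement_bind_values => ["%{username}@%{domain}", "[src_ip]", "en"]
+    target => "user_details"
+  }
+}
+```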
+
+The boolean setting `prepared_statement_warn_on_constant_usage`, which defaults to `true`, controls whether a WARN message is logged when a constant could be missing the bracketed field reference syntax. If you have set your field references and constants correctly, you should set `prepared_statement_warn_on_constant_usage` to `false`. This setting and the associated checks should be deprecated in a future major Logstash release.
+
+The `statement` (or `statement_path`) setting still holds the SQL statement, but to use bind variables you must use the `?` character as a placeholder, in the exact order found in the `prepared_statement_bind_values` array. Some technologies may require connection string properties to be set; see the MySQL example below.
+
+Example:
+
+```ruby
+filter {
+ jdbc_streaming {
+ jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+ jdbc_driver_class => "com.mysql.jdbc.Driver"
+ jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase?cachePrepStmts=true&prepStmtCacheSize=250&prepStmtCacheSqlLimit=2048&useServerPrepStmts=true"
+ jdbc_user => "me"
+ jdbc_password => "secret"
+ statement => "select * from WORLD.COUNTRY WHERE Code = ?"
+ use_prepared_statements => true
+ prepared_statement_name => "lookup_country_info"
+ prepared_statement_bind_values => ["[country_code]"]
+ target => "country_details"
+ }
+}
+```
+
+
+## Jdbc_streaming Filter Configuration Options [plugins-filters-jdbc_streaming-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-jdbc_streaming-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`cache_expiration`](#plugins-filters-jdbc_streaming-cache_expiration) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`cache_size`](#plugins-filters-jdbc_streaming-cache_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`default_hash`](#plugins-filters-jdbc_streaming-default_hash) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`jdbc_connection_string`](#plugins-filters-jdbc_streaming-jdbc_connection_string) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_driver_class`](#plugins-filters-jdbc_streaming-jdbc_driver_class) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_driver_library`](#plugins-filters-jdbc_streaming-jdbc_driver_library) | a valid filesystem path | No |
+| [`jdbc_password`](#plugins-filters-jdbc_streaming-jdbc_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`jdbc_user`](#plugins-filters-jdbc_streaming-jdbc_user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`jdbc_validate_connection`](#plugins-filters-jdbc_streaming-jdbc_validate_connection) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`jdbc_validation_timeout`](#plugins-filters-jdbc_streaming-jdbc_validation_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`parameters`](#plugins-filters-jdbc_streaming-parameters) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`prepared_statement_bind_values`](#plugins-filters-jdbc_streaming-prepared_statement_bind_values) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`prepared_statement_name`](#plugins-filters-jdbc_streaming-prepared_statement_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`prepared_statement_warn_on_constant_usage`](#plugins-filters-jdbc_streaming-prepared_statement_warn_on_constant_usage) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`sequel_opts`](#plugins-filters-jdbc_streaming-sequel_opts) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`statement`](#plugins-filters-jdbc_streaming-statement) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`tag_on_default_use`](#plugins-filters-jdbc_streaming-tag_on_default_use) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_failure`](#plugins-filters-jdbc_streaming-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-filters-jdbc_streaming-target) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`use_cache`](#plugins-filters-jdbc_streaming-use_cache) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_prepared_statements`](#plugins-filters-jdbc_streaming-use_prepared_statements) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-filters-jdbc_streaming-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `cache_expiration` [plugins-filters-jdbc_streaming-cache_expiration]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5.0`
+
+The minimum number of seconds any entry should remain in the cache. Defaults to 5 seconds.
+
+A numeric value. You can use decimals, for example `cache_expiration => 0.25`. If there are transient JDBC errors, the cache will store empty results for a given parameter set and bypass the JDBC lookup. This will merge the `default_hash` into the event until the cache entry expires, at which point the JDBC lookup will be tried again for the same parameters. Conversely, while the cache contains valid results, any external problem that would cause JDBC errors will not be noticed for the `cache_expiration` period.
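+
+For illustration, a sketch that shortens the expiry and supplies a fallback object, so rows missed during a transient outage are retried quickly once the cache entry expires (values are illustrative):
+
+```ruby
+filter {
+  jdbc_streaming {
+    # driver, connection, statement, and target settings omitted for brevity
+    # keep each cache entry for at least a quarter of a second
+    cache_expiration => 0.25
+    # merged into the event whenever the lookup returns no rows
+    default_hash => { "Name" => "unknown" }
+  }
+}
+```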
+
+
+### `cache_size` [plugins-filters-jdbc_streaming-cache_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `500`
+
+The maximum number of cache entries that will be stored. Defaults to 500 entries. The least recently used entry will be evicted.
+
+
+### `default_hash` [plugins-filters-jdbc_streaming-default_hash]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Define a default object to use when lookup fails to return a matching row. Ensure that the key names of this object match the columns from the statement.
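+
+For example, if the statement selects `Name` and `Code` columns, a sketch of a matching default object could look like this (the table and column names are illustrative):
+
+```ruby
+filter {
+  jdbc_streaming {
+    # other settings omitted for brevity
+    statement  => "select Name, Code from WORLD.COUNTRY where Code = :code"
+    parameters => { "code" => "country_code" }
+    # keys mirror the columns returned by the statement
+    default_hash => {
+      "Name" => "Unknown country"
+      "Code" => "XX"
+    }
+    target => "country_details"
+  }
+}
+```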
+
+
+### `jdbc_connection_string` [plugins-filters-jdbc_streaming-jdbc_connection_string]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC connection string
+
+
+### `jdbc_driver_class` [plugins-filters-jdbc_streaming-jdbc_driver_class]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver"
+
+
+### `jdbc_driver_library` [plugins-filters-jdbc_streaming-jdbc_driver_library]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to the third-party JDBC driver library.
+
+
+### `jdbc_password` [plugins-filters-jdbc_streaming-jdbc_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+JDBC password
+
+
+### `jdbc_user` [plugins-filters-jdbc_streaming-jdbc_user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC user
+
+
+### `jdbc_validate_connection` [plugins-filters-jdbc_streaming-jdbc_validate_connection]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Connection pool configuration. Validate connection before use.
+
+
+### `jdbc_validation_timeout` [plugins-filters-jdbc_streaming-jdbc_validation_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3600`
+
+Connection pool configuration. How often to validate a connection (in seconds).
+
+
+### `parameters` [plugins-filters-jdbc_streaming-parameters]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Hash of query parameters, for example `{ "id" => "id_field" }`.
+
+
+### `prepared_statement_bind_values` [plugins-filters-jdbc_streaming-prepared_statement_bind_values]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Array of bind values for the prepared statement. Use field references and constants. See the section on [prepared_statements](#plugins-filters-jdbc_streaming-prepared_statements) for more info.
+
+
+### `prepared_statement_name` [plugins-filters-jdbc_streaming-prepared_statement_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Name given to the prepared statement. It must be unique in your config and in the database. You need to supply this if `use_prepared_statements` is true.
+
+
+### `prepared_statement_warn_on_constant_usage` [plugins-filters-jdbc_streaming-prepared_statement_warn_on_constant_usage]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+A flag that controls whether a warning is logged if, in `prepared_statement_bind_values`, a String constant is detected that might be intended as a field reference.
+
+
+### `sequel_opts` [plugins-filters-jdbc_streaming-sequel_opts]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+General/Vendor-specific Sequel configuration options
+
+An example of an optional connection pool configuration is `max_connections`, the maximum number of connections in the connection pool.
+
+Examples of vendor-specific options can be found in this documentation page: [https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc](https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc)
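+
+As a sketch only (these options are passed straight through to Sequel, so consult the Sequel documentation for what your adapter supports), a pool size could be set like this:
+
+```ruby
+filter {
+  jdbc_streaming {
+    # other settings omitted for brevity
+    # maximum number of connections kept in Sequel's connection pool
+    sequel_opts => { "max_connections" => 4 }
+  }
+}
+```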
+
+
+### `statement` [plugins-filters-jdbc_streaming-statement]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Statement to execute. To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id".
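+
+For example, a minimal sketch pairing the named parameter with the `parameters` hash (table, column, and field names are illustrative):
+
+```ruby
+filter {
+  jdbc_streaming {
+    # other settings omitted for brevity
+    statement  => "SELECT * FROM MYTABLE WHERE ID = :id"
+    # :id is filled with the value of the event's user_id field
+    parameters => { "id" => "user_id" }
+    target     => "mytable_row"
+  }
+}
+```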
+
+
+### `tag_on_default_use` [plugins-filters-jdbc_streaming-tag_on_default_use]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_jdbcstreamingdefaultsused"]`
+
+Append values to the `tags` field if no record was found and default values were used.
+
+
+### `tag_on_failure` [plugins-filters-jdbc_streaming-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_jdbcstreamingfailure"]`
+
+Append values to the `tags` field if a SQL error occurred.
+
+
+### `target` [plugins-filters-jdbc_streaming-target]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field to store the extracted result(s). The field is overwritten if it exists.
+
+
+### `use_cache` [plugins-filters-jdbc_streaming-use_cache]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable or disable caching, boolean true or false. Defaults to true.
+
+
+### `use_prepared_statements` [plugins-filters-jdbc_streaming-use_prepared_statements]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, enables prepared statement usage.
+
+
+
+## Common options [plugins-filters-jdbc_streaming-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-jdbc_streaming-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-jdbc_streaming-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-jdbc_streaming-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-jdbc_streaming-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-jdbc_streaming-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-jdbc_streaming-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-jdbc_streaming-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-jdbc_streaming-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_streaming {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ jdbc_streaming {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add the field `foo_hello`, with the value above and the `%{{host}}` piece replaced with the corresponding value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-jdbc_streaming-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_streaming {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ jdbc_streaming {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-jdbc_streaming-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-jdbc_streaming-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 jdbc_streaming filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ jdbc_streaming {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-jdbc_streaming-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-jdbc_streaming-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_streaming {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ jdbc_streaming {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-jdbc_streaming-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ jdbc_streaming {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ jdbc_streaming {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-json.md b/docs/reference/plugins-filters-json.md
new file mode 100644
index 000000000..8d56f2c0f
--- /dev/null
+++ b/docs/reference/plugins-filters-json.md
@@ -0,0 +1,305 @@
+---
+navigation_title: "json"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-json.html
+---
+
+# JSON filter plugin [plugins-filters-json]
+
+
+* Plugin version: v3.2.1
+* Released on: 2023-12-18
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-json/blob/v3.2.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-json-index.md).
+
+## Getting help [_getting_help_149]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-json). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_148]
+
+This is a JSON parsing filter. It takes an existing field which contains JSON and expands it into an actual data structure within the Logstash event.
+
+By default, it will place the parsed JSON in the root (top level) of the Logstash event, but this filter can be configured to place the JSON into any arbitrary event field, using the `target` configuration.
+
+This plugin has a few fallback scenarios when something bad happens during the parsing of the event. If the JSON parsing fails on the data, the event will be untouched and it will be tagged with `_jsonparsefailure`; you can then use conditionals to clean the data. You can configure this tag with the `tag_on_failure` option.
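+
+For example, a minimal sketch of using the failure tag in a conditional to route unparsable events (the output destination is illustrative):
+
+```ruby
+filter {
+  json {
+    source => "message"
+  }
+}
+output {
+  if "_jsonparsefailure" in [tags] {
+    # keep events that could not be parsed for later inspection
+    file { path => "/var/log/logstash/json_failures.log" }
+  } else {
+    elasticsearch { }
+  }
+}
+```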
+
+If the parsed data contains a `@timestamp` field, the plugin will try to use it for the event’s `@timestamp`. If that parsing fails, the field will be renamed to `_@timestamp` and the event will be tagged with `_timestampparsefailure`.
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-filters-json-ecs_metadata]
+
+The plugin behaves the same regardless of ECS compatibility, except giving a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
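+
+For example, a minimal sketch that parses the `message` field into a dedicated `target` field so the parsed keys cannot clash with fields at the event root (field names are illustrative):
+
+```ruby
+filter {
+  json {
+    source => "message"
+    # parsed JSON is stored under [payload] instead of the event root
+    target => "payload"
+  }
+}
+```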
+
+
+
+## JSON Filter Configuration Options [plugins-filters-json-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-json-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-filters-json-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`skip_on_invalid_json`](#plugins-filters-json-skip_on_invalid_json) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`source`](#plugins-filters-json-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`tag_on_failure`](#plugins-filters-json-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-filters-json-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-json-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `ecs_compatibility` [plugins-filters-json-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+ * `v1`: Elastic Common Schema compliant behavior (warns when `target` isn’t set)
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-filters-json-ecs_metadata) for detailed information.
+
+
+### `skip_on_invalid_json` [plugins-filters-json-skip_on_invalid_json]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Allows skipping the filter on invalid JSON, so you can handle JSON and non-JSON data without warnings.
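+
+A minimal sketch, assuming a `message` field that only sometimes contains JSON:
+
+```ruby
+filter {
+  json {
+    source               => "message"
+    # events whose message is not valid JSON pass through untouched, without warnings
+    skip_on_invalid_json => true
+  }
+}
+```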
+
+
+### `source` [plugins-filters-json-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The configuration for the JSON filter:
+
+```ruby
+ source => source_field
+```
+
+For example, if you have JSON data in the `message` field:
+
+```ruby
+ filter {
+ json {
+ source => "message"
+ }
+ }
+```
+
+The above would parse the JSON from the `message` field.
+
+
+### `tag_on_failure` [plugins-filters-json-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_jsonparsefailure"]`
+
+Append values to the `tags` field when JSON parsing fails.
+
+
+### `target` [plugins-filters-json-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the parsed data. If this setting is omitted, the JSON data will be stored at the root (top level) of the event.
+
+For example, if you want the data to be put in the `doc` field:
+
+```ruby
+ filter {
+ json {
+ target => "doc"
+ }
+ }
+```
+
+JSON in the value of the `source` field will be expanded into a data structure in the `target` field.
+
+::::{note}
+If the `target` field already exists, it will be overwritten!
+::::
+
+
+
+
+## Common options [plugins-filters-json-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-json-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-json-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-json-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-json-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-json-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-json-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-json-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-json-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ json {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add the field `foo_hello`, with the value above and the `%{{host}}` piece replaced with the corresponding value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-json-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ json {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-json-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-json-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 json filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ json {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-json-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-json-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ json {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-json-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ json {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-json_encode.md b/docs/reference/plugins-filters-json_encode.md
new file mode 100644
index 000000000..df3dae415
--- /dev/null
+++ b/docs/reference/plugins-filters-json_encode.md
@@ -0,0 +1,243 @@
+---
+navigation_title: "json_encode"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-json_encode.html
+---
+
+# Json_encode filter plugin [plugins-filters-json_encode]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-json_encode-index.md).
+
+## Installation [_installation_62]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-json_encode`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_150]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-json_encode). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_149]
+
+JSON encode filter. Takes a field and serializes it into JSON.
+
+If no target is specified, the source field is overwritten with the JSON text.
+
+For example, if you have a field named `foo`, and you want to store the JSON encoded string in `bar`, do this:
+
+```ruby
+ filter {
+ json_encode {
+ source => "foo"
+ target => "bar"
+ }
+ }
+```
+
+
+## Json_encode Filter Configuration Options [plugins-filters-json_encode-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-json_encode-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`source`](#plugins-filters-json_encode-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`target`](#plugins-filters-json_encode-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-json_encode-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `source` [plugins-filters-json_encode-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field to convert to JSON.
+
+
+### `target` [plugins-filters-json_encode-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field to write the JSON into. If not specified, the source field will be overwritten.
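+
+For example, a sketch of encoding a field in place by omitting `target`:
+
+```ruby
+filter {
+  json_encode {
+    # the foo field is replaced by its JSON-encoded string representation
+    source => "foo"
+  }
+}
+```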
+
+
+
+## Common options [plugins-filters-json_encode-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-json_encode-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-json_encode-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-json_encode-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-json_encode-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-json_encode-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-json_encode-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-json_encode-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-json_encode-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json_encode {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ json_encode {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add the field `foo_hello`, with the value above and the `%{{host}}` piece replaced with the corresponding value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-json_encode-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json_encode {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ json_encode {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-json_encode-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-json_encode-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 json_encode filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ json_encode {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-json_encode-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-json_encode-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json_encode {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ json_encode {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-json_encode-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ json_encode {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ json_encode {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-kv.md b/docs/reference/plugins-filters-kv.md
new file mode 100644
index 000000000..4c63be9c3
--- /dev/null
+++ b/docs/reference/plugins-filters-kv.md
@@ -0,0 +1,687 @@
+---
+navigation_title: "kv"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-kv.html
+---
+
+# Kv filter plugin [plugins-filters-kv]
+
+
+* Plugin version: v4.7.0
+* Released on: 2022-03-04
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.7.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-kv-index.md).
+
+## Getting help [_getting_help_151]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-kv). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_150]
+
+This filter helps automatically parse messages (or specific event fields) which are of the `foo=bar` variety.
+
+For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED`, you can parse those automatically by configuring:
+
+```ruby
+ filter {
+ kv { }
+ }
+```
+
+The above will result in a message of `ip=1.2.3.4 error=REFUSED` having the fields:
+
+* `ip: 1.2.3.4`
+* `error: REFUSED`
+
+This is great for postfix, iptables, and other types of logs that tend towards `key=value` syntax.
+
+You can configure any arbitrary strings to split your data on, in case your data is not structured using `=` signs and whitespace. For example, this filter can also be used to parse query parameters like `foo=bar&baz=fizz` by setting the `field_split` parameter to `&`.
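+
+For instance, a minimal sketch for the query-parameter case, assuming the raw string lives in a hypothetical `query_params` field:
+
+```ruby
+filter {
+  kv {
+    # e.g. query_params contains "foo=bar&baz=fizz"
+    source      => "query_params"
+    field_split => "&"
+  }
+}
+```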
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-filters-kv-ecs_metadata]
+
+The plugin behaves the same regardless of ECS compatibility, except giving a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
+
+
+
+## Kv Filter Configuration Options [plugins-filters-kv-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-kv-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`allow_duplicate_values`](#plugins-filters-kv-allow_duplicate_values) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`allow_empty_values`](#plugins-filters-kv-allow_empty_values) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`default_keys`](#plugins-filters-kv-default_keys) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`ecs_compatibility`](#plugins-filters-kv-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exclude_keys`](#plugins-filters-kv-exclude_keys) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`field_split`](#plugins-filters-kv-field_split) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field_split_pattern`](#plugins-filters-kv-field_split_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_brackets`](#plugins-filters-kv-include_brackets) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_keys`](#plugins-filters-kv-include_keys) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`prefix`](#plugins-filters-kv-prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`recursive`](#plugins-filters-kv-recursive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_char_key`](#plugins-filters-kv-remove_char_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`remove_char_value`](#plugins-filters-kv-remove_char_value) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`source`](#plugins-filters-kv-source) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-kv-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tag_on_failure`](#plugins-filters-kv-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_timeout`](#plugins-filters-kv-tag_on_timeout) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeout_millis`](#plugins-filters-kv-timeout_millis) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`transform_key`](#plugins-filters-kv-transform_key) | [string](/reference/configuration-file-structure.md#string), one of `["lowercase", "uppercase", "capitalize"]` | No |
+| [`transform_value`](#plugins-filters-kv-transform_value) | [string](/reference/configuration-file-structure.md#string), one of `["lowercase", "uppercase", "capitalize"]` | No |
+| [`trim_key`](#plugins-filters-kv-trim_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`trim_value`](#plugins-filters-kv-trim_value) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`value_split`](#plugins-filters-kv-value_split) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`value_split_pattern`](#plugins-filters-kv-value_split_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`whitespace`](#plugins-filters-kv-whitespace) | [string](/reference/configuration-file-structure.md#string), one of `["strict", "lenient"]` | No |
+
+Also see [Common options](#plugins-filters-kv-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `allow_duplicate_values` [plugins-filters-kv-allow_duplicate_values]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+A bool option for removing duplicate key/value pairs. When set to false, only one unique key/value pair will be preserved.
+
+For example, consider a source like `from=me from=me`. `[from]` will map to an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs, you could use this configuration:
+
+```ruby
+ filter {
+ kv {
+ allow_duplicate_values => false
+ }
+ }
+```
+
+
+### `allow_empty_values` [plugins-filters-kv-allow_empty_values]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+A bool option for explicitly including empty values. When set to true, empty values will be added to the event.
+
+::::{note}
+Parsing empty values typically requires [`whitespace => strict`](#plugins-filters-kv-whitespace).
+::::
+
+
+
+### `default_keys` [plugins-filters-kv-default_keys]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A hash specifying the default keys and their values which should be added to the event in case these keys do not exist in the source field being parsed.
+
+```ruby
+ filter {
+ kv {
+ default_keys => [ "from", "logstash@example.com",
+ "to", "default@dev.null" ]
+ }
+ }
+```
+
+
+### `ecs_compatibility` [plugins-filters-kv-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+ * `v1`: Elastic Common Schema compliant behavior (warns when `target` isn’t set)
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-filters-kv-ecs_metadata) for detailed information.
+
+
+### `exclude_keys` [plugins-filters-kv-exclude_keys]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An array specifying the parsed keys which should not be added to the event. By default no keys will be excluded.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`. To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
+
+```ruby
+ filter {
+ kv {
+ exclude_keys => [ "from", "to" ]
+ }
+ }
+```
+
+
+### `field_split` [plugins-filters-kv-field_split]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `" "`
+
+A string of characters to use as single-character field delimiters for parsing out key-value pairs.
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+**Example with URL Query Strings**
+
+For example, to split out the args from a url query string such as `?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
+
+```ruby
+ filter {
+ kv {
+ field_split => "&?"
+ }
+ }
+```
+
+The above splits on both `&` and `?` characters, giving you the following fields:
+
+* `pin: 12345~0`
+* `d: 123`
+* `e: foo@bar.com`
+* `oq: bobo`
+* `ss: 12345`
+
+
+### `field_split_pattern` [plugins-filters-kv-field_split_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A regex expression to use as field delimiter for parsing out key-value pairs. Useful to define multi-character field delimiters. Setting the `field_split_pattern` option will take precedence over the `field_split` option.
+
+Note that you should avoid using captured groups in your regex and you should be cautious with lookaheads or lookbehinds and positional anchors.
+
+For example, to split fields on a repetition of one or more colons `k1=v1:k2=v2::k3=v3:::k4=v4`:
+
+```ruby
+ filter { kv { field_split_pattern => ":+" } }
+```
+
+To split fields on a regex character that need escaping like the plus sign `k1=v1++k2=v2++k3=v3++k4=v4`:
+
+```ruby
+ filter { kv { field_split_pattern => "\\+\\+" } }
+```
+
+
+### `include_brackets` [plugins-filters-kv-include_brackets]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+A boolean specifying whether to treat square brackets, angle brackets, and parentheses as value "wrappers" that should be removed from the value.
+
+```ruby
+ filter {
+ kv {
+ include_brackets => true
+ }
+ }
+```
+
+For example, the result of this line: `bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
+
+will be:
+
+* bracketsone: hello world
+* bracketstwo: hello world
+* bracketsthree: hello world
+
+instead of:
+
+* bracketsone: `(hello`
+* bracketstwo: `[hello`
+* bracketsthree: `<hello`
+
+
+### `include_keys` [plugins-filters-kv-include_keys]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An array specifying the parsed keys which should be added to the event. By default all keys will be added.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`. To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
+
+```ruby
+ filter {
+ kv {
+ include_keys => [ "from", "to" ]
+ }
+ }
+```
+
+
+### `prefix` [plugins-filters-kv-prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+A string to prepend to all of the extracted keys.
+
+For example, to prepend arg_ to all keys:
+
+```ruby
+ filter { kv { prefix => "arg_" } }
+```
+
+
+### `recursive` [plugins-filters-kv-recursive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+A boolean specifying whether to drill down into values and recursively get more key-value pairs from them. The extra key-value pairs will be stored as subkeys of the root key.
+
+The default is not to parse values recursively.
+
+```ruby
+ filter {
+ kv {
+ recursive => "true"
+ }
+ }
+```
+
+
+### `remove_char_key` [plugins-filters-kv-remove_char_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A string of characters to remove from the key.
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+Unlike the `trim_key` option, all characters are removed from the key, regardless of their position.
+
+For example, to remove `<` `>` `[` `]` and `,` characters from keys:
+
+```ruby
+ filter {
+ kv {
+ remove_char_key => "<>\[\],"
+ }
+ }
+```
+
+
+### `remove_char_value` [plugins-filters-kv-remove_char_value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A string of characters to remove from the value.
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+Unlike the `trim_value` option, all characters are removed from the value, regardless of their position.
+
+For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
+
+```ruby
+ filter {
+ kv {
+ remove_char_value => "<>\[\],"
+ }
+ }
+```
+
+
+### `source` [plugins-filters-kv-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The field to perform `key=value` searching on
+
+For example, to process the `not_the_message` field:
+
+```ruby
+ filter { kv { source => "not_the_message" } }
+```
+
+
+### `target` [plugins-filters-kv-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the container to put all of the key-value pairs into.
+
+If this setting is omitted, fields will be written to the root of the event, as individual fields.
+
+For example, to place all keys into the event field kv:
+
+```ruby
+ filter { kv { target => "kv" } }
+```
+
+
+### `tag_on_failure` [plugins-filters-kv-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* The default value for this setting is `["_kv_filter_error"]`.
+
+When a kv operation causes a runtime exception to be thrown within the plugin, the operation is safely aborted without crashing the plugin, and the event is tagged with the provided values.
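+
+A sketch of overriding the tag and reacting to it later in the filter block (the tag value and follow-up action are illustrative):
+
+```ruby
+filter {
+  kv {
+    tag_on_failure => ["_kv_broken"]
+  }
+  if "_kv_broken" in [tags] {
+    # mark events whose kv parsing raised an error
+    mutate { add_field => { "parse_status" => "kv_error" } }
+  }
+}
+```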
+
+
+### `tag_on_timeout` [plugins-filters-kv-tag_on_timeout]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The default value for this setting is `_kv_filter_timeout`.
+
+When timeouts are enabled and a kv operation is aborted, the event is tagged with the provided value (see: [`timeout_millis`](#plugins-filters-kv-timeout_millis)).
+
+
+### `timeout_millis` [plugins-filters-kv-timeout_millis]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* The default value for this setting is 30000 (30 seconds).
+* Set to zero (`0`) to disable timeouts
+
+Timeouts provide a safeguard against inputs that are pathological against the regular expressions that are used to extract key/value pairs. When parsing an event exceeds this threshold the operation is aborted and the event is tagged in order to prevent the operation from blocking the pipeline (see: [`tag_on_timeout`](#plugins-filters-kv-tag_on_timeout)).
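+
+For example, a sketch that lowers the timeout to 10 seconds and uses a custom timeout tag (the tag value is illustrative):
+
+```ruby
+    filter {
+      kv {
+        timeout_millis => 10000
+        tag_on_timeout => "_kv_timeout"
+      }
+    }
+```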
+
+
+### `transform_key` [plugins-filters-kv-transform_key]
+
+* Value can be any of: `lowercase`, `uppercase`, `capitalize`
+* There is no default value for this setting.
+
+Transform keys to lower case, upper case or capitals.
+
+For example, to lowercase all keys:
+
+```ruby
+ filter {
+ kv {
+ transform_key => "lowercase"
+ }
+ }
+```
+
+
+### `transform_value` [plugins-filters-kv-transform_value]
+
+* Value can be any of: `lowercase`, `uppercase`, `capitalize`
+* There is no default value for this setting.
+
+Transform values to lower case, upper case or capitals.
+
+For example, to capitalize all values:
+
+```ruby
+ filter {
+ kv {
+ transform_value => "capitalize"
+ }
+ }
+```
+
+
+### `trim_key` [plugins-filters-kv-trim_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A string of characters to trim from the key. This is useful if your keys are wrapped in brackets or start with a space.
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the key.
+
+For example, to trim `<`, `>`, `[`, `]` and `,` characters from keys:
+
+```ruby
+ filter {
+ kv {
+ trim_key => "<>\[\],"
+ }
+ }
+```
+
+
+### `trim_value` [plugins-filters-kv-trim_value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A string of characters to trim from the value. This is useful if your values are wrapped in brackets or are terminated with commas (like postfix logs).
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the value.
+
+For example, to trim `<`, `>`, `[`, `]` and `,` characters from values:
+
+```ruby
+ filter {
+ kv {
+ trim_value => "<>\[\],"
+ }
+ }
+```
+
+
+### `value_split` [plugins-filters-kv-value_split]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"="`
+
+A non-empty string of characters to use as single-character value delimiters for parsing out key-value pairs.
+
+These characters form a regex character class and thus you must escape special regex characters like `[` or `]` using `\`.
+
+For example, to identify key-values such as `key1:value1 key2:value2`:
+
+```ruby
+ filter { kv { value_split => ":" } }
+```
+
+
+### `value_split_pattern` [plugins-filters-kv-value_split_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A regex expression to use as value delimiter for parsing out key-value pairs. Useful to define multi-character value delimiters. Setting the `value_split_pattern` option takes precedence over the `value_split` option.
+
+Note that you should avoid using captured groups in your regex and you should be cautious with lookaheads or lookbehinds and positional anchors.
+
+See `field_split_pattern` for examples.
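+
+As a rough sketch, to treat a double colon as the key-value separator (so that `key1::value1` parses correctly), you could use something like:
+
+```ruby
+  filter { kv { value_split_pattern => "::" } }
+```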
+
+
+### `whitespace` [plugins-filters-kv-whitespace]
+
+* Value can be any of: `lenient`, `strict`
+* Default value is `lenient`
+
+An option specifying whether to be *lenient* or *strict* with the acceptance of unnecessary whitespace surrounding the configured value-split sequence.
+
+By default the plugin is run in `lenient` mode, which ignores spaces that occur before or after the value-splitter. While this allows the plugin to make reasonable guesses with most input, in some situations it may be too lenient.
+
+You may want to enable `whitespace => strict` mode if you have control of the input data and can guarantee that no extra spaces are added surrounding the pattern you have defined for splitting values. Doing so will ensure that a *field-splitter* sequence immediately following a *value-splitter* will be interpreted as an empty field.
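+
+For example, a minimal configuration that enables strict whitespace handling:
+
+```ruby
+  filter { kv { whitespace => "strict" } }
+```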
+
+
+
+## Common options [plugins-filters-kv-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-kv-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-kv-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-kv-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-kv-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-kv-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-kv-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-kv-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-kv-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}`.
+
+Example:
+
+```json
+ filter {
+ kv {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ kv {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-kv-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ kv {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ kv {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-kv-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-kv-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 kv filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ kv {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-kv-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-kv-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ kv {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ kv {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-kv-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ kv {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ kv {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-memcached.md b/docs/reference/plugins-filters-memcached.md
new file mode 100644
index 000000000..fd232e943
--- /dev/null
+++ b/docs/reference/plugins-filters-memcached.md
@@ -0,0 +1,347 @@
+---
+navigation_title: "memcached"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-memcached.html
+---
+
+# Memcached filter plugin [plugins-filters-memcached]
+
+
+* Plugin version: v1.2.0
+* Released on: 2023-01-18
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-memcached/blob/v1.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-memcached-index.md).
+
+## Getting help [_getting_help_152]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-memcached). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_151]
+
+The Memcached filter provides integration with external data in Memcached.
+
+It currently provides the following facilities:
+
+* `get`: get values for one or more memcached keys and inject them into the event at the provided paths
+* `set`: set values from the event to the corresponding memcached keys
+
+
+## Examples [_examples_2]
+
+This plugin enables key/value lookup enrichment against a Memcached object caching system. You can use this plugin to query for a value, and set it if not found.
+
+### GET example [_get_example]
+
+```txt
+memcached {
+ hosts => ["localhost"]
+ namespace => "convert_mm"
+ get => {
+ "%{millimeters}" => "[inches]"
+ }
+ add_tag => ["from_cache"]
+ id => "memcached-get"
+ }
+```
+
+
+### SET example [_set_example]
+
+```txt
+memcached {
+ hosts => ["localhost"]
+ namespace => "convert_mm"
+ set => {
+ "[inches]" => "%{millimeters}"
+ }
+ id => "memcached-set"
+ }
+```
+
+
+
+## Memcached Filter Configuration Options [plugins-filters-memcached-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-memcached-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`hosts`](#plugins-filters-memcached-hosts) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`namespace`](#plugins-filters-memcached-namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`get`](#plugins-filters-memcached-get) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`set`](#plugins-filters-memcached-set) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`tag_on_failure`](#plugins-filters-memcached-tag_on_failure) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ttl`](#plugins-filters-memcached-ttl) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-filters-memcached-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `hosts` [plugins-filters-memcached-hosts]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `localhost`
+
+The `hosts` parameter accepts an array of addresses corresponding to memcached instances.
+
+Hosts can be specified via FQDN (e.g., `example.com`), an IPV4 address (e.g., `123.45.67.89`), or an IPV6 address (e.g. `::1` or `2001:0db8:85a3:0000:0000:8a2e:0370:7334`). If your memcached host uses a non-standard port, the port can be specified by appending a colon (`:`) and the port number; to include a port with an IPv6 address, the address must first be wrapped in square-brackets (`[` and `]`), e.g., `[::1]:11211`.
+
+If more than one host is specified, requests will be distributed to the given hosts using a modulus of the CRC-32 checksum of each key.
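+
+For example, a sketch that mixes the supported address forms (the hosts shown are placeholders):
+
+```
+filter {
+  memcached {
+    hosts => ["cache1.example.com", "10.1.2.3:11212", "[::1]:11211"]
+  }
+}
+```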
+
+
+### `namespace` [plugins-filters-memcached-namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If specified, prefix all memcached keys with the given string followed by a colon (`:`); this is useful if all keys being used by this plugin share a common prefix.
+
+Example:
+
+In the following configuration, we would GET `fruit:banana` and `fruit:apple` from memcached:
+
+```
+filter {
+ memcached {
+ namespace => "fruit"
+ get => {
+ "banana" => "[fruit-stats][banana]"
+      "apple" => "[fruit-stats][apple]"
+ }
+ }
+}
+```
+
+
+### `get` [plugins-filters-memcached-get]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+If specified, get the values for the given keys from memcached, and store them in the corresponding fields on the event.
+
+* keys are interpolated (e.g., if the event has a field `foo` with value `bar`, the key `sand/%{{foo}}` will evaluate to `sand/bar`)
+* fields can be nested references
+
+```
+filter {
+ memcached {
+ get => {
+ "memcached-key-1" => "field1"
+ "memcached-key-2" => "[nested][field2]"
+ }
+ }
+}
+```
+
+
+### `set` [plugins-filters-memcached-set]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+If specified, extracts the values from the given event fields, and sets the corresponding keys to those values in memcached with the configured [ttl](#plugins-filters-memcached-ttl).
+
+* keys are interpolated (e.g., if the event has a field `foo` with value `bar`, the key `sand/%{{foo}}` will evaluate to `sand/bar`)
+* fields can be nested references
+
+```
+filter {
+ memcached {
+ set => {
+ "field1" => "memcached-key-1"
+ "[nested][field2]" => "memcached-key-2"
+ }
+ }
+}
+```
+
+
+### `tag_on_failure` [plugins-filters-memcached-tag_on_failure]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The default value for this setting is `_memcached_failure`.
+
+When a memcached operation causes a runtime exception to be thrown within the plugin, the operation is safely aborted without crashing the plugin, and the event is tagged with the provided value.
+
+
+### `ttl` [plugins-filters-memcached-ttl]
+
+For usages of this plugin that persist data to memcached (e.g., [`set`](#plugins-filters-memcached-set)), the time-to-live in seconds.
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* The default value is `0` (no expiry)
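+
+For example, a minimal sketch that expires values written by `set` after one hour:
+
+```
+filter {
+  memcached {
+    set => { "field1" => "memcached-key-1" }
+    ttl => 3600
+  }
+}
+```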
+
+
+
+## Common options [plugins-filters-memcached-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-memcached-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-memcached-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-memcached-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-memcached-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-memcached-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-memcached-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-memcached-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-memcached-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}`.
+
+Example:
+
+```json
+ filter {
+ memcached {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ memcached {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-memcached-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ memcached {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ memcached {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-memcached-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-memcached-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 memcached filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ memcached {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-memcached-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-memcached-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ memcached {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ memcached {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-memcached-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ memcached {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ memcached {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-metricize.md b/docs/reference/plugins-filters-metricize.md
new file mode 100644
index 000000000..ad8c95d53
--- /dev/null
+++ b/docs/reference/plugins-filters-metricize.md
@@ -0,0 +1,280 @@
+---
+navigation_title: "metricize"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-metricize.html
+---
+
+# Metricize filter plugin [plugins-filters-metricize]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-metricize-index.md).
+
+## Installation [_installation_63]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-metricize`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_153]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-metricize). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_152]
+
+The metricize filter takes complex events containing a number of metrics and splits these up into multiple events, each holding a single metric.
+
+Example:
+
+Assume the following filter configuration:
+
+```
+filter {
+  metricize {
+    metrics => [ "metric1", "metric2" ]
+  }
+}
+```
+
+Assuming the following event is passed in:
+
+```
+{
+  type => "type A"
+  metric1 => "value1"
+  metric2 => "value2"
+}
+```
+
+This will result in the following 2 events being generated in addition to the original event:
+
+```
+{                      {
+  type => "type A"       type => "type A"
+  metric => "metric1"    metric => "metric2"
+  value => "value1"      value => "value2"
+}                      }
+```
+
+## Metricize Filter Configuration Options [plugins-filters-metricize-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-metricize-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`drop_original_event`](#plugins-filters-metricize-drop_original_event) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`metric_field_name`](#plugins-filters-metricize-metric_field_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metrics`](#plugins-filters-metricize-metrics) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`value_field_name`](#plugins-filters-metricize-value_field_name) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-metricize-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `drop_original_event` [plugins-filters-metricize-drop_original_event]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Flag indicating whether the original event should be dropped or not.
+
+
+### `metric_field_name` [plugins-filters-metricize-metric_field_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"metric"`
+
+Name of the field the metric name will be written to.
+
+
+### `metrics` [plugins-filters-metricize-metrics]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+A new metrics event will be created for each metric field in this list. All fields in this list will be removed from generated events.
+
+
+### `value_field_name` [plugins-filters-metricize-value_field_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"value"`
+
+Name of the field the metric value will be written to.
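+
+As a sketch combining these options, the following configuration drops the original event and writes the metric name and value to custom fields (`name` and `reading` are illustrative field names):
+
+```
+filter {
+  metricize {
+    metrics => [ "metric1", "metric2" ]
+    drop_original_event => true
+    metric_field_name => "name"
+    value_field_name => "reading"
+  }
+}
+```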
+
+
+
+## Common options [plugins-filters-metricize-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-metricize-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-metricize-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-metricize-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-metricize-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-metricize-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-metricize-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-metricize-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-metricize-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}`.
+
+Example:
+
+```json
+ filter {
+ metricize {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ metricize {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-metricize-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ metricize {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ metricize {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-metricize-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-metricize-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 metricize filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ metricize {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-metricize-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-metricize-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ metricize {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ metricize {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-metricize-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ metricize {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ metricize {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-metrics.md b/docs/reference/plugins-filters-metrics.md
new file mode 100644
index 000000000..91114a4d8
--- /dev/null
+++ b/docs/reference/plugins-filters-metrics.md
@@ -0,0 +1,387 @@
+---
+navigation_title: "metrics"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-metrics.html
+---
+
+# Metrics filter plugin [plugins-filters-metrics]
+
+
+* Plugin version: v4.0.7
+* Released on: 2021-01-20
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-metrics-index.md).
+
+## Getting help [_getting_help_154]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-metrics). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_153]
+
+The metrics filter is useful for aggregating metrics.
+
+::::{important}
+Elasticsearch 2.0 no longer allows field names with dots. Version 3.0 of the metrics filter plugin changes behavior to use nested fields rather than dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please note the changes in the documentation (underscores and sub-fields used).
+::::
+
+
+For example, if you have a field `response` that is an HTTP response code, and you want to count each kind of response, you can do this:
+
+```ruby
+ filter {
+ metrics {
+ meter => [ "http_%{response}" ]
+ add_tag => "metric"
+ }
+ }
+```
+
+Metrics are flushed every 5 seconds by default or according to `flush_interval`. Metrics appear as new events in the event stream and go through any filters that occur after this filter, as well as any outputs.
+
+In general, you will want to add a tag to your metrics and have an output explicitly look for that tag.
+
+The event that is flushed will include every *meter* and *timer* metric in the following way:
+
+
+## `meter` values [_meter_values]
+
+For a `meter => "thing"` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window
+
+
+## `timer` values [_timer_values]
+
+For a `timer => { "thing" => "%{{duration}}" }` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second average value in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second average value in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second average value in a 15-minute sliding window
+* "[thing][min]" - the minimum value seen for this metric
+* "[thing][max]" - the maximum value seen for this metric
+* "[thing][stddev]" - the standard deviation for this metric
+* "[thing][mean]" - the mean for this metric
+* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`)
+
+The default lengths of the event rate window (1, 5, and 15 minutes) can be configured with the `rates` option.
+
+
+## Example: Computing event rate [_example_computing_event_rate]
+
+For a simple example, let’s track how many events per second are running through Logstash:
+
+```ruby
+ input {
+ generator {
+ type => "generated"
+ }
+ }
+
+ filter {
+ if [type] == "generated" {
+ metrics {
+ meter => "events"
+ add_tag => "metric"
+ }
+ }
+ }
+
+ output {
+ # only emit events with the 'metric' tag
+ if "metric" in [tags] {
+ stdout {
+ codec => line {
+ format => "rate: %{[events][rate_1m]}"
+ }
+ }
+ }
+ }
+```
+
+Running the above:
+
+```ruby
+ % bin/logstash -f example.conf
+ rate: 23721.983566819246
+ rate: 24811.395722536377
+ rate: 25875.892745934525
+ rate: 26836.42375967113
+```
+
+We see the output includes our events' 1-minute rate.
+
+In the real world, you would emit this to graphite or another metrics store, like so:
+
+```ruby
+ output {
+ graphite {
+ metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ]
+ }
+ }
+```
+
+
+## Metrics Filter Configuration Options [plugins-filters-metrics-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-metrics-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`clear_interval`](#plugins-filters-metrics-clear_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`flush_interval`](#plugins-filters-metrics-flush_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ignore_older_than`](#plugins-filters-metrics-ignore_older_than) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`meter`](#plugins-filters-metrics-meter) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`percentiles`](#plugins-filters-metrics-percentiles) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`rates`](#plugins-filters-metrics-rates) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`timer`](#plugins-filters-metrics-timer) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-filters-metrics-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `clear_interval` [plugins-filters-metrics-clear_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+The clear interval, when all counters are reset.
+
+If set to -1, the default value, the metrics will never be cleared. Otherwise, it should be a multiple of 5s.
+
+
+### `flush_interval` [plugins-filters-metrics-flush_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+The flush interval, when the metrics event is created. Must be a multiple of 5s.
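+
+For example, a sketch that emits a metrics event every 10 seconds and resets all counters every minute (both values are multiples of 5 seconds):
+
+```ruby
+  filter {
+    metrics {
+      meter => [ "events" ]
+      flush_interval => 10
+      clear_interval => 60
+      add_tag => "metric"
+    }
+  }
+```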
+
+
+### `ignore_older_than` [plugins-filters-metrics-ignore_older_than]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+Don’t track events that have `@timestamp` older than some number of seconds.
+
+This is useful if you want to only include events that are near real-time in your metrics.
+
+For example, to only count events that are within 10 seconds of real-time, you would do this:
+
+```
+filter {
+ metrics {
+ meter => [ "hits" ]
+ ignore_older_than => 10
+ }
+}
+```
+
+### `meter` [plugins-filters-metrics-meter]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+syntax: `meter => [ "name of metric", "name of metric" ]`
+
+
+### `percentiles` [plugins-filters-metrics-percentiles]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[1, 5, 10, 90, 95, 99, 100]`
+
+The percentiles that should be measured and emitted for timer values.
+
+
+### `rates` [plugins-filters-metrics-rates]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[1, 5, 15]`
+
+The rates that should be measured, in minutes. Possible values are 1, 5, and 15.
+
+
+### `timer` [plugins-filters-metrics-timer]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+syntax: `timer => [ "name of metric", "%{{time_value}}" ]`
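+
+For example, a sketch that times a hypothetical `duration` field and narrows the emitted percentiles and rate windows:
+
+```ruby
+  filter {
+    metrics {
+      timer => { "request_time" => "%{duration}" }
+      percentiles => [ 50, 95, 99 ]
+      rates => [ 1 ]
+      add_tag => "metric"
+    }
+  }
+```
+
+With this configuration, flushed events would include fields such as `[request_time][mean]` and `[request_time][p95]`.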
+
+
+
+## Common options [plugins-filters-metrics-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-metrics-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-metrics-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-metrics-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-metrics-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-metrics-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-metrics-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-metrics-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-metrics-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}`.
+
+Example:
+
+```json
+ filter {
+ metrics {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ metrics {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-metrics-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ metrics {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ metrics {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-metrics-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-metrics-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 metrics filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ metrics {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-metrics-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-metrics-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ metrics {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ metrics {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-metrics-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ metrics {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ metrics {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-mutate.md b/docs/reference/plugins-filters-mutate.md
new file mode 100644
index 000000000..bc8b91c80
--- /dev/null
+++ b/docs/reference/plugins-filters-mutate.md
@@ -0,0 +1,600 @@
+---
+navigation_title: "mutate"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-mutate.html
+---
+
+# Mutate filter plugin [plugins-filters-mutate]
+
+
+* Plugin version: v3.5.8
+* Released on: 2023-11-22
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.5.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-mutate-index.md).
+
+## Getting help [_getting_help_155]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-mutate). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_154]
+
+The mutate filter allows you to perform general mutations on fields. You can rename, replace, and modify fields in your events.
+
+### Processing order [plugins-filters-mutate-proc_order]
+
+Mutations in a config file are executed in this order:
+
+* coerce
+* rename
+* update
+* replace
+* convert
+* gsub
+* uppercase
+* capitalize
+* lowercase
+* strip
+* split
+* join
+* merge
+* copy
+
+::::{important}
+Each mutation must be in its own code block if the sequence of operations needs to be preserved.
+::::
+
+
+Example:
+
+```ruby
+filter {
+ mutate {
+ split => { "hostname" => "." }
+ add_field => { "shortHostname" => "%{[hostname][0]}" }
+ }
+
+ mutate {
+ rename => {"shortHostname" => "hostname"}
+ }
+}
+```
+
+
+
+## Mutate Filter Configuration Options [plugins-filters-mutate-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-mutate-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`convert`](#plugins-filters-mutate-convert) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`copy`](#plugins-filters-mutate-copy) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`gsub`](#plugins-filters-mutate-gsub) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`join`](#plugins-filters-mutate-join) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`lowercase`](#plugins-filters-mutate-lowercase) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`merge`](#plugins-filters-mutate-merge) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`coerce`](#plugins-filters-mutate-coerce) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`rename`](#plugins-filters-mutate-rename) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`replace`](#plugins-filters-mutate-replace) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`split`](#plugins-filters-mutate-split) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`strip`](#plugins-filters-mutate-strip) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`update`](#plugins-filters-mutate-update) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`uppercase`](#plugins-filters-mutate-uppercase) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`capitalize`](#plugins-filters-mutate-capitalize) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tag_on_failure`](#plugins-filters-mutate-tag_on_failure) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-mutate-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `convert` [plugins-filters-mutate-convert]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Convert a field’s value to a different type, like turning a string to an integer. If the field value is an array, all members will be converted. If the field is a hash no action will be taken.
+
+::::{admonition} Conversion insights
+:class: note
+
+The values are converted using Ruby semantics. Be aware that using `float` and `float_eu` converts the value to a double-precision 64-bit IEEE 754 floating point decimal number. To maintain precision after this conversion, you should use a `double` in the Elasticsearch mappings.
+
+::::
+
+
+Valid conversion targets, and their expected behaviour with different inputs are:
+
+* `integer`:
+
+ * strings are parsed; comma-separators are supported (e.g., the string `"1,000"` produces an integer with value of one thousand); when strings have decimal parts, they are *truncated*.
+ * floats and decimals are *truncated* (e.g., `3.99` becomes `3`, `-2.7` becomes `-2`)
+ * boolean true and boolean false are converted to `1` and `0` respectively
+
+* `integer_eu`:
+
+ * same as `integer`, except string values support dot-separators and comma-decimals (e.g., `"1.000"` produces an integer with value of one thousand)
+
+* `float`:
+
+ * integers are converted to floats
+ * strings are parsed; comma-separators and dot-decimals are supported (e.g., `"1,000.5"` produces a float with value of one thousand and one half)
+ * boolean true and boolean false are converted to `1.0` and `0.0` respectively
+
+* `float_eu`:
+
+ * same as `float`, except string values support dot-separators and comma-decimals (e.g., `"1.000,5"` produces a float with value of one thousand and one half)
+
+* `string`:
+
+ * all values are stringified and encoded with UTF-8
+
+* `boolean`:
+
+ * integer 0 is converted to boolean `false`
+ * integer 1 is converted to boolean `true`
+ * float 0.0 is converted to boolean `false`
+ * float 1.0 is converted to boolean `true`
+    * strings `"true"`, `"t"`, `"yes"`, `"y"`, `"1"` and `"1.0"` are converted to boolean `true`
+ * strings `"false"`, `"f"`, `"no"`, `"n"`, `"0"` and `"0.0"` are converted to boolean `false`
+ * empty strings are converted to boolean `false`
+ * all other values pass straight through without conversion and log a warning message
+ * for arrays each value gets processed separately using rules above
+
+
+This plugin can convert multiple fields in the same document, see the example below.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ convert => {
+ "fieldname" => "integer"
+ "booleanfield" => "boolean"
+ }
+ }
+ }
+```
+
+
+### `copy` [plugins-filters-mutate-copy]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Copy an existing field to another field. The existing target field will be overridden.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ copy => { "source_field" => "dest_field" }
+ }
+ }
+```
+
+
+### `gsub` [plugins-filters-mutate-gsub]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Match a regular expression against a field value and replace all matches with a replacement string. Only fields that are strings or arrays of strings are supported. For other kinds of fields no action will be taken.
+
+This configuration takes an array consisting of 3 elements per field/substitution.
+
+Be aware of escaping any backslash in the config file.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ gsub => [
+ # replace all forward slashes with underscore
+ "fieldname", "/", "_",
+ # replace backslashes, question marks, hashes, and minuses
+ # with a dot "."
+ "fieldname2", "[\\?#-]", "."
+ ]
+ }
+ }
+```
+
+
+### `join` [plugins-filters-mutate-join]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Join an array with a separator character or string. Does nothing on non-array fields.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ join => { "fieldname" => "," }
+ }
+ }
+```
+
+
+### `lowercase` [plugins-filters-mutate-lowercase]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Convert a string to its lowercase equivalent.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ lowercase => [ "fieldname" ]
+ }
+ }
+```
+
+
+### `merge` [plugins-filters-mutate-merge]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Merge two fields of arrays or hashes. String fields will automatically be converted into an array, so:
+
+::::{admonition}
+```
+`array` + `string` will work
+`string` + `string` will result in a 2-entry array in `dest_field`
+`array` and `hash` will not work
+```
+::::
+
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ merge => { "dest_field" => "added_field" }
+ }
+ }
+```
+
+
+### `coerce` [plugins-filters-mutate-coerce]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Set the default value of a field that exists but is null.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ # Sets the default value of the 'field1' field to 'default_value'
+ coerce => { "field1" => "default_value" }
+ }
+ }
+```
+
+
+### `rename` [plugins-filters-mutate-rename]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Rename one or more fields.
+
+If the destination field already exists, its value is replaced.
+
+If one of the source fields doesn’t exist, no action is performed for that field. (This is not considered an error; the `tag_on_failure` tag is not applied.)
+
+When renaming multiple fields, the order of operations is not guaranteed.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ # Renames the 'HOSTORIP' field to 'client_ip'
+ rename => { "HOSTORIP" => "client_ip" }
+ }
+ }
+```
+
+
+### `replace` [plugins-filters-mutate-replace]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Replace the value of a field with a new value, or add the field if it doesn’t already exist. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ replace => { "message" => "%{source_host}: My new message" }
+ }
+ }
+```
+
+
+### `split` [plugins-filters-mutate-split]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Split a field to an array using a separator character or string. Only works on string fields.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ split => { "fieldname" => "," }
+ }
+ }
+```
+
+
+### `strip` [plugins-filters-mutate-strip]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Strip whitespace from a field. Note that this only works on leading and trailing whitespace.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ strip => ["field1", "field2"]
+ }
+ }
+```
+
+
+### `update` [plugins-filters-mutate-update]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Update an existing field with a new value. If the field does not exist, then no action will be taken.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ update => { "sample" => "My new message" }
+ }
+ }
+```
+
+
+### `uppercase` [plugins-filters-mutate-uppercase]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Convert a string to its uppercase equivalent.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ uppercase => [ "fieldname" ]
+ }
+ }
+```
+
+
+### `capitalize` [plugins-filters-mutate-capitalize]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Convert a string to its capitalized equivalent.
+
+Example:
+
+```ruby
+ filter {
+ mutate {
+ capitalize => [ "fieldname" ]
+ }
+ }
+```
+
+
+### `tag_on_failure` [plugins-filters-mutate-tag_on_failure]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The default value for this setting is `_mutate_error`
+
+If a failure occurs during the application of this mutate filter, the rest of the operations are aborted and the provided tag is added to the event.
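+
+For example, a minimal sketch (the field names and tag are illustrative) that applies a custom failure tag when an operation such as `merge` cannot complete:
+
+```ruby
+ filter {
+   mutate {
+     # If this merge fails (for example, array + hash), the remaining
+     # operations are skipped and the tag below is added to the event.
+     merge => { "dest_field" => "added_field" }
+     tag_on_failure => "_my_mutate_error"
+   }
+ }
+```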
+
+
+
+## Common options [plugins-filters-mutate-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-mutate-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-mutate-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-mutate-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-mutate-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-mutate-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-mutate-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-mutate-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-mutate-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ mutate {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ mutate {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-mutate-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ mutate {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ mutate {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-mutate-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-mutate-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 mutate filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ mutate {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-mutate-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-mutate-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ mutate {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ mutate {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-mutate-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ mutate {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ mutate {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-prune.md b/docs/reference/plugins-filters-prune.md
new file mode 100644
index 000000000..4c4cb15a5
--- /dev/null
+++ b/docs/reference/plugins-filters-prune.md
@@ -0,0 +1,318 @@
+---
+navigation_title: "prune"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-prune.html
+---
+
+# Prune filter plugin [plugins-filters-prune]
+
+
+* Plugin version: v3.0.4
+* Released on: 2019-09-12
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-prune-index.md).
+
+## Getting help [_getting_help_156]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [GitHub](https://github.com/logstash-plugins/logstash-filter-prune). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_155]
+
+The prune filter is for removing fields from events based on whitelists or blacklists of field names or their values (names and values can also be regular expressions).
+
+This can be useful, for example, if you have a [json](/reference/plugins-filters-json.md) or [kv](/reference/plugins-filters-kv.md) filter that creates a number of fields whose names you don’t necessarily know beforehand, and you only want to keep a subset of them.
+
+Usage help: To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`. Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }`
+
+```ruby
+ filter {
+   prune {
+     whitelist_names => [ "msg" ]
+   }
+ }
+```
+
+Allows both `"msg"` and `"msg_short"` through.
+
+While:
+
+```ruby
+ filter {
+   prune {
+     whitelist_names => ["^msg$"]
+   }
+ }
+```
+
+Allows only `"msg"` through.
+
+Logstash stores an event’s `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]` to maintain `tags` after pruning or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`.
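+
+For instance, a minimal sketch that keeps only `msg` while still preserving the event’s `tags`:
+
+```ruby
+ filter {
+   prune {
+     # "^tags$" keeps the tags field from being pruned away
+     whitelist_names => [ "^msg$", "^tags$" ]
+   }
+ }
+```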
+
+::::{note}
+This filter currently only supports operations on top-level fields, i.e. whitelisting and blacklisting of subfields based on name or value does not work.
+::::
+
+
+
+## Prune Filter Configuration Options [plugins-filters-prune-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-prune-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`blacklist_names`](#plugins-filters-prune-blacklist_names) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`blacklist_values`](#plugins-filters-prune-blacklist_values) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`interpolate`](#plugins-filters-prune-interpolate) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`whitelist_names`](#plugins-filters-prune-whitelist_names) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`whitelist_values`](#plugins-filters-prune-whitelist_values) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-filters-prune-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `blacklist_names` [plugins-filters-prune-blacklist_names]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["%{[^}]+}"]`
+
+Exclude fields whose names match the specified regexps. By default, unresolved `%{{field}}` strings are excluded.
+
+```ruby
+ filter {
+ prune {
+ blacklist_names => [ "method", "(referrer|status)", "${some}_field" ]
+ }
+ }
+```
+
+
+### `blacklist_values` [plugins-filters-prune-blacklist_values]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Exclude specified fields if their values match one of the supplied regular expressions. In case field values are arrays, each array item is matched against the regular expressions and matching array items will be excluded.
+
+```ruby
+ filter {
+ prune {
+ blacklist_values => [ "uripath", "/index.php",
+ "method", "(HEAD|OPTIONS)",
+ "status", "^[^2]" ]
+ }
+ }
+```
+
+
+### `interpolate` [plugins-filters-prune-interpolate]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Determines whether configuration fields and values should be interpolated for dynamic values (when resolving `%{{some_field}}`). This probably adds some performance overhead. Defaults to `false`.
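+
+A minimal sketch (the `blocked_field` name is illustrative), assuming each event carries the name of the field to exclude:
+
+```ruby
+ filter {
+   prune {
+     # With interpolate enabled, %{blocked_field} is resolved from the event
+     # before the blacklist regexps are applied.
+     interpolate => true
+     blacklist_names => [ "%{blocked_field}" ]
+   }
+ }
+```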
+
+
+### `whitelist_names` [plugins-filters-prune-whitelist_names]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Include fields only if their names match the specified regexps. The default is an empty list, which means all fields are included.
+
+```ruby
+ filter {
+ prune {
+ whitelist_names => [ "method", "(referrer|status)", "${some}_field" ]
+ }
+ }
+```
+
+
+### `whitelist_values` [plugins-filters-prune-whitelist_values]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Include specified fields only if their values match one of the supplied regular expressions. In case field values are arrays, each array item is matched against the regular expressions and only matching array items will be included. By default all fields that are not listed in this setting are kept unless pruned by other settings.
+
+```ruby
+ filter {
+ prune {
+ whitelist_values => [ "uripath", "/index.php",
+ "method", "(GET|POST)",
+ "status", "^[^2]" ]
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-prune-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-prune-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-prune-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-prune-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-prune-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-prune-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-prune-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-prune-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-prune-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ prune {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ prune {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-prune-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ prune {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ prune {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-prune-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-prune-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 prune filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ prune {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-prune-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-prune-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ prune {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ prune {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-prune-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ prune {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ prune {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-range.md b/docs/reference/plugins-filters-range.md
new file mode 100644
index 000000000..aa8323d42
--- /dev/null
+++ b/docs/reference/plugins-filters-range.md
@@ -0,0 +1,249 @@
+---
+navigation_title: "range"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-range.html
+---
+
+# Range filter plugin [plugins-filters-range]
+
+
+* Plugin version: v3.0.3
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-range-index.md).
+
+## Installation [_installation_64]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-range`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_157]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [GitHub](https://github.com/logstash-plugins/logstash-filter-range). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_156]
+
+This filter is used to check that certain fields are within expected size/length ranges. Supported types are numbers and strings. Numbers are checked to be within a numeric value range. Strings are checked to be within a string length range. More than one range can be specified for the same field name; actions are applied incrementally. When a field value is within a specified range, an action is taken. Supported actions are drop event, add tag, or add field with a specified value.
+
+Example use cases include histogram-like tagging of events, finding anomalous values in fields, or dropping events that are too big.
+
+
+## Range Filter Configuration Options [plugins-filters-range-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-range-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`negate`](#plugins-filters-range-negate) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ranges`](#plugins-filters-range-ranges) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-range-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `negate` [plugins-filters-range-negate]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Negate the range match logic: events must be outside of the specified range to match.
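+
+For example, a minimal sketch (the tag name is illustrative) that tags events whose `message` length falls outside the 0..100 range:
+
+```ruby
+ filter {
+   range {
+     # With negate enabled, the action applies to values OUTSIDE the range
+     negate => true
+     ranges => [ "message", 0, 100, "tag:unusual_length" ]
+   }
+ }
+```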
+
+
+### `ranges` [plugins-filters-range-ranges]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An array of field, min, max, action tuples. Example:
+
+```ruby
+ filter {
+ range {
+ ranges => [ "message", 0, 10, "tag:short",
+ "message", 11, 100, "tag:medium",
+ "message", 101, 1000, "tag:long",
+ "message", 1001, 1e1000, "drop",
+ "duration", 0, 100, "field:latency:fast",
+ "duration", 101, 200, "field:latency:normal",
+ "duration", 201, 1000, "field:latency:slow",
+ "duration", 1001, 1e1000, "field:latency:outlier",
+ "requests", 0, 10, "tag:too_few_%{host}_requests" ]
+ }
+ }
+```
+
+Supported actions are drop event, add tag, or add field with a specified value. Added tag names, field names, and field values can have `%{{dynamic}}` values.
+
+
+
+## Common options [plugins-filters-range-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-range-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-range-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-range-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-range-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-range-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-range-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-range-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-range-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ range {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ range {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-range-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ range {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ range {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-range-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-range-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 range filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ range {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-range-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-range-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ range {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ range {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-range-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ range {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ range {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-ruby.md b/docs/reference/plugins-filters-ruby.md
new file mode 100644
index 000000000..ed9d0e429
--- /dev/null
+++ b/docs/reference/plugins-filters-ruby.md
@@ -0,0 +1,404 @@
+---
+navigation_title: "ruby"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-ruby.html
+---
+
+# Ruby filter plugin [plugins-filters-ruby]
+
+
+* Plugin version: v3.1.8
+* Released on: 2022-01-24
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-ruby-index.md).
+
+## Getting help [_getting_help_158]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [GitHub](https://github.com/logstash-plugins/logstash-filter-ruby). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_157]
+
+Execute ruby code. This filter accepts inline ruby code or a ruby file. The two options are mutually exclusive and have slightly different ways of working, which are described below.
+
+::::{note}
+This plugin’s concurrency-safety depends on your code. Be sure to read up on [how to avoid concurrency issues](#plugins-filters-ruby-concurrency).
+::::
+
+
+### Inline ruby code [plugins-filters-ruby-using-inline-script]
+
+To add inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option. It will be executed only once during the plugin’s register phase.
+
+For example, to cancel 90% of events, you can do this:
+
+```ruby
+ filter {
+ ruby {
+ # Cancel 90% of events
+ code => "event.cancel if rand <= 0.90"
+ }
+ }
+```
+
+If you need to create additional events, you must use the `new_event_block.call(event)` syntax, as in this example, which duplicates the input event:
+
+```ruby
+filter {
+ ruby {
+ code => "new_event_block.call(event.clone)"
+ }
+}
+```
+
+::::{note}
+Defining methods in the [`code` option](#plugins-filters-ruby-code) can significantly reduce throughput. Use the [`init` option](#plugins-filters-ruby-init) instead.
+::::
+
+
+
+### Using a Ruby script file [plugins-filters-ruby-using-script-file]
+
+When the inline code becomes complex and hard to structure inside a text string in `code`, it is preferable to place the Ruby code in a `.rb` file, using the `path` option.
+
+```ruby
+ filter {
+ ruby {
+ # Cancel 90% of events
+ path => "/etc/logstash/drop_percentage.rb"
+ script_params => { "percentage" => 0.9 }
+ }
+ }
+```
+
+The ruby script file should define the following methods:
+
+* `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option
+* `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events
+
+Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events:
+
+```ruby
+# the value of `params` is the value of the hash passed to `script_params`
+# in the logstash configuration
+def register(params)
+ @drop_percentage = params["percentage"]
+end
+
+# the filter method receives an event and must return a list of events.
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+ if rand >= @drop_percentage
+ return [event]
+ else
+ return [] # return empty array to cancel event
+ end
+end
+```
+
+
+### Testing the ruby script [_testing_the_ruby_script]
+
+To validate the behaviour of the `filter` method you implemented, the Ruby filter plugin provides an inline test framework where you can assert expectations. The tests you define will run when the pipeline is created and will prevent it from starting if a test fails.
+
+You can also verify that the tests pass by using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of the `drop_percentage.rb` ruby script:
+
+```ruby
+def register(params)
+ # ..
+end
+
+def filter(event)
+ # ..
+end
+
+test "drop percentage 100%" do
+ parameters do
+ { "percentage" => 1 }
+ end
+
+ in_event { { "message" => "hello" } }
+
+ expect("drops the event") do |events|
+ events.size == 0
+ end
+end
+```
+
+We can now test that the ruby script we’re using is implemented correctly:
+
+```shell
+% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t
+[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}}
+Configuration OK
+[2017-10-13T13:44:29,887][INFO ][logstash.runner ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
+```
+
+
+
+## Avoiding concurrency issues [plugins-filters-ruby-concurrency]
+
+When events are flowing through a pipeline with multiple workers, a single shared instance of this filter may end up processing many events *simultaneously*. This means that your script needs to be written to avoid mutating shared state unless it is done in a thread-safe manner.
+
+In Ruby, the name of a variable determines its scope. The following guidance may help you avoid *accidentally* mutating shared state:
+
+* Freely use Local Variables, whose name begins with a lower-case letter or an underscore (`_`).
+
+ * Local Variables are available only to the individual event being processed, and are automatically cleaned up.
+
+* Exercise caution when *modifying* Instance Variables, whose names begin with `@` followed by a lower-case letter or an underscore (`_`).
+
+ * Instance Variables are shared between *all* worker threads in this pipeline, which may be processing multiple events simultaneously.
+ * It is safe to *set* Instance Variables in a [script](#plugins-filters-ruby-using-script-file)-defined `register` function or with [`init`](#plugins-filters-ruby-init), but they should not be modified while processing events unless safe-guarded by mutual exclusion.
+ * Instance Variables are *not* persisted across pipeline restarts or plugin crashes.
+
+* *Avoid* using variables whose scope is not limited to the plugin instance, as they can cause hard-to-debug problems that span beyond the individual plugin or pipeline:
+
+ * Class Variables: begin with `@@`.
+ * Global Variables: begin with a `$`.
+ * Constants: begin with a capital letter.
+
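+Putting this guidance together, here is a minimal sketch of a script file (used with the [`path`](#plugins-filters-ruby-path) option) that mutates a shared counter only behind a `Mutex`; the `events_seen_so_far` field name is illustrative:
+
+```ruby
+# register runs once when the plugin is set up, so it is a safe place
+# to create shared state
+def register(params)
+  @lock = Mutex.new
+  @count = 0
+end
+
+def filter(event)
+  # the mutation of shared state is safe-guarded by mutual exclusion
+  @lock.synchronize { @count += 1 }
+  event.set("events_seen_so_far", @count)
+  [event]
+end
+```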
+
+
+## Ruby Filter Configuration Options [plugins-filters-ruby-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-ruby-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`code`](#plugins-filters-ruby-code) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`init`](#plugins-filters-ruby-init) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`path`](#plugins-filters-ruby-path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`script_params`](#plugins-filters-ruby-script_params) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`tag_on_exception`](#plugins-filters-ruby-tag_on_exception) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tag_with_exception_message`](#plugins-filters-ruby-tag_with_exception_message) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-filters-ruby-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `code` [plugins-filters-ruby-code]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* This setting cannot be used together with `path`.
+
+The code to execute for every event. You will have an `event` variable available that is the event itself. See the [Event API](/reference/event-api.md) for more information.
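+
+For instance, a minimal sketch (the field names are illustrative) that derives one field from another via the Event API:
+
+```ruby
+ filter {
+   ruby {
+     # Reads `message` and writes its length into `message_length`
+     code => "event.set('message_length', event.get('message').to_s.length)"
+   }
+ }
+```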
+
+
+### `init` [plugins-filters-ruby-init]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Any code to execute at Logstash startup time. It runs only once, during the plugin’s register phase.
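+
+For example, a minimal sketch (field and variable names are illustrative) that prepares read-only state once at startup and reuses it for every event:
+
+```ruby
+ filter {
+   ruby {
+     # Runs once at register time; sets up shared, read-only state
+     init => "@suffix = '_processed'"
+     # Runs per event; the instance variable is only read here, never modified
+     code => "event.set('status', 'event' + @suffix)"
+   }
+ }
+```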
+
+
+### `path` [plugins-filters-ruby-path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* This setting cannot be used together with `code`.
+
+The path of the ruby script file that implements the `filter` method.
+
+
+### `script_params` [plugins-filters-ruby-script_params]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A key/value hash with parameters that are passed to the register method of your ruby script file defined in `path`.
+
+
+### `tag_on_exception` [plugins-filters-ruby-tag_on_exception]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `_rubyexception`
+
+Tag to add to events in case the ruby code (either inline or file based) causes an exception.
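+
+A minimal sketch (field names and tag are illustrative) in which a raised exception routes the event via a custom tag:
+
+```ruby
+ filter {
+   ruby {
+     # Raises if `total` is missing or zero; the event is then tagged
+     code => "event.set('ratio', 100 / event.get('total'))"
+     tag_on_exception => "_ruby_divide_error"
+   }
+ }
+```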
+
+
+### `tag_with_exception_message` [plugins-filters-ruby-tag_with_exception_message]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `true` adds a tag to the event that is the concatenation of `tag_with_exception_message` and the exception message.
+
+
+
+## Common options [plugins-filters-ruby-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-ruby-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-ruby-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-ruby-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-ruby-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-ruby-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-ruby-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-ruby-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-ruby-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ ruby {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ ruby {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-ruby-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ ruby {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ ruby {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-ruby-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-ruby-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 ruby filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ ruby {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-ruby-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-ruby-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ ruby {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ ruby {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-ruby-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ ruby {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ ruby {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-sleep.md b/docs/reference/plugins-filters-sleep.md
new file mode 100644
index 000000000..87a6c998d
--- /dev/null
+++ b/docs/reference/plugins-filters-sleep.md
@@ -0,0 +1,276 @@
+---
+navigation_title: "sleep"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-sleep.html
+---
+
+# Sleep filter plugin [plugins-filters-sleep]
+
+
+* Plugin version: v3.0.7
+* Released on: 2020-09-04
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-sleep-index.md).
+
+## Getting help [_getting_help_159]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [GitHub](https://github.com/logstash-plugins/logstash-filter-sleep). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_158]
+
+Sleep for a given amount of time. This causes Logstash to stall for that amount of time, which is useful for rate limiting and similar cases.
+
+
+## Sleep Filter Configuration Options [plugins-filters-sleep-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-sleep-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`every`](#plugins-filters-sleep-every) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`replay`](#plugins-filters-sleep-replay) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`time`](#plugins-filters-sleep-time) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-sleep-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `every` [plugins-filters-sleep-every]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `1`
+
+Sleep on every N’th event. This option is ignored in replay mode.
+
+Example:
+
+```ruby
+ filter {
+ sleep {
+ time => "1" # Sleep 1 second
+ every => 10 # on every 10th event
+ }
+ }
+```
+
+
+### `replay` [plugins-filters-sleep-replay]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable replay mode.
+
+Replay mode tries to sleep based on timestamps in each event.
+
+The amount of time to sleep is computed by subtracting the previous event’s timestamp from the current event’s timestamp. This helps you replay events in the same timeline as the original.
+
+If you specify a `time` setting as well, this filter will use the `time` value as a speed modifier. For example, a `time` value of 2 will replay at double speed, while a value of 0.25 will replay at 1/4th speed.
+
+For example:
+
+```ruby
+ filter {
+ sleep {
+ time => 2
+ replay => true
+ }
+ }
+```
+
+The above sleeps in such a way that events are replayed at twice the original speed.
+
+
+### `time` [plugins-filters-sleep-time]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The length of time to sleep, in seconds, for every event.
+
+This can be a number (e.g., 0.5) or a string (e.g., `%{{foo}}`). The second form (a string with a field reference) is useful if you have an attribute of your event that indicates the amount of time to sleep, as shown in the second example below.
+
+Example:
+
+```ruby
+ filter {
+ sleep {
+ # Sleep 1 second for every event.
+ time => "1"
+ }
+ }
+```
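+
+And a sketch of the second form, assuming each event carries its own delay in a (hypothetical) `delay_seconds` field:
+
+```ruby
+ filter {
+   sleep {
+     # Sleep for the number of seconds found in the event's delay_seconds field
+     time => "%{delay_seconds}"
+   }
+ }
+```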
+
+
+
+## Common options [plugins-filters-sleep-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-sleep-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-sleep-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-sleep-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-sleep-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-sleep-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-sleep-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-sleep-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-sleep-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ sleep {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ sleep {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-sleep-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ sleep {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ sleep {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-sleep-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-sleep-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 sleep filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ sleep {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-sleep-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-sleep-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ sleep {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ sleep {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-sleep-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ sleep {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ sleep {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-split.md b/docs/reference/plugins-filters-split.md
new file mode 100644
index 000000000..496e66aa4
--- /dev/null
+++ b/docs/reference/plugins-filters-split.md
@@ -0,0 +1,261 @@
+---
+navigation_title: "split"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-split.html
+---
+
+# Split filter plugin [plugins-filters-split]
+
+
+* Plugin version: v3.1.8
+* Released on: 2020-01-21
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-split-index.md).
+
+## Getting help [_getting_help_160]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [GitHub](https://github.com/logstash-plugins/logstash-filter-split). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_159]
+
+The split filter clones an event by splitting one of its fields and placing each value resulting from the split into a clone of the original event. The field being split can either be a string or an array.
+
+An example use case of this filter is taking output from the [exec input plugin](/reference/plugins-inputs-exec.md), which emits one event for the whole output of a command, and splitting that output by newline, making each line an event.
+
+The split filter can also be used to split array fields in events into individual events. A very common pattern in JSON and XML is to use lists to group data together.
+
+For example, a json structure like this:
+
+```js
+{ field1: ...,
+ results: [
+ { result ... },
+ { result ... },
+ { result ... },
+ ...
+] }
+```
+
+The split filter can be used on the above data to create separate events for each value of the `results` field:
+
+```js
+filter {
+ split {
+ field => "results"
+ }
+}
+```
+
+The end result of each split is a complete copy of the event with only the current split section of the given field changed.
+
+
+## Split Filter Configuration Options [plugins-filters-split-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-split-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`field`](#plugins-filters-split-field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-split-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`terminator`](#plugins-filters-split-terminator) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-split-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `field` [plugins-filters-split-field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The field whose value is split by the terminator. It can be a multiline message or the ID of an array. Nested arrays are referenced like `[object_id][array_id]`.
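+
+For instance, a minimal sketch (the field path is illustrative) that splits a nested array so each element becomes its own event:
+
+```ruby
+ filter {
+   split {
+     field => "[response][items]"
+   }
+ }
+```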
+
+
+### `target` [plugins-filters-split-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field within the new event into which the split value is placed. If not set, the target field defaults to the split field name.
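+
+A minimal sketch (field names are illustrative) in which each resulting event carries one item of `results` in a separate `result_item` field:
+
+```ruby
+ filter {
+   split {
+     field => "results"
+     target => "result_item"
+   }
+ }
+```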
+
+
+### `terminator` [plugins-filters-split-terminator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\n"`
+
+The string to split on. This is usually a line terminator, but can be any string. If you are splitting a JSON array into multiple events, you can ignore this field.
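+
+For example, a minimal sketch (the field name is illustrative) that splits a comma-separated field into one event per value:
+
+```ruby
+ filter {
+   split {
+     field => "csv_values"
+     terminator => ","
+   }
+ }
+```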
+
+
+
+## Common options [plugins-filters-split-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-split-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-split-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-split-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-split-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-split-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-split-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-split-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-split-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ split {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ split {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-split-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ split {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ split {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-split-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-split-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 split filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ split {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-split-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-split-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ split {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ split {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-split-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ split {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ split {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-syslog_pri.md b/docs/reference/plugins-filters-syslog_pri.md
new file mode 100644
index 000000000..e744d6bca
--- /dev/null
+++ b/docs/reference/plugins-filters-syslog_pri.md
@@ -0,0 +1,266 @@
+---
+navigation_title: "syslog_pri"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-syslog_pri.html
+---
+
+# Syslog_pri filter plugin [plugins-filters-syslog_pri]
+
+
+* Plugin version: v3.2.1
+* Released on: 2024-01-17
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.2.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-syslog_pri-index.md).
+
+## Getting help [_getting_help_161]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-syslog_pri). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_160]
+
+Filter plugin for logstash to parse the `PRI` field from the front of a Syslog (RFC3164) message. If no priority is set, it will default to 13 (per RFC).
+
+This filter is based on the original `syslog.rb` code shipped with logstash.
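+
+For example, a minimal sketch (the grok pattern and field names are assumptions, not part of this plugin) that extracts the leading `<PRI>` value from a raw RFC3164 line and then parses it:
+
+```ruby
+    filter {
+      grok {
+        match => { "message" => "<%{NONNEGINT:syslog_pri}>%{GREEDYDATA:syslog_message}" }
+      }
+      syslog_pri { }
+    }
+```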
+
+
+## Syslog_pri Filter Configuration Options [plugins-filters-syslog_pri-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-syslog_pri-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-filters-syslog_pri-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`facility_labels`](#plugins-filters-syslog_pri-facility_labels) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`severity_labels`](#plugins-filters-syslog_pri-severity_labels) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`syslog_pri_field_name`](#plugins-filters-syslog_pri-syslog_pri_field_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_labels`](#plugins-filters-syslog_pri-use_labels) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-filters-syslog_pri-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `ecs_compatibility` [plugins-filters-syslog_pri-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (for example, `syslog_severity_code` for syslog severity)
+ * `v1`, `v8`: uses fields that are compatible with Elastic Common Schema (for example, `[log][syslog][severity][code]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`syslog_pri_field_name`](#plugins-filters-syslog_pri-syslog_pri_field_name).
+
+
+### `facility_labels` [plugins-filters-syslog_pri-facility_labels]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
+
+Labels for facility levels. This comes from RFC3164. If an unrecognized facility code is provided and [`use_labels`](#plugins-filters-syslog_pri-use_labels) is `true` then the event is tagged with `_syslogpriparsefailure`.
+
+
+### `severity_labels` [plugins-filters-syslog_pri-severity_labels]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]`
+
+Labels for severity levels. This comes from RFC3164.
+
+
+### `syslog_pri_field_name` [plugins-filters-syslog_pri-syslog_pri_field_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-syslog_pri-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"syslog_pri"`
+ * ECS Compatibility enabled: `"[log][syslog][priority]"`
+
+
+Name of the field that contains the extracted PRI part of the syslog message.
+
+
+### `use_labels` [plugins-filters-syslog_pri-use_labels]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Add human-readable names after parsing the severity and facility from the PRI value.
+
+
+
+## Common options [plugins-filters-syslog_pri-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-syslog_pri-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-syslog_pri-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-syslog_pri-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-syslog_pri-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-syslog_pri-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-syslog_pri-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-syslog_pri-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-syslog_pri-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ syslog_pri {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ syslog_pri {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-syslog_pri-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ syslog_pri {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ syslog_pri {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-syslog_pri-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-syslog_pri-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 syslog_pri filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ syslog_pri {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-syslog_pri-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-syslog_pri-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ syslog_pri {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ syslog_pri {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-syslog_pri-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ syslog_pri {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ syslog_pri {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-threats_classifier.md b/docs/reference/plugins-filters-threats_classifier.md
new file mode 100644
index 000000000..94b0d2d1e
--- /dev/null
+++ b/docs/reference/plugins-filters-threats_classifier.md
@@ -0,0 +1,32 @@
+---
+navigation_title: "threats_classifier"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-threats_classifier.html
+---
+
+# Threats_classifier filter plugin [plugins-filters-threats_classifier]
+
+
+* This plugin was created and is maintained by a partner.
+* [Change log](https://github.com/empow/logstash-filter-empow-classifier/blob/master/CHANGELOG.md)
+
+## Installation [_installation_65]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-threats_classifier`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Description [_description_161]
+
+This plugin uses the cyber-kill-chain and MITRE representation language to enrich security logs with information about the attacker’s intent—what the attacker is trying to achieve, who they are targeting, and how they plan to carry out the attack.
+
+
+## Documentation [_documentation_3]
+
+Documentation for the [filter-threats_classifier plugin](https://github.com/empow/logstash-filter-empow-classifier/blob/master/README.md) is maintained by the creators.
+
+
+## Getting Help [_getting_help_162]
+
+This is a third-party plugin. For bugs or feature requests, open an issue in the [plugins-filters-threats_classifier Github repo](https://github.com/empow/logstash-filter-empow-classifier).
+
+
diff --git a/docs/reference/plugins-filters-throttle.md b/docs/reference/plugins-filters-throttle.md
new file mode 100644
index 000000000..bbb771d27
--- /dev/null
+++ b/docs/reference/plugins-filters-throttle.md
@@ -0,0 +1,396 @@
+---
+navigation_title: "throttle"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-throttle.html
+---
+
+# Throttle filter plugin [plugins-filters-throttle]
+
+
+* Plugin version: v4.0.4
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-throttle-index.md).
+
+## Getting help [_getting_help_163]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-throttle). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_162]
+
+The throttle filter is for throttling the number of events. The filter is configured with a lower bound, the "before_count", and upper bound, the "after_count", and a period of time. All events passing through the filter will be counted based on their key and the event timestamp. As long as the count is less than the "before_count" or greater than the "after_count", the event will be "throttled" which means the filter will be considered successful and any tags or fields will be added (or removed).
+
+The plugin is thread-safe and properly tracks past events.
+
+For example, if you wanted to throttle events so you only receive an event after 2 occurrences and you get no more than 3 in 10 minutes, you would use the configuration:
+
+```ruby
+ period => 600
+ max_age => 1200
+ before_count => 3
+ after_count => 5
+```
+
+Which would result in:
+
+::::{admonition}
+```
+event 1 - throttled (successful filter, period start)
+event 2 - throttled (successful filter)
+event 3 - not throttled
+event 4 - not throttled
+event 5 - not throttled
+event 6 - throttled (successful filter)
+event 7 - throttled (successful filter)
+event x - throttled (successful filter)
+period end
+event 1 - throttled (successful filter, period start)
+event 2 - throttled (successful filter)
+event 3 - not throttled
+event 4 - not throttled
+event 5 - not throttled
+event 6 - throttled (successful filter)
+...
+```
+::::
+
+
+Another example is if you wanted to throttle events so you only receive 1 event per hour, you would use the configuration:
+
+```ruby
+ period => 3600
+ max_age => 7200
+ before_count => -1
+ after_count => 1
+```
+
+Which would result in:
+
+::::{admonition}
+```
+event 1 - not throttled (period start)
+event 2 - throttled (successful filter)
+event 3 - throttled (successful filter)
+event 4 - throttled (successful filter)
+event x - throttled (successful filter)
+period end
+event 1 - not throttled (period start)
+event 2 - throttled (successful filter)
+event 3 - throttled (successful filter)
+event 4 - throttled (successful filter)
+...
+```
+::::
+
+
+A common use case would be to use the throttle filter to throttle events before 3 and after 5 while using multiple fields for the key and then use the drop filter to remove throttled events. This configuration might appear as:
+
+```ruby
+ filter {
+ throttle {
+ before_count => 3
+ after_count => 5
+ period => 3600
+ max_age => 7200
+ key => "%{host}%{message}"
+ add_tag => "throttled"
+ }
+ if "throttled" in [tags] {
+ drop { }
+ }
+ }
+```
+
+Another case would be to store all events, but only email non-throttled events so the op’s inbox isn’t flooded with emails in the event of a system error. This configuration might appear as:
+
+```ruby
+ filter {
+ throttle {
+ before_count => 3
+ after_count => 5
+ period => 3600
+ max_age => 7200
+ key => "%{message}"
+ add_tag => "throttled"
+ }
+ }
+ output {
+ if "throttled" not in [tags] {
+ email {
+ from => "logstash@mycompany.com"
+ subject => "Production System Alert"
+ to => "ops@mycompany.com"
+ via => "sendmail"
+ body => "Alert on %{host} from path %{path}:\n\n%{message}"
+ options => { "location" => "/usr/sbin/sendmail" }
+ }
+ }
+ elasticsearch_http {
+ host => "localhost"
+ port => "19200"
+ }
+ }
+```
+
+When an event is received, the event key is stored in a key_cache. The key references a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on the timestamp of the event. The timeslot counter is incremented. When the next event is received (same key), within the same "period", it is allocated to the same timeslot. The timeslot counter is incremented once again.
+
+The timeslot expires if the maximum age has been exceeded. The age is calculated based on the latest event timestamp and the max_age configuration option.
+
+```
+---[::.. DESIGN ..::]---
+
+ +- [key_cache] -+      +-- [timeslot_cache] --+
+ |               |      | @created: 1439839636 |
+ |               |      | @latest:  1439839836 |
+ | [a.b.c]  =>   |  =>  +----------------------+
+ |               |      | [1439839636] => 1    |
+ |               |      | [1439839736] => 3    |
+ |               |      | [1439839836] => 2    |
+ |               |      +----------------------+
+ |               |
+ |               |      +-- [timeslot_cache] --+
+ |               |      | @created: eeeeeeeeee |
+ |               |      | @latest:  llllllllll |
+ | [x.y.z]  =>   |  =>  +----------------------+
+ |               |      | [0000000060] => x    |
+ |               |      | [0000000120] => y    |
+ |               |      | [..........] => N    |
+ +---------------+      +----------------------+
+```
+
+The `@latest` timestamp of a timeslot cache is only updated if it is greater than the current value.
+
+Thanks to Frank de Jong (@frapex) and Mike Pilone (@mikepilone) for their contributions.
+
+
+## Throttle Filter Configuration Options [plugins-filters-throttle-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-throttle-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`after_count`](#plugins-filters-throttle-after_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`before_count`](#plugins-filters-throttle-before_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`key`](#plugins-filters-throttle-key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`max_age`](#plugins-filters-throttle-max_age) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_counters`](#plugins-filters-throttle-max_counters) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`period`](#plugins-filters-throttle-period) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-throttle-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `after_count` [plugins-filters-throttle-after_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+Events greater than this count will be throttled. Setting this value to -1, the default, will cause no events to be throttled based on the upper bound.
+
+
+### `before_count` [plugins-filters-throttle-before_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+Events less than this count will be throttled. Setting this value to -1, the default, will cause no events to be throttled based on the lower bound.
+
+
+### `key` [plugins-filters-throttle-key]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The key used to identify events. Events with the same key are grouped together. Field substitutions are allowed, so you can combine multiple fields.
+
+
+### `max_age` [plugins-filters-throttle-max_age]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3600`
+
+The maximum age of a timeslot. Higher values allow better tracking of an asynchronous flow of events, but require more memory. As a rule of thumb, set this value to at least twice the period, or to the period plus the maximum time offset between unordered events with the same key. Values below the specified period give unexpected results if unordered events are processed simultaneously.
+
+
+### `max_counters` [plugins-filters-throttle-max_counters]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100000`
+
+The maximum number of counters to store before decreasing the maximum age of a timeslot. Setting this value to -1 removes the upper bound, placing no constraint on the number of counters. This configuration value should only be used as a memory control mechanism and can cause early counter expiration if the value is reached. It is recommended to leave the default value and ensure that your key is selected such that it limits the number of counters required (i.e. don’t use UUID as the key).
+
+
+### `period` [plugins-filters-throttle-period]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"60"`
+
+The period in seconds after the first occurrence of an event until a new timeslot is created. This period is tracked per unique key and per timeslot. Field substitutions are allowed in this value. This allows you to specify that certain kinds of events throttle for a specific period of time.
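+
+As an illustration, a sketch (assuming an upstream stage has set a `throttle_period` field on the event) that uses a per-event period:
+
+```ruby
+    filter {
+      throttle {
+        key          => "%{host}"
+        period       => "%{throttle_period}"
+        before_count => -1
+        after_count  => 1
+        add_tag      => "throttled"
+      }
+    }
+```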
+
+
+
+## Common options [plugins-filters-throttle-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-throttle-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-throttle-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-throttle-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-throttle-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-throttle-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-throttle-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-throttle-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-throttle-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ throttle {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ throttle {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-throttle-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ throttle {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ throttle {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-throttle-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-throttle-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 throttle filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ throttle {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-throttle-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-throttle-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ throttle {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ throttle {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-throttle-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ throttle {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ throttle {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-tld.md b/docs/reference/plugins-filters-tld.md
new file mode 100644
index 000000000..3a058f632
--- /dev/null
+++ b/docs/reference/plugins-filters-tld.md
@@ -0,0 +1,235 @@
+---
+navigation_title: "tld"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-tld.html
+---
+
+# Tld filter plugin [plugins-filters-tld]
+
+
+* Plugin version: v3.1.3
+* Released on: 2023-10-19
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.1.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-tld-index.md).
+
+## Installation [_installation_66]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-filter-tld`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_164]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-tld). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_163]
+
+This filter parses the domain name in the `source` field and places the extracted top-level domain information in the `target` field.
+
+
+## Tld Filter Configuration Options [plugins-filters-tld-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-tld-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`source`](#plugins-filters-tld-source) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-tld-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-tld-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `source` [plugins-filters-tld-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The source field to parse.
+
+
+### `target` [plugins-filters-tld-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"tld"`
+
+The target field in which to place all the parsed data.
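+
+A minimal sketch (the `[url][domain]` source field is an assumption about your data) that parses a domain and writes the result to the default `tld` target:
+
+```ruby
+    filter {
+      tld {
+        source => "[url][domain]"
+      }
+    }
+```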
+
+
+
+## Common options [plugins-filters-tld-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-tld-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-tld-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-tld-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-tld-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-tld-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-tld-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-tld-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-tld-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ tld {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ tld {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-tld-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ tld {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ tld {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-tld-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-tld-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 tld filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ tld {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-tld-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at a regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-tld-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ tld {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ tld {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-tld-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ tld {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ tld {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-translate.md b/docs/reference/plugins-filters-translate.md
new file mode 100644
index 000000000..fdc42c85f
--- /dev/null
+++ b/docs/reference/plugins-filters-translate.md
@@ -0,0 +1,563 @@
+---
+navigation_title: "translate"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-translate.html
+---
+
+# Translate filter plugin [plugins-filters-translate]
+
+
+* Plugin version: v3.4.2
+* Released on: 2023-06-14
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.4.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-translate-index.md).
+
+## Getting help [_getting_help_165]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-translate). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_164]
+
+A general search and replace tool that uses a configured hash and/or a file to determine replacement values. Currently supported are YAML, JSON, and CSV files. Each dictionary item is a key value pair.
+
+You can specify dictionary entries in one of two ways:
+
+* The `dictionary` configuration item can contain a hash representing the mapping.
+* An external file (readable by logstash) may be specified in the `dictionary_path` configuration item.
+
+These two methods may not be used in conjunction; doing so produces an error.
+
+Operationally, for each event, the value from the `source` setting is tested against the dictionary. If it matches exactly (or matches a regex when the `regex` configuration item has been enabled), the matched value is put in the `target` field; when there is no match, the `fallback` setting string is used instead.
+
+Example:
+
+```ruby
+ filter {
+ translate {
+ source => "[http][response][status_code]"
+ target => "[http_status_description]"
+ dictionary => {
+ "100" => "Continue"
+ "101" => "Switching Protocols"
+ "200" => "OK"
+ "500" => "Server Error"
+ }
+ fallback => "I'm a teapot"
+ }
+ }
+```
+
+Occasionally, people find that they have a field with a variable sized array of values or objects that need some enrichment. The `iterate_on` setting helps in these cases.
+
+Alternatively, for simple string search and replacements for just a few values you might consider using the gsub function of the mutate filter.
+
+It is possible to provide multi-valued dictionary values. When using a YAML or JSON dictionary, you can have the value as a hash (map) or an array datatype. When using a CSV dictionary, multiple values in the translation must be extracted with another filter, e.g. Dissect or KV. Note that the `fallback` is a string, so when there is no match, the fallback setting needs to be formatted so that a filter can extract the multiple values into the correct fields.
+
+File-based dictionaries are loaded in a separate thread using a scheduler. If you set a `refresh_interval` of 300 seconds (5 minutes) or less, the modified time of the file is checked before reloading. Very large dictionaries are supported (internally tested at 100,000 key/value pairs), and the impact on throughput is minimised by running the refresh in the scheduler thread. Any ongoing modification of the dictionary file should be done using a copy/edit/rename or create/rename mechanism to keep the refresh code from processing half-written dictionary content.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-filters-translate-ecs_metadata]
+
+The plugin acts as an in-place translator if `source` and `target` are the same and does not produce any new event fields. This is the default behavior in [ECS compatibility mode](#plugins-filters-translate-ecs_compatibility).
+
+
+## Translate Filter Configuration Options [plugins-filters-translate-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-translate-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`destination`](#plugins-filters-translate-destination) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`dictionary`](#plugins-filters-translate-dictionary) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`dictionary_path`](#plugins-filters-translate-dictionary_path) | a valid filesystem path | No |
+| [`ecs_compatibility`](#plugins-filters-translate-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exact`](#plugins-filters-translate-exact) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`fallback`](#plugins-filters-translate-fallback) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field`](#plugins-filters-translate-field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`iterate_on`](#plugins-filters-translate-iterate_on) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`override`](#plugins-filters-translate-override) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`refresh_interval`](#plugins-filters-translate-refresh_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`regex`](#plugins-filters-translate-regex) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`source`](#plugins-filters-translate-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`refresh_behaviour`](#plugins-filters-translate-refresh_behaviour) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-filters-translate-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`yaml_dictionary_code_point_limit`](#plugins-filters-translate-yaml_dictionary_code_point_limit) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-filters-translate-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `destination` [plugins-filters-translate-destination]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Deprecated alias for [`target`](#plugins-filters-translate-target) setting.
+
+::::{admonition} Deprecated in 3.3.0.
+:class: warning
+
+Use [`target`](#plugins-filters-translate-target) instead. In 4.0 this setting will be removed.
+::::
+
+
+
+### `dictionary` [plugins-filters-translate-dictionary]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The dictionary to use for translation, when specified in the logstash filter configuration item (i.e. do not use the `dictionary_path` file).
+
+Example:
+
+```ruby
+ filter {
+ translate {
+ dictionary => {
+ "100" => "Continue"
+ "101" => "Switching Protocols"
+ "merci" => "thank you"
+ "old version" => "new version"
+ }
+ }
+ }
+```
+
+::::{note}
+It is an error to specify both `dictionary` and `dictionary_path`.
+::::
+
+
+
+### `dictionary_path` [plugins-filters-translate-dictionary_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The full path of the external dictionary file. The format of the table should be standard YAML, JSON, or CSV.
+
+Specify any integer-based keys in quotes. The value taken from the event’s `source` setting is converted to a string. The lookup dictionary keys must also be strings, and the quotes make the integer-based keys function as a string. For example, the YAML file should look something like this:
+
+```ruby
+ "100": Continue
+ "101": Switching Protocols
+ merci: gracias
+ old version: new version
+```
+
+::::{note}
+It is an error to specify both `dictionary` and `dictionary_path`.
+::::
+
+
+The currently supported formats are YAML, JSON, and CSV. Format selection is based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and `csv` for CSV. The CSV format expects exactly two columns, with the first serving as the original text (lookup key), and the second column as the translation.
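+
+For example, a sketch that looks up HTTP status codes from an external CSV file (the path and field names are illustrative):
+
+```ruby
+    filter {
+      translate {
+        source          => "[http][response][status_code]"
+        target          => "[http_status_description]"
+        dictionary_path => "/etc/logstash/http_status.csv"
+        fallback        => "unknown"
+      }
+    }
+```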
+
+
+### `ecs_compatibility` [plugins-filters-translate-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: disabled ECS-compatibility
+ * `v1`, `v8`: compatibility with the specified major version of the Elastic Common Schema
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`target`](#plugins-filters-translate-target).
+
+
+### `exact` [plugins-filters-translate-exact]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+When `exact => true`, the translate filter will populate the destination field with the exact contents of the dictionary value. When `exact => false`, the filter will populate the destination field with the result of any existing destination field’s data, with the translated value substituted in-place.
+
+For example, consider this simple translation.yml, configured to check the `data` field:
+
+```ruby
+ foo: bar
+```
+
+If logstash receives an event with the `data` field set to `foo`, and `exact => true`, the destination field will be populated with the string `bar`. If `exact => false`, and logstash receives the same event, the destination field will also be set to `bar`. However, if logstash receives an event with the `data` field set to `foofing`, the destination field will be set to `barfing`.
+
+Set both `exact => true` AND `regex => true` if you would like to match using dictionary keys as regular expressions. A large dictionary could be expensive to match in this case.
+
+
+### `fallback` [plugins-filters-translate-fallback]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+In case no translation occurs in the event (no matches), this setting adds a default translation string that will always populate the target field when the match fails.
+
+For example, if we have configured `fallback => "no match"`, using this dictionary:
+
+```ruby
+ foo: bar
+```
+
+Then, if logstash received an event with the field `foo` set to `bar`, the destination field would be set to `bar`. However, if logstash received an event with `foo` set to `nope`, then the destination field would still be populated, but with the value of `no match`. This configuration can be dynamic and include parts of the event using the `%{{field}}` syntax.
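+
+For instance, a sketch (field names and dictionary path are illustrative) that embeds the unmatched value in the fallback string:
+
+```ruby
+    filter {
+      translate {
+        source          => "[response_code]"
+        target          => "[response_description]"
+        dictionary_path => "/etc/logstash/codes.yml"
+        fallback        => "unknown code %{[response_code]}"
+      }
+    }
+```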
+
+
+### `field` [plugins-filters-translate-field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Deprecated alias for [`source`](#plugins-filters-translate-source) setting.
+
+::::{admonition} Deprecated in 3.3.0.
+:class: warning
+
+Use [`source`](#plugins-filters-translate-source) instead. In 4.0 this setting will be removed.
+::::
+
+
+
+### `iterate_on` [plugins-filters-translate-iterate_on]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+When the value that you need to perform enrichment on is a variable sized array, specify the field name in this setting. This setting introduces two modes:
+
+1. When the value is an array of strings, use the same field name in both `source` and `iterate_on`. The result is an array added to the field specified in the `target` setting. This array has the looked up value (or the `fallback` value, or nil) in the same ordinal position as each sought value.
+2. When the value is an array of objects (as in a JSON object), specify the field that holds the array of objects in `iterate_on`, then specify the field in each object that provides the sought value with `source`, and the field to write the looked up value (or the `fallback` value) to with `target`.
+
+For a dictionary of:
+
+```text
+ 100,Yuki
+ 101,Rupert
+ 102,Ahmed
+ 103,Kwame
+```
+
+Example of Mode 1
+
+```ruby
+ filter {
+ translate {
+ iterate_on => "[collaborator_ids]"
+ source => "[collaborator_ids]"
+ target => "[collaborator_names]"
+ fallback => "Unknown"
+ }
+ }
+```
+
+Before
+
+```json
+ {
+ "collaborator_ids": [100,103,110,102]
+ }
+```
+
+After
+
+```json
+ {
+ "collaborator_ids": [100,103,110,102],
+    "collaborator_names": ["Yuki","Kwame","Unknown","Ahmed"]
+ }
+```
+
+Example of Mode 2
+
+```ruby
+ filter {
+ translate {
+ iterate_on => "[collaborators]"
+ source => "[id]"
+ target => "[name]"
+ fallback => "Unknown"
+ }
+ }
+```
+
+Before
+
+```json
+ {
+ "collaborators": [
+ {
+ "id": 100
+ },
+ {
+ "id": 103
+ },
+ {
+ "id": 110
+ },
+ {
+ "id": 101
+ }
+ ]
+ }
+```
+
+After
+
+```json
+ {
+ "collaborators": [
+ {
+ "id": 100,
+ "name": "Yuki"
+ },
+ {
+ "id": 103,
+ "name": "Kwame"
+ },
+ {
+ "id": 110,
+ "name": "Unknown"
+ },
+ {
+ "id": 101,
+ "name": "Rupert"
+ }
+ ]
+ }
+```
+
+
+### `override` [plugins-filters-translate-override]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value depends on whether in-place translation is being used
+
+If the destination (or target) field already exists, this configuration option controls whether the filter skips translation (default behavior) or overwrites the target field value with the new translation value.
+
+In case of in-place translation, where `target` is the same as `source` (such as when [`ecs_compatibility`](#plugins-filters-translate-ecs_compatibility) is enabled), overwriting is allowed.
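+
+For example, a sketch (with hypothetical field names) that overwrites a pre-existing `[status_description]` field rather than skipping the translation:
+
+```ruby
+    filter {
+      translate {
+        source   => "[status_code]"
+        target   => "[status_description]"
+        override => true
+        dictionary => {
+          "200" => "OK"
+          "404" => "Not Found"
+        }
+      }
+    }
+```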
+
+
+### `refresh_interval` [plugins-filters-translate-refresh_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300`
+
+When using a dictionary file, this setting will indicate how frequently (in seconds) logstash will check the dictionary file for updates. A value of zero or less will disable refresh.
+
+
+### `regex` [plugins-filters-translate-regex]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+To treat dictionary keys as regular expressions, set `regex => true`.
+
+Be sure to escape dictionary key strings for use with regex. Resources on regex formatting are available online.
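+
+For example, a sketch (the user-agent field name and patterns are illustrative) that classifies events by matching dictionary keys as regular expressions:
+
+```ruby
+    filter {
+      translate {
+        source => "[user_agent]"
+        target => "[browser]"
+        exact  => true
+        regex  => true
+        dictionary => {
+          ".*Firefox.*" => "Firefox"
+          ".*Chrome.*"  => "Chrome"
+        }
+        fallback => "Other"
+      }
+    }
+```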
+
+
+### `refresh_behaviour` [plugins-filters-translate-refresh_behaviour]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `merge`
+
+When using a dictionary file, this setting indicates how the update will be executed. Setting this to `merge` causes the new dictionary to be merged into the old one. This means the same entry will be updated, but entries that existed in the old dictionary and are not in the new one will remain after the merge; `replace` causes the whole dictionary to be replaced with the new one (deleting all entries of the old one on update).
+
+
+### `source` [plugins-filters-translate-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for a match by the translate filter (e.g. `message`, `host`, `response_code`).
+
+If this field is an array, only the first value will be used.
+
+
+### `target` [plugins-filters-translate-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-translate-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"translation"`
+ * ECS Compatibility enabled: defaults to the same value as `source`
+
+
+The target field you wish to populate with the translated code. If you set this value to the same value as the `source` field, the plugin does a substitution, and the filter will succeed. This will clobber the old value of the source field!
+
+
+### `yaml_dictionary_code_point_limit` [plugins-filters-translate-yaml_dictionary_code_point_limit]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is 134217728 (128MB for 1 byte code points)
+
+The maximum number of code points in the YAML file referenced by `dictionary_path`. Be aware that the byte limit depends on the encoding. This setting applies to YAML files only; a YAML file over the limit throws an exception.
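+
+For example, a sketch (the path is illustrative) that raises the limit for an unusually large YAML dictionary:
+
+```ruby
+    filter {
+      translate {
+        source          => "[code]"
+        target          => "[code_description]"
+        dictionary_path => "/etc/logstash/huge_dictionary.yml"
+        yaml_dictionary_code_point_limit => 268435456
+      }
+    }
+```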
+
+
+
+## Common options [plugins-filters-translate-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-translate-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-translate-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-translate-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-translate-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-translate-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-translate-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-translate-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-translate-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ translate {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ translate {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-translate-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ translate {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ translate {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-translate-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-translate-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 translate filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ translate {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-translate-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-translate-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ translate {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ translate {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-translate-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ translate {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ translate {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-truncate.md b/docs/reference/plugins-filters-truncate.md
new file mode 100644
index 000000000..357cbd7f2
--- /dev/null
+++ b/docs/reference/plugins-filters-truncate.md
@@ -0,0 +1,239 @@
+---
+navigation_title: "truncate"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-truncate.html
+---
+
+# Truncate filter plugin [plugins-filters-truncate]
+
+
+* Plugin version: v1.0.6
+* Released on: 2023-05-10
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-truncate-index.md).
+
+## Getting help [_getting_help_166]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-truncate). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_165]
+
+Allows you to truncate fields longer than a given length.
+
+This truncates on byte values, not character count. In practice, this should mean that the truncated length is somewhere between `length_bytes` and `length_bytes - 6` (UTF-8 supports up to 6-byte characters).
+
+
+## Truncate Filter Configuration Options [plugins-filters-truncate-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-truncate-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`fields`](#plugins-filters-truncate-fields) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`length_bytes`](#plugins-filters-truncate-length_bytes) | [number](/reference/configuration-file-structure.md#number) | Yes |
+
+Also see [Common options](#plugins-filters-truncate-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `fields` [plugins-filters-truncate-fields]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A list of fieldrefs to truncate if they are too long.
+
+If not specified, the default behavior will be to attempt truncation on all strings in the event. This default behavior could be computationally expensive, so if you know exactly which fields you wish to truncate, it is advised that you be specific and configure the fields you want truncated.
+
+Special behaviors for non-string fields:
+
+* Numbers: No action
+* Array: this plugin will attempt truncation on all elements of that array.
+* Hash: truncate will try all values of the hash (recursively, if this hash contains other hashes).
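+
+For example, this minimal sketch (the field names and byte limit are hypothetical) limits truncation to two specific fields rather than attempting it on every string in the event:
+
+```ruby
+    filter {
+      truncate {
+        fields       => [ "message", "error_detail" ]
+        length_bytes => 1024
+      }
+    }
+```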
+
+
+### `length_bytes` [plugins-filters-truncate-length_bytes]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Fields over this length will be truncated to this length.
+
+Truncation happens from the end of the text (the start will be kept).
+
+As an example, if you set `length_bytes => 10` and a field contains "hello world, how are you?", then this field will be truncated and have this value: "hello worl"
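+
+Expressed as configuration, the example above looks like this sketch (the field name is illustrative):
+
+```ruby
+    filter {
+      truncate {
+        fields       => [ "greeting" ]   # contains "hello world, how are you?"
+        length_bytes => 10               # the field becomes "hello worl"
+      }
+    }
+```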
+
+
+
+## Common options [plugins-filters-truncate-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-truncate-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-truncate-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-truncate-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-truncate-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-truncate-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-truncate-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-truncate-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-truncate-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ truncate {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ truncate {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-truncate-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ truncate {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ truncate {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-truncate-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-truncate-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 truncate filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ truncate {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-truncate-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-truncate-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ truncate {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ truncate {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-truncate-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ truncate {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ truncate {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-urldecode.md b/docs/reference/plugins-filters-urldecode.md
new file mode 100644
index 000000000..65c67ce56
--- /dev/null
+++ b/docs/reference/plugins-filters-urldecode.md
@@ -0,0 +1,244 @@
+---
+navigation_title: "urldecode"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-urldecode.html
+---
+
+# Urldecode filter plugin [plugins-filters-urldecode]
+
+
+* Plugin version: v3.0.6
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-urldecode-index.md).
+
+## Getting help [_getting_help_167]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-urldecode). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_166]
+
+The urldecode filter is for decoding fields that are urlencoded.
+
+
+## Urldecode Filter Configuration Options [plugins-filters-urldecode-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-urldecode-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`all_fields`](#plugins-filters-urldecode-all_fields) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`charset`](#plugins-filters-urldecode-charset) | [string](/reference/configuration-file-structure.md#string), one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]` | No |
+| [`field`](#plugins-filters-urldecode-field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tag_on_failure`](#plugins-filters-urldecode-tag_on_failure) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-filters-urldecode-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `all_fields` [plugins-filters-urldecode-all_fields]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Urldecode all fields
+
+
+### `charset` [plugins-filters-urldecode-charset]
+
+* Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+* Default value is `"UTF-8"`
+
+The character encoding used in this filter. Examples include `UTF-8` and `cp1252`.
+
+This setting is useful if your URL-decoded strings are in `Latin-1` (aka `cp1252`) or in another character set other than `UTF-8`.
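+
+For example, this sketch (the field name is illustrative) decodes a field whose percent-encoded bytes are Latin-1 rather than UTF-8:
+
+```ruby
+    filter {
+      urldecode {
+        field   => "original_url"
+        charset => "ISO-8859-1"
+      }
+    }
+```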
+
+
+### `field` [plugins-filters-urldecode-field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The field whose value is URL-decoded
+
+
+### `tag_on_failure` [plugins-filters-urldecode-tag_on_failure]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_urldecodefailure"]`
+
+Append values to the `tags` field when an exception is thrown
+
+
+
+## Common options [plugins-filters-urldecode-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-urldecode-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-urldecode-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-urldecode-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-urldecode-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-urldecode-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-urldecode-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-urldecode-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-urldecode-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ urldecode {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ urldecode {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-urldecode-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ urldecode {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ urldecode {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-urldecode-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-urldecode-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 urldecode filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ urldecode {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-urldecode-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-urldecode-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ urldecode {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ urldecode {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-urldecode-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ urldecode {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ urldecode {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-useragent.md b/docs/reference/plugins-filters-useragent.md
new file mode 100644
index 000000000..7da6ced7b
--- /dev/null
+++ b/docs/reference/plugins-filters-useragent.md
@@ -0,0 +1,354 @@
+---
+navigation_title: "useragent"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-useragent.html
+---
+
+# Useragent filter plugin [plugins-filters-useragent]
+
+
+* Plugin version: v3.3.5
+* Released on: 2023-09-19
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.3.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-useragent-index.md).
+
+## Getting help [_getting_help_168]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-useragent). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_167]
+
+Parses user agent strings into structured data based on BrowserScope data.
+
+The useragent filter adds information about the user agent, such as its name, version, operating system, and device.
+
+The plugin ships with the **regexes.yaml** database made available from ua-parser with an Apache 2.0 license. For more details on ua-parser, see [https://github.com/ua-parser/uap-core/](https://github.com/ua-parser/uap-core/).
+
+
+## Compatibility with the Elastic Common Schema (ECS) [_compatibility_with_the_elastic_common_schema_ecs_2]
+
+This plugin can be used to parse user-agent (UA) *into* fields compliant with the Elastic Common Schema. Here’s how [ECS compatibility mode](#plugins-filters-useragent-ecs_compatibility) affects output.
+
+| ECS disabled | ECS v1, v8 | Description | Notes |
+| --- | --- | --- | --- |
+| [name] | [user_agent][name] | *Detected UA name* | |
+| [version]* | [user_agent][version] | *Detected UA version* | *Only available in ECS mode* |
+| [major] | [@metadata][filter][user_agent][version][major] | *UA major version* | *Only as meta-data in ECS mode* |
+| [minor] | [@metadata][filter][user_agent][version][minor] | *UA minor version* | *Only as meta-data in ECS mode* |
+| [patch] | [@metadata][filter][user_agent][version][patch] | *UA patch version* | *Only as meta-data in ECS mode* |
+| [os_name] | [user_agent][os][name] | *Detected operating-system name* | |
+| [os_version]* | [user_agent][os][version] | *Detected OS version* | *Only available in ECS mode* |
+| [os_major] | [@metadata][filter][user_agent][os][version][major] | *OS major version* | *Only as meta-data in ECS mode* |
+| [os_minor] | [@metadata][filter][user_agent][os][version][minor] | *OS minor version* | *Only as meta-data in ECS mode* |
+| [os_patch] | [@metadata][filter][user_agent][os][version][patch] | *OS patch version* | *Only as meta-data in ECS mode* |
+| [os_full] | [user_agent][os][full] | *Full operating-system name* | |
+| [device] | [user_agent][device][name] | *Device name* | |
+
+::::{note}
+`[version]` and `[os_version]` fields were added in Logstash **7.14** and are not available by default in earlier versions.
+::::
+
+
+Example:
+
+```ruby
+ filter {
+ useragent {
+ source => 'message'
+ }
+ }
+```
+
+Given an event with the `message` field set as: `Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0` produces the following fields:
+
+```ruby
+ {
+ "name"=>"Firefox",
+ "version"=>"45.0", # since plugin version 3.3.0
+ "major"=>"45",
+ "minor"=>"0",
+ "os_name"=>"Mac OS X",
+ "os_version"=>"10.11", # since plugin version 3.3.0
+ "os_full"=>"Mac OS X 10.11",
+ "os_major"=>"10",
+ "os_minor"=>"11",
+ "device"=>"Mac"
+ }
+```
+
+**and with ECS enabled:**
+
+```ruby
+ {
+ "user_agent"=>{
+ "name"=>"Firefox",
+ "version"=>"45.0",
+ "os"=>{
+ "name"=>"Mac OS X",
+ "version"=>"10.11",
+ "full"=>"Mac OS X 10.11"
+ },
+ "device"=>{"name"=>"Mac"},
+ }
+ }
+```
+
+
+## Useragent Filter Configuration Options [plugins-filters-useragent-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-useragent-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-filters-useragent-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`lru_cache_size`](#plugins-filters-useragent-lru_cache_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`prefix`](#plugins-filters-useragent-prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`regexes`](#plugins-filters-useragent-regexes) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`source`](#plugins-filters-useragent-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`target`](#plugins-filters-useragent-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-filters-useragent-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `ecs_compatibility` [plugins-filters-useragent-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (fields might be set at the root of the event)
+ * `v1`, `v8`: uses fields that are compatible with Elastic Common Schema (for example, `[user_agent][version]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`target`](#plugins-filters-useragent-target).
+
+
+### `lru_cache_size` [plugins-filters-useragent-lru_cache_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100000`
+
+UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that user agents are often found adjacent to one another in log files and rarely have a random distribution. The higher you set this the more likely an item is to be in the cache and the faster this filter will run. However, if you set this too high you can use more memory than desired.
+
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal and the speed gains are large.
+
+It is important to note that this config value is global; all instances of the user agent filter share the same cache. The last declared cache size *wins*. The reason is that multiple caches for different instances at different points in the pipeline would offer no benefit; it would only increase the number of cache misses and waste memory.
+
+
+### `prefix` [plugins-filters-useragent-prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+A string to prepend to all of the extracted keys
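+
+As a sketch, assuming ECS compatibility is disabled (the prefix string and source field are arbitrary), this prepends `ua_` to each extracted key, producing fields such as `ua_name` and `ua_os_name`:
+
+```ruby
+    filter {
+      useragent {
+        source => "message"
+        prefix => "ua_"
+      }
+    }
+```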
+
+
+### `regexes` [plugins-filters-useragent-regexes]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If not specified, this defaults to the `regexes.yaml` file that ships with Logstash. Otherwise, the `regexes.yaml` file at the path you provide is used.
+
+You can find the latest version of this here: [https://github.com/ua-parser/uap-core/blob/master/regexes.yaml](https://github.com/ua-parser/uap-core/blob/master/regexes.yaml)
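+
+For example, to point the filter at your own copy of the patterns file (the path below is hypothetical):
+
+```ruby
+    filter {
+      useragent {
+        source  => "message"
+        regexes => "/etc/logstash/uap/regexes.yaml"
+      }
+    }
+```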
+
+
+### `source` [plugins-filters-useragent-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field containing the user agent string. If this field is an array, only the first value will be used.
+
+
+### `target` [plugins-filters-useragent-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-filters-useragent-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: no default value for this setting
+ * ECS Compatibility enabled: `"user_agent"`
+
+
+The name of the field to assign user agent data into.
+
+If not specified, user agent data will be stored in the root of the event.
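+
+For example, this sketch (the field names are illustrative) nests the parsed data under a `[ua]` field instead of the event root:
+
+```ruby
+    filter {
+      useragent {
+        source => "message"
+        target => "ua"
+      }
+    }
+```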
+
+
+
+## Common options [plugins-filters-useragent-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-useragent-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-useragent-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-useragent-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-useragent-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-useragent-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-useragent-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-useragent-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-useragent-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ useragent {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ useragent {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-useragent-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ useragent {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ useragent {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-useragent-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-useragent-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 useragent filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ useragent {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-useragent-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-useragent-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ useragent {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ useragent {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-useragent-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ useragent {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ useragent {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
diff --git a/docs/reference/plugins-filters-uuid.md b/docs/reference/plugins-filters-uuid.md
new file mode 100644
index 000000000..1bddd94aa
--- /dev/null
+++ b/docs/reference/plugins-filters-uuid.md
@@ -0,0 +1,250 @@
+---
+navigation_title: "uuid"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-uuid.html
+---
+
+# Uuid filter plugin [plugins-filters-uuid]
+
+
+* Plugin version: v3.0.5
+* Released on: 2017-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-uuid-index.md).
+
+## Getting help [_getting_help_169]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-uuid). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_168]
+
+The uuid filter allows you to generate a [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier) and add it as a field to each processed event.
+
+This is useful if you need to generate a string that’s unique for every event, even if the same input is processed multiple times. If you want to generate strings that are identical each time an event with a given content is processed (i.e. a hash), you should use the [fingerprint filter](/reference/plugins-filters-fingerprint.md) instead.
+
+The generated UUIDs follow the version 4 definition in [RFC 4122](https://tools.ietf.org/html/rfc4122) and are represented in the standard hexadecimal string format, e.g. "e08806fe-02af-406c-bbde-8a5ae4475e57".
+
+
+## Uuid Filter Configuration Options [plugins-filters-uuid-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-uuid-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`overwrite`](#plugins-filters-uuid-overwrite) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-filters-uuid-target) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-filters-uuid-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `overwrite` [plugins-filters-uuid-overwrite]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Determines whether the current value of the target field (if any) should be overridden by the generated UUID. Defaults to `false` (i.e. if the field is present, with ANY value, it won’t be overridden).
+
+Example:
+
+```ruby
+ filter {
+ uuid {
+ target => "uuid"
+ overwrite => true
+ }
+ }
+```
+
+
+### `target` [plugins-filters-uuid-target]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Select the name of the field where the generated UUID should be stored.
+
+Example:
+
+```ruby
+ filter {
+ uuid {
+ target => "uuid"
+ }
+ }
+```
+
+
+
+## Common options [plugins-filters-uuid-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-uuid-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-uuid-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-uuid-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-uuid-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-uuid-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-uuid-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-uuid-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-uuid-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ uuid {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ uuid {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add field `foo_hello` if it is present, with the value above and the `%{{host}}` piece replaced with that value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-uuid-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ uuid {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ uuid {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-uuid-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-uuid-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 uuid filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ uuid {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-uuid-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-uuid-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ uuid {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ uuid {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-uuid-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ uuid {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ uuid {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-filters-wurfl_device_detection.md b/docs/reference/plugins-filters-wurfl_device_detection.md
new file mode 100644
index 000000000..64918392d
--- /dev/null
+++ b/docs/reference/plugins-filters-wurfl_device_detection.md
@@ -0,0 +1,35 @@
+---
+navigation_title: "wurfl_device_detection"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-wurfl_device_detection.html
+---
+
+# Wurfl_device_detection filter plugin [plugins-filters-wurfl_device_detection]
+
+
+* This plugin was created and is maintained by a contributor.
+* [Change log](https://github.com/WURFL/logstash-filter-wurfl_device_detection/blob/master/CHANGELOG.md)
+
+## Installation [_installation_67]
+
+You can easily install plugins that are not bundled by default. Run `bin/logstash-plugin install logstash-filter-wurfl_device_detection`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Description [_description_169]
+
+This plugin uses [WURFL Microservice](https://www.scientiamobile.com/products/wurfl-microservice/) by ScientiaMobile to enrich logs with device information, such as brand and model name, OS name and version, and so forth. You can integrate WURFL Device Detection capabilities into applications for use-cases such as mobile optimization, targeted advertising, Event Streams analysis, and device analytics.
+
+
+## Documentation [_documentation_4]
+
+Documentation for the filter-wurfl_device_detection plugin is maintained by the creators:
+
+* [README.md](https://github.com/WURFL/logstash-filter-wurfl_device_detection/blob/master/README.md)
+* [plugin documentation](https://github.com/WURFL/logstash-filter-wurfl_device_detection/blob/master/docs/index.asciidoc)
+
+
+## Getting Help [_getting_help_170]
+
+This is a third-party plugin. For bugs or feature requests, open an issue in the [plugins-filters-wurfl_device_detection Github repo](https://github.com/WURFL/logstash-filter-wurfl_device_detection).
+
+
diff --git a/docs/reference/plugins-filters-xml.md b/docs/reference/plugins-filters-xml.md
new file mode 100644
index 000000000..55e438253
--- /dev/null
+++ b/docs/reference/plugins-filters-xml.md
@@ -0,0 +1,362 @@
+---
+navigation_title: "xml"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-filters-xml.html
+---
+
+# Xml filter plugin [plugins-filters-xml]
+
+
+* Plugin version: v4.2.1
+* Released on: 2024-10-29
+* [Changelog](https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.2.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/filter-xml-index.md).
+
+## Getting help [_getting_help_171]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-filter-xml). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_170]
+
+The XML filter takes a field that contains XML and expands it into an actual data structure.
+
+
+## Xml Filter Configuration Options [plugins-filters-xml-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-filters-xml-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`force_array`](#plugins-filters-xml-force_array) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`force_content`](#plugins-filters-xml-force_content) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`namespaces`](#plugins-filters-xml-namespaces) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`parse_options`](#plugins-filters-xml-parse_options) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`remove_namespaces`](#plugins-filters-xml-remove_namespaces) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`source`](#plugins-filters-xml-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`store_xml`](#plugins-filters-xml-store_xml) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`suppress_empty`](#plugins-filters-xml-suppress_empty) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-filters-xml-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`xpath`](#plugins-filters-xml-xpath) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-filters-xml-common-options) for a list of options supported by all filter plugins.
+
+
+
+### `force_array` [plugins-filters-xml-force_array]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+By default the filter will force single elements to be arrays. Setting this to false will prevent storing single elements in arrays.
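+
+For example, with the sketch below (the source and target fields are illustrative), a document such as `<doc><name>foo</name></doc>` parses to `"name" => "foo"` rather than `"name" => ["foo"]`:
+
+```ruby
+    filter {
+      xml {
+        source      => "message"
+        target      => "parsed"
+        force_array => false   # single elements stay scalar instead of being wrapped in arrays
+      }
+    }
+```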
+
+
+### `force_content` [plugins-filters-xml-force_content]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+By default the filter will expand attributes differently from content inside of tags. This option allows you to force text content and attributes to always parse to a hash value.
+
+
+### `namespaces` [plugins-filters-xml-namespaces]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+By default, only namespace declarations on the root element are considered. This setting allows you to configure all namespace declarations needed to parse the XML document.
+
+Example:
+
+```ruby
+filter {
+ xml {
+ namespaces => {
+ "xsl" => "http://www.w3.org/1999/XSL/Transform"
+ "xhtml" => "http://www.w3.org/1999/xhtml"
+ }
+ }
+}
+```
+
+
+### `parse_options` [plugins-filters-xml-parse_options]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Setting XML parse options allows for more control of the parsing process. By default the parser is not strict and thus accepts some invalid content. Currently supported options are:
+
+* `strict` - forces the parser to fail early instead of accumulating errors when content is not valid XML.
+
+Control characters such as ASCII 0x0 are not allowed and *always* result in invalid XML.
+
+When XML content is not valid, it will be tagged as `_xmlparsefailure`.
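+
+For example, this sketch (the source and target fields are illustrative) enables strict parsing, so malformed documents are rejected and tagged with `_xmlparsefailure`:
+
+```ruby
+    filter {
+      xml {
+        source        => "message"
+        target        => "parsed"
+        parse_options => "strict"
+      }
+    }
+```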
+
+XML specs:
+
+* XML 1.0 Spec: [https://www.w3.org/TR/2008/REC-xml-20081126/#charsets](https://www.w3.org/TR/2008/REC-xml-20081126/#charsets)
+* XML 1.1 Spec: [https://www.w3.org/TR/xml11/#charsets](https://www.w3.org/TR/xml11/#charsets)
+
+
+### `remove_namespaces` [plugins-filters-xml-remove_namespaces]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Remove all namespaces from all nodes in the document. Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous.
+
+
+### `source` [plugins-filters-xml-source]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The basic configuration for parsing XML into a hash is:
+
+```ruby
+ source => source_field
+```
+
+For example, if you have the whole XML document in your `message` field:
+
+```ruby
+ filter {
+ xml {
+ source => "message"
+ }
+ }
+```
+
+The above would parse the XML from the `message` field.
+
+
+### `store_xml` [plugins-filters-xml-store_xml]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+By default the filter will store the whole parsed XML in the destination field as described above. Setting this to false will prevent that.
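+
+A common pattern is to disable `store_xml` and extract only specific values with `xpath`. For example (the field name and XPath expression are illustrative):
+
+```ruby
+filter {
+  xml {
+    source => "message"
+    store_xml => false
+    xpath => [ "/doc/title/text()", "title" ]
+  }
+}
+```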
+
+
+### `suppress_empty` [plugins-filters-xml-suppress_empty]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+By default, nothing is output if the element is empty. If set to `false`, an empty element results in an empty hash object.
+
+
+### `target` [plugins-filters-xml-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Defines the target field for placing the parsed data.
+
+For example if you want the data to be put in the `doc` field:
+
+```ruby
+ filter {
+ xml {
+ target => "doc"
+ }
+ }
+```
+
+XML in the value of the source field will be expanded into a data structure in the `target` field. Note: if the `target` field already exists, it will be overridden. Required if `store_xml` is true (which is the default).
+
+
+### `xpath` [plugins-filters-xml-xpath]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The `xpath` option additionally selects string values from the parsed XML (non-string results are converted to strings with Ruby’s `to_s` function), using the source field defined above, and places those values in the destination fields. Configuration:
+
+```ruby
+xpath => [ "xpath-syntax", "destination-field" ]
+```
+
+Values returned by XPath parsing from `xpath-syntax` will be put in the destination field. Multiple values returned will be pushed onto the destination field as an array. As such, multiple matches across multiple source fields will produce duplicate entries in the field.
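+
+For example, a sketch like the following (field names and XPath expressions are illustrative) extracts two values from the parsed XML into separate fields; note that each destination field receives an array of matches:
+
+```ruby
+filter {
+  xml {
+    source => "message"
+    store_xml => false
+    xpath => [
+      "/book/author/text()", "author",
+      "/book/year/text()", "year"
+    ]
+  }
+}
+```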
+
+#### Additional XPath resources [plugins-filters-xml-xpath_resources]
+
+For more information on XPath, see [https://www.w3schools.com/xml/xml_xpath.asp](https://www.w3schools.com/xml/xml_xpath.asp).
+
+The [XPath functions](https://www.w3schools.com/xml/xsl_functions.asp) are particularly powerful.
+
+
+
+
+## Common options [plugins-filters-xml-common-options]
+
+These configuration options are supported by all filter plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-filters-xml-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`add_tag`](#plugins-filters-xml-add_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`enable_metric`](#plugins-filters-xml-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-filters-xml-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`periodic_flush`](#plugins-filters-xml-periodic_flush) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`remove_field`](#plugins-filters-xml-remove_field) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`remove_tag`](#plugins-filters-xml-remove_tag) | [array](/reference/configuration-file-structure.md#array) | No |
+
+### `add_field` [plugins-filters-xml-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+If this filter is successful, add any arbitrary fields to this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ xml {
+ add_field => { "foo_%{somefield}" => "Hello world, from %{host}" }
+ }
+ }
+```
+
+```json
+ # You can also add multiple fields at once:
+ filter {
+ xml {
+ add_field => {
+ "foo_%{somefield}" => "Hello world, from %{host}"
+ "new_field" => "new_static_value"
+ }
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add the field `foo_hello`, with the value above and the `%{{host}}` piece replaced with the corresponding value from the event. The second example would also add a hardcoded field.
+
+
+### `add_tag` [plugins-filters-xml-add_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, add arbitrary tags to the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ xml {
+ add_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also add multiple tags at once:
+ filter {
+ xml {
+ add_tag => [ "foo_%{somefield}", "taggedy_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+
+
+### `enable_metric` [plugins-filters-xml-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-filters-xml-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 xml filters. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+ filter {
+ xml {
+ id => "ABC"
+ }
+ }
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `periodic_flush` [plugins-filters-xml-periodic_flush]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Call the filter flush method at regular interval. Optional.
+
+
+### `remove_field` [plugins-filters-xml-remove_field]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary fields from this event. Field names can be dynamic and include parts of the event using the `%{{field}}` syntax. Example:
+
+```json
+ filter {
+ xml {
+ remove_field => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple fields at once:
+ filter {
+ xml {
+ remove_field => [ "foo_%{somefield}", "my_extraneous_field" ]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the field with name `foo_hello` if it is present. The second example would remove an additional, non-dynamic field.
+
+
+### `remove_tag` [plugins-filters-xml-remove_tag]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+If this filter is successful, remove arbitrary tags from the event. Tags can be dynamic and include parts of the event using the `%{{field}}` syntax.
+
+Example:
+
+```json
+ filter {
+ xml {
+ remove_tag => [ "foo_%{somefield}" ]
+ }
+ }
+```
+
+```json
+ # You can also remove multiple tags at once:
+ filter {
+ xml {
+ remove_tag => [ "foo_%{somefield}", "sad_unwanted_tag"]
+ }
+ }
+```
+
+If the event has field `"somefield" == "hello"` this filter, on success, would remove the tag `foo_hello` if it is present. The second example would remove a sad, unwanted tag as well.
+
+
+
diff --git a/docs/reference/plugins-inputs-azure_event_hubs.md b/docs/reference/plugins-inputs-azure_event_hubs.md
new file mode 100644
index 000000000..c80c77ff5
--- /dev/null
+++ b/docs/reference/plugins-inputs-azure_event_hubs.md
@@ -0,0 +1,538 @@
+---
+navigation_title: "azure_event_hubs"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-azure_event_hubs.html
+---
+
+# Azure Event Hubs plugin [plugins-inputs-azure_event_hubs]
+
+
+* Plugin version: v1.5.1
+* Released on: 2025-01-03
+* [Changelog](https://github.com/logstash-plugins/logstash-input-azure_event_hubs/blob/v1.5.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-azure_event_hubs-index.md).
+
+## Getting help [_getting_help_8]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-azure_event_hubs). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_9]
+
+This plugin consumes events from [Azure Event Hubs](https://azure.microsoft.com/en-us/services/event-hubs), a highly scalable data streaming platform and event ingestion service. Event producers send events to the Azure Event Hub, and this plugin consumes those events for use with Logstash.
+
+Many Azure services integrate with the Azure Event Hubs. [Azure Monitor](https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-overview-azure-monitor), for example, integrates with Azure Event Hubs to provide infrastructure metrics.
+
+::::{important}
+This plugin requires outbound connections to ports `tcp/443`, `tcp/9093`, `tcp/5671`, and `tcp/5672`, as noted in the [Microsoft Event Hub documentation](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-faq#what-ports-do-i-need-to-open-on-the-firewall).
+::::
+
+
+### Event Hub connection string [_event_hub_connection_string]
+
+The plugin uses the connection string to access Azure Event Hubs. Find the connection string here: [Azure Portal](https://portal.azure.com)`-> Event Hub -> Shared access policies`. The `event_hub_connections` option passes the Event Hub connection strings for the basic configuration.
+
+Sample connection string:
+
+```text
+Endpoint=sb://logstash.servicebus.windows.net/;SharedAccessKeyName=activity-log-read-only;SharedAccessKey=mm6AbDcEfj8lk7sjsbzoTJ10qAkiSaG663YykEAG2eg=;EntityPath=insights-operational-logs
+```
+
+
+### Blob Storage and connection string [_blob_storage_and_connection_string]
+
+[Azure Blob Storage account](https://azure.microsoft.com/en-us/services/storage/blobs) is an essential part of Azure-to-Logstash configuration. A Blob Storage account is a central location that enables multiple instances of Logstash to work together to process events. It records the offset (location) of processed events. On restart, Logstash resumes processing exactly where it left off.
+
+Configuration notes:
+
+* A Blob Storage account is highly recommended for use with this plugin, and is likely required for production servers.
+* The `storage_connection` option passes the blob storage connection string.
+* Configure all Logstash instances to use the same `storage_connection` to get the benefits of shared processing.
+
+Sample Blob Storage connection string:
+
+```text
+DefaultEndpointsProtocol=https;AccountName=logstash;AccountKey=ETOPnkd/hDAWidkEpPZDiXffQPku/SZdXhPSLnfqdRTalssdEuPkZwIcouzXjCLb/xPZjzhmHfwRCGo0SBSw==;EndpointSuffix=core.windows.net
+```
+
+Find the connection string to Blob Storage here: [Azure Portal](https://portal.azure.com)`-> Blob Storage account -> Access keys`.
+
+
+### Best practices [plugins-inputs-azure_event_hubs-best-practices]
+
+Here are some guidelines to help you avoid data conflicts that can cause lost events.
+
+* [Create a Logstash consumer group](#plugins-inputs-azure_event_hubs-bp-group)
+* [Avoid overwriting offset with multiple Event Hubs](#plugins-inputs-azure_event_hubs-bp-multihub)
+* [Set number of threads correctly](#plugins-inputs-azure_event_hubs-bp-threads)
+
+#### Create a Logstash consumer group [plugins-inputs-azure_event_hubs-bp-group]
+
+Create a new consumer group specifically for Logstash. Do not use `$Default` or any other consumer group that might already be in use. Reusing consumer groups among non-related consumers can cause unexpected behavior and possibly lost events. All Logstash instances should use the same consumer group so that they can work together for processing events.
+
+
+#### Avoid overwriting offset with multiple Event Hubs [plugins-inputs-azure_event_hubs-bp-multihub]
+
+The offsets (position) of the Event Hubs are stored in the configured Azure Blob store. The Azure Blob store uses paths like a file system to store the offsets. If the paths between multiple Event Hubs overlap, then the offsets may be stored incorrectly.
+
+To avoid duplicate file paths, use the advanced configuration model and make sure that at least one of these options is different per Event Hub:
+
+* `storage_connection`
+* `storage_container` (defaults to the Event Hub name if not defined)
+* `consumer_group`
+
+
+#### Set number of threads correctly [plugins-inputs-azure_event_hubs-bp-threads]
+
+By default, the number of threads used to service all event hubs is `16`. While this may be sufficient for most use cases, throughput may be improved by tuning this number. When servicing a large number of partitions across one or more event hubs, setting a higher value may result in improved performance. The maximum number of threads is not strictly bound by the total number of partitions being serviced, but setting the value much higher than that may mean that some threads are idle.
+
+::::{note}
+The number of threads **must** be greater than or equal to the number of Event Hubs plus one.
+::::
+
+
+::::{note}
+Threads are currently available only as a global setting across all event hubs in a single `azure_event_hubs` input definition. However if your configuration includes multiple `azure_event_hubs` inputs, the threads setting applies independently to each.
+::::
+
+
+**Example: Single event hub**
+
+If you’re collecting activity logs from one event hub instance, then only 2 threads are required.
+
+* Event hubs = 1
+* Minimum threads = 2 (1 Event Hub + 1)
+
+**Example: Multiple event hubs**
+
+If you are collecting activity logs from more than one event hub instance, then at least 1 thread per event hub is required.
+
+* Event hubs = 4
+* Minimum threads = 5 (4 Event Hubs + 1)
+
+When you are using multiple partitions per event hub, you may want to assign more threads. A good base level is (1 + `number of event hubs * number of partitions`). That is, one thread for each partition across all event hubs.
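+
+For example, assuming two event hubs with four partitions each (the connection strings are placeholders), the guideline above suggests `1 + (2 * 4) = 9` threads:
+
+```ruby
+input {
+  azure_event_hubs {
+    event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1", "Endpoint=sb://example2...;EntityPath=event_hub_name2"]
+    threads => 9
+  }
+}
+```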
+
+
+
+
+## Configuration models [plugins-inputs-azure_event_hubs-eh_config_models]
+
+This plugin supports two configuration models: basic and advanced. Basic configuration is recommended for most use cases, and is illustrated in the examples throughout this topic.
+
+### Basic configuration (default) [plugins-inputs-azure_event_hubs-eh_basic_config]
+
+Basic configuration is the default and supports consuming from multiple Event Hubs. All Event Hubs share the same configuration, except for the connection string.
+
+You supply a list of Event Hub connection strings, complete with the Event Hub EntityPath that defines the Event Hub name. All other configuration settings are shared.
+
+```ruby
+input {
+ azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...EntityPath=insights-logs-errors", "Endpoint=sb://example2...EntityPath=insights-metrics-pt1m"]
+ threads => 8
+ decorate_events => true
+ consumer_group => "logstash"
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+ }
+}
+```
+
+
+### Advanced configuration [plugins-inputs-azure_event_hubs-eh_advanced_config]
+
+The advanced configuration model accommodates deployments where different Event Hubs require different configurations. Options can be configured per Event Hub. You provide a list of Event Hub names through the `event_hubs` option. Under each name, specify the configuration for that Event Hub. Options can be defined globally or expressed per Event Hub.
+
+If the same configuration option appears in both the global and `event_hub` sections, the more specific (event_hub) setting takes precedence.
+
+::::{note}
+Advanced configuration is not necessary or recommended for most use cases.
+::::
+
+
+```ruby
+input {
+ azure_event_hubs {
+ config_mode => "advanced"
+ threads => 8
+ decorate_events => true
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+ event_hubs => [
+ {"insights-operational-logs" => {
+ event_hub_connection => "Endpoint=sb://example1..."
+ initial_position => "beginning"
+ consumer_group => "iam_team"
+ }},
+ {"insights-metrics-pt1m" => {
+ event_hub_connection => "Endpoint=sb://example2..."
+ initial_position => "end"
+ consumer_group => "db_team"
+ }}
+ ]
+ }
+}
+```
+
+In this example, `storage_connection` and `decorate_events` are applied globally. The two Event Hubs each have their own settings for `consumer_group` and `initial_position`.
+
+
+
+## Azure Event Hubs Configuration Options [plugins-inputs-azure_event_hubs-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-azure_event_hubs-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`config_mode`](#plugins-inputs-azure_event_hubs-config_mode) | [string](/reference/configuration-file-structure.md#string), (`basic` or `advanced`) | No |
+| [`event_hubs`](#plugins-inputs-azure_event_hubs-event_hubs) | [array](/reference/configuration-file-structure.md#array) | Yes, when `config_mode => advanced` |
+| [`event_hub_connections`](#plugins-inputs-azure_event_hubs-event_hub_connections) | [array](/reference/configuration-file-structure.md#array) | Yes, when `config_mode => basic` |
+| [`event_hub_connection`](#plugins-inputs-azure_event_hubs-event_hub_connection) | [string](/reference/configuration-file-structure.md#string) | Yes, when `config_mode => advanced` |
+| [`checkpoint_interval`](#plugins-inputs-azure_event_hubs-checkpoint_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`consumer_group`](#plugins-inputs-azure_event_hubs-consumer_group) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`decorate_events`](#plugins-inputs-azure_event_hubs-decorate_events) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`initial_position`](#plugins-inputs-azure_event_hubs-initial_position) | [string](/reference/configuration-file-structure.md#string), (`beginning`, `end`, or `look_back`) | No |
+| [`initial_position_look_back`](#plugins-inputs-azure_event_hubs-initial_position_look_back) | [number](/reference/configuration-file-structure.md#number) | No, unless `initial_position => look_back` |
+| [`max_batch_size`](#plugins-inputs-azure_event_hubs-max_batch_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`storage_connection`](#plugins-inputs-azure_event_hubs-storage_connection) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`storage_container`](#plugins-inputs-azure_event_hubs-storage_container) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-azure_event_hubs-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-azure_event_hubs-common-options) for a list of options supported by all input plugins.
+
+::::{note}
+All Event Hubs options are common to both basic and advanced configurations, with the following exceptions. The basic configuration uses `event_hub_connections` to support multiple connections. The advanced configuration uses `event_hubs` and `event_hub_connection` (singular).
+::::
+
+
+### `config_mode` [plugins-inputs-azure_event_hubs-config_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Valid entries are `basic` or `advanced`
+* Default value is `basic`
+
+Sets configuration to either [Basic configuration (default)](#plugins-inputs-azure_event_hubs-eh_basic_config) or [Advanced configuration](#plugins-inputs-azure_event_hubs-eh_advanced_config).
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1" , "Endpoint=sb://example2...;EntityPath=event_hub_name2" ]
+}
+```
+
+
+### `event_hubs` [plugins-inputs-azure_event_hubs-event_hubs]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* No default value
+* Ignored for basic configuration
+* Required for advanced configuration
+
+Defines the Event Hubs to be read. An array of hashes where each entry is a hash of the Event Hub name and its configuration options.
+
+```ruby
+azure_event_hubs {
+ config_mode => "advanced"
+ event_hubs => [
+ { "event_hub_name1" => {
+ event_hub_connection => "Endpoint=sb://example1..."
+ }},
+ { "event_hub_name2" => {
+ event_hub_connection => "Endpoint=sb://example2..."
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+ storage_container => "my_container"
+ }}
+ ]
+ consumer_group => "logstash" # shared across all Event Hubs
+}
+```
+
+
+### `event_hub_connections` [plugins-inputs-azure_event_hubs-event_hub_connections]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* No default value
+* Required for basic configuration
+
+List of connection strings that identifies the Event Hubs to be read. Connection strings include the EntityPath for the Event Hub.
+
+The `event_hub_connections` option is defined per Event Hub. All other configuration options are shared among Event Hubs.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1" , "Endpoint=sb://example2...;EntityPath=event_hub_name2" ]
+}
+```
+
+
+### `event_hub_connection` [plugins-inputs-azure_event_hubs-event_hub_connection]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* No default value
+* Valid only for advanced configuration
+
+Connection string that identifies the Event Hub to be read. Advanced configuration options can be set per Event Hub. This option modifies `event_hub_name`, and should be nested under it. (See sample.) This option accepts only one connection string.
+
+```ruby
+azure_event_hubs {
+ config_mode => "advanced"
+ event_hubs => [
+ { "event_hub_name1" => {
+ event_hub_connection => "Endpoint=sb://example1...;EntityPath=event_hub_name1"
+ }}
+ ]
+}
+```
+
+
+### `checkpoint_interval` [plugins-inputs-azure_event_hubs-checkpoint_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5` seconds
+* Set to `0` to disable.
+
+Interval in seconds to write checkpoints during batch processing. Checkpoints tell Logstash where to resume processing after a restart. Checkpoints are automatically written at the end of each batch, regardless of this setting.
+
+Writing checkpoints too frequently can slow down processing unnecessarily.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ checkpoint_interval => 5
+}
+```
+
+
+### `consumer_group` [plugins-inputs-azure_event_hubs-consumer_group]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `$Default`
+
+Consumer group used to read the Event Hub(s). Create a consumer group specifically for Logstash. Then ensure that all instances of Logstash use that consumer group so that they can work together properly.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ consumer_group => "logstash"
+}
+```
+
+
+### `decorate_events` [plugins-inputs-azure_event_hubs-decorate_events]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Adds metadata about the Event Hub, including Event Hub name, consumer_group, processor_host, partition, offset, sequence, timestamp, and event_size.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ decorate_events => true
+}
+```
+
+
+### `initial_position` [plugins-inputs-azure_event_hubs-initial_position]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Valid arguments are `beginning`, `end`, `look_back`
+* Default value is `beginning`
+
+When first reading from an Event Hub, start from this position:
+
+* `beginning` reads all pre-existing events in the Event Hub
+* `end` does not read any pre-existing events in the Event Hub
+* `look_back` reads `end` minus a number of seconds worth of pre-existing events. You control the number of seconds using the `initial_position_look_back` option.
+
+Note: If `storage_connection` is set, the `initial_position` value is used only the first time Logstash reads from the Event Hub.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ initial_position => "beginning"
+}
+```
+
+
+### `initial_position_look_back` [plugins-inputs-azure_event_hubs-initial_position_look_back]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `86400`
+* Used only if `initial_position` is set to `look_back`
+
+Number of seconds to look back to find the initial position for pre-existing events. This option is used only if `initial_position` is set to `look_back`. If `storage_connection` is set, this configuration applies only the first time Logstash reads from the Event Hub.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ initial_position => "look_back"
+ initial_position_look_back => 86400
+}
+```
+
+
+### `max_batch_size` [plugins-inputs-azure_event_hubs-max_batch_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `125`
+
+Maximum number of events retrieved and processed together. A checkpoint is created after each batch. Increasing this value may help with performance, but requires more memory.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ max_batch_size => 125
+}
+```
+
+
+### `storage_connection` [plugins-inputs-azure_event_hubs-storage_connection]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* No default value
+
+Connection string for blob account storage. Blob account storage persists the offsets between restarts, and ensures that multiple instances of Logstash process different partitions. When this value is set, restarts resume where processing left off. When this value is not set, the `initial_position` value is used on every restart.
+
+We strongly recommend that you define this value for production environments.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+}
+```
+
+
+### `storage_container` [plugins-inputs-azure_event_hubs-storage_container]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Defaults to the Event Hub name if not defined
+
+Name of the storage container used to persist offsets and allow multiple instances of Logstash to work together.
+
+```ruby
+azure_event_hubs {
+ event_hub_connections => ["Endpoint=sb://example1...;EntityPath=event_hub_name1"]
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+ storage_container => "my_container"
+}
+```
+
+To avoid overwriting offsets, you can use different storage containers. This is particularly important if you are monitoring two Event Hubs with the same name. You can use the advanced configuration model to configure different storage containers.
+
+```ruby
+azure_event_hubs {
+ config_mode => "advanced"
+ consumer_group => "logstash"
+ storage_connection => "DefaultEndpointsProtocol=https;AccountName=example...."
+ event_hubs => [
+ {"insights-operational-logs" => {
+ event_hub_connection => "Endpoint=sb://example1..."
+ storage_container => "insights-operational-logs-1"
+ }},
+ {"insights-operational-logs" => {
+ event_hub_connection => "Endpoint=sb://example2..."
+ storage_container => "insights-operational-logs-2"
+ }}
+ ]
+ }
+```
+
+
+### `threads` [plugins-inputs-azure_event_hubs-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Minimum value is `2`
+* Default value is `16`
+
+Total number of threads used to process events. The value you set here applies to all Event Hubs. Even with advanced configuration, this value is a global setting, and can’t be set per event hub.
+
+```ruby
+azure_event_hubs {
+ threads => 16
+}
+```
+
+The number of threads should be at least the number of Event Hubs plus one. See [Best practices](#plugins-inputs-azure_event_hubs-best-practices) for more information.
+
+
+
+## Common options [plugins-inputs-azure_event_hubs-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-azure_event_hubs-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-azure_event_hubs-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-azure_event_hubs-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-azure_event_hubs-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-azure_event_hubs-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-azure_event_hubs-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-azure_event_hubs-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-azure_event_hubs-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-azure_event_hubs-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-azure_event_hubs-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 azure_event_hubs inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ azure_event_hubs {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-azure_event_hubs-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-azure_event_hubs-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-beats.md b/docs/reference/plugins-inputs-beats.md
new file mode 100644
index 000000000..5178cb39f
--- /dev/null
+++ b/docs/reference/plugins-inputs-beats.md
@@ -0,0 +1,480 @@
+---
+navigation_title: "beats"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html
+---
+
+# Beats input plugin [plugins-inputs-beats]
+
+::::{note}
+The `input-elastic_agent` plugin is the next generation of the `input-beats` plugin. They currently share code and a [common codebase](https://github.com/logstash-plugins/logstash-input-beats).
+::::
+
+
+
+* Plugin version: v7.0.0
+* Released on: 2024-12-02
+* [Changelog](https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-beats-index.md).
+
+## Getting help [_getting_help_9]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-beats). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_10]
+
+This input plugin enables Logstash to receive events from the Beats framework.
+
+The following example shows how to configure Logstash to listen on port 5044 for incoming Beats connections and to index into Elasticsearch.
+
+```sh
+input {
+ beats {
+ port => 5044
+ }
+}
+
+output {
+ elasticsearch {
+ hosts => ["http://localhost:9200"]
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}" <1>
+ }
+}
+```
+
+1. `%{[@metadata][beat]}` sets the first part of the index name to the value of the metadata field and `%{[@metadata][version]}` sets the second part to the Beat version. For example: metricbeat-6.1.6.
+
+
+Events indexed into Elasticsearch with the Logstash configuration shown here will be similar to events directly indexed by Beats into Elasticsearch.
+
+::::{note}
+If ILM is not being used, set `index` to `%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}` instead so Logstash creates an index per day, based on the `@timestamp` value of the events coming from Beats.
+::::
+
+
+### Memory usage [plugins-inputs-beats-memory]
+
+This plugin uses "off-heap" direct memory in addition to heap memory. By default, a JVM’s off-heap direct memory limit is the same as the heap size. For example, setting `-Xmx10G` without setting the direct memory limit will allocate `10GB` for heap and an additional `10GB` for direct memory, for a total of `20GB` allocated. You can set the amount of direct memory with `-XX:MaxDirectMemorySize` in [Logstash JVM Settings](/reference/jvm-settings.md). Consider setting direct memory to half of the heap size. Setting direct memory too low decreases the performance of ingestion.
+
+::::{note}
+Be sure that heap and direct memory combined do not exceed the total memory available on the server, to avoid an `OutOfDirectMemoryError`.
+::::
+
+
+
+### Multi-line events [plugins-inputs-beats-multiline]
+
+If you are shipping events that span multiple lines, you need to use the [configuration options available in Filebeat](beats://reference/filebeat/multiline-examples.md) to handle multiline events before sending the event data to Logstash. You cannot use the [Multiline codec plugin](/reference/plugins-codecs-multiline.md) to handle multiline events. Doing so will result in the failure to start Logstash.
+
+
+
+## Versioned indices [plugins-inputs-beats-versioned-indexes]
+
+To minimize the impact of future schema changes on your existing indices and mappings in Elasticsearch, configure the Elasticsearch output to write to versioned indices. The pattern that you specify for the `index` setting controls the index name:
+
+```yaml
+index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+```
+
+`%{[@metadata][beat]}`
+: Sets the first part of the index name to the value of the `beat` metadata field, for example, `filebeat`.
+
+`%{[@metadata][version]}`
+: Sets the second part of the name to the Beat version, for example, `9.0.0`.
+
+`%{+YYYY.MM.dd}`
+: Sets the third part of the name to a date based on the Logstash `@timestamp` field.
+
+This configuration results in daily index names like `filebeat-9.0.0-2025.01.30`.
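+
+Putting it together, an Elasticsearch output using this versioned, dated index pattern might look like the following sketch (the host is illustrative):
+
+```ruby
+output {
+  elasticsearch {
+    hosts => ["http://localhost:9200"]
+    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+  }
+}
+```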
+
+
+## Event enrichment and the Elastic Common Schema (ECS) [plugins-inputs-beats-ecs_metadata]
+
+When decoding Beats events, this plugin enriches each event with metadata about the event’s source, making this information available during further processing. You can use the [`enrich`](#plugins-inputs-beats-enrich) option to activate or deactivate individual enrichment categories.
+
+The location of these enrichment fields depends on whether [ECS compatibility mode](#plugins-inputs-beats-ecs_compatibility) is enabled:
+
+* When ECS compatibility is *enabled*, enrichment fields are added in an ECS-compatible structure.
+* When ECS compatibility is *disabled*, enrichment fields are added in a way that is backward-compatible with this plugin, but is known to clash with the Elastic Common Schema.
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [@metadata][input][beats][host][name] | [host] | *Name or address of the Beat host* |
+| [@metadata][input][beats][host][ip] | [@metadata][ip_address] | *IP address of the Beats client that connected to this input* |
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [@metadata][tls_peer][status] | [@metadata][tls_peer][status] | *Contains "verified" or "unverified" label; available when SSL is enabled.* |
+| [@metadata][input][beats][tls][version_protocol] | [@metadata][tls_peer][protocol] | *Contains the TLS version used (such as `TLSv1.2`); available when SSL status is "verified"* |
+| [@metadata][input][beats][tls][client][subject] | [@metadata][tls_peer][subject] | *Contains the identity name of the remote end (such as `CN=artifacts-no-kpi.elastic.co`); available when SSL status is "verified"* |
+| [@metadata][input][beats][tls][cipher] | [@metadata][tls_peer][cipher_suite] | *Contains the name of cipher suite used (such as `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`); available when SSL status is "verified"* |
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [tag] | [tag] | *Contains `beats_input_codec_XXX_applied` where `XXX` is the name of the codec* |
+| [event][original] | *N/A* | *When ECS is enabled, even if `[event][original]` field does not already exist on the event being processed, this plugin’s **default codec** ensures that the field is populated using the bytes as-processed.* |
+
+
+## Beats input configuration options [plugins-inputs-beats-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-beats-common-options) described later.
+
+::::{note}
+As of version `7.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [Beats Input Obsolete Configuration Options](#plugins-inputs-beats-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_hostname`](#plugins-inputs-beats-add_hostname) | [boolean](/reference/configuration-file-structure.md#boolean) | *Deprecated* |
+| [`client_inactivity_timeout`](#plugins-inputs-beats-client_inactivity_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-beats-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`enrich`](#plugins-inputs-beats-enrich) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`event_loop_threads`](#plugins-inputs-beats-event_loop_threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`executor_threads`](#plugins-inputs-beats-executor_threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-beats-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_codec_tag`](#plugins-inputs-beats-include_codec_tag) | [boolean](/reference/configuration-file-structure.md#boolean) | *Deprecated* |
+| [`port`](#plugins-inputs-beats-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl_certificate`](#plugins-inputs-beats-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-beats-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-beats-ssl_cipher_suites) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_client_authentication`](#plugins-inputs-beats-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-inputs-beats-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_handshake_timeout`](#plugins-inputs-beats-ssl_handshake_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_key`](#plugins-inputs-beats-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-beats-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-beats-ssl_supported_protocols) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-inputs-beats-common-options) for a list of options supported by all input plugins.
+
+
+
+### `add_hostname` [plugins-inputs-beats-add_hostname]
+
+::::{admonition} Deprecated in 6.0.0.
+:class: warning
+
+The default value has been changed to `false`. In 7.0.0 this setting will be removed.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Flag to determine whether to add `host` field to event using the value supplied by the Beat in the `hostname` field.
+
+
+### `client_inactivity_timeout` [plugins-inputs-beats-client_inactivity_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+
+### `ecs_compatibility` [plugins-inputs-beats-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured connection metadata added at root level
+ * `v1`: structured connection metadata added under ECS v1 compliant namespaces
+ * `v8`: structured connection metadata added under ECS v8 compliant namespaces
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Refer to [ECS mapping](#plugins-inputs-beats-ecs_metadata) for detailed information.
+
+
+### `enrich` [plugins-inputs-beats-enrich]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+ * An [array](/reference/configuration-file-structure.md#array) can also be provided
+ * Configures which enrichments are applied to each event
+ * Default value is `[codec_metadata, source_metadata]` that may be extended in future versions of this plugin to include additional enrichments.
+ * Supported values are:
+
+ | Enrichment | Description |
+ | --- | --- |
+ | codec_metadata | Information about how the codec transformed a sequence of bytes into this Event, such as *which* codec was used. Also, if no codec is explicitly specified, *excluding* `codec_metadata` from `enrich` will disable `ecs_compatibility` for this plugin. |
+ | source_metadata | Information about the *source* of the event, such as the IP address of the inbound connection this input received the event from |
+ | ssl_peer_metadata | Detailed information about the *SSL peer* we received the event from, such as identity information from the SSL client certificate that was presented when establishing a connection to this input |
+ | all | *alias* to include *all* available enrichments (including additional enrichments introduced in future versions of this plugin) |
+ | none | *alias* to *exclude* all available enrichments. Note that, *explicitly* defining codec with this option will not disable the `ecs_compatibility`, instead it relies on pipeline or codec `ecs_compatibility` configuration. |
+
+
+**Example:**
+
+This configuration disables *all* enrichments:
+
+```ruby
+input {
+ beats {
+ port => 5044
+ enrich => none
+ }
+}
+```
+
+Or, to explicitly enable *only* `source_metadata` and `ssl_peer_metadata` (disabling all others):
+
+```ruby
+input {
+ beats {
+ port => 5044
+ enrich => [source_metadata, ssl_peer_metadata]
+ }
+}
+```
+
+
+### `event_loop_threads` [plugins-inputs-beats-event_loop_threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Defaults to 0.
+
+When set to `0`, the actual default is `available_processors * 2`.
+
+This is an expert-level setting and generally should not need to be changed. The Beats plugin is implemented on a non-blocking mechanism, requiring a number of event loop and executor threads. The event loop threads are responsible for communicating with clients (accepting incoming connections, enqueueing/dequeueing tasks, and responding), while executor threads handle the tasks. This configuration limits or increases the number of threads to be created for the event loop. See [`executor_threads`](#plugins-inputs-beats-executor_threads) if you need to set the executor thread count.
+
+
+### `executor_threads` [plugins-inputs-beats-executor_threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is equal to the number of CPU cores (1 executor thread per CPU core).
+
+The number of threads to be used to process incoming Beats requests. By default, the Beats input creates a number of threads equal to the number of CPU cores. These threads handle incoming connections, reading from established sockets, and executing most of the tasks related to network connection management. Parsing the Lumberjack protocol is offloaded to a dedicated thread pool.
+
+Generally you don’t need to touch this setting. If you are sending very large events and observing `OutOfDirectMemory` exceptions, you may want to reduce this number to half or a quarter of the CPU cores. This change reduces the number of threads decompressing batches of data into direct memory. However, this is only a mitigating tweak; the proper solution may require resizing your Logstash deployment, either by increasing the number of Logstash nodes or by increasing the JVM’s direct memory.
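+
+For example, on a hypothetical 8-core host where large events cause `OutOfDirectMemory` errors, you might try halving the executor thread count:
+
+```ruby
+input {
+  beats {
+    port => 5044
+    executor_threads => 4
+  }
+}
+```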
+
+
+### `host` [plugins-inputs-beats-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+
+### `include_codec_tag` [plugins-inputs-beats-include_codec_tag]
+
+::::{admonition} Deprecated in 6.5.0.
+:class: warning
+
+Replaced by [`enrich`](#plugins-inputs-beats-enrich)
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+
+### `port` [plugins-inputs-beats-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to listen on.
+
+
+### `ssl_certificate` [plugins-inputs-beats-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-beats-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificates against these authorities. You can define multiple files or paths. All the certificates will be read and added to the trust store. You need to set [`ssl_client_authentication`](#plugins-inputs-beats-ssl_client_authentication) to `optional` or `required` to enable verification.
+
+
+### `ssl_cipher_suites` [plugins-inputs-beats-ssl_cipher_suites]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `['TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256', 'TLS_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priorities. This default list applies for OpenJDK 11.0.14 and higher. For older JDK versions, the default list includes only suites supported by that version. For example, the ChaCha20 family of ciphers is not supported in older versions.
+
+
+### `ssl_client_authentication` [plugins-inputs-beats-ssl_client_authentication]
+
+* Value can be any of: `none`, `optional`, `required`
+* Default value is `"none"`
+
+Controls the server’s behavior in regard to requesting a certificate from client connections: `required` forces a client to present a certificate, while `optional` requests a client certificate but the client is not required to present one. Defaults to `none`, which disables the client authentication.
+
+When mutual TLS is enabled (`required` or `optional`), the certificate presented by the client must be signed by trusted [`ssl_certificate_authorities`](#plugins-inputs-beats-ssl_certificate_authorities) (CAs). Please note that the server does not validate the client certificate CN (Common Name) or SAN (Subject Alternative Name).
+
+::::{note}
+This setting can be used only if [`ssl_certificate_authorities`](#plugins-inputs-beats-ssl_certificate_authorities) is set.
+::::
+
+
+
+### `ssl_enabled` [plugins-inputs-beats-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Events are by default sent in plain text. You can enable encryption by setting `ssl_enabled` to true and configuring the [`ssl_certificate`](#plugins-inputs-beats-ssl_certificate) and [`ssl_key`](#plugins-inputs-beats-ssl_key) options.
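+
+For example, a minimal TLS-enabled Beats input might look like the following sketch (the certificate and key paths are illustrative, and the key must be PKCS8 PEM as described under [`ssl_key`](#plugins-inputs-beats-ssl_key)):
+
+```ruby
+input {
+  beats {
+    port => 5044
+    ssl_enabled => true
+    ssl_certificate => "/etc/logstash/certs/server.crt"
+    ssl_key => "/etc/logstash/certs/server.pkcs8.key"
+  }
+}
+```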
+
+
+### `ssl_handshake_timeout` [plugins-inputs-beats-ssl_handshake_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+
+### `ssl_key` [plugins-inputs-beats-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use. This key must be in the PKCS8 format and PEM encoded. You can use the [openssl pkcs8](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html) command to complete the conversion. For example, the command to convert a PEM encoded PKCS1 private key to a PEM encoded, non-encrypted PKCS8 key is:
+
+```sh
+openssl pkcs8 -inform PEM -in path/to/logstash.key -topk8 -nocrypt -outform PEM -out path/to/logstash.pkcs8.key
+```
+
+
+### `ssl_key_passphrase` [plugins-inputs-beats-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+### `ssl_supported_protocols` [plugins-inputs-beats-ssl_supported_protocols]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+
+## Beats Input Obsolete Configuration Options [plugins-inputs-beats-obsolete-options]
+
+::::{warning}
+As of version `7.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](#plugins-inputs-beats-ssl_cipher_suites) |
+| ssl | [`ssl_enabled`](#plugins-inputs-beats-ssl_enabled) |
+| ssl_peer_metadata | [`enrich`](#plugins-inputs-beats-enrich) |
+| ssl_verify_mode | [`ssl_client_authentication`](#plugins-inputs-beats-ssl_client_authentication) |
+| tls_max_version | [`ssl_supported_protocols`](#plugins-inputs-beats-ssl_supported_protocols) |
+| tls_min_version | [`ssl_supported_protocols`](#plugins-inputs-beats-ssl_supported_protocols) |
+
+
+## Common options [plugins-inputs-beats-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-beats-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-beats-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-beats-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-beats-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-beats-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-beats-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-beats-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-beats-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-beats-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-beats-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 beats inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ beats {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-beats-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-beats-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+::::{note}
+The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify a setting for the [`type`](#plugins-inputs-beats-type) config option in Logstash, it is ignored.
+::::
diff --git a/docs/reference/plugins-inputs-cloudwatch.md b/docs/reference/plugins-inputs-cloudwatch.md
new file mode 100644
index 000000000..85249fd87
--- /dev/null
+++ b/docs/reference/plugins-inputs-cloudwatch.md
@@ -0,0 +1,395 @@
+---
+navigation_title: "cloudwatch"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-cloudwatch.html
+---
+
+# Cloudwatch input plugin [plugins-inputs-cloudwatch]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-cloudwatch-index.md).
+
+## Getting help [_getting_help_10]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_11]
+
+Pull events from the Amazon Web Services CloudWatch API.
+
+To use this plugin, you **must** have an AWS account and an IAM policy that grants access to the CloudWatch APIs the plugin calls.
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user. A sample policy for EC2 metrics is as follows:
+
+```json
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Stmt1444715676000",
+ "Effect": "Allow",
+ "Action": [
+ "cloudwatch:GetMetricStatistics",
+ "cloudwatch:ListMetrics"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Stmt1444716576170",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstances"
+ ],
+ "Resource": "*"
+ }
+ ]
+ }
+```
+
+See [http://aws.amazon.com/iam/](http://aws.amazon.com/iam/) for more details on setting up AWS identities.
+
+### Configuration examples [_configuration_examples]
+
+```ruby
+ input {
+ cloudwatch {
+ namespace => "AWS/EC2"
+ metrics => [ "CPUUtilization" ]
+ filters => { "tag:Group" => "API-Production" }
+ region => "us-east-1"
+ }
+ }
+```
+
+```ruby
+ input {
+ cloudwatch {
+ namespace => "AWS/EBS"
+ metrics => ["VolumeQueueLength"]
+ filters => { "tag:Monitoring" => "Yes" }
+ region => "us-east-1"
+ }
+ }
+```
+
+```ruby
+ input {
+ cloudwatch {
+ namespace => "AWS/RDS"
+ metrics => ["CPUUtilization", "CPUCreditUsage"]
+ filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
+ region => "us-east-1"
+ }
+ }
+```
+
+```ruby
+ input {
+ cloudwatch {
+ namespace => "sqlserver_test2"
+ metrics => [ "Memory Available Bytes"]
+ filters => {
+ InstanceId => "i-xxxxxxxxxxx"
+ objectname => "Memory"
+ }
+ combined => true
+ interval => 600
+ period => 300
+ }
+ }
+```
+
+
+
+## Cloudwatch Input Configuration Options [plugins-inputs-cloudwatch-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-cloudwatch-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-inputs-cloudwatch-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`aws_credentials_file`](#plugins-inputs-cloudwatch-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`combined`](#plugins-inputs-cloudwatch-combined) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`endpoint`](#plugins-inputs-cloudwatch-endpoint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`filters`](#plugins-inputs-cloudwatch-filters) | [array](/reference/configuration-file-structure.md#array) | See [note](#plugins-inputs-cloudwatch-filters) |
+| [`interval`](#plugins-inputs-cloudwatch-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`metrics`](#plugins-inputs-cloudwatch-metrics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`namespace`](#plugins-inputs-cloudwatch-namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`period`](#plugins-inputs-cloudwatch-period) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy_uri`](#plugins-inputs-cloudwatch-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-inputs-cloudwatch-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_arn`](#plugins-inputs-cloudwatch-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-inputs-cloudwatch-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secret_access_key`](#plugins-inputs-cloudwatch-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-inputs-cloudwatch-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`statistics`](#plugins-inputs-cloudwatch-statistics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`use_aws_bundled_ca`](#plugins-inputs-cloudwatch-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_ssl`](#plugins-inputs-cloudwatch-use_ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-cloudwatch-common-options) for a list of options supported by all input plugins.
+
+
+
+### `access_key_id` [plugins-inputs-cloudwatch-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order (see the example after this list):
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
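+
+For example, a minimal sketch that uses static credentials (option 1 above; the key values are AWS’s documented example placeholders):
+
+```ruby
+input {
+  cloudwatch {
+    namespace => "AWS/EC2"
+    metrics => [ "CPUUtilization" ]
+    region => "us-east-1"
+    access_key_id => "AKIAIOSFODNN7EXAMPLE"                          # placeholder
+    secret_access_key => "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"  # placeholder
+  }
+}
+```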
+
+
+### `aws_credentials_file` [plugins-inputs-cloudwatch-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `combined` [plugins-inputs-cloudwatch-combined]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use this for namespaces that need to combine dimensions, such as S3 and SNS.
+
+
+### `endpoint` [plugins-inputs-cloudwatch-endpoint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The endpoint to connect to. By default it is constructed using the value of `region`. This is useful when connecting to S3 compatible services, but beware that these aren’t guaranteed to work correctly with the AWS SDK.
+
+
+### `filters` [plugins-inputs-cloudwatch-filters]
+
+* This setting can be required or optional. See note below.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+::::{note}
+This setting is optional when the namespace is `AWS/EC2`. Otherwise this is a required field.
+::::
+
+
+Specify the filters to apply when fetching resources. Follow the AWS convention:
+
+* Instances: { *instance-id* ⇒ *i-12344321* }
+* Tags: { "tag:Environment" ⇒ "Production" }
+* Volumes: { *attachment.status* ⇒ *attached* }
+
+Each namespace supports its own set of dimensions. Please consult the AWS documentation to ensure you’re using valid filters.
+
+
+### `interval` [plugins-inputs-cloudwatch-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `900`
+
+Set how frequently CloudWatch should be queried.
+
+The default, `900`, means check every 15 minutes. Setting this value too low (generally less than 300) results in no metrics being returned from CloudWatch.
+
+
+### `metrics` [plugins-inputs-cloudwatch-metrics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]`
+
+Specify the metrics to fetch for the namespace. The defaults are AWS/EC2 specific. See [http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html) for the available metrics for other namespaces.
+
+
+### `namespace` [plugins-inputs-cloudwatch-namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"AWS/EC2"`
+
+The service namespace of the metrics to fetch. If undefined, Logstash will complain, even if the codec is unused.
+
+The default is for the EC2 service. See [http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html) for valid values.
+
+
+### `period` [plugins-inputs-cloudwatch-period]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300`
+
+Set the granularity of the returned datapoints.
+
+Must be at least 60 seconds and in multiples of 60.
+
+
+### `proxy_uri` [plugins-inputs-cloudwatch-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI of the proxy server to use, if required.
+
+
+### `region` [plugins-inputs-cloudwatch-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `role_arn` [plugins-inputs-cloudwatch-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS IAM Role to assume, if any. This is used to generate temporary credentials, typically for cross-account access. See the [AssumeRole API documentation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) for more information.
+
+
+### `role_session_name` [plugins-inputs-cloudwatch-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Session name to use when assuming an IAM role.
+
+
+### `secret_access_key` [plugins-inputs-cloudwatch-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `session_token` [plugins-inputs-cloudwatch-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `statistics` [plugins-inputs-cloudwatch-statistics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
+
+Specify the statistics to fetch for each namespace.
+
+
+### `use_aws_bundled_ca` [plugins-inputs-cloudwatch-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use bundled CA certificates that ship with AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. Windows, you can set this to `true`.
+
+
+### `use_ssl` [plugins-inputs-cloudwatch-use_ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should we require (true) or disable (false) using SSL for communicating with the AWS API. The AWS SDK for Ruby defaults to SSL, so we preserve that behavior.
+
+
+
+## Common options [plugins-inputs-cloudwatch-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-cloudwatch-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-cloudwatch-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-cloudwatch-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-cloudwatch-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-cloudwatch-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-cloudwatch-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-cloudwatch-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-cloudwatch-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-cloudwatch-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-cloudwatch-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 cloudwatch inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ cloudwatch {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-cloudwatch-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-cloudwatch-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-couchdb_changes.md b/docs/reference/plugins-inputs-couchdb_changes.md
new file mode 100644
index 000000000..f8bfc8a83
--- /dev/null
+++ b/docs/reference/plugins-inputs-couchdb_changes.md
@@ -0,0 +1,279 @@
+---
+navigation_title: "couchdb_changes"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-couchdb_changes.html
+---
+
+# Couchdb_changes input plugin [plugins-inputs-couchdb_changes]
+
+
+* Plugin version: v3.1.6
+* Released on: 2019-04-15
+* [Changelog](https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-couchdb_changes-index.md).
+
+## Getting help [_getting_help_11]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-couchdb_changes). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_12]
+
+This CouchDB input allows you to automatically stream events from the CouchDB [_changes](http://guide.couchdb.org/draft/notifications.html) URI. Moreover, any "future" changes will automatically be streamed as well, making it easy to synchronize your CouchDB data with any target destination.
+
+### Upsert and delete [_upsert_and_delete]
+
+You can use event metadata to allow for document deletion. All non-delete operations are treated as upserts.
+
+
+### Starting at a Specific Sequence [_starting_at_a_specific_sequence]
+
+The CouchDB input stores the last sequence number value in the location defined by `sequence_path`. You can use this fact to start or resume the stream at a particular sequence.
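+
+For example, a minimal sketch that streams changes from a local CouchDB database and keeps its position in an explicit sequence file (the database name and file path are placeholders):
+
+```ruby
+input {
+  couchdb_changes {
+    host => "localhost"
+    port => 5984
+    db => "orders"                                            # placeholder database name
+    sequence_path => "/var/lib/logstash/orders.couchdb_seq"   # placeholder path
+    keep_id => true
+  }
+}
+```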
+
+
+
+## Couchdb_changes Input Configuration Options [plugins-inputs-couchdb_changes-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-couchdb_changes-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`always_reconnect`](#plugins-inputs-couchdb_changes-always_reconnect) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ca_file`](#plugins-inputs-couchdb_changes-ca_file) | a valid filesystem path | No |
+| [`db`](#plugins-inputs-couchdb_changes-db) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`heartbeat`](#plugins-inputs-couchdb_changes-heartbeat) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-couchdb_changes-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ignore_attachments`](#plugins-inputs-couchdb_changes-ignore_attachments) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`initial_sequence`](#plugins-inputs-couchdb_changes-initial_sequence) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`keep_id`](#plugins-inputs-couchdb_changes-keep_id) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`keep_revision`](#plugins-inputs-couchdb_changes-keep_revision) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-inputs-couchdb_changes-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-couchdb_changes-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect_delay`](#plugins-inputs-couchdb_changes-reconnect_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`secure`](#plugins-inputs-couchdb_changes-secure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`sequence_path`](#plugins-inputs-couchdb_changes-sequence_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeout`](#plugins-inputs-couchdb_changes-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`username`](#plugins-inputs-couchdb_changes-username) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-couchdb_changes-common-options) for a list of options supported by all input plugins.
+
+
+
+### `always_reconnect` [plugins-inputs-couchdb_changes-always_reconnect]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Reconnect flag. When true, always try to reconnect after a failure.
+
+
+### `ca_file` [plugins-inputs-couchdb_changes-ca_file]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to a CA certificate file, used to validate certificates
+
+
+### `db` [plugins-inputs-couchdb_changes-db]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The CouchDB db to connect to. Required parameter.
+
+
+### `heartbeat` [plugins-inputs-couchdb_changes-heartbeat]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1000`
+
+Logstash connects to CouchDB’s _changes endpoint with `feed=continuous`. The heartbeat is how often (in milliseconds) Logstash will ping CouchDB to ensure the connection is maintained. Changing this setting is not recommended unless you know what you are doing.
+
+
+### `host` [plugins-inputs-couchdb_changes-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+IP or hostname of your CouchDB instance
+
+
+### `ignore_attachments` [plugins-inputs-couchdb_changes-ignore_attachments]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Future feature! Until implemented, changing this from the default will not do anything.
+
+Ignore attachments associated with CouchDB documents.
+
+
+### `initial_sequence` [plugins-inputs-couchdb_changes-initial_sequence]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+If unspecified, Logstash will attempt to read the last sequence number from the `sequence_path` file. If that is empty or non-existent, it will begin with 0 (the beginning).
+
+If you specify this value, it is anticipated that you will only be doing so for an initial read under special circumstances and that you will unset this value afterwards.
+
+
+### `keep_id` [plugins-inputs-couchdb_changes-keep_id]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Preserve the CouchDB document id "_id" value in the output.
+
+
+### `keep_revision` [plugins-inputs-couchdb_changes-keep_revision]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Preserve the CouchDB document revision "_rev" value in the output.
+
+
+### `password` [plugins-inputs-couchdb_changes-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+Password, if authentication is needed to connect to CouchDB
+
+
+### `port` [plugins-inputs-couchdb_changes-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5984`
+
+Port of your CouchDB instance.
+
+
+### `reconnect_delay` [plugins-inputs-couchdb_changes-reconnect_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Reconnect delay: time between reconnect attempts, in seconds.
+
+
+### `secure` [plugins-inputs-couchdb_changes-secure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Connect to CouchDB’s _changes feed securely (via https). The default is `false` (via http).
+
+
+### `sequence_path` [plugins-inputs-couchdb_changes-sequence_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+File path where the last sequence number in the _changes stream is stored. If unset, it will write to `$HOME/.couchdb_seq`.
+
+
+### `timeout` [plugins-inputs-couchdb_changes-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The number of milliseconds to wait for new data before terminating the connection. If a timeout is set, it disables the heartbeat configuration option.
+
+
+### `username` [plugins-inputs-couchdb_changes-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Username, if authentication is needed to connect to CouchDB
+
+
+
+## Common options [plugins-inputs-couchdb_changes-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-couchdb_changes-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-couchdb_changes-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-couchdb_changes-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-couchdb_changes-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-couchdb_changes-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-couchdb_changes-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-couchdb_changes-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-couchdb_changes-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-couchdb_changes-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-couchdb_changes-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 couchdb_changes inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ couchdb_changes {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-couchdb_changes-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-couchdb_changes-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-dead_letter_queue.md b/docs/reference/plugins-inputs-dead_letter_queue.md
new file mode 100644
index 000000000..e934ae045
--- /dev/null
+++ b/docs/reference/plugins-inputs-dead_letter_queue.md
@@ -0,0 +1,191 @@
+---
+navigation_title: "dead_letter_queue"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-dead_letter_queue.html
+---
+
+# Dead_letter_queue input plugin [plugins-inputs-dead_letter_queue]
+
+
+* Plugin version: v2.0.1
+* Released on: 2024-09-04
+* [Changelog](https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v2.0.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-dead_letter_queue-index.md).
+
+## Getting help [_getting_help_12]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-dead_letter_queue). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_13]
+
+Logstash input to read events from Logstash’s dead letter queue.
+
+```sh
+input {
+ dead_letter_queue {
+ path => "/var/logstash/data/dead_letter_queue"
+ start_timestamp => "2017-04-04T23:40:37"
+ }
+}
+```
+
+For more information about processing events in the dead letter queue, see [Dead Letter Queues](/reference/dead-letter-queues.md).
+
+
+## Dead_letter_queue Input Configuration Options [plugins-inputs-dead_letter_queue-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-dead_letter_queue-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`clean_consumed`](#plugins-inputs-dead_letter_queue-clean_consumed) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`commit_offsets`](#plugins-inputs-dead_letter_queue-commit_offsets) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`path`](#plugins-inputs-dead_letter_queue-path) | a valid filesystem path | Yes |
+| [`pipeline_id`](#plugins-inputs-dead_letter_queue-pipeline_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sincedb_path`](#plugins-inputs-dead_letter_queue-sincedb_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`start_timestamp`](#plugins-inputs-dead_letter_queue-start_timestamp) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-dead_letter_queue-common-options) for a list of options supported by all input plugins.
+
+
+
+### `clean_consumed` [plugins-inputs-dead_letter_queue-clean_consumed]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, this option deletes the DLQ segments that have been read. This feature requires that `commit_offsets` is set to `true`. If not, you’ll get a configuration error. This feature is available in Logstash 8.4.0 and later. If this setting is `true` and you are using a Logstash version older than 8.4.0, then you’ll get a configuration error.
+
+
+### `commit_offsets` [plugins-inputs-dead_letter_queue-commit_offsets]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Specifies whether this input should commit offsets as it processes the events. Typically you specify `false` when you want to iterate multiple times over the events in the dead letter queue but don’t want to save state, for example when you are exploring the events.
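+
+For example, a minimal sketch for re-reading the queue repeatedly without saving position between runs (the path is a placeholder; use your own `path.dead_letter_queue` location):
+
+```ruby
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"   # placeholder path
+    pipeline_id => "main"
+    commit_offsets => false
+  }
+}
+```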
+
+
+### `path` [plugins-inputs-dead_letter_queue-path]
+
+* This is a required setting.
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to the dead letter queue directory that was created by a Logstash instance. This is the path from which "dead" events are read and is typically configured in the original Logstash instance with the setting `path.dead_letter_queue`.
+
+
+### `pipeline_id` [plugins-inputs-dead_letter_queue-pipeline_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"main"`
+
+ID of the pipeline whose events you want to read from.
+
+
+### `sincedb_path` [plugins-inputs-dead_letter_queue-sincedb_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path of the sincedb database file (which keeps track of the current position in the dead letter queue) that will be written to disk. By default, sincedb files are written to `<path.data>/plugins/inputs/dead_letter_queue`.
+
+::::{note}
+This value must be a file path and not a directory path.
+::::
+
+
+
+### `start_timestamp` [plugins-inputs-dead_letter_queue-start_timestamp]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Timestamp in ISO8601 format from which you want to start processing events. For example, `2017-04-04T23:40:37`.
+
+
+
+## Common options [plugins-inputs-dead_letter_queue-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-dead_letter_queue-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-dead_letter_queue-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-dead_letter_queue-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-dead_letter_queue-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-dead_letter_queue-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-dead_letter_queue-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-dead_letter_queue-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-dead_letter_queue-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-dead_letter_queue-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-dead_letter_queue-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 dead_letter_queue inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ dead_letter_queue {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-dead_letter_queue-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-dead_letter_queue-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-elastic_agent.md b/docs/reference/plugins-inputs-elastic_agent.md
new file mode 100644
index 000000000..da2ef1748
--- /dev/null
+++ b/docs/reference/plugins-inputs-elastic_agent.md
@@ -0,0 +1,443 @@
+---
+navigation_title: "elastic_agent"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_agent.html
+---
+
+# Elastic Agent input plugin [plugins-inputs-elastic_agent]
+
+::::{note}
+The `input-elastic_agent` plugin is the next generation of the `input-beats` plugin. They currently share code and a [common codebase](https://github.com/logstash-plugins/logstash-input-beats).
+::::
+
+
+
+* Plugin version: v7.0.0
+* Released on: 2024-12-02
+* [Changelog](https://github.com/logstash-plugins/logstash-input-beats/blob/v7.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-elastic_agent-index.md).
+
+## Getting help [_getting_help_13]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-elastic_agent). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_14]
+
+This input plugin enables Logstash to receive events from the Elastic Agent framework.
+
+The following example shows how to configure Logstash to listen on port 5044 for incoming Elastic Agent connections and to index into Elasticsearch.
+
+```sh
+input {
+ elastic_agent {
+ port => 5044
+ }
+}
+
+output {
+ elasticsearch {
+ hosts => ["http://localhost:9200"]
+ data_stream => "true"
+ }
+}
+```
+
+Events indexed into Elasticsearch with the Logstash configuration shown here will be similar to events directly indexed by Elastic Agent into Elasticsearch.
+
+### Memory usage [plugins-inputs-elastic_agent-memory]
+
+This plugin uses "off-heap" direct memory in addition to heap memory. By default, a JVM’s off-heap direct memory limit is the same as the heap size. For example, setting `-Xmx10G` without setting the direct memory limit will allocate `10GB` for heap and an additional `10GB` for direct memory, for a total of `20GB` allocated. You can set the amount of direct memory with `-XX:MaxDirectMemorySize` in [Logstash JVM Settings](/reference/jvm-settings.md). Consider setting direct memory to half of the heap size. Setting direct memory too low decreases the performance of ingestion.
+
+::::{note}
+Be sure that heap and direct memory combined do not exceed the total memory available on the server, to avoid an OutOfDirectMemoryError.
+::::
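+
+For illustration, a sketch of the relevant flags in [Logstash JVM Settings](/reference/jvm-settings.md); the sizes are placeholders and should be adjusted to your environment:
+
+```
+# heap
+-Xms4g
+-Xmx4g
+# cap for off-heap direct memory (here, half of the heap)
+-XX:MaxDirectMemorySize=2g
+```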
+
+
+
+
+## Event enrichment and the Elastic Common Schema (ECS) [plugins-inputs-elastic_agent-ecs_metadata]
+
+When decoding Elastic Agent events, this plugin enriches each event with metadata about the event’s source, making this information available during further processing. You can use the [`enrich`](#plugins-inputs-elastic_agent-enrich) option to activate or deactivate individual enrichment categories.
+
+The location of these enrichment fields depends on whether [ECS compatibility mode](#plugins-inputs-elastic_agent-ecs_compatibility) is enabled:
+
+* When ECS compatibility is *enabled*, enrichment fields are added in an ECS-compatible structure.
+* When ECS compatibility is *disabled*, enrichment fields are added in a way that is backward-compatible with this plugin, but is known to clash with the Elastic Common Schema.
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [@metadata][input][beats][host][name] | [host] | *Name or address of the Elastic Agent host* |
+| [@metadata][input][beats][host][ip] | [@metadata][ip_address] | *IP address of the Elastic Agent client that connected to this input* |
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [@metadata][tls_peer][status] | [@metadata][tls_peer][status] | *Contains "verified" or "unverified" label; available when SSL is enabled.* |
+| [@metadata][input][beats][tls][version_protocol] | [@metadata][tls_peer][protocol] | *Contains the TLS version used (such as `TLSv1.2`); available when SSL status is "verified"* |
+| [@metadata][input][beats][tls][client][subject] | [@metadata][tls_peer][subject] | *Contains the identity name of the remote end (such as `CN=artifacts-no-kpi.elastic.co`); available when SSL status is "verified"* |
+| [@metadata][input][beats][tls][cipher] | [@metadata][tls_peer][cipher_suite] | *Contains the name of cipher suite used (such as `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`); available when SSL status is "verified"* |
+
+| ECS `v1`, `v8` | ECS `disabled` | Description |
+| --- | --- | --- |
+| [tag] | [tag] | *Contains `beats_input_codec_XXX_applied` where `XXX` is the name of the codec* |
+| [event][original] | *N/A* | *When ECS is enabled, even if `[event][original]` field does not already exist on the event being processed, this plugin’s **default codec** ensures that the field is populated using the bytes as-processed.* |
+
+
+## Elastic Agent input configuration options [plugins-inputs-elastic_agent-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-elastic_agent-common-options) described later.
+
+::::{note}
+As of version `7.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [Beats Input Obsolete Configuration Options](#plugins-inputs-elastic_agent-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_hostname`](#plugins-inputs-elastic_agent-add_hostname) | [boolean](/reference/configuration-file-structure.md#boolean) | *Deprecated* |
+| [`client_inactivity_timeout`](#plugins-inputs-elastic_agent-client_inactivity_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-elastic_agent-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`enrich`](#plugins-inputs-elastic_agent-enrich) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`event_loop_threads`](#plugins-inputs-elastic_agent-event_loop_threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`executor_threads`](#plugins-inputs-elastic_agent-executor_threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-elastic_agent-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_codec_tag`](#plugins-inputs-elastic_agent-include_codec_tag) | [boolean](/reference/configuration-file-structure.md#boolean) | *Deprecated* |
+| [`port`](#plugins-inputs-elastic_agent-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl_certificate`](#plugins-inputs-elastic_agent-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-elastic_agent-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-elastic_agent-ssl_cipher_suites) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_client_authentication`](#plugins-inputs-elastic_agent-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-inputs-elastic_agent-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_handshake_timeout`](#plugins-inputs-elastic_agent-ssl_handshake_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_key`](#plugins-inputs-elastic_agent-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-elastic_agent-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-elastic_agent-ssl_supported_protocols) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-inputs-elastic_agent-common-options) for a list of options supported by all input plugins.
+
+
+
+### `add_hostname` [plugins-inputs-elastic_agent-add_hostname]
+
+::::{admonition} Deprecated in 6.0.0.
+:class: warning
+
+The default value has been changed to `false`. In 7.0.0 this setting will be removed.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Flag to determine whether to add `host` field to event using the value supplied by the Elastic Agent in the `hostname` field.
+
+
+### `client_inactivity_timeout` [plugins-inputs-elastic_agent-client_inactivity_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Close idle clients after X seconds of inactivity.
+
+
+### `ecs_compatibility` [plugins-inputs-elastic_agent-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured connection metadata added at root level
+ * `v1`: structured connection metadata added under ECS v1 compliant namespaces
+ * `v8`: structured connection metadata added under ECS v8 compliant namespaces
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Refer to [ECS mapping](#plugins-inputs-elastic_agent-ecs_metadata) for detailed information.
+
+
+### `enrich` [plugins-inputs-elastic_agent-enrich]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+ * An [array](/reference/configuration-file-structure.md#array) can also be provided
+ * Configures which enrichments are applied to each event
+ * Default value is `[codec_metadata, source_metadata]` that may be extended in future versions of this plugin to include additional enrichments.
+ * Supported values are:
+
+ | Enrichment | Description |
+ | --- | --- |
+ | codec_metadata | Information about how the codec transformed a sequence of bytes into this Event, such as *which* codec was used. Also, if no codec is explicitly specified, *excluding* `codec_metadata` from `enrich` will disable `ecs_compatibility` for this plugin. |
+ | source_metadata | Information about the *source* of the event, such as the IP address of the inbound connection this input received the event from |
+ | ssl_peer_metadata | Detailed information about the *SSL peer* we received the event from, such as identity information from the SSL client certificate that was presented when establishing a connection to this input |
+ | all | *alias* to include *all* available enrichments (including additional enrichments introduced in future versions of this plugin) |
+ | none | *alias* to *exclude* all available enrichments. Note that, *explicitly* defining codec with this option will not disable the `ecs_compatibility`, instead it relies on pipeline or codec `ecs_compatibility` configuration. |
+
+
+**Example:**
+
+This configuration disables *all* enrichments:
+
+```
+input {
+ beats {
+ port => 5044
+ enrich => none
+ }
+}
+```
+
+Or, to explicitly enable *only* `source_metadata` and `ssl_peer_metadata` (disabling all others):
+
+```
+input {
+ beats {
+ port => 5044
+ enrich => [source_metadata, ssl_peer_metadata]
+ }
+}
+```
+
+
+### `event_loop_threads` [plugins-inputs-elastic_agent-event_loop_threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Defaults to 0.
+
+When set to `0`, the actual value used is `available_processors * 2`.
+
+This is an expert-level setting and generally should not need to be set. The Elastic Agent plugin is implemented on a non-blocking mechanism that requires a number of event loop and executor threads. The event loop threads are responsible for communicating with clients (accepting incoming connections, enqueuing/dequeuing tasks, and responding), while executor threads handle tasks. This configuration limits or increases the number of threads created for the event loop. See the [`executor_threads`](#plugins-inputs-elastic_agent-executor_threads) configuration if you need to set the executor thread count.
+
+
+### `executor_threads` [plugins-inputs-elastic_agent-executor_threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is equal to the number of CPU cores (1 executor thread per CPU core).
+
+The number of threads to be used to process incoming Elastic Agent requests. By default, the Elastic Agent input creates a number of threads equal to the number of CPU cores. These threads handle incoming connections, reading from established sockets, and executing most of the tasks related to network connection management. Parsing the Lumberjack protocol is offloaded to a dedicated thread pool.
+
+Generally you don’t need to touch this setting. In case you are sending very large events and observing "OutOfDirectMemory" exceptions, you may want to reduce this number to half or 1/4 of the CPU cores. This change reduces the number of threads decompressing batches of data into direct memory. However, this will only be a mitigating tweak, as the proper solution may require resizing your Logstash deployment, either by increasing number of Logstash nodes or increasing the JVM’s Direct Memory.
+
+
+### `host` [plugins-inputs-elastic_agent-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+
+### `include_codec_tag` [plugins-inputs-elastic_agent-include_codec_tag]
+
+::::{admonition} Deprecated in 6.5.0.
+:class: warning
+
+Replaced by [`enrich`](#plugins-inputs-elastic_agent-enrich)
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+
+### `port` [plugins-inputs-elastic_agent-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to listen on.
+
+
+### `ssl_certificate` [plugins-inputs-elastic_agent-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-elastic_agent-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificates against these authorities. You can define multiple files or paths. All the certificates will be read and added to the trust store. You need to configure the [`ssl_client_authentication`](#plugins-inputs-elastic_agent-ssl_client_authentication) to `optional` or `required` to enable the verification.
+
+
+### `ssl_cipher_suites` [plugins-inputs-elastic_agent-ssl_cipher_suites]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `['TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256', 'TLS_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priorities. This default list applies for OpenJDK 11.0.14 and higher. For older JDK versions, the default list includes only suites supported by that version. For example, the ChaCha20 family of ciphers is not supported in older versions.
+
+
+### `ssl_client_authentication` [plugins-inputs-elastic_agent-ssl_client_authentication]
+
+* Value can be any of: `none`, `optional`, `required`
+* Default value is `"none"`
+
+Controls the server’s behavior in regard to requesting a certificate from client connections: `required` forces a client to present a certificate, while `optional` requests a client certificate but the client is not required to present one. Defaults to `none`, which disables the client authentication.
+
+When mutual TLS is enabled (`required` or `optional`), the certificate presented by the client must be signed by trusted [`ssl_certificate_authorities`](#plugins-inputs-elastic_agent-ssl_certificate_authorities) (CAs). Please note that the server does not validate the client certificate CN (Common Name) or SAN (Subject Alternative Name).
+
+::::{note}
+This setting can be used only if [`ssl_certificate_authorities`](#plugins-inputs-elastic_agent-ssl_certificate_authorities) is set.
+::::
+
+
+
+### `ssl_enabled` [plugins-inputs-elastic_agent-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Events are by default sent in plain text. You can enable encryption by setting `ssl_enabled` to true and configuring the [`ssl_certificate`](#plugins-inputs-elastic_agent-ssl_certificate) and [`ssl_key`](#plugins-inputs-elastic_agent-ssl_key) options.
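+
+A minimal sketch of a TLS-enabled listener (the certificate and key paths are placeholders):
+
+```ruby
+input {
+  elastic_agent {
+    port => 5044
+    ssl_enabled => true
+    ssl_certificate => "/etc/logstash/agent.crt"       # placeholder path
+    ssl_key => "/etc/logstash/agent.pkcs8.key"         # placeholder path (PKCS#8 PEM)
+  }
+}
+```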
+
+
+### `ssl_handshake_timeout` [plugins-inputs-elastic_agent-ssl_handshake_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+
+### `ssl_key` [plugins-inputs-elastic_agent-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use. This key must be in the PKCS8 format and PEM encoded. You can use the [openssl pkcs8](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html) command to complete the conversion. For example, the command to convert a PEM encoded PKCS1 private key to a PEM encoded, non-encrypted PKCS8 key is:
+
+```sh
+openssl pkcs8 -inform PEM -in path/to/logstash.key -topk8 -nocrypt -outform PEM -out path/to/logstash.pkcs8.key
+```
+
+
+### `ssl_key_passphrase` [plugins-inputs-elastic_agent-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+### `ssl_supported_protocols` [plugins-inputs-elastic_agent-ssl_supported_protocols]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+
+## Elastic Agent Input Obsolete Configuration Options [plugins-inputs-elastic_agent-obsolete-options]
+
+::::{warning}
+As of version `7.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](#plugins-inputs-elastic_agent-ssl_cipher_suites) |
+| ssl | [`ssl_enabled`](#plugins-inputs-elastic_agent-ssl_enabled) |
+| ssl_peer_metadata | [`enrich`](#plugins-inputs-elastic_agent-enrich) |
+| ssl_verify_mode | [`ssl_client_authentication`](#plugins-inputs-elastic_agent-ssl_client_authentication) |
+| tls_max_version | [`ssl_supported_protocols`](#plugins-inputs-elastic_agent-ssl_supported_protocols) |
+| tls_min_version | [`ssl_supported_protocols`](#plugins-inputs-elastic_agent-ssl_supported_protocols) |
+
+
+## Common options [plugins-inputs-elastic_agent-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-elastic_agent-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-elastic_agent-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-elastic_agent-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-elastic_agent-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-elastic_agent-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-elastic_agent-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-elastic_agent-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-elastic_agent-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-elastic_agent-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-elastic_agent-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elastic_agent inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ elastic_agent {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-elastic_agent-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-elastic_agent-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-elastic_serverless_forwarder.md b/docs/reference/plugins-inputs-elastic_serverless_forwarder.md
new file mode 100644
index 000000000..1b9f3117e
--- /dev/null
+++ b/docs/reference/plugins-inputs-elastic_serverless_forwarder.md
@@ -0,0 +1,383 @@
+---
+navigation_title: "elastic_serverless_forwarder"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elastic_serverless_forwarder.html
+---
+
+# Elastic Serverless Forwarder input plugin [plugins-inputs-elastic_serverless_forwarder]
+
+
+* Plugin version: v2.0.0
+* Released on: 2024-12-23
+* [Changelog](https://github.com/logstash-plugins/logstash-input-elastic_serverless_forwarder/blob/v2.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-elastic_serverless_forwarder-index.md).
+
+## Getting help [_getting_help_14]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-elastic_serverless_forwarder). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_15]
+
+Using this input you can receive events from Elastic Serverless Forwarder over http(s) connections to the configured [`port`](#plugins-inputs-elastic_serverless_forwarder-port).
+
+### Minimum Configuration [plugins-inputs-elastic_serverless_forwarder-ext-field]
+
+**SSL Enabled**
+
+```ruby
+input {
+  elastic_serverless_forwarder {
+    port => 8080
+    ssl_certificate => "/path/to/logstash.crt"
+    ssl_key => "/path/to/logstash.key"
+  }
+}
+```
+
+**SSL Disabled**
+
+```ruby
+input {
+  elastic_serverless_forwarder {
+    port => 8080
+    ssl_enabled => false
+  }
+}
+```
+
+
+
+## Enrichment [plugins-inputs-elastic_serverless_forwarder-enrichment]
+
+This input provides *minimal enrichment* on events, and avoids including information about itself, the client from which it received the data, or about the original event as-decoded from the request.
+
+::::{note}
+Senders are advised to use care with respect to fields that are [reserved in Logstash](/reference/processing.md#reserved-fields). ESF sends the Logstash-required `@timestamp` field by default, but if this value is missing it will be populated with the current time.
+::::
+
+
+
+## Security [plugins-inputs-elastic_serverless_forwarder-security]
+
+This plugin has SSL on-by-default.
+
+At a minimum, you will need to either configure the plugin to present its identity, or disable SSL.
+
+Additionally, you may wish to authenticate clients using SSL client authentication, and/or authenticate requests using HTTP Basic authentication as described below.
+
+### SSL Identity [_ssl_identity]
+
+In order to establish SSL with a client, this input plugin will need to present an SSL certificate that the client trusts, and have access to the associated key. These are configurable with [`ssl_certificate`](#plugins-inputs-elastic_serverless_forwarder-ssl_certificate), [`ssl_key`](#plugins-inputs-elastic_serverless_forwarder-ssl_key), and optionally [`ssl_key_passphrase`](#plugins-inputs-elastic_serverless_forwarder-ssl_key_passphrase).
+
+
+### SSL Client Authentication [_ssl_client_authentication]
+
+By default, this plugin does not request certificates from clients during SSL negotiation.
+
+It can be configured to either request or require client certificates using [`ssl_client_authentication`](#plugins-inputs-elastic_serverless_forwarder-ssl_client_authentication), which often also requires configuring it with a list of [`ssl_certificate_authorities`](#plugins-inputs-elastic_serverless_forwarder-ssl_certificate_authorities) to trust. When validating a certificate that is presented, [`ssl_verification_mode`](#plugins-inputs-elastic_serverless_forwarder-ssl_verification_mode) controls how certificates are verified.
+
+::::{note}
+ESF does not currently support *presenting* client certificates, so requesting or requiring clients to present identity is only useful when combined with an SSL-terminating proxy.
+::::
+
+
+
+### SSL Advanced Configuration [_ssl_advanced_configuration]
+
+This plugin exposes several advanced SSL configurations:
+
+* [`ssl_cipher_suites`](#plugins-inputs-elastic_serverless_forwarder-ssl_cipher_suites)
+* [`ssl_supported_protocols`](#plugins-inputs-elastic_serverless_forwarder-ssl_supported_protocols)
+* [`ssl_handshake_timeout`](#plugins-inputs-elastic_serverless_forwarder-ssl_handshake_timeout)
+
+
+### HTTP Basic Authentication [_http_basic_authentication]
+
+You can configure this plugin to authenticate requests using HTTP Basic authentication by configuring [`auth_basic_username`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_username) and [`auth_basic_password`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_password).
+
+::::{note}
+Basic Authentication is not a substitute for SSL, as it provides neither secrecy nor security on its own. When used with SSL disabled, HTTP Basic credentials are transmitted in effectively clear-text and can be easily recovered by an adversary.
+::::
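+
+A hedged sketch that combines SSL with HTTP Basic authentication; the credentials and paths are placeholders, and the password is read from an environment variable or the Logstash keystore:
+
+```ruby
+input {
+  elastic_serverless_forwarder {
+    port => 8080
+    ssl_certificate => "/path/to/logstash.crt"
+    ssl_key => "/path/to/logstash.key"
+    auth_basic_username => "esf_user"
+    auth_basic_password => "${ESF_PASSWORD}"
+  }
+}
+```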
+
+
+
+
+## Using Elastic Serverless Forwarder with the Elasticsearch output [plugins-inputs-elastic_serverless_forwarder-es-output-notes]
+
+Here are some tips for configuring the {{esf}} input to work with the elasticsearch output:
+
+* Set the `document_id` in the output configuration when you use the {{esf}} input with an [Elasticsearch output plugin](/reference/plugins-outputs-elasticsearch.md).
+
+ ```ruby
+ output {
+ elasticsearch {
+ ...
+ document_id => "%{[@metadata][_id]}"
+ ...
+ }
+ }
+ ```
+
+* Starting from version 1.10.0 of Elastic Serverless Forwarder, configuring `document_id` as shown in the example above is sufficient (the `_id` field is no longer available, and instead, Logstash now receives the `@metadata._id` field).
+* For Elastic Serverless Forwarder v1.9.0 and earlier, rename the field `_id` to `@metadata._id` with a filter:
+
+ ```ruby
+ filter {
+ # support ESF < 1.10
+ if [_id] and ![@metadata][_id] {
+ mutate { rename => { "_id" => "[@metadata][_id]" } }
+ }
+ }
+ ```
+
+
+
+## Elastic Serverless Forwarder Input Configuration Options [plugins-inputs-elastic_serverless_forwarder-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-elastic_serverless_forwarder-common-options) described later.
+
+::::{note}
+As of version `2.0.0` of this plugin, a previously deprecated SSL setting has been removed. Please check out [Elastic Serverless Forwarder Input Obsolete Configuration Options](#plugins-inputs-elastic_serverless_forwarder-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`auth_basic_username`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_username) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`auth_basic_password`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`host`](#plugins-inputs-elastic_serverless_forwarder-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-elastic_serverless_forwarder-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-inputs-elastic_serverless_forwarder-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-elastic_serverless_forwarder-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_client_authentication`](#plugins-inputs-elastic_serverless_forwarder-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_cipher_suites`](#plugins-inputs-elastic_serverless_forwarder-ssl_cipher_suites) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_enabled`](#plugins-inputs-elastic_serverless_forwarder-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_handshake_timeout`](#plugins-inputs-elastic_serverless_forwarder-ssl_handshake_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_key`](#plugins-inputs-elastic_serverless_forwarder-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-elastic_serverless_forwarder-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-elastic_serverless_forwarder-ssl_supported_protocols) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_verification_mode`](#plugins-inputs-elastic_serverless_forwarder-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["certificate"]` | No |
+
+Also see [Common options](#plugins-inputs-elastic_serverless_forwarder-common-options) for a list of options supported by all input plugins.
+
+
+
+### `auth_basic_password` [plugins-inputs-elastic_serverless_forwarder-auth_basic_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password for HTTP basic authorization. Requires [`auth_basic_username`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_username).
+
+
+### `auth_basic_username` [plugins-inputs-elastic_serverless_forwarder-auth_basic_username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username for basic authorization. Requires [`auth_basic_password`](#plugins-inputs-elastic_serverless_forwarder-auth_basic_password).
+
+
+### `host` [plugins-inputs-elastic_serverless_forwarder-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"` (all available interfaces)
+
+The host or IP address to bind to.
+
+
+### `port` [plugins-inputs-elastic_serverless_forwarder-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8080`
+
+The TCP port to bind to.
+
+
+### `ssl_certificate` [plugins-inputs-elastic_serverless_forwarder-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use. This certificate *MUST* be PEM-formatted, and *MAY* contain a chain of certificates starting with the certificate that identifies itself, followed by zero or more ordered intermediates optionally ending with the root signing authority. Providing a complete chain allows clients to trust our certificate if their configuration allows them to trust one of our intermediates.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-elastic_serverless_forwarder-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificates against these authorities. You can define multiple files or paths. All the certificates will be read and added to the trust store.
+
+If you wish to perform client authentication, you need to set `ssl_client_authentication` to `optional` or `required`.
+
+
+### `ssl_cipher_suites` [plugins-inputs-elastic_serverless_forwarder-ssl_cipher_suites]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `['TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256', 'TLS_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priorities.
+
+This is an advanced SSL configuration.
+
+This default list applies for OpenJDK 11.0.14 and higher. For older JDK versions, the default list includes only suites supported by that version. For example, the ChaCha20 family of ciphers is not supported in older versions.
+
+
+### `ssl_client_authentication` [plugins-inputs-elastic_serverless_forwarder-ssl_client_authentication]
+
+* Value can be any of:
+
+ * `none`: do not request client’s certificate, or validate certificates that are presented
+ * `optional`: request client’s certificate, and validate it against our trust authorities *if-and-only-if* it is presented
+ * `required`: require a valid certificate from the client that is signed by a trusted certificate authority
+
+* Default value is `"none"`
+
+By default the server doesn’t do any client authentication. This means that connections from clients are *private* when SSL is enabled, but that this input will allow SSL connections from *any* client. If you wish to configure this plugin to reject connections from untrusted hosts, you will need to configure this plugin to authenticate clients, and may also need to configure it with a list of `ssl_certificate_authorities`.
+
+
+### `ssl_enabled` [plugins-inputs-elastic_serverless_forwarder-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Events are, by default, sent over SSL, which requires configuring this plugin to present an identity certificate using [`ssl_certificate`](#plugins-inputs-elastic_serverless_forwarder-ssl_certificate) and key using [`ssl_key`](#plugins-inputs-elastic_serverless_forwarder-ssl_key).
+
+You can disable SSL with `ssl_enabled => false`.
+
+
+### `ssl_handshake_timeout` [plugins-inputs-elastic_serverless_forwarder-ssl_handshake_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+This is an advanced SSL configuration.
+
+
+### `ssl_key` [plugins-inputs-elastic_serverless_forwarder-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use.
+
+::::{note}
+This key needs to be in the PKCS8 format. You can convert it with the [openssl pkcs8](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html) command.
+::::
+
+
+
+### `ssl_key_passphrase` [plugins-inputs-elastic_serverless_forwarder-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+### `ssl_supported_protocols` [plugins-inputs-elastic_serverless_forwarder-ssl_supported_protocols]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+This is an advanced SSL configuration.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_verification_mode` [plugins-inputs-elastic_serverless_forwarder-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is only one currently-supported mode:
+
+ * `certificate`: verifies that a certificate provided by the client is signed by a trusted authority (CA), is within its valid date range, and that the client has possession of the associated key, but does *not* perform hostname validation.
+
+
+* The default value is `certificate`.
+
+When [`ssl_client_authentication`](#plugins-inputs-elastic_serverless_forwarder-ssl_client_authentication) causes a client to present a certificate, this setting controls how that certificate is verified.
+
+::::{note}
+Client identity is not typically validated using SSL because the receiving server only has access to the client’s outbound IP address, which is not always constant and is frequently not represented in the certificate’s subject or subjectAltNames extensions. For more information, see [RFC2818 § 3.2 (HTTP over TLS — Client Identity)](https://www.rfc-editor.org/rfc/rfc2818#section-3.2).
+::::
+
+
+
+
+## Elastic Serverless Forwarder Input Obsolete Configuration Options [plugins-inputs-elastic_serverless_forwarder-obsolete-options]
+
+::::{warning}
+As of version `2.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl | [`ssl_enabled`](#plugins-inputs-elastic_serverless_forwarder-ssl_enabled) |
+
+
+## Common options [plugins-inputs-elastic_serverless_forwarder-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-elastic_serverless_forwarder-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`enable_metric`](#plugins-inputs-elastic_serverless_forwarder-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-elastic_serverless_forwarder-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-elastic_serverless_forwarder-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-elastic_serverless_forwarder-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-elastic_serverless_forwarder-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `enable_metric` [plugins-inputs-elastic_serverless_forwarder-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-elastic_serverless_forwarder-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elastic_serverless_forwarder inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ elastic_serverless_forwarder {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-elastic_serverless_forwarder-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-elastic_serverless_forwarder-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-elasticsearch.md b/docs/reference/plugins-inputs-elasticsearch.md
new file mode 100644
index 000000000..9561fcd13
--- /dev/null
+++ b/docs/reference/plugins-inputs-elasticsearch.md
@@ -0,0 +1,683 @@
+---
+navigation_title: "elasticsearch"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elasticsearch.html
+---
+
+# Elasticsearch input plugin [plugins-inputs-elasticsearch]
+
+
+* Plugin version: v5.0.0
+* Released on: 2024-12-18
+* [Changelog](https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v5.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-elasticsearch-index.md).
+
+## Getting help [_getting_help_15]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-elasticsearch). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_16]
+
+Read from an Elasticsearch cluster, based on search query results. This is useful for replaying test logs, reindexing, etc. You can periodically schedule ingestion using a cron syntax (see `schedule` setting) or run the query one time to load data into Logstash.
+
+Example:
+
+```ruby
+ input {
+ # Read all documents from Elasticsearch matching the given query
+ elasticsearch {
+ hosts => "localhost"
+ query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
+ }
+ }
+```
+
+This would create an Elasticsearch query with the following format:
+
+```json
+ curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
+ "query": {
+ "match": {
+ "statuscode": 200
+ }
+ },
+ "sort": [ "_doc" ]
+ }'
+```
+
+
+## Scheduling [_scheduling]
+
+Input from this plugin can be scheduled to run periodically according to a specific schedule. This scheduling syntax is powered by [rufus-scheduler](https://github.com/jmettraux/rufus-scheduler). The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+| | |
+| --- | --- |
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. |
+| `0 * * * *` | will execute on the 0th minute of every hour every day. |
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. |
+
+Further documentation describing this syntax can be found [here](https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings).
+
+
+## Authentication [plugins-inputs-elasticsearch-auth]
+
+Authentication to a secure Elasticsearch cluster is possible using *one* of the following options:
+
+* [`user`](#plugins-inputs-elasticsearch-user) AND [`password`](#plugins-inputs-elasticsearch-password)
+* [`cloud_auth`](#plugins-inputs-elasticsearch-cloud_auth)
+* [`api_key`](#plugins-inputs-elasticsearch-api_key)
+
+
+## Authorization [plugins-inputs-elasticsearch-autz]
+
+Authorization to a secure Elasticsearch cluster requires `read` permission at index level and `monitoring` permissions at cluster level. The `monitoring` permission at cluster level is necessary to perform periodic connectivity checks.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-elasticsearch-ecs]
+
+When ECS compatibility is disabled, `docinfo_target` uses the `"@metadata"` field as a default. With ECS enabled, the plugin uses `"[@metadata][input][elasticsearch]"` as the default target for placing document information.
+
+The plugin logs a warning when ECS is enabled and `target` isn’t set.
+
+::::{tip}
+Set the `target` option to avoid potential schema conflicts.
+::::
+
+
+
+## Elasticsearch Input configuration options [plugins-inputs-elasticsearch-options]
+
+This plugin supports these configuration options plus the [Common options](#plugins-inputs-elasticsearch-common-options) described later.
+
+::::{note}
+As of version `5.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [Elasticsearch Input Obsolete Configuration Options](#plugins-inputs-elasticsearch-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-inputs-elasticsearch-api_key) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ca_trusted_fingerprint`](#plugins-inputs-elasticsearch-ca_trusted_fingerprint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`cloud_auth`](#plugins-inputs-elasticsearch-cloud_auth) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`cloud_id`](#plugins-inputs-elasticsearch-cloud_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`connect_timeout_seconds`](#plugins-inputs-elasticsearch-connect_timeout_seconds) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`custom_headers`](#plugins-inputs-elasticsearch-custom_headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`docinfo`](#plugins-inputs-elasticsearch-docinfo) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`docinfo_fields`](#plugins-inputs-elasticsearch-docinfo_fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`docinfo_target`](#plugins-inputs-elasticsearch-docinfo_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-inputs-elasticsearch-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`hosts`](#plugins-inputs-elasticsearch-hosts) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`index`](#plugins-inputs-elasticsearch-index) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-elasticsearch-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`proxy`](#plugins-inputs-elasticsearch-proxy) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`query`](#plugins-inputs-elasticsearch-query) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`response_type`](#plugins-inputs-elasticsearch-response_type) | [string](/reference/configuration-file-structure.md#string), one of `["hits","aggregations"]` | No |
+| [`request_timeout_seconds`](#plugins-inputs-elasticsearch-request_timeout_seconds) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`schedule`](#plugins-inputs-elasticsearch-schedule) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`scroll`](#plugins-inputs-elasticsearch-scroll) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`search_api`](#plugins-inputs-elasticsearch-search_api) | [string](/reference/configuration-file-structure.md#string), one of `["auto", "search_after", "scroll"]` | No |
+| [`size`](#plugins-inputs-elasticsearch-size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`slices`](#plugins-inputs-elasticsearch-slices) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-inputs-elasticsearch-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-elasticsearch-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-elasticsearch-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-inputs-elasticsearch-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-inputs-elasticsearch-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-inputs-elasticsearch-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-inputs-elasticsearch-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-inputs-elasticsearch-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-elasticsearch-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-inputs-elasticsearch-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-inputs-elasticsearch-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-inputs-elasticsearch-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-inputs-elasticsearch-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`socket_timeout_seconds`](#plugins-inputs-elasticsearch-socket_timeout_seconds) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`target`](#plugins-inputs-elasticsearch-target) | [field reference](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html) | No |
+| [`retries`](#plugins-inputs-elasticsearch-retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-inputs-elasticsearch-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-elasticsearch-common-options) for a list of options supported by all input plugins.
+
+
+
+### `api_key` [plugins-inputs-elasticsearch-api_key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Authenticate using Elasticsearch API key. Note that this option also requires enabling the [`ssl_enabled`](#plugins-inputs-elasticsearch-ssl_enabled) option.
+
+Format is `id:api_key` where `id` and `api_key` are as returned by the Elasticsearch [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
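+
+For example, a sketch (the host and key values are placeholders) that authenticates with an API key over TLS:
+
+```ruby
+input {
+  elasticsearch {
+    hosts => ["https://es.example.com:9243"]
+    ssl_enabled => true
+    api_key => "my_api_key_id:my_api_key_secret"  # placeholder id:api_key value
+  }
+}
+```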
+
+
+### `ca_trusted_fingerprint` [plugins-inputs-elasticsearch-ca_trusted_fingerprint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string), and must contain exactly 64 hexadecimal characters.
+* There is no default value for this setting.
+* Use of this option *requires* Logstash 8.3+
+
+The SHA-256 fingerprint of an SSL Certificate Authority to trust, such as the autogenerated self-signed CA for an Elasticsearch cluster.
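+
+If you already have the CA certificate file on disk, one way to obtain its fingerprint is the `openssl x509` command (the path is a placeholder); because this setting expects exactly 64 hexadecimal characters, strip the colons from the command’s output:
+
+```sh
+# Print the SHA-256 fingerprint of a CA certificate
+openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt
+```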
+
+
+### `cloud_auth` [plugins-inputs-elasticsearch-cloud_auth]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Cloud authentication string (`<username>:<password>` format) is an alternative for the `user`/`password` pair.
+
+For more info, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
+
+
+### `cloud_id` [plugins-inputs-elasticsearch-cloud_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Cloud ID, from the Elastic Cloud web console. If set `hosts` should not be used.
+
+For more info, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
+
+
+### `connect_timeout_seconds` [plugins-inputs-elasticsearch-connect_timeout_seconds]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+The maximum amount of time, in seconds, to wait while establishing a connection to Elasticsearch. Connect timeouts tend to occur when Elasticsearch or an intermediate proxy is overloaded with requests and has exhausted its connection pool.
+
+
+### `custom_headers` [plugins-inputs-elasticsearch-custom_headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is empty
+
+Pass a set of key-value pairs as the headers sent in each request to an Elasticsearch node. The headers will be used for any kind of request. These custom headers will override any headers previously set by the plugin, such as the User Agent or Authorization headers.
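+
+For example, a sketch with a hypothetical header name and value:
+
+```ruby
+input {
+  elasticsearch {
+    hosts => "localhost"
+    custom_headers => {
+      "X-Request-Source" => "logstash"  # hypothetical header
+    }
+  }
+}
+```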
+
+
+### `docinfo` [plugins-inputs-elasticsearch-docinfo]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If set, include Elasticsearch document information such as index, type, and the id in the event.
+
+Note that, with regard to metadata, if you’re ingesting documents with the intent to re-index (or update) them, the `action` option in the elasticsearch output needs to know how to handle them. It can be dynamically assigned from a field added to the metadata.
+
+Example
+
+```ruby
+ input {
+ elasticsearch {
+ hosts => "es.production.mysite.org"
+ index => "mydata-2018.09.*"
+ query => '{ "query": { "query_string": { "query": "*" } } }'
+ size => 500
+ scroll => "5m"
+ docinfo => true
+ docinfo_target => "[@metadata][doc]"
+ }
+ }
+ output {
+ elasticsearch {
+ index => "copy-of-production.%{[@metadata][doc][_index]}"
+ document_type => "%{[@metadata][doc][_type]}"
+ document_id => "%{[@metadata][doc][_id]}"
+ }
+ }
+```
+
+If set, you can use metadata information in the [`add_field`](#plugins-inputs-elasticsearch-add_field) common option.
+
+Example
+
+```ruby
+ input {
+ elasticsearch {
+ docinfo => true
+ docinfo_target => "[@metadata][doc]"
+ add_field => {
+ identifier => "%{[@metadata][doc][_index]}:%{[@metadata][doc][_type]}:%{[@metadata][doc][_id]}"
+ }
+ }
+ }
+```
+
+
+### `docinfo_fields` [plugins-inputs-elasticsearch-docinfo_fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["_index", "_type", "_id"]`
+
+If document metadata storage is requested by enabling the `docinfo` option, this option lists the metadata fields to save in the current event. See [Meta-Fields](elasticsearch://reference/elasticsearch/mapping-reference/document-metadata-fields.md) in the Elasticsearch documentation for more information.
+
+
+### `docinfo_target` [plugins-inputs-elasticsearch-docinfo_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-elasticsearch-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"@metadata"`
+ * ECS Compatibility enabled: `"[@metadata][input][elasticsearch]"`
+
+
+If document metadata storage is requested by enabling the `docinfo` option, this option names the field under which to store the metadata fields as subfields.
+
+
+### `ecs_compatibility` [plugins-inputs-elasticsearch-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+  * `disabled`: does not use ECS-compatible field names or defaults
+ * `v1`,`v8`: Elastic Common Schema compliant behavior
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `hosts` [plugins-inputs-elasticsearch-hosts]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+List of one or more Elasticsearch hosts to use for querying. Each host can be either IP, HOST, IP:port, or HOST:port. The port defaults to 9200.
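+
+For example, a sketch showing several accepted host forms (all values are placeholders); a full URL with an `https` scheme also works and implies SSL, as described under [`ssl_enabled`](#plugins-inputs-elasticsearch-ssl_enabled):
+
+```ruby
+input {
+  elasticsearch {
+    hosts => ["10.0.0.1", "es.example.com", "es.example.com:9243", "https://es.example.com:9243"]
+  }
+}
+```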
+
+
+### `index` [plugins-inputs-elasticsearch-index]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash-*"`
+
+The index or alias to search. Check out [Multi Indices documentation](elasticsearch://reference/elasticsearch/rest-apis/api-conventions.md#api-multi-index) in the Elasticsearch documentation for info on referencing multiple indices.
+
+
+### `password` [plugins-inputs-elasticsearch-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The password to use together with the username in the `user` option when authenticating to the Elasticsearch server. If set to an empty string authentication will be disabled.
+
+
+### `proxy` [plugins-inputs-elasticsearch-proxy]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+* There is no default value for this setting.
+
+Set the address of a forward HTTP proxy. An empty string is treated as if the proxy was not set, which is useful when using environment variables, e.g. `proxy => '${LS_PROXY:}'`.
+
+
+### `query` [plugins-inputs-elasticsearch-query]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `'{ "sort": [ "_doc" ] }'`
+
+The query to be executed. Read the [Elasticsearch query DSL documentation](elasticsearch://reference/query-languages/querydsl.md) for more information.
+
+When [`search_api`](#plugins-inputs-elasticsearch-search_api) resolves to `search_after` and the query does not specify `sort`, the default sort `'{ "sort": { "_shard_doc": "asc" } }'` will be added to the query. Please refer to the [Elasticsearch search_after](elasticsearch://reference/elasticsearch/rest-apis/paginate-search-results.md#search-after) parameter to know more.
+
+
+### `response_type` [plugins-inputs-elasticsearch-response_type]
+
+* Value can be any of: `hits`, `aggregations`
+* Default value is `hits`
+
+Which part of the result to transform into Logstash events when processing the response from the query. The default `hits` will generate one event per returned document (i.e. "hit"). When set to `aggregations`, a single Logstash event will be generated with the contents of the `aggregations` object of the query’s response. In this case the `hits` object will be ignored. The `size` parameter will always be set to 0, regardless of the default or user-defined value set in this plugin.
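+
+For example, a sketch (the field name is a placeholder) that emits a single event containing the aggregation results instead of one event per hit:
+
+```ruby
+input {
+  elasticsearch {
+    hosts => "localhost"
+    query => '{ "aggs": { "status_codes": { "terms": { "field": "statuscode" } } } }'
+    response_type => "aggregations"
+  }
+}
+```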
+
+
+### `request_timeout_seconds` [plugins-inputs-elasticsearch-request_timeout_seconds]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+The maximum amount of time, in seconds, for a single request to Elasticsearch. Request timeouts tend to occur when an individual page of data is very large, such as when it contains large-payload documents and/or the [`size`](#plugins-inputs-elasticsearch-size) has been specified as a large value.
+
+
+### `retries` [plugins-inputs-elasticsearch-retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The number of times to re-run the query after the first failure. If the query fails after all retries, it logs an error message. The default is 0 (no retry). This value should be equal to or greater than zero.
+
+::::{note}
+Partial failures - such as errors in a subset of all slices - can result in the entire query being retried, which can lead to duplication of data. Avoiding this would require Logstash to store the entire result set of a query in memory which is often not possible.
+::::
+
+
+
+### `schedule` [plugins-inputs-elasticsearch-schedule]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Schedule of when to periodically run the statement, in Cron format, for example: `"* * * * *"` (execute the query every minute, on the minute).
+
+There is no schedule by default. If no schedule is given, then the statement is run exactly once.
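+
+For example, a sketch (the query is a placeholder) that re-runs the statement every five minutes:
+
+```ruby
+input {
+  elasticsearch {
+    hosts => "localhost"
+    query => '{ "query": { "range": { "@timestamp": { "gte": "now-5m" } } } }'
+    # rufus-scheduler cron syntax: every 5 minutes
+    schedule => "*/5 * * * *"
+  }
+}
+```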
+
+
+### `scroll` [plugins-inputs-elasticsearch-scroll]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"1m"`
+
+This parameter controls the keepalive time of the scrolling request and initiates the scrolling process. The timeout applies per round trip (i.e. from one scroll request to the next).
+
+
+### `search_api` [plugins-inputs-elasticsearch-search_api]
+
+* Value can be any of: `auto`, `search_after`, `scroll`
+* Default value is `auto`
+
+With `auto`, the plugin uses the `search_after` parameter for Elasticsearch version `8.0.0` or higher; otherwise, the `scroll` API is used.
+
+`search_after` uses [point in time](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-open-point-in-time) and sort values to search. The query requires at least one `sort` field, as described in the [`query`](#plugins-inputs-elasticsearch-query) parameter.
+
+`scroll` uses the [scroll](elasticsearch://reference/elasticsearch/rest-apis/paginate-search-results.md#scroll-search-results) API to search, which is no longer recommended.
+
+
+### `size` [plugins-inputs-elasticsearch-size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1000`
+
+This allows you to set the maximum number of hits returned per scroll.
+
+
+### `slices` [plugins-inputs-elasticsearch-slices]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value.
+* Sensible values range from 2 to about 8.
+
+In some cases, it is possible to improve overall throughput by consuming multiple distinct slices of a query simultaneously using [sliced scrolls](elasticsearch://reference/elasticsearch/rest-apis/paginate-search-results.md#slice-scroll), especially if the pipeline is spending significant time waiting on Elasticsearch to provide results.
+
+If set, the `slices` parameter tells the plugin how many slices to divide the work into, and will produce events from the slices in parallel until all of them are done scrolling.
+
+::::{note}
+The Elasticsearch manual indicates that there can be *negative* performance implications to both the query and the Elasticsearch cluster when a scrolling query uses more slices than shards in the index.
+::::
+
+
+If the `slices` parameter is left unset, the plugin will *not* inject slice instructions into the query.
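+
+For example, a sketch that consumes four slices in parallel (the values are placeholders; keep `slices` at or below the number of shards in the index):
+
+```ruby
+input {
+  elasticsearch {
+    hosts => "localhost"
+    index => "big-index-*"
+    slices => 4
+  }
+}
+```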
+
+
+### `ssl_certificate` [plugins-inputs-elasticsearch-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-inputs-elasticsearch-ssl_key) is set.
+::::
+
+
+
+### `ssl_certificate_authorities` [plugins-inputs-elasticsearch-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The `.cer` or `.pem` files to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and [`ssl_truststore_path`](#plugins-inputs-elasticsearch-ssl_truststore_path) at the same time.
+::::
+
+
+
+### `ssl_cipher_suites` [plugins-inputs-elasticsearch-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-inputs-elasticsearch-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme is specified in the URLs listed in [`hosts`](#plugins-inputs-elasticsearch-hosts) or extracted from the [`cloud_id`](#plugins-inputs-elasticsearch-cloud_id). If no explicit protocol is specified plain HTTP will be used.
+
+When not explicitly set, SSL will be automatically enabled if any of the specified hosts use HTTPS.
+
+
+### `ssl_key` [plugins-inputs-elasticsearch-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+OpenSSL-style RSA private key that corresponds to the [`ssl_certificate`](#plugins-inputs-elasticsearch-ssl_certificate).
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-inputs-elasticsearch-ssl_certificate) is set.
+::::
+
+
+
+### `ssl_keystore_password` [plugins-inputs-elasticsearch-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password
+
+
+### `ssl_keystore_path` [plugins-inputs-elasticsearch-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`
+
+::::{note}
+You cannot use this setting and [`ssl_certificate`](#plugins-inputs-elasticsearch-ssl_certificate) at the same time.
+::::
+
+
+
+### `ssl_keystore_type` [plugins-inputs-elasticsearch-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-inputs-elasticsearch-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-inputs-elasticsearch-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password.
+
+
+### `ssl_truststore_path` [plugins-inputs-elasticsearch-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either .jks or .p12.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-inputs-elasticsearch-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-inputs-elasticsearch-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-inputs-elasticsearch-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `socket_timeout_seconds` [plugins-inputs-elasticsearch-socket_timeout_seconds]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+The maximum amount of time, in seconds, to wait on an incomplete response from Elasticsearch while no additional data has been appended. Socket timeouts usually occur while waiting for the first byte of a response, such as when executing a particularly complex query.
+
+
+### `target` [plugins-inputs-elasticsearch-target]
+
+* Value type is [field reference](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html)
+* There is no default value for this setting.
+
+Without a `target`, events are created from each hit’s `_source` at the root level. When the `target` is set to a field reference, the `_source` of the hit is placed in the target field instead.
+
+This option can be useful to avoid populating unknown fields when a downstream schema such as ECS is enforced. It is also possible to target an entry in the event’s metadata, which will be available during event processing but not exported to your outputs (e.g., `target => "[@metadata][_source]"`).
+
+
+### `user` [plugins-inputs-elasticsearch-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The username to use together with the password in the `password` option when authenticating to the Elasticsearch server. If set to an empty string authentication will be disabled.
+
+
+
+## Elasticsearch Input Obsolete Configuration Options [plugins-inputs-elasticsearch-obsolete-options]
+
+::::{warning}
+As of version `5.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if the configuration contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| ca_file | [`ssl_certificate_authorities`](#plugins-inputs-elasticsearch-ssl_certificate_authorities) |
+| ssl | [`ssl_enabled`](#plugins-inputs-elasticsearch-ssl_enabled) |
+| ssl_certificate_verification | [`ssl_verification_mode`](#plugins-inputs-elasticsearch-ssl_verification_mode) |
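+
+For example, a sketch of how a configuration using the obsolete options maps to the replacements (the certificate path is illustrative):
+
+```ruby
+input {
+  elasticsearch {
+    # Before (fails to start as of plugin 5.0.0):
+    #   ssl                          => true
+    #   ca_file                      => "/path/to/ca.crt"
+    #   ssl_certificate_verification => false
+    # After:
+    ssl_enabled                 => true
+    ssl_certificate_authorities => ["/path/to/ca.crt"]
+    ssl_verification_mode       => "none"
+  }
+}
+```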
+
+
+## Common options [plugins-inputs-elasticsearch-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-elasticsearch-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-elasticsearch-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-elasticsearch-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-elasticsearch-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-elasticsearch-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-elasticsearch-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-elasticsearch-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-elasticsearch-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-elasticsearch-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-elasticsearch-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 elasticsearch inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ elasticsearch {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-elasticsearch-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-elasticsearch-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-exec.md b/docs/reference/plugins-inputs-exec.md
new file mode 100644
index 000000000..e43ebc5e4
--- /dev/null
+++ b/docs/reference/plugins-inputs-exec.md
@@ -0,0 +1,252 @@
+---
+navigation_title: "exec"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-exec.html
+---
+
+# Exec input plugin [plugins-inputs-exec]
+
+
+* Plugin version: v3.6.0
+* Released on: 2022-06-15
+* [Changelog](https://github.com/logstash-plugins/logstash-input-exec/blob/v3.6.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-exec-index.md).
+
+## Getting help [_getting_help_16]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-exec). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_17]
+
+Periodically run a shell command and capture the whole output as an event.
+
+::::{note}
+* The `command` field of this event will be the command run.
+* The `message` field of this event will be the entire stdout of the command.
+
+::::
+
+
+::::{important}
+The exec input ultimately uses `fork` to spawn a child process. Using fork duplicates the parent process address space (in our case, **logstash and the JVM**); this is mitigated with OS copy-on-write but ultimately you can end up allocating lots of memory just for a "simple" executable. If the exec input fails with errors like `ENOMEM: Cannot allocate memory` it is an indication that there is not enough non-JVM-heap physical memory to perform the fork.
+::::
+
+
+Example:
+
+```ruby
+input {
+ exec {
+ command => "echo 'hi!'"
+ interval => 30
+ }
+}
+```
+
+This will execute the `echo` command every 30 seconds.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-exec-ecs]
+
+This plugin adds metadata about the event’s source, and can be configured to do so in an [ECS-compatible](ecs://reference/index.md) way with [`ecs_compatibility`](#plugins-inputs-exec-ecs_compatibility). This metadata is added after the event has been decoded by the appropriate codec, and will not overwrite existing values.
+
+| ECS Disabled | ECS v1 , v8 | Description |
+| --- | --- | --- |
+| `host` | `[host][name]` | The name of the {{ls}} host that processed the event |
+| `command` | `[process][command_line]` | The command run by the plugin |
+| `[@metadata][exit_status]` | `[process][exit_code]` | The exit code of the process |
+| — | `[@metadata][input][exec][process][elapsed_time]` | The elapsed time the command took to run in nanoseconds |
+| `[@metadata][duration]` | — | Command duration in seconds as a floating point number (deprecated) |
+
+
+## Exec Input configuration options [plugins-inputs-exec-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-exec-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`command`](#plugins-inputs-exec-command) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`ecs_compatibility`](#plugins-inputs-exec-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`interval`](#plugins-inputs-exec-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`schedule`](#plugins-inputs-exec-schedule) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-exec-common-options) for a list of options supported by all input plugins.
+
+
+
+### `command` [plugins-inputs-exec-command]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Command to run. For example, `uptime`.
+
+
+### `ecs_compatibility` [plugins-inputs-exec-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: uses backwards compatible field names, such as `[host]`
+ * `v1`, `v8`: uses fields that are compatible with ECS, such as `[host][name]`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Compatibility with the Elastic Common Schema (ECS)](#plugins-inputs-exec-ecs) for detailed information.
+
+**Sample output: ECS enabled**
+
+```ruby
+{
+ "message" => "hi!\n",
+ "process" => {
+ "command_line" => "echo 'hi!'",
+ "exit_code" => 0
+ },
+ "host" => {
+ "name" => "deus-ex-machina"
+ },
+
+ "@metadata" => {
+ "input" => {
+ "exec" => {
+ "process" => {
+ "elapsed_time"=>3042
+ }
+ }
+ }
+ }
+}
+```
+
+**Sample output: ECS disabled**
+
+```ruby
+{
+ "message" => "hi!\n",
+ "command" => "echo 'hi!'",
+ "host" => "deus-ex-machina",
+
+ "@metadata" => {
+ "exit_status" => 0,
+ "duration" => 0.004388
+ }
+}
+```
+
+
+### `interval` [plugins-inputs-exec-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Interval to run the command. Value is in seconds.
+
+Either the `interval` or `schedule` option must be defined.
+
+
+### `schedule` [plugins-inputs-exec-schedule]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Schedule of when to periodically run command.
+
+This scheduling syntax is powered by [rufus-scheduler](https://github.com/jmettraux/rufus-scheduler). The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+| | |
+| --- | --- |
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. |
+| `0 * * * *` | will execute on the 0th minute of every hour every day. |
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. |
+
+Further documentation describing this syntax can be found [here](https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings).
+
+Either the `interval` or `schedule` option must be defined.
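+
+For example, a minimal sketch (the command is illustrative) that runs a command at the top of every hour using the cron-like syntax above:
+
+```ruby
+input {
+  exec {
+    command  => "uptime"
+    schedule => "0 * * * *"   # 0th minute of every hour
+  }
+}
+```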
+
+
+
+## Common options [plugins-inputs-exec-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-exec-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-exec-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-exec-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-exec-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-exec-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-exec-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-exec-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-exec-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-exec-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-exec-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 exec inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ exec {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-exec-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-exec-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-file.md b/docs/reference/plugins-inputs-file.md
new file mode 100644
index 000000000..7ae0abfdc
--- /dev/null
+++ b/docs/reference/plugins-inputs-file.md
@@ -0,0 +1,489 @@
+---
+navigation_title: "file"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-file.html
+---
+
+# File input plugin [plugins-inputs-file]
+
+
+* Plugin version: v4.4.6
+* Released on: 2023-12-13
+* [Changelog](https://github.com/logstash-plugins/logstash-input-file/blob/v4.4.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-file-index.md).
+
+## Getting help [_getting_help_17]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-file). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_18]
+
+Stream events from files, normally by tailing them in a manner similar to `tail -0F` but optionally reading them from the beginning.
+
+Normally, logging will add a newline to the end of each line written. By default, each event is assumed to be one line and a line is taken to be the text before a newline character. If you would like to join multiple log lines into one event, you’ll want to use the multiline codec. The plugin loops between discovering new files and processing each discovered file. Discovered files have a lifecycle: they start off in the "watched" or "ignored" state. Other states in the lifecycle are "active", "closed" and "unwatched".
+
+By default, a window of 4095 files is used to limit the number of file handles in use. The processing phase has a number of stages:
+
+* Checks whether "closed" or "ignored" files have changed in size since last time and if so puts them in the "watched" state.
+* Selects enough "watched" files to fill the available space in the window, these files are made "active".
+* The active files are opened and read, each file is read from the last known position to the end of current content (EOF) by default.
+
+In some cases it is useful to be able to control which files are read first, sorting, and whether files are read completely or banded/striped. Complete reading is **all of** file A then file B then file C and so on. Banded or striped reading is **some of** file A then file B then file C and so on looping around to file A again until all files are read. Banded reading is specified by changing [`file_chunk_count`](#plugins-inputs-file-file_chunk_count) and perhaps [`file_chunk_size`](#plugins-inputs-file-file_chunk_size). Banding and sorting may be useful if you want some events from all files to appear in Kibana as early as possible.
+
+The plugin has two modes of operation, Tail mode and Read mode.
+
+### Tail mode [_tail_mode]
+
+In this mode the plugin aims to track changing files and emit new content as it’s appended to each file. In this mode, files are seen as a never ending stream of content and EOF has no special significance. The plugin always assumes that there will be more content. When files are rotated, the smaller or zero size is detected, the current position is reset to zero and streaming continues. A delimiter must be seen before the accumulated characters can be emitted as a line.
+
+
+### Read mode [_read_mode]
+
+In this mode the plugin treats each file as if it is content complete, that is, a finite stream of lines and now EOF is significant. A last delimiter is not needed because EOF means that the accumulated characters can be emitted as a line. Further, EOF here means that the file can be closed and put in the "unwatched" state - this automatically frees up space in the active window. This mode also makes it possible to process compressed files as they are content complete. Read mode also allows for an action to take place after processing the file completely.
+
+In the past, attempts to simulate a Read mode while still assuming infinite streams were not ideal; the dedicated Read mode is an improvement.
+
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-file-ecs]
+
+This plugin adds metadata about event’s source, and can be configured to do so in an [ECS-compatible](ecs://reference/index.md) way with [`ecs_compatibility`](#plugins-inputs-file-ecs_compatibility). This metadata is added after the event has been decoded by the appropriate codec, and will never overwrite existing values.
+
+| ECS Disabled | ECS `v1`, `v8` | Description |
+| --- | --- | --- |
+| `host` | `[host][name]` | The name of the {{ls}} host that processed the event |
+| `path` | `[log][file][path]` | The full path to the log file from which the event originates |
+
+
+## Tracking of current position in watched files [_tracking_of_current_position_in_watched_files]
+
+The plugin keeps track of the current position in each file by recording it in a separate file named sincedb. This makes it possible to stop and restart Logstash and have it pick up where it left off without missing the lines that were added to the file while Logstash was stopped.
+
+By default, the sincedb file is placed in the data directory of Logstash with a filename based on the filename patterns being watched (i.e. the `path` option). Thus, changing the filename patterns will result in a new sincedb file being used and any existing current position state will be lost. If you change your patterns with any frequency it might make sense to explicitly choose a sincedb path with the `sincedb_path` option.
+
+A different `sincedb_path` must be used for each input. Using the same path will cause issues. The read checkpoints for each input must be stored in a different path so the information is not overwritten.
+
+Files are tracked via an identifier. This identifier is made up of the inode, major device number and minor device number. On Windows, a different identifier is taken from a `kernel32` API call.
+
+Sincedb records can now be expired meaning that read positions of older files will not be remembered after a certain time period. File systems may need to reuse inodes for new content. Ideally, we would not use the read position of old content, but we have no reliable way to detect that inode reuse has occurred. This is more relevant to Read mode where a great many files are tracked in the sincedb. Bear in mind though, if a record has expired, a previously seen file will be read again.
+
+Sincedb files are text files with four (< v5.0.0), five or six columns:
+
+1. The inode number (or equivalent).
+2. The major device number of the file system (or equivalent).
+3. The minor device number of the file system (or equivalent).
+4. The current byte offset within the file.
+5. The last active timestamp (a floating point number)
+6. The last known path that this record was matched to (for old sincedb records converted to the new format, this is blank).
+
+On non-Windows systems you can obtain the inode number of a file with e.g. `ls -li`.
+
+
+## Reading from remote network volumes [_reading_from_remote_network_volumes]
+
+The file input is not thoroughly tested on remote filesystems such as NFS, Samba, or s3fs-fuse; however, NFS is occasionally tested. The file size as given by the remote FS client is used to govern how much data to read at any given time to prevent reading into allocated but yet unfilled memory. As we use the device major and minor in the identifier to track "last read" positions of files, and on remount the device major and minor can change, the sincedb records may not match across remounts. Read mode might not be suitable for remote filesystems, as the file size at discovery on the client side may not be the same as the file size on the remote side due to latency in the remote-to-client copy process.
+
+
+## File rotation in Tail mode [_file_rotation_in_tail_mode]
+
+File rotation is detected and handled by this input, regardless of whether the file is rotated via a rename or a copy operation. To support programs that write to the rotated file for some time after the rotation has taken place, include both the original filename and the rotated filename (e.g. /var/log/syslog and /var/log/syslog.1) in the filename patterns to watch (the `path` option). For a rename, the inode will be detected as having moved from `/var/log/syslog` to `/var/log/syslog.1`, so the "state" is moved internally too: the old content will not be reread, but any new content on the renamed file will be read. For copy/truncate rotation, the content copied into a new file path, if discovered, will be treated as a new discovery and read from the beginning. The copied file paths should therefore not be in the filename patterns to watch (the `path` option). The truncation will be detected and the "last read" position updated to zero.
+
+
+## File Input Configuration Options [plugins-inputs-file-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-file-common-options) described later.
+
+::::{note}
+Duration settings can be specified in text form e.g. "250 ms", this string will be converted into decimal seconds. There are quite a few supported natural and abbreviated durations, see [string_duration](#plugins-inputs-file-string_duration) for the details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`check_archive_validity`](#plugins-inputs-file-check_archive_validity) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`close_older`](#plugins-inputs-file-close_older) | [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration) | No |
+| [`delimiter`](#plugins-inputs-file-delimiter) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`discover_interval`](#plugins-inputs-file-discover_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-file-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exclude`](#plugins-inputs-file-exclude) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`exit_after_read`](#plugins-inputs-file-exit_after_read) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`file_chunk_count`](#plugins-inputs-file-file_chunk_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`file_chunk_size`](#plugins-inputs-file-file_chunk_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`file_completed_action`](#plugins-inputs-file-file_completed_action) | [string](/reference/configuration-file-structure.md#string), one of `["delete", "log", "log_and_delete"]` | No |
+| [`file_completed_log_path`](#plugins-inputs-file-file_completed_log_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`file_sort_by`](#plugins-inputs-file-file_sort_by) | [string](/reference/configuration-file-structure.md#string), one of `["last_modified", "path"]` | No |
+| [`file_sort_direction`](#plugins-inputs-file-file_sort_direction) | [string](/reference/configuration-file-structure.md#string), one of `["asc", "desc"]` | No |
+| [`ignore_older`](#plugins-inputs-file-ignore_older) | [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration) | No |
+| [`max_open_files`](#plugins-inputs-file-max_open_files) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`mode`](#plugins-inputs-file-mode) | [string](/reference/configuration-file-structure.md#string), one of `["tail", "read"]` | No |
+| [`path`](#plugins-inputs-file-path) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`sincedb_clean_after`](#plugins-inputs-file-sincedb_clean_after) | [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration) | No |
+| [`sincedb_path`](#plugins-inputs-file-sincedb_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sincedb_write_interval`](#plugins-inputs-file-sincedb_write_interval) | [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration) | No |
+| [`start_position`](#plugins-inputs-file-start_position) | [string](/reference/configuration-file-structure.md#string), one of `["beginning", "end"]` | No |
+| [`stat_interval`](#plugins-inputs-file-stat_interval) | [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration) | No |
+
+Also see [Common options](#plugins-inputs-file-common-options) for a list of options supported by all input plugins.
+
+
+
+### `check_archive_validity` [plugins-inputs-file-check_archive_validity]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* The default is `false`.
+
+When set to `true`, this setting verifies that a compressed file is valid before processing it. There are two passes through the file—one pass to verify that the file is valid, and another pass to process the file.
+
+Validating a compressed file requires more processing time, but can prevent a corrupt archive from causing looping.
+
+
+### `close_older` [plugins-inputs-file-close_older]
+
+* Value type is [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration)
+* Default value is `"1 hour"`
+
+The file input closes any files that were last read the specified duration (seconds if a number is specified) ago. This has different implications depending on whether a file is being tailed or read. If tailing, and there is a large time gap in incoming data, the file can be closed (allowing other files to be opened) but will be queued for reopening when new data is detected. If reading, the file will be closed after `close_older` seconds from when the last bytes were read. This setting is retained for backward compatibility if you upgrade the plugin to 4.1.0+, are reading not tailing, and do not switch to using Read mode.
+
+
+### `delimiter` [plugins-inputs-file-delimiter]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\n"`
+
+Set the new line delimiter. Defaults to `"\n"`. Note that when reading compressed files this setting is not used; instead the standard Windows or Unix line endings are used.
+
+
+### `discover_interval` [plugins-inputs-file-discover_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `15`
+
+How often we expand the filename patterns in the `path` option to discover new files to watch. This value is a multiplier of `stat_interval`, e.g. if `stat_interval` is "500 ms" then new files could be discovered every 15 x 500 milliseconds, i.e. every 7.5 seconds. In practice, this is the best case because the time taken to read new content needs to be factored in.
+
+
+### `ecs_compatibility` [plugins-inputs-file-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: sets non-ECS metadata on event (such as top-level `host`, `path`)
+ * `v1`,`v8`: sets ECS-compatible metadata on event (such as `[host][name]`, `[log][file][path]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `exclude` [plugins-inputs-file-exclude]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Exclusions (matched against the filename, not full path). Filename patterns are valid here, too. For example, if you have
+
+```ruby
+ path => "/var/log/*"
+```
+
+In Tail mode, you might want to exclude gzipped files:
+
+```ruby
+ exclude => "*.gz"
+```
+
+
+### `exit_after_read` [plugins-inputs-file-exit_after_read]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+This option can be used in `read` mode to force closing all watchers once a file has been read. It is useful when the content of the files is static and won’t change during execution. When set to `true` it also disables active discovery of files: only files that were present in the directories when the process started will be read. It honors `sincedb` entries; if a file was processed once and later modified, the next run will only read the newly added entries.
+
+
+### `file_chunk_count` [plugins-inputs-file-file_chunk_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4611686018427387903`
+
+When combined with the `file_chunk_size`, this option sets how many chunks (bands or stripes) are read from each file before moving to the next active file. For example, a `file_chunk_count` of 32 and a `file_chunk_size` of 32KB will process the next 1MB from each active file. As the default is very large, the file is effectively read to EOF before moving to the next active file.
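+
+As a sketch (the path is illustrative), banded reading across many files could look like this:
+
+```ruby
+input {
+  file {
+    path             => "/var/log/app/*.log"   # illustrative path
+    mode             => "read"
+    file_chunk_size  => 32768                   # 32KB chunks (the default)
+    file_chunk_count => 32                      # read 1MB from each file before moving to the next
+  }
+}
+```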
+
+
+### `file_chunk_size` [plugins-inputs-file-file_chunk_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `32768` (32KB)
+
+File content is read off disk in blocks or chunks and lines are extracted from the chunk. See [`file_chunk_count`](#plugins-inputs-file-file_chunk_count) to see why and when to change this setting from the default.
+
+
+### `file_completed_action` [plugins-inputs-file-file_completed_action]
+
+* Value can be any of: `delete`, `log`, `log_and_delete`
+* The default is `delete`.
+
+When in `read` mode, this setting controls what action is carried out when a file has been fully processed. If *delete* is specified, the file will be deleted. If *log* is specified, the full path of the file is logged to the file specified in the `file_completed_log_path` setting. If `log_and_delete` is specified, both of the above actions take place.
+
+
+### `file_completed_log_path` [plugins-inputs-file-file_completed_log_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The file to which the paths of completely read files are appended. Specify this path only when `file_completed_action` is *log* or *log_and_delete*. IMPORTANT: this file is appended to only; it could become very large. You are responsible for file rotation.
+
+
+### `file_sort_by` [plugins-inputs-file-file_sort_by]
+
+* Value can be any of: `last_modified`, `path`
+* The default is `last_modified`.
+
+The attribute of a "watched" file used to sort files. Files can be sorted by modified date or alphabetically by full path. Previously, the processing order of the discovered and therefore "watched" files was OS dependent.
+
+
+### `file_sort_direction` [plugins-inputs-file-file_sort_direction]
+
+* Value can be any of: `asc`, `desc`
+* The default is `asc`.
+
+Select between ascending and descending order when sorting "watched" files. If oldest data first is important then the defaults of `last_modified` + `asc` are good. If newest data first is more important then opt for `last_modified` + `desc`. If you use special naming conventions for the file full paths then perhaps `path` + `asc` will help to control the order of file processing.
+
+
+### `ignore_older` [plugins-inputs-file-ignore_older]
+
+* Value type is [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration)
+* There is no default value for this setting.
+
+When the file input discovers a file that was last modified before the specified duration (seconds if a number is specified), the file is ignored. After its discovery, if an ignored file is modified it is no longer ignored and any new data is read. By default, this option is disabled. Note this unit is in seconds.
+
+
+### `max_open_files` [plugins-inputs-file-max_open_files]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The maximum number of file handles that this input consumes at any one time. Use `close_older` to close some files if you need to process more files than this number. This should not be set to the maximum the OS allows because file handles are needed for other LS plugins and OS processes. A default of 4095 is set internally.
+
+
+### `mode` [plugins-inputs-file-mode]
+
+* Value can be either `tail` or `read`.
+* The default value is `tail`.
+
+The mode you want the file input to operate in: tail a few files or read many content-complete files. Read mode now supports gzip file processing.
+
+If `read` is specified, these settings can be used:
+
+* `ignore_older` (older files are not processed)
+* `file_completed_action` (what action should be taken when the file is processed)
+* `file_completed_log_path` (which file should the completed file path be logged to)
+
+If `read` is specified, these settings are ignored:
+
+* `start_position` (files are always read from the beginning)
+* `close_older` (files are automatically *closed* when EOF is reached)
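+
+For example, a minimal Read-mode sketch (paths are illustrative) that logs and deletes files once they have been fully processed:
+
+```ruby
+input {
+  file {
+    path                    => "/var/batch/*.log.gz"                    # illustrative; gzip files are supported in read mode
+    mode                    => "read"
+    file_completed_action   => "log_and_delete"
+    file_completed_log_path => "/var/log/logstash/completed-files.log"  # illustrative path
+  }
+}
+```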
+
+
+### `path` [plugins-inputs-file-path]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The path(s) to the file(s) to use as an input. You can use filename patterns here, such as `/var/log/*.log`. If you use a pattern like `/var/log/**/*.log`, a recursive search of `/var/log` will be done for all `*.log` files. Paths must be absolute and cannot be relative.
+
+You may also configure multiple paths. See an example on the [Logstash configuration page](/reference/configuration-file-structure.md#array).
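+
+For instance, a sketch with multiple patterns (the values are illustrative):
+
+```ruby
+input {
+  file {
+    path => ["/var/log/syslog", "/var/log/app/*.log", "/var/log/nested/**/*.log"]
+  }
+}
+```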
+
+
+### `sincedb_clean_after` [plugins-inputs-file-sincedb_clean_after]
+
+* Value type is [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration)
+* The default value for this setting is "2 weeks".
+* If a number is specified then it is interpreted as **days** and can be decimal e.g. 0.5 is 12 hours.
+
+The sincedb record now has a last active timestamp associated with it. If no changes are detected in a tracked file in the last N days its sincedb tracking record expires and will not be persisted. This option helps protect against the inode recycling problem. Filebeat has an [FAQ about inode recycling](beats://reference/filebeat/inode-reuse-issue.md).
+
+
+### `sincedb_path` [plugins-inputs-file-sincedb_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path of the sincedb database file (keeps track of the current position of monitored log files) that will be written to disk. By default, sincedb files are written under the Logstash data directory in `plugins/inputs/file`. NOTE: this must be a file path, not a directory path.
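+
+For example, when running several file inputs, each can be given its own sincedb file (the paths are hypothetical):
+
+```ruby
+input {
+  file {
+    path         => "/var/log/app-a/*.log"
+    sincedb_path => "/var/lib/logstash/sincedb-app-a"   # hypothetical path, one per input
+  }
+  file {
+    path         => "/var/log/app-b/*.log"
+    sincedb_path => "/var/lib/logstash/sincedb-app-b"   # hypothetical path, one per input
+  }
+}
+```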
+
+
+### `sincedb_write_interval` [plugins-inputs-file-sincedb_write_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration)
+* Default value is `"15 seconds"`
+
+How often (in seconds) to write the sincedb database file with the current position of monitored log files.
+
+
+### `start_position` [plugins-inputs-file-start_position]
+
+* Value can be any of: `beginning`, `end`
+* Default value is `"end"`
+
+Choose where Logstash starts initially reading files: at the beginning or at the end. The default behavior treats files like live streams and thus starts at the end. If you have old data you want to import, set this to *beginning*.
+
+This option only modifies "first contact" situations where a file is new and not seen before, i.e. files that don’t have a current position recorded in a sincedb file read by Logstash. If a file has already been seen before, this option has no effect and the position recorded in the sincedb file will be used.
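+
+A sketch of a one-off import of existing data (the path is illustrative):
+
+```ruby
+input {
+  file {
+    path           => "/var/log/old-app.log"   # illustrative path
+    start_position => "beginning"              # only affects files with no position recorded in the sincedb
+  }
+}
+```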
+
+
+### `stat_interval` [plugins-inputs-file-stat_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number) or [string_duration](#plugins-inputs-file-string_duration)
+* Default value is `"1 second"`
+
+How often (in seconds) we stat files to see if they have been modified. Increasing this interval will decrease the number of system calls we make, but increase the time to detect new log lines.
+
+::::{note}
+Discovering new files and checking whether they have grown/or shrunk occurs in a loop. This loop will sleep for `stat_interval` seconds before looping again. However, if files have grown, the new content is read and lines are enqueued. Reading and enqueuing across all grown files can take time, especially if the pipeline is congested. So the overall loop time is a combination of the `stat_interval` and the file read time.
+::::
+
+
+
+
+## Common options [plugins-inputs-file-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-file-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-file-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-file-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-file-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-file-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-file-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-file-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-file-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-file-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-file-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 file inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ file {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-file-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-file-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
+## String Durations [plugins-inputs-file-string_duration]
+
+Format is `number` `string` and the space between these is optional. So "45s" and "45 s" are both valid.
+
+::::{tip}
+Use the most suitable duration, for example, "3 days" rather than "72 hours".
+::::
+
+
+### Weeks [_weeks]
+
+Supported values: `w` `week` `weeks`, e.g. "2 w", "1 week", "4 weeks".
+
+
+### Days [_days]
+
+Supported values: `d` `day` `days`, e.g. "2 d", "1 day", "2.5 days".
+
+
+### Hours [_hours]
+
+Supported values: `h` `hour` `hours`, e.g. "4 h", "1 hour", "0.5 hours".
+
+
+### Minutes [_minutes]
+
+Supported values: `m` `min` `minute` `minutes`, e.g. "45 m", "35 min", "1 minute", "6 minutes".
+
+
+### Seconds [_seconds]
+
+Supported values: `s` `sec` `second` `seconds`, e.g. "45 s", "15 sec", "1 second", "2.5 seconds".
+
+
+### Milliseconds [_milliseconds]
+
+Supported values: `ms` `msec` `msecs`, e.g. "500 ms", "750 msec", "50 msecs".
+
+::::{note}
+`milli` `millis` and `milliseconds` are not supported
+::::
+
+
+
+### Microseconds [_microseconds]
+
+Supported values: `us` `usec` `usecs`, e.g. "600 us", "800 usec", "900 usecs"
+
+::::{note}
+`micro` `micros` and `microseconds` are not supported
+::::
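+
+Putting it together, a sketch that uses string durations on several file input settings (the values are illustrative):
+
+```ruby
+input {
+  file {
+    path          => "/var/log/*.log"
+    stat_interval => "500 ms"
+    close_older   => "1 hour"
+    ignore_older  => "3 days"
+  }
+}
+```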
+
+
+
+
diff --git a/docs/reference/plugins-inputs-ganglia.md b/docs/reference/plugins-inputs-ganglia.md
new file mode 100644
index 000000000..aa7d86342
--- /dev/null
+++ b/docs/reference/plugins-inputs-ganglia.md
@@ -0,0 +1,138 @@
+---
+navigation_title: "ganglia"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-ganglia.html
+---
+
+# Ganglia input plugin [plugins-inputs-ganglia]
+
+
+* Plugin version: v3.1.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-ganglia-index.md).
+
+## Getting help [_getting_help_18]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-ganglia). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_19]
+
+Read ganglia packets from the network via UDP.
+
+
+## Ganglia Input Configuration Options [plugins-inputs-ganglia-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-ganglia-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-ganglia-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-ganglia-port) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-ganglia-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-ganglia-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address to listen on
+
+
+### `port` [plugins-inputs-ganglia-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8649`
+
+The port to listen on. Remember that ports less than 1024 (privileged ports) may require root to use.
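+
+For example, a minimal sketch that listens on the default address and port (the values are illustrative):
+
+```ruby
+input {
+  ganglia {
+    host => "0.0.0.0"
+    port => 8649
+  }
+}
+```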
+
+
+
+## Common options [plugins-inputs-ganglia-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-ganglia-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-ganglia-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-ganglia-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-ganglia-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-ganglia-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-ganglia-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-ganglia-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-ganglia-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-ganglia-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-ganglia-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 ganglia inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ ganglia {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-ganglia-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-ganglia-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-gelf.md b/docs/reference/plugins-inputs-gelf.md
new file mode 100644
index 000000000..d0733941b
--- /dev/null
+++ b/docs/reference/plugins-inputs-gelf.md
@@ -0,0 +1,201 @@
+---
+navigation_title: "gelf"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-gelf.html
+---
+
+# Gelf input plugin [plugins-inputs-gelf]
+
+
+* Plugin version: v3.3.2
+* Released on: 2022-08-22
+* [Changelog](https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.3.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-gelf-index.md).
+
+## Getting help [_getting_help_19]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-gelf). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_20]
+
+This input will read GELF messages as events over the network, making it a good choice if you already use Graylog2 today.
+
+The main use case for this input is to leverage existing GELF logging libraries such as the GELF log4j appender.
+
+
+## Gelf Input Configuration Options [plugins-inputs-gelf-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-gelf-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-gelf-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_udp`](#plugins-inputs-gelf-use_udp) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_tcp`](#plugins-inputs-gelf-use_tcp) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`port`](#plugins-inputs-gelf-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`port_tcp`](#plugins-inputs-gelf-port_tcp) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`port_udp`](#plugins-inputs-gelf-port_udp) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`remap`](#plugins-inputs-gelf-remap) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`strip_leading_underscore`](#plugins-inputs-gelf-strip_leading_underscore) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-gelf-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-gelf-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The IP address or hostname to listen on.
+
+
+### `use_udp` [plugins-inputs-gelf-use_udp]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether to listen for gelf messages sent over UDP.
+
+
+### `use_tcp` [plugins-inputs-gelf-use_tcp]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Whether to listen for gelf messages sent over TCP.
+
+
+### `port` [plugins-inputs-gelf-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `12201`
+
+The port to listen on. Remember that ports less than 1024 (privileged ports) may require root to use. `port_tcp` and `port_udp` can be used to set a specific port for each protocol.
+
+
+### `port_tcp` [plugins-inputs-gelf-port_tcp]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+TCP port to listen on. Use `port` instead of this setting unless you need a different port for UDP than TCP.
+
+
+### `port_udp` [plugins-inputs-gelf-port_udp]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+UDP port to listen on. Use `port` instead of this setting unless you need a different port for UDP than TCP.
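+
+For example, a sketch that listens for GELF over both UDP and TCP on separate ports (the values are illustrative):
+
+```ruby
+input {
+  gelf {
+    use_udp  => true
+    use_tcp  => true
+    port_udp => 12201
+    port_tcp => 12202   # only needed when the TCP port must differ from the UDP port
+  }
+}
+```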
+
+
+### `remap` [plugins-inputs-gelf-remap]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether or not to remap the GELF message fields to Logstash event fields or leave them intact.
+
+Remapping converts the following GELF fields to Logstash equivalents:
+
+* `full_message` becomes `event.get("message")`.
+* If there is no `full_message`, `short_message` becomes `event.get("message")`.
+
+
+### `strip_leading_underscore` [plugins-inputs-gelf-strip_leading_underscore]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether or not to remove the leading `_` in GELF fields or leave them in place. (Logstash < 1.2 did not remove them by default.) Note that GELF version 1.1 format now requires all non-standard fields to be added as an "additional" field, beginning with an underscore.
+
+e.g. `_foo` becomes `foo`
+
+
+
+## Common options [plugins-inputs-gelf-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-gelf-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-gelf-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-gelf-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-gelf-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-gelf-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-gelf-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-gelf-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-gelf-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-gelf-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-gelf-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 gelf inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ gelf {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-gelf-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-gelf-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-generator.md b/docs/reference/plugins-inputs-generator.md
new file mode 100644
index 000000000..a3544ac99
--- /dev/null
+++ b/docs/reference/plugins-inputs-generator.md
@@ -0,0 +1,226 @@
+---
+navigation_title: "generator"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-generator.html
+---
+
+# Generator input plugin [plugins-inputs-generator]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-11-04
+* [Changelog](https://github.com/logstash-plugins/logstash-input-generator/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-generator-index.md).
+
+## Getting help [_getting_help_20]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-generator). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_21]
+
+Generate random log events.
+
+The general intention of this is to test performance of plugins.
+
+An event is generated first
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-generator-ecs]
+
+This plugin uses different field names depending on whether [ECS-compatibility](ecs://reference/index.md) is enabled (see also [`ecs_compatibility`](#plugins-inputs-generator-ecs_compatibility)).
+
+| ECS Disabled | ECS v1, v8 | Description |
+| --- | --- | --- |
+| `host` | `[host][name]` | The name of the {{ls}} host that processed the event |
+| `sequence` | `[event][sequence]` | The sequence number for the generated event |
+
+
+## Generator Input Configuration Options [plugins-inputs-generator-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-generator-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`count`](#plugins-inputs-generator-count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-generator-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`lines`](#plugins-inputs-generator-lines) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`message`](#plugins-inputs-generator-message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-generator-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-generator-common-options) for a list of options supported by all input plugins.
+
+
+
+### `count` [plugins-inputs-generator-count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+Set how many messages should be generated.
+
+The default, `0`, means generate an unlimited number of events.
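+
+For example, a small sketch that emits a fixed number of events and then stops (the message text is illustrative):
+
+```ruby
+input {
+  generator {
+    count   => 5                   # stop after five events
+    message => "benchmark event"   # payload for each generated event
+  }
+}
+output { stdout { codec => rubydebug } }
+```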
+
+
+### `ecs_compatibility` [plugins-inputs-generator-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: uses backwards compatible field names, such as `[host]`
+ * `v1`, `v8`: uses fields that are compatible with ECS, such as `[host][name]`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Compatibility with the Elastic Common Schema (ECS)](#plugins-inputs-generator-ecs) for detailed information.
+
+**Sample output: ECS enabled**
+
+```ruby
+{
+ "message" => "Hello world!",
+ "event" => {
+ "sequence" => 0
+ },
+ "host" => {
+ "name" => "the-machine"
+ }
+}
+```
+
+**Sample output: ECS disabled**
+
+```ruby
+{
+ "message" => "Hello world!",
+ "sequence" => 0,
+ "host" => "the-machine"
+}
+```
+
+
+### `lines` [plugins-inputs-generator-lines]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The lines to emit, in order. This option cannot be used with the `message` setting.
+
+Example:
+
+```ruby
+ input {
+ generator {
+ lines => [
+ "line 1",
+ "line 2",
+ "line 3"
+ ]
+ # Emit all lines 3 times.
+ count => 3
+ }
+ }
+```
+
+The above will emit `line 1` then `line 2` then `line 3`, then `line 1`, etc…
+
+
+### `message` [plugins-inputs-generator-message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Hello world!"`
+
+The message string to use in the event.
+
+If you set this to `stdin` then this plugin will read a single line from stdin and use that as the message string for every event.
+
+Otherwise, this value will be used verbatim as the event message.
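+
+A brief sketch of the `stdin` behavior, where the first line read becomes the message for every generated event:
+
+```ruby
+input {
+  generator {
+    message => "stdin"   # read one line from stdin and reuse it for each event
+    count   => 10
+  }
+}
+```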
+
+
+### `threads` [plugins-inputs-generator-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+
+
+## Common options [plugins-inputs-generator-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-generator-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-generator-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-generator-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-generator-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-generator-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-generator-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-generator-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-generator-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-generator-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-generator-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 generator inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ generator {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-generator-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-generator-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-github.md b/docs/reference/plugins-inputs-github.md
new file mode 100644
index 000000000..70706dca3
--- /dev/null
+++ b/docs/reference/plugins-inputs-github.md
@@ -0,0 +1,162 @@
+---
+navigation_title: "github"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-github.html
+---
+
+# Github input plugin [plugins-inputs-github]
+
+
+* Plugin version: v3.0.11
+* Released on: 2023-05-30
+* [Changelog](https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.11/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-github-index.md).
+
+## Installation [_installation]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-github`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_21]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-github). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_22]
+
+Read events from github webhooks
+
+
+## Github Input Configuration Options [plugins-inputs-github-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-github-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`drop_invalid`](#plugins-inputs-github-drop_invalid) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ip`](#plugins-inputs-github-ip) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-github-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`secret_token`](#plugins-inputs-github-secret_token) | [password](/reference/configuration-file-structure.md#password) | No |
+
+Also see [Common options](#plugins-inputs-github-common-options) for a list of options supported by all input plugins.
+
+
+
+### `drop_invalid` [plugins-inputs-github-drop_invalid]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `secret_token` is defined, events that don’t match are dropped. Otherwise, an invalid tag is added to the event.
+
+
+### `ip` [plugins-inputs-github-ip]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+
+### `port` [plugins-inputs-github-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to listen on.
+
+
+### `secret_token` [plugins-inputs-github-secret_token]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your GitHub secret token for the webhook.
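+
+Putting these options together, a hedged sketch of a webhook listener (the address, port, and token reference are placeholders):
+
+```ruby
+input {
+  github {
+    ip           => "0.0.0.0"
+    port         => 8080                         # port GitHub delivers webhooks to
+    secret_token => "${GITHUB_WEBHOOK_SECRET}"   # checked against the webhook signature
+    drop_invalid => true                         # drop events that fail validation
+  }
+}
+```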
+
+
+
+## Common options [plugins-inputs-github-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-github-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-github-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-github-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-github-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-github-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-github-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-github-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-github-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-github-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-github-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 github inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ github {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-github-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-github-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-google_cloud_storage.md b/docs/reference/plugins-inputs-google_cloud_storage.md
new file mode 100644
index 000000000..47da01821
--- /dev/null
+++ b/docs/reference/plugins-inputs-google_cloud_storage.md
@@ -0,0 +1,359 @@
+---
+navigation_title: "google_cloud_storage"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-google_cloud_storage.html
+---
+
+# Google Cloud Storage Input Plugin [plugins-inputs-google_cloud_storage]
+
+
+* Plugin version: v0.15.0
+* Released on: 2023-08-22
+* [Changelog](https://github.com/logstash-plugins/logstash-input-google_cloud_storage/blob/v0.15.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-google_cloud_storage-index.md).
+
+## Installation [_installation_2]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-google_cloud_storage`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_22]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-google_cloud_storage). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [plugins-inputs-google_cloud_storage-description]
+
+Extracts events from files in a Google Cloud Storage bucket.
+
+Example use-cases:
+
+* Read [Stackdriver logs](https://cloud.google.com/stackdriver/) from a Cloud Storage bucket into Elastic.
+* Read gzipped logs from cold-storage into Elastic.
+* Restore data from an Elastic dump.
+* Extract data from Cloud Storage, transform it with Logstash and load it into BigQuery.
+
+Note: While this project is partially maintained by Google, this is not an official Google product.
+
+::::{admonition} Installation Note
+:class: note
+
+Attempting to install this plugin may result in an error:
+
+```bash
+Bundler::VersionConflict: Bundler could not find compatible versions for gem "mimemagic":
+ In Gemfile:
+ logstash-input-google_cloud_storage (= 0.11.0) was resolved to 0.11.0, which depends on
+ mimemagic (>= 0.3.7)
+
+Could not find gem 'mimemagic (>= 0.3.7)', which is required by gem 'logstash-input-google_cloud_storage (= 0.11.0)', in any of the sources or in gems cached in vendor/cache
+```
+
+If this error occurs, you can fix it by manually installing the "mimemagic" dependency directly into Logstash’s internal Ruby gems cache, which is located at `vendor/bundle/jruby//gems/`. You can do this using the Ruby gem instance bundled inside Logstash’s installation `bin/` folder.
+
+To manually install the "mimemagic" gem into Logstash use:
+
+```bash
+bin/ruby -S gem install mimemagic -v '>= 0.3.7'
+```
+
+The mimemagic gem also requires the `shared-mime-info` package to be present. It can be installed using `apt-get install shared-mime-info` on Debian/Ubuntu or `yum install shared-mime-info` on Red Hat/Rocky Linux distributions.
+
+Then install the plugin as usual with:
+
+```bash
+bin/logstash-plugin install logstash-input-google_cloud_storage
+```
+
+::::
+
+
+
+## Metadata Attributes [plugins-inputs-google_cloud_storage-metadata-attributes]
+
+The plugin exposes several metadata attributes about the object being read. You can access these later in the pipeline to augment the data or perform conditional logic.
+
+| Key | Type | Description |
+| --- | --- | --- |
+| `[@metadata][gcs][bucket]` | `string` | The name of the bucket the file was read from. |
+| `[@metadata][gcs][name]` | `string` | The name of the object. |
+| `[@metadata][gcs][metadata]` | `object` | A map of metadata on the object. |
+| `[@metadata][gcs][md5]` | `string` | MD5 hash of the data. Encoded using base64. |
+| `[@metadata][gcs][crc32c]` | `string` | CRC32c checksum, as described in RFC 4960. Encoded using base64 in big-endian byte order. |
+| `[@metadata][gcs][generation]` | `long` | The content generation of the object. Used for object versioning. |
+| `[@metadata][gcs][line]` | `long` | The position of the event in the file. 1-indexed. |
+| `[@metadata][gcs][line_id]` | `string` | A deterministic, unique ID describing this line. This lets you do idempotent inserts into Elasticsearch. |
+
+More information about object metadata can be found in the [official documentation](https://cloud.google.com/storage/docs/json_api/v1/objects).
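+
+As a sketch, a filter can copy some of these attributes onto the event before outputs run, since `@metadata` fields are not passed to outputs (the target field names are illustrative):
+
+```ruby
+filter {
+  mutate {
+    add_field => {
+      "[gcs][bucket]" => "%{[@metadata][gcs][bucket]}"   # source bucket
+      "[gcs][object]" => "%{[@metadata][gcs][name]}"     # object name
+      "[gcs][line]"   => "%{[@metadata][gcs][line]}"     # line number within the object
+    }
+  }
+}
+```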
+
+
+## Example Configurations [plugins-inputs-google_cloud_storage-example-configurations]
+
+### Basic [_basic]
+
+Basic configuration to read JSON logs every minute from `my-logs-bucket`. For example, [Stackdriver logs](https://cloud.google.com/stackdriver/).
+
+```ruby
+input {
+ google_cloud_storage {
+ interval => 60
+ bucket_id => "my-logs-bucket"
+ json_key_file => "/home/user/key.json"
+ file_matches => ".*json"
+ codec => "json_lines"
+ }
+}
+output { stdout { codec => rubydebug } }
+```
+
+
+### Idempotent Inserts into Elasticsearch [_idempotent_inserts_into_elasticsearch]
+
+If your pipeline might insert the same file multiple times you can use the `line_id` metadata key as a deterministic id.
+
+The ID has the format: `gs://<bucket_id>/<object_id>:<line_num>@<generation>`. `line_num` represents the nth event deserialized from the file, starting at 1. `generation` is a unique ID Cloud Storage generates for the object. When an object is overwritten it gets a new generation.
+
+```ruby
+input {
+ google_cloud_storage {
+ bucket_id => "batch-jobs-output"
+ }
+}
+
+output {
+ elasticsearch {
+ document_id => "%{[@metadata][gcs][line_id]}"
+ }
+}
+```
+
+
+### From Cloud Storage to BigQuery [_from_cloud_storage_to_bigquery]
+
+Extract data from Cloud Storage, transform it with Logstash and load it into BigQuery.
+
+```ruby
+input {
+ google_cloud_storage {
+ interval => 60
+ bucket_id => "batch-jobs-output"
+ file_matches => "purchases.*.csv"
+ json_key_file => "/home/user/key.json"
+ codec => "plain"
+ }
+}
+
+filter {
+ csv {
+ columns => ["transaction", "sku", "price"]
+ convert => {
+ "transaction" => "integer"
+ "price" => "float"
+ }
+ }
+}
+
+output {
+ google_bigquery {
+ project_id => "my-project"
+ dataset => "logs"
+ csv_schema => "transaction:INTEGER,sku:INTEGER,price:FLOAT"
+ json_key_file => "/path/to/key.json"
+ error_directory => "/tmp/bigquery-errors"
+ ignore_unknown_values => true
+ }
+}
+```
+
+
+
+## Additional Resources [plugins-inputs-google_cloud_storage-additional-resources]
+
+* [Cloud Storage Homepage](https://cloud.google.com/storage/)
+* [Cloud Storage Pricing](https://cloud.google.com/storage/pricing-summary/)
+* [IAM Service Accounts](https://cloud.google.com/iam/docs/service-accounts)
+* [Application Default Credentials](https://cloud.google.com/docs/authentication/production)
+
+
+## Google Cloud Storage Input Configuration Options [plugins-inputs-google_cloud_storage-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-google_cloud_storage-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`bucket_id`](#plugins-inputs-google_cloud_storage-bucket_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`json_key_file`](#plugins-inputs-google_cloud_storage-json_key_file) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`interval`](#plugins-inputs-google_cloud_storage-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`file_matches`](#plugins-inputs-google_cloud_storage-file_matches) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`file_exclude`](#plugins-inputs-google_cloud_storage-file_exclude) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metadata_key`](#plugins-inputs-google_cloud_storage-metadata_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`processed_db_path`](#plugins-inputs-google_cloud_storage-processed_db_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`delete`](#plugins-inputs-google_cloud_storage-delete) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`unpack_gzip`](#plugins-inputs-google_cloud_storage-unpack_gzip) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-google_cloud_storage-common-options) for a list of options supported by all input plugins.
+
+### `bucket_id` [plugins-inputs-google_cloud_storage-bucket_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The bucket containing your log files.
+
+
+### `json_key_file` [plugins-inputs-google_cloud_storage-json_key_file]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path to the key to authenticate your user to the bucket. This service user *should* have the `storage.objects.update` permission so it can create metadata on the object preventing it from being scanned multiple times.
+
+If no key is provided the plugin will try to use the [default application credentials](https://cloud.google.com/java/docs/reference/google-auth-library/latest/com.google.auth.oauth2.GoogleCredentials#com_google_auth_oauth2_GoogleCredentials_getApplicationDefault__), and if they don’t exist, it falls back to unauthenticated mode.
+
+
+### `interval` [plugins-inputs-google_cloud_storage-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default is: `60`
+
+The number of seconds between looking for new files in your bucket.
+
+
+### `file_matches` [plugins-inputs-google_cloud_storage-file_matches]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default is: `.*\.log(\.gz)?`
+
+A regex pattern to filter files. Only files with names matching this will be considered. All files match by default.
+
+
+### `file_exclude` [plugins-inputs-google_cloud_storage-file_exclude]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default is: `^$`
+
+Any files matching this regex are excluded from processing. No files are excluded by default.
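+
+As a sketch, the two patterns can be combined to pick up rotated logs while skipping other objects (the bucket name and patterns are illustrative):
+
+```ruby
+input {
+  google_cloud_storage {
+    bucket_id    => "my-logs-bucket"
+    file_matches => ".*\.log(\.gz)?"   # plain or gzipped log files only
+    file_exclude => ".*\.tmp$"         # skip temporary objects
+  }
+}
+```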
+
+
+### `metadata_key` [plugins-inputs-google_cloud_storage-metadata_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default is: `x-goog-meta-ls-gcs-input`
+
+This key will be set on the objects after they’ve been processed by the plugin. That way you can stop the plugin without processing files again, or prevent files from being processed by setting the field manually.
+
+::::{note}
+The key is a flag. If a file was partially processed before Logstash exited, some events will be resent.
+::::
+
+
+
+### `processed_db_path` [plugins-inputs-google_cloud_storage-processed_db_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* Default is: `LOGSTASH_DATA/plugins/inputs/google_cloud_storage/db`.
+
+If set, the plugin will store the list of processed files locally. This allows you to create a service account for the plugin that does not have write permissions. However, the data will not be shared across multiple running instances of Logstash.
+
+
+### `delete` [plugins-inputs-google_cloud_storage-delete]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default is: `false`
+
+Should the log file be deleted after its contents have been updated?
+
+
+### `unpack_gzip` [plugins-inputs-google_cloud_storage-unpack_gzip]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default is: `true`
+
+If set to `true`, files ending in `.gz` are decompressed before they’re parsed by the codec. The file will be skipped if it has the suffix but can’t be opened as gzip, for example if it has a bad magic number.
+
+
+
+## Common options [plugins-inputs-google_cloud_storage-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-google_cloud_storage-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-google_cloud_storage-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-google_cloud_storage-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-google_cloud_storage-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-google_cloud_storage-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-google_cloud_storage-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-google_cloud_storage-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-google_cloud_storage-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-google_cloud_storage-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-google_cloud_storage-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 google_cloud_storage inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ google_cloud_storage {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-google_cloud_storage-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-google_cloud_storage-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-google_pubsub.md b/docs/reference/plugins-inputs-google_pubsub.md
new file mode 100644
index 000000000..6f183ad19
--- /dev/null
+++ b/docs/reference/plugins-inputs-google_pubsub.md
@@ -0,0 +1,308 @@
+---
+navigation_title: "google_pubsub"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-google_pubsub.html
+---
+
+# Google_pubsub input plugin [plugins-inputs-google_pubsub]
+
+
+* Plugin version: v1.4.0
+* Released on: 2024-10-15
+* [Changelog](https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.4.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-google_pubsub-index.md).
+
+## Installation [_installation_3]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-google_pubsub`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_23]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-google_pubsub). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_23]
+
+Author: Eric Johnson. Date: 2016-06-01.
+
+Copyright 2016 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+```
+http://www.apache.org/licenses/LICENSE-2.0
+```
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+This is a [Logstash](https://github.com/elastic/logstash) input plugin for [Google Pub/Sub](https://cloud.google.com/pubsub/). The plugin can subscribe to a topic and ingest messages.
+
+The main motivation behind the development of this plugin was to ingest [Stackdriver Logging](https://cloud.google.com/logging/) messages via the [Exported Logs](https://cloud.google.com/logging/docs/export/using_exported_logs) feature of Stackdriver Logging.
+
+
+## Prerequisites [_prerequisites_2]
+
+You must first create a Google Cloud Platform project and enable the Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver Logging messages, you must also enable the Stackdriver Logging API and configure log exporting to Pub/Sub. There is plentiful information on [https://cloud.google.com/](https://cloud.google.com/) to get started:
+
+* Google Cloud Platform Projects and [Overview](https://cloud.google.com/docs/overview/)
+* Google Cloud Pub/Sub [documentation](https://cloud.google.com/pubsub/)
+* Stackdriver Logging [documentation](https://cloud.google.com/logging/)
+
+
+## Cloud Pub/Sub [_cloud_pubsub]
+
+Currently, this module requires you to create a `topic` manually and specify it in the logstash config file. You must also specify a `subscription`, but the plugin will attempt to create the pull-based `subscription` on its own.
+
+All messages received from Pub/Sub will be converted to a logstash `event` and added to the processing pipeline queue. All Pub/Sub messages will be `acknowledged` and removed from the Pub/Sub `topic` (please see more about [Pub/Sub concepts](https://cloud.google.com/pubsub/overview#concepts)).
+
+It is generally assumed that incoming messages will be in JSON and added to the logstash `event` as-is. However, if a plain text message is received, the plugin will return the raw text as `raw_message` in the logstash `event`.
+
+
+## Authentication [_authentication]
+
+You have two options for authentication depending on where you run Logstash.
+
+1. If you are running Logstash outside of Google Cloud Platform, then you will need to provide the path to the JSON private key file in the `json_key_file` setting in your config. If you don’t want to use a key file, you can instead provide its content as a string in the `json_key_file_content` setting. You must assign sufficient roles to the Service Account to create a subscription and to pull messages from the subscription. Learn more about GCP Service Accounts and IAM roles here:
+
+ * Google Cloud Platform IAM [overview](https://cloud.google.com/iam/)
+ * Creating Service Accounts [overview](https://cloud.google.com/iam/docs/creating-managing-service-accounts)
+ * Granting Roles [overview](https://cloud.google.com/iam/docs/granting-roles-to-service-accounts)
+
+2. If you are running Logstash on a Google Compute Engine instance, you may opt to use Application Default Credentials. In this case, you will not need to specify a JSON private key file in your config.
+
+
+## Stackdriver Logging (optional) [_stackdriver_logging_optional]
+
+If you intend to use the logstash plugin for Stackdriver Logging message ingestion, you must first manually set up the Export option to Cloud Pub/Sub and then manually create the `topic`. Please see the more detailed instructions in [Exported Logs](https://cloud.google.com/logging/docs/export/using_exported_logs) and ensure that the [necessary permissions](https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub) have also been manually configured.
+
+Logging messages from Stackdriver Logging exported to Pub/Sub are received as JSON and converted to a logstash `event` as-is in [this format](https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics).
+
+
+## Sample Configuration [_sample_configuration]
+
+Below is a copy of the included `example.conf-tmpl` file that shows a basic configuration for this plugin.
+
+```ruby
+input {
+ google_pubsub {
+ # Your GCP project id (name)
+ project_id => "my-project-1234"
+
+ # The topic name below is currently hard-coded in the plugin. You
+ # must first create this topic by hand and ensure you are exporting
+ # logging to this pubsub topic.
+ topic => "logstash-input-dev"
+
+ # The subscription name is customizable. The plugin will attempt to
+ # create the subscription (but use the hard-coded topic name above).
+ subscription => "logstash-sub"
+
+ # If you are running logstash within GCE, it will use
+ # Application Default Credentials and use GCE's metadata
+ # service to fetch tokens. However, if you are running logstash
+ # outside of GCE, you will need to specify the service account's
+ # JSON key file below.
+ #json_key_file => "/home/erjohnso/pkey.json"
+
+ # Should the plugin attempt to create the subscription on startup?
+ # This is not recommended for security reasons but may be useful in
+ # some cases.
+ #create_subscription => false
+ }
+}
+output { stdout { codec => rubydebug } }
+```
+
+
+## Metadata and Attributes [_metadata_and_attributes]
+
+The original Pub/Sub message is preserved in the special Logstash `[@metadata][pubsub_message]` field so you can fetch:
+
+* Message attributes
+* The original base64 data
+* Pub/Sub message ID for de-duplication
+* Publish time
+
+You MUST extract any fields you want in a filter prior to the data being sent to an output because Logstash deletes `@metadata` fields otherwise.
+
+See the PubsubMessage [documentation](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage) for a full description of the fields.
+
+Example to get the message ID:
+
+```ruby
+input {google_pubsub {...}}
+
+filter {
+ mutate {
+ add_field => { "messageId" => "%{[@metadata][pubsub_message][messageId]}" }
+ }
+}
+
+output {...}
+```
+
+
+## Google_pubsub Input Configuration Options [plugins-inputs-google_pubsub-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-google_pubsub-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`json_key_file`](#plugins-inputs-google_pubsub-json_key_file) | a valid filesystem path | No |
+| [`max_messages`](#plugins-inputs-google_pubsub-max_messages) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`project_id`](#plugins-inputs-google_pubsub-project_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`subscription`](#plugins-inputs-google_pubsub-subscription) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`topic`](#plugins-inputs-google_pubsub-topic) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`include_metadata`](#plugins-inputs-google_pubsub-include_metadata) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`create_subscription`](#plugins-inputs-google_pubsub-create_subscription) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-google_pubsub-common-options) for a list of options supported by all input plugins.
+
+
+
+### `json_key_file` [plugins-inputs-google_pubsub-json_key_file]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If logstash is running within Google Compute Engine, the plugin will use GCE’s Application Default Credentials. Outside of GCE, you will need to specify a Service Account JSON key file.
+
+
+### `max_messages` [plugins-inputs-google_pubsub-max_messages]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+The maximum number of messages returned per request. The Pub/Sub system may return fewer than the number specified.
+
+
+### `project_id` [plugins-inputs-google_pubsub-project_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Google Cloud Project ID (name, not number).
+
+
+### `subscription` [plugins-inputs-google_pubsub-subscription]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+
+### `topic` [plugins-inputs-google_pubsub-topic]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Google Cloud Pub/Sub Topic and Subscription. Note that the topic must be created manually, with the Cloud Logging export to Pub/Sub configured to use that topic. The subscription will be created automatically by the plugin.
+
+
+### `include_metadata` [plugins-inputs-google_pubsub-include_metadata]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`.
+
+If set to `true`, the plugin will include the full message data in the `[@metadata][pubsub_message]` field.
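+
+A sketch combining this option with a filter that copies the publish time onto the event (the added field name is illustrative, and the metadata must be extracted before it reaches an output):
+
+```ruby
+input {
+  google_pubsub {
+    project_id       => "my-project-1234"
+    topic            => "logstash-input-dev"
+    subscription     => "logstash-sub"
+    include_metadata => true   # keep the original message in [@metadata][pubsub_message]
+  }
+}
+
+filter {
+  mutate {
+    add_field => { "publish_time" => "%{[@metadata][pubsub_message][publishTime]}" }
+  }
+}
+```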
+
+
+### `create_subscription` [plugins-inputs-google_pubsub-create_subscription]
+
+::::{note}
+Added in 1.2.0.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`.
+
+If true, the plugin will try to create the subscription before publishing. Note: this requires additional permissions to be granted to the client and is *not* recommended for most use-cases.
+
+
+
+## Common options [plugins-inputs-google_pubsub-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-google_pubsub-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-google_pubsub-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-google_pubsub-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-google_pubsub-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-google_pubsub-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-google_pubsub-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-google_pubsub-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-google_pubsub-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-google_pubsub-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-google_pubsub-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 google_pubsub inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ google_pubsub {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-google_pubsub-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-google_pubsub-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-graphite.md b/docs/reference/plugins-inputs-graphite.md
new file mode 100644
index 000000000..fd89fd357
--- /dev/null
+++ b/docs/reference/plugins-inputs-graphite.md
@@ -0,0 +1,240 @@
+---
+navigation_title: "graphite"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-graphite.html
+---
+
+# Graphite input plugin [plugins-inputs-graphite]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-graphite/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-graphite-index.md).
+
+## Getting help [_getting_help_24]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-graphite). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_24]
+
+Receive graphite metrics. This plugin understands the text-based graphite carbon protocol. Both `N` and `specific-timestamp` forms are supported. For example:
+
+```ruby
+ mysql.slow_query.count 204 N
+ haproxy.live_backends 7 1364608909
+```
+
+`N` means `now` for a timestamp. This plugin also supports having the time specified in the metric payload.
+
+For every metric received from a client, a single event will be emitted with the metric name as the field (like `mysql.slow_query.count`) and the metric value as the field’s value.
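+
+For instance, a minimal sketch that listens for carbon plaintext metrics (the port shown is the conventional carbon port and is illustrative):
+
+```ruby
+input {
+  graphite {
+    mode => "server"
+    host => "0.0.0.0"
+    port => 2003   # carbon plaintext protocol port
+  }
+}
+```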
+
+
+## Graphite Input Configuration Options [plugins-inputs-graphite-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-graphite-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-graphite-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`mode`](#plugins-inputs-graphite-mode) | [string](/reference/configuration-file-structure.md#string), one of `["server", "client"]` | No |
+| [`port`](#plugins-inputs-graphite-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`proxy_protocol`](#plugins-inputs-graphite-proxy_protocol) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_cert`](#plugins-inputs-graphite-ssl_cert) | a valid filesystem path | No |
+| [`ssl_enable`](#plugins-inputs-graphite-ssl_enable) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_extra_chain_certs`](#plugins-inputs-graphite-ssl_extra_chain_certs) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_key`](#plugins-inputs-graphite-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-graphite-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_verify`](#plugins-inputs-graphite-ssl_verify) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-graphite-common-options) for a list of options supported by all input plugins.
+
+
+
+### `data_timeout` (DEPRECATED) [plugins-inputs-graphite-data_timeout]
+
+* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+
+### `host` [plugins-inputs-graphite-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server, depending on `mode`. When mode is `server`, the address to listen on. When mode is `client`, the address to connect to.
+
+
+### `mode` [plugins-inputs-graphite-mode]
+
+* Value can be any of: `server`, `client`
+* Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections, `client` connects to a server.
+
+
+### `port` [plugins-inputs-graphite-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+When mode is `server`, the port to listen on. When mode is `client`, the port to connect to.
+
+
+### `proxy_protocol` [plugins-inputs-graphite-proxy_protocol]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Proxy protocol support. Only v1 is supported at this time; see [http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt).
+
+
+### `ssl_cacert` (DEPRECATED) [plugins-inputs-graphite-ssl_cacert]
+
+* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
+
+
+### `ssl_cert` [plugins-inputs-graphite-ssl_cert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate path
+
+
+### `ssl_enable` [plugins-inputs-graphite-ssl_enable]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+
+### `ssl_extra_chain_certs` [plugins-inputs-graphite-ssl_extra_chain_certs]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An Array of extra X509 certificates to be added to the certificate chain. Useful when the CA chain is not necessary in the system store.
+
+
+### `ssl_key` [plugins-inputs-graphite-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key path
+
+
+### `ssl_key_passphrase` [plugins-inputs-graphite-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+SSL key passphrase
+
+
+### `ssl_verify` [plugins-inputs-graphite-ssl_verify]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA. For input, sets the field `sslsubject` to that of the client certificate.
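+
+Taken together, a hedged sketch of a TLS-enabled listener (the paths, port, and passphrase reference are placeholders):
+
+```ruby
+input {
+  graphite {
+    mode               => "server"
+    port               => 2004
+    ssl_enable         => true                             # required for the other ssl_* options
+    ssl_cert           => "/etc/logstash/tls/server.crt"
+    ssl_key            => "/etc/logstash/tls/server.key"
+    ssl_key_passphrase => "${GRAPHITE_TLS_PASSPHRASE}"
+    ssl_verify         => true                             # verify the client certificate against the CA
+  }
+}
+```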
+
+
+
+## Common options [plugins-inputs-graphite-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-graphite-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-graphite-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-graphite-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-graphite-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-graphite-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-graphite-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-graphite-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-graphite-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-graphite-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-graphite-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 graphite inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ graphite {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-graphite-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-graphite-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-heartbeat.md b/docs/reference/plugins-inputs-heartbeat.md
new file mode 100644
index 000000000..d4b75be29
--- /dev/null
+++ b/docs/reference/plugins-inputs-heartbeat.md
@@ -0,0 +1,221 @@
+---
+navigation_title: "heartbeat"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-heartbeat.html
+---
+
+# Heartbeat input plugin [plugins-inputs-heartbeat]
+
+
+* Plugin version: v3.1.1
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-heartbeat-index.md).
+
+## Getting help [_getting_help_25]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-heartbeat). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_25]
+
+Generate heartbeat messages.
+
+The general intention of this is to test the performance and availability of Logstash.
+
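+For example, a minimal sketch of a pipeline that emits a heartbeat every 60 seconds (the default interval) and prints it, so you can confirm the pipeline is alive (the `stdout` output is only for illustration):
+
+```ruby
+input {
+  heartbeat {
+    # defaults: message => "ok", interval => 60
+  }
+}
+
+output {
+  stdout { codec => rubydebug }
+}
+```
+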
+
+## Elastic Common Schema (ECS) [plugins-inputs-heartbeat-ecs]
+
+This plugin can provide a field, originally named `clock`, to track `epoch` or `sequence` incremental numbers. When [ECS compatibility mode](#plugins-inputs-heartbeat-ecs_compatibility) is enabled, that value is present in the event’s `[event][sequence]` subfield instead.
+
+When [ECS compatibility mode](#plugins-inputs-heartbeat-ecs_compatibility) is enabled, `message` can no longer be used to select the sequence type; only [`sequence`](#plugins-inputs-heartbeat-sequence) is considered. In this case, if `message` contains a sequence selector string, it is ignored.
+
+The existing `host` field is moved to `[host][name]` when ECS is enabled.
+
+| `disabled` | `v1`, `v8` | Availability | Description |
+| --- | --- | --- | --- |
+| [host] | [host][name] | *Always* | *Name or address of the host running the plugin* |
+| [clock] | [event][sequence] | *When `sequence` setting enables it* | *Increment counter based on seconds or from local 0 based counter* |
+
+
+## Heartbeat Input Configuration Options [plugins-inputs-heartbeat-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-heartbeat-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`count`](#plugins-inputs-heartbeat-count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-heartbeat-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`interval`](#plugins-inputs-heartbeat-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`message`](#plugins-inputs-heartbeat-message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sequence`](#plugins-inputs-heartbeat-sequence) | [string](/reference/configuration-file-structure.md#string) one of `["none", "epoch", "sequence"]` | No |
+| [`threads`](#plugins-inputs-heartbeat-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-heartbeat-common-options) for a list of options supported by all input plugins.
+
+
+
+### `count` [plugins-inputs-heartbeat-count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+How many times to iterate. This is typically used only for testing purposes.
+
+
+### `ecs_compatibility` [plugins-inputs-heartbeat-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: `clock` counter field added at root level
+ * `v1`,`v8`: ECS compliant `[event][sequence]` counter field added to the event
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). Refer to [Elastic Common Schema (ECS)](#plugins-inputs-heartbeat-ecs) in this topic for detailed information.
+
+
+### `interval` [plugins-inputs-heartbeat-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Set how frequently messages should be sent.
+
+The default, `60`, means send a message every 60 seconds.
+
+
+### `message` [plugins-inputs-heartbeat-message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"ok"`
+
+The message string to use in the event.
+
+If you set this value to `epoch`, then this plugin will use the current time as a Unix timestamp (which is, by definition, UTC) and will output it into a field called `clock`.
+
+If you set this value to `sequence`, then this plugin will send a sequence of numbers beginning at 0 and incrementing each interval, and will output it into a field called `clock`.
+
+Otherwise, this value will be used verbatim as the event message, and will be output into a field called `message`.
+
+::::{note}
+Using `epoch` and `sequence` in the `message` setting is deprecated. Consider using the [`sequence`](#plugins-inputs-heartbeat-sequence) setting instead, which takes precedence over `message` as a selector.
+::::
+
+
+::::{note}
+If [ECS compatibility mode](#plugins-inputs-heartbeat-ecs_compatibility) is enabled and `message` contains `epoch` or `sequence`, it is ignored and is not present as a field in the generated event.
+::::
+
+
+
+### `sequence` [plugins-inputs-heartbeat-sequence]
+
+* Value can be any of: `none`, `epoch`, `sequence`
+* Default value is `"none"`
+
+If you set this value to `none`, then no sequence field is added.
+
+If you set this value to `epoch`, then this plugin will use the current time as a Unix timestamp (which is, by definition, UTC). It will output this value into a field called `clock` if [ECS compatibility mode](#plugins-inputs-heartbeat-ecs_compatibility) is disabled. Otherwise, the field name is `[event][sequence]`.
+
+If you set this value to `sequence`, then this plugin will send a sequence of numbers beginning at 0 and incrementing each interval. It will output this value into a field called `clock` if [ECS compatibility mode](#plugins-inputs-heartbeat-ecs_compatibility) is disabled. Otherwise, the field name is `[event][sequence]`.
+
+If `sequence` is defined, it takes precedence over the `message` configuration. If `message` doesn’t contain the `epoch` or `sequence` selector values, it will be present in the generated event together with the `clock` field.
+
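+As an illustration (not from the plugin docs), the following sketch emits an epoch-based counter every 10 seconds; with ECS compatibility enabled, the counter is written to `[event][sequence]`:
+
+```ruby
+input {
+  heartbeat {
+    interval          => 10
+    sequence          => "epoch"
+    ecs_compatibility => "v8"   # counter lands in [event][sequence] instead of clock
+  }
+}
+```
+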
+
+### `threads` [plugins-inputs-heartbeat-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+
+
+## Common options [plugins-inputs-heartbeat-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-heartbeat-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-heartbeat-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-heartbeat-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-heartbeat-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-heartbeat-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-heartbeat-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-heartbeat-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-heartbeat-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-heartbeat-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-heartbeat-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 heartbeat inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ heartbeat {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-heartbeat-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-heartbeat-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-http.md b/docs/reference/plugins-inputs-http.md
new file mode 100644
index 000000000..b4a06b0f1
--- /dev/null
+++ b/docs/reference/plugins-inputs-http.md
@@ -0,0 +1,519 @@
+---
+navigation_title: "http"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html
+---
+
+# Http input plugin [plugins-inputs-http]
+
+
+* Plugin version: v4.1.0
+* Released on: 2024-12-19
+* [Changelog](https://github.com/logstash-plugins/logstash-input-http/blob/v4.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-http-index.md).
+
+## Getting help [_getting_help_26]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-http). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_26]
+
+Using this input you can receive single or multiline events over HTTP(S). Applications can send an HTTP request to the endpoint started by this input and Logstash will convert it into an event for subsequent processing. Users can pass plain text, JSON, or any formatted data and use a corresponding codec with this input. For the Content-Type `application/json`, the `json` codec is used; for all other data formats, the `plain` codec is used.
+
+This input can also be used to receive webhook requests to integrate with other services and applications. By taking advantage of the vast plugin ecosystem available in Logstash you can trigger actionable events right from your application.
+
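+As a quick sketch, the following configuration starts a listener on the default port; the `curl` command afterward is just one way to send it an event:
+
+```ruby
+input {
+  http {
+    port => 8080   # default port, shown for clarity
+  }
+}
+```
+
+You could then post a JSON document to it, for example with `curl -H "Content-Type: application/json" -d '{"message": "hello"}' http://localhost:8080`; the `json` codec is applied automatically for that content type.
+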
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-http-ecs_metadata]
+
+In addition to decoding the events, this input will add HTTP headers containing connection information to each event. When ECS compatibility is disabled, the headers are stored in the `headers` field, which has the potential to create confusion and schema conflicts downstream. When ECS is enabled, we can ensure a pipeline maintains access to this metadata throughout the event’s lifecycle without polluting the top-level namespace.
+
+Here’s how ECS compatibility mode affects output.
+
+| ECS `disabled` | ECS `v1`,`v8` | Availability | Description |
+| --- | --- | --- | --- |
+| [host] | [host][ip] | *Always* | *Host IP address* |
+| [headers] | [@metadata][input][http][request][headers] | *Always* | *Complete HTTP headers* |
+| [headers][http_version] | [http][version] | *Always* | *HTTP version* |
+| [headers][http_user_agent] | [user_agent][original] | *Always* | *client user agent* |
+| [headers][http_host] | [url][domain] and [url][port] | *Always* | *host domain and port* |
+| [headers][request_method] | [http][method] | *Always* | *HTTP method* |
+| [headers][request_path] | [url][path] | *Always* | *Query path* |
+| [headers][content_length] | [http][request][body][bytes] | *Always* | *Request content length* |
+| [headers][content_type] | [http][request][mime_type] | *Always* | *Request mime type* |
+
+
+## Blocking Behavior [_blocking_behavior]
+
+The HTTP protocol doesn’t deal well with long running requests. This plugin will either return a 429 (busy) error when Logstash is backlogged, or it will time out the request.
+
+If a 429 error is encountered clients should sleep, backing off exponentially with some random jitter, then retry their request.
+
+This plugin will block if the Logstash queue is blocked and there are available HTTP input threads. This will cause most HTTP clients to time out. Sent events will still be processed in this case. This behavior is not optimal and will be changed in a future release. In the future, this plugin will always return a 429 if the queue is busy, and will not time out in the event of a busy queue.
+
+
+## Security [_security]
+
+This plugin supports standard HTTP basic authentication headers to identify the requester. You can pass in a username and password combination while sending data to this input.
+
+You can also set up SSL and send data securely over HTTPS, with multiple options such as validating the client’s certificate.
+
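+A hedged sketch combining both: basic authentication plus TLS, assuming the certificate and key paths are your own:
+
+```ruby
+input {
+  http {
+    port            => 8443
+    user            => "logstash"                       # example credentials
+    password        => "s3cr3t"
+    ssl_enabled     => true
+    ssl_certificate => "/etc/logstash/http-input.crt"   # hypothetical paths
+    ssl_key         => "/etc/logstash/http-input.key"   # key must be in PKCS8 format
+  }
+}
+```
+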
+
+## Codec settings [plugins-inputs-http-codec-settings]
+
+This plugin has two configuration options for codecs: `codec` and `additional_codecs`.
+
+Values in `additional_codecs` are prioritized over those specified in the `codec` option. That is, the default `codec` is applied only if no codec for the request’s content-type is found in the `additional_codecs` setting.
+
+
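+For example (an illustrative mapping, not an exhaustive one), you could route newline-delimited JSON to the `json_lines` codec while keeping `plain` as the fallback `codec`:
+
+```ruby
+input {
+  http {
+    codec => "plain"   # fallback when no content-type match is found
+    additional_codecs => {
+      "application/json"     => "json"
+      "application/x-ndjson" => "json_lines"
+    }
+  }
+}
+```
+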
+## Http Input Configuration Options [plugins-inputs-http-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-http-common-options) described later.
+
+::::{note}
+As of version `4.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [HTTP Input Obsolete Configuration Options](#plugins-inputs-http-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`additional_codecs`](#plugins-inputs-http-additional_codecs) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`ecs_compatibility`](#plugins-inputs-http-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-http-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-http-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-http-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_pending_requests`](#plugins-inputs-http-max_pending_requests) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`response_headers`](#plugins-inputs-http-response_headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`response_code`](#plugins-inputs-http-response_code) | [number](/reference/configuration-file-structure.md#number), one of `[200, 201, 202, 204]` | No |
+| [`ssl_certificate`](#plugins-inputs-http-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-http-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-http-ssl_cipher_suites) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_client_authentication`](#plugins-inputs-http-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-inputs-http-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_handshake_timeout`](#plugins-inputs-http-ssl_handshake_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_key`](#plugins-inputs-http-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-http-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_password`](#plugins-inputs-http-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-inputs-http-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-inputs-http-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-http-ssl_supported_protocols) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_truststore_password`](#plugins-inputs-http-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-inputs-http-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-inputs-http-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-http-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-inputs-http-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-http-common-options) for a list of options supported by all input plugins.
+
+
+
+### `additional_codecs` [plugins-inputs-http-additional_codecs]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{"application/json"=>"json"}`
+
+Apply specific codecs for specific content types. The default codec will be applied only after this list is checked and no codec for the request’s content-type is found.
+
+
+### `ecs_compatibility` [plugins-inputs-http-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured connection metadata added at root level
+ * `v1`,`v8`: headers added under `[@metadata][http][header]`. Some are copied to structured ECS fields `http`, `url`, `user_agent` and `host`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-inputs-http-ecs_metadata) for detailed information.
+
+**Sample output: ECS disabled**
+
+```text
+{
+ "@version" => "1",
+ "headers" => {
+ "request_path" => "/twitter/tweet/1",
+ "http_accept" => "*/*",
+ "http_version" => "HTTP/1.1",
+ "request_method" => "PUT",
+ "http_host" => "localhost:8080",
+ "http_user_agent" => "curl/7.64.1",
+ "content_length" => "5",
+ "content_type" => "application/x-www-form-urlencoded"
+ },
+ "@timestamp" => 2021-05-28T19:27:28.609Z,
+ "host" => "127.0.0.1",
+ "message" => "hello"
+}
+```
+
+**Sample output: ECS enabled**
+
+```text
+{
+ "@version" => "1",
+ "user_agent" => {
+ "original" => "curl/7.64.1"
+ },
+ "http" => {
+ "method" => "PUT",
+ "request" => {
+ "mime_type" => "application/x-www-form-urlencoded",
+ "body" => {
+ "bytes" => "5"
+ }
+ },
+ "version" => "HTTP/1.1"
+ },
+ "url" => {
+ "port" => "8080",
+ "domain" => "snmp1",
+ "path" => "/twitter/tweet/1"
+ },
+ "@timestamp" => 2021-05-28T23:32:38.222Z,
+ "host" => {
+ "ip" => "127.0.0.1"
+ },
+ "message" => "hello",
+}
+```
+
+
+### `host` [plugins-inputs-http-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The host or IP address to bind to.
+
+
+### `password` [plugins-inputs-http-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password for basic authorization
+
+
+### `port` [plugins-inputs-http-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8080`
+
+The TCP port to bind to
+
+
+### `max_content_length` [plugins-inputs-http-max_content_length]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is 104857600
+
+The maximum content length of an HTTP request, in bytes. It defaults to 100 MB (104857600 bytes).
+
+
+### `max_pending_requests` [plugins-inputs-http-max_pending_requests]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is 200
+
+Maximum number of incoming requests to store in a temporary queue before they are processed by worker threads. If a request arrives and the queue is full, a 429 response is returned immediately. This queue exists to deal with micro bursts of events and to improve overall throughput, so it should be changed very carefully as it can lead to memory pressure and impact performance. If you need to handle periodic or unforeseen spikes in incoming requests, consider enabling the persistent queue for the Logstash pipeline.
+
+
+### `response_code` [plugins-inputs-http-response_code]
+
+* Value can be any of: 200, 201, 202, 204
+* Default value is `200`
+
+The HTTP return code if the request is processed successfully.
+
+Other return codes may happen in the case of an error condition, such as invalid credentials (401), internal errors (503) or backpressure (429).
+
+If 204 (No Content) is set, the response body will not be sent in the response.
+
+
+### `response_headers` [plugins-inputs-http-response_headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{"Content-Type"=>"text/plain"}`
+
+Specify a custom set of response headers.
+
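+Putting the two response settings together, a minimal sketch that acknowledges webhooks with a `202` and a JSON content type:
+
+```ruby
+input {
+  http {
+    port             => 8080
+    response_code    => 202   # 202 Accepted
+    response_headers => { "Content-Type" => "application/json" }
+  }
+}
+```
+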
+
+### `remote_host_target_field` [plugins-inputs-http-remote_host_target_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"host"` when ECS is disabled
+* Default value is `[host][ip]` when ECS is enabled
+
+Specify a target field for the client host of the HTTP request.
+
+
+### `request_headers_target_field` [plugins-inputs-http-request_headers_target_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"headers"` when ECS is disabled
+* Default value is `[@metadata][http][header]` when ECS is enabled
+
+Specify a target field for the HTTP headers of the request.
+
+
+### `ssl_certificate` [plugins-inputs-http-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-http-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificates against these authorities. You can define multiple files or paths. All the certificates will be read and added to the trust store. You need to configure the [`ssl_client_authentication`](#plugins-inputs-http-ssl_client_authentication) to `optional` or `required` to enable the verification.
+
+
+### `ssl_cipher_suites` [plugins-inputs-http-ssl_cipher_suites]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `['TLS_AES_256_GCM_SHA384', 'TLS_AES_128_GCM_SHA256', 'TLS_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priorities. This default list applies for OpenJDK 11.0.14 and higher. For older JDK versions, the default list includes only suites supported by that version. For example, the ChaCha20 family of ciphers is not supported in older versions.
+
+
+### `ssl_client_authentication` [plugins-inputs-http-ssl_client_authentication]
+
+* Value can be any of: `none`, `optional`, `required`
+* Default value is `"none"`
+
+Controls the server’s behavior in regard to requesting a certificate from client connections: `required` forces a client to present a certificate, while `optional` requests a client certificate but the client is not required to present one. Defaults to `none`, which disables the client authentication.
+
+::::{note}
+This setting can be used only if [`ssl_certificate_authorities`](#plugins-inputs-http-ssl_certificate_authorities) is set.
+::::
+
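+As a sketch of mutual TLS under assumed paths, require clients to present a certificate signed by your CA:
+
+```ruby
+input {
+  http {
+    port                        => 8443
+    ssl_enabled                 => true
+    ssl_certificate             => "/etc/logstash/http-input.crt"   # hypothetical paths
+    ssl_key                     => "/etc/logstash/http-input.key"
+    ssl_certificate_authorities => ["/etc/logstash/ca.crt"]
+    ssl_client_authentication   => "required"
+  }
+}
+```
+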
+
+
+### `ssl_enabled` [plugins-inputs-http-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Events are, by default, sent in plain text. You can enable encryption by setting `ssl_enabled` to true and configuring the [`ssl_certificate`](#plugins-inputs-http-ssl_certificate) and [`ssl_key`](#plugins-inputs-http-ssl_key) options.
+
+
+### `ssl_handshake_timeout` [plugins-inputs-http-ssl_handshake_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+
+### `ssl_key` [plugins-inputs-http-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use. NOTE: This key needs to be in PKCS8 format. You can convert it with [OpenSSL](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.md).
+
+
+### `ssl_key_passphrase` [plugins-inputs-http-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+### `ssl_keystore_path` [plugins-inputs-http-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path for the keystore file that contains a private key and certificate. It must be either a Java keystore (jks) or a PKCS#12 file.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate`](#plugins-inputs-http-ssl_certificate) at the same time.
+::::
+
+
+
+### `ssl_keystore_type` [plugins-inputs-http-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_keystore_password` [plugins-inputs-http-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the JKS keystore password
+
+
+### `ssl_supported_protocols` [plugins-inputs-http-ssl_supported_protocols]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-inputs-http-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-inputs-http-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path for the truststore that contains the certificates to trust. It must be either a Java keystore (jks) or a PKCS#12 file.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-inputs-http-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-inputs-http-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `threads` [plugins-inputs-http-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is the number of processors
+
+Number of threads to use for both accepting connections and handling requests.
+
+
+### `user` [plugins-inputs-http-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username for basic authorization
+
+
+
+## HTTP Input Obsolete Configuration Options [plugins-inputs-http-obsolete-options]
+
+::::{warning}
+As of version `4.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](#plugins-inputs-http-ssl_cipher_suites) |
+| keystore | [`ssl_keystore_path`](#plugins-inputs-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-inputs-http-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](#plugins-inputs-http-ssl_enabled) |
+| ssl_verify_mode | [`ssl_client_authentication`](#plugins-inputs-http-ssl_client_authentication) |
+| tls_max_version | [`ssl_supported_protocols`](#plugins-inputs-http-ssl_supported_protocols) |
+| tls_min_version | [`ssl_supported_protocols`](#plugins-inputs-http-ssl_supported_protocols) |
+| verify_mode | [`ssl_client_authentication`](#plugins-inputs-http-ssl_client_authentication) |
+
+
+## Common options [plugins-inputs-http-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-http-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-http-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-http-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-http-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-http-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-http-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-http-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-http-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-http-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-http-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 http inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ http {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-http-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-http-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-http_poller.md b/docs/reference/plugins-inputs-http_poller.md
new file mode 100644
index 000000000..6234ee686
--- /dev/null
+++ b/docs/reference/plugins-inputs-http_poller.md
@@ -0,0 +1,634 @@
+---
+navigation_title: "http_poller"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http_poller.html
+---
+
+# Http_poller input plugin [plugins-inputs-http_poller]
+
+
+* Plugin version: v6.0.0
+* Released on: 2024-12-18
+* [Changelog](https://github.com/logstash-plugins/logstash-input-http_poller/blob/v6.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-http_poller-index.md).
+
+## Getting help [_getting_help_27]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-http_poller). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_27]
+
+This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot metrics endpoint, instead of configuring JMX to monitor my Java application’s memory, GC, and so on.
+
+
+## Example [_example]
+
+Reads from a list of URLs and decodes the body of the response with a codec. The config should look like this:
+
+```ruby
+input {
+ http_poller {
+ urls => {
+ test1 => "http://localhost:9200"
+ test2 => {
+ # Supports all options supported by ruby's Manticore HTTP client
+ method => get
+ user => "AzureDiamond"
+ password => "hunter2"
+ url => "http://localhost:9200/_cluster/health"
+ headers => {
+ Accept => "application/json"
+ }
+ }
+ }
+ request_timeout => 60
+ # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+ schedule => { cron => "* * * * * UTC"}
+ codec => "json"
+ # A hash of request metadata info (timing, response headers, etc.) will be sent here
+ metadata_target => "http_poller_metadata"
+ }
+}
+
+output {
+ stdout {
+ codec => rubydebug
+ }
+}
+```
+
+Using the HTTP poller with a custom CA or self-signed cert.
+
+If you have a self-signed cert, you will need to convert your server’s certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server’s URL and port for the placeholders `MYURL` and `MYPORT`.
+
+```sh
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+```
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+```ruby
+ http_poller {
+ urls => {
+ myurl => "https://myhostname:1234"
+ }
+ truststore => "/path/to/downloaded_truststore.jks"
+ truststore_password => "mypassword"
+ schedule => { cron => "* * * * * UTC"}
+ }
+```
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-http_poller-ecs_metadata]
+
+This input will add metadata about the HTTP connection itself to each event.
+
+When ECS compatibility is disabled, metadata is added to a variety of non-standard top-level fields, which has the potential to create confusion and schema conflicts downstream.
+
+With ECS Compatibility Mode, we can ensure a pipeline maintains access to this metadata throughout the event’s lifecycle without polluting the top-level namespace.
+
+Here’s how ECS compatibility mode affects output.
+
+| ECS disabled | ECS v1 | Availability | Description |
+| --- | --- | --- | --- |
+| [@metadata][host] | [@metadata][input][http_poller][request][host][hostname] | *Always* | *Hostname* |
+| [@metadata][code] | [@metadata][input][http_poller][response][status_code] | *When server responds with a valid status code* | *HTTP response code* |
+| [@metadata][response_headers] | [@metadata][input][http_poller][response][headers] | *When server responds with headers* | *HTTP headers of the response* |
+| [@metadata][response_message] | [@metadata][input][http_poller][response][status_message] | *When server responds with a status line* | *Message of the status line of the HTTP response* |
+| [@metadata][runtime_seconds] | [@metadata][input][http_poller][response][elapsed_time_ns] | *When server responds with a valid status code* | *Elapsed time of calling the endpoint. ECS v1 shows it in nanoseconds.* |
+| [http_request_failure][runtime_seconds] | [event][duration] | *When server throws exception* | *Elapsed time of calling the endpoint. ECS v1 shows it in nanoseconds.* |
+| [@metadata][times_retried] | [@metadata][input][http_poller][request][retry_count] | *When the poller calls server successfully* | *retry count from http client library* |
+| [@metadata][name] / [http_request_failure][name] | [@metadata][input][http_poller][request][name] | *Always* | *The key of `urls` from poller config* |
+| [@metadata][request] / [http_request_failure][request] | [@metadata][input][http_poller][request][original] | *Always* | *The whole object of `urls` from poller config* |
+| [http_request_failure][error] | [error][message] | *When server throws exception* | *Error message* |
+| [http_request_failure][backtrace] | [error][stack_trace] | *When server throws exception* | *Stack trace of error* |
+| -- | [url][full] | *When server throws exception* | *The URL of the endpoint* |
+| -- | [http][request][method] | *When server throws exception* | *HTTP request method* |
+| -- | [host][hostname] | *When server throws exception* | *Hostname* |
+
+
+## Http_poller Input Configuration Options [plugins-inputs-http_poller-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-http_poller-common-options) described later.
+
+::::{note}
+As of version `6.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [HTTP Poller Input Obsolete Configuration Options](#plugins-inputs-http_poller-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`automatic_retries`](#plugins-inputs-http_poller-automatic_retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connect_timeout`](#plugins-inputs-http_poller-connect_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`cookies`](#plugins-inputs-http_poller-cookies) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-inputs-http_poller-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`follow_redirects`](#plugins-inputs-http_poller-follow_redirects) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`keepalive`](#plugins-inputs-http_poller-keepalive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`metadata_target`](#plugins-inputs-http_poller-metadata_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-http_poller-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`pool_max`](#plugins-inputs-http_poller-pool_max) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`pool_max_per_route`](#plugins-inputs-http_poller-pool_max_per_route) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy`](#plugins-inputs-http_poller-proxy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`request_timeout`](#plugins-inputs-http_poller-request_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_non_idempotent`](#plugins-inputs-http_poller-retry_non_idempotent) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`schedule`](#plugins-inputs-http_poller-schedule) | [hash](/reference/configuration-file-structure.md#hash) | Yes |
+| [`socket_timeout`](#plugins-inputs-http_poller-socket_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-inputs-http_poller-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-http_poller-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-http_poller-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-inputs-http_poller-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_keystore_password`](#plugins-inputs-http_poller-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-inputs-http_poller-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-inputs-http_poller-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-inputs-http_poller-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-inputs-http_poller-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-inputs-http_poller-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-inputs-http_poller-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`target`](#plugins-inputs-http_poller-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`urls`](#plugins-inputs-http_poller-urls) | [hash](/reference/configuration-file-structure.md#hash) | Yes |
+| [`user`](#plugins-inputs-http_poller-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`validate_after_inactivity`](#plugins-inputs-http_poller-validate_after_inactivity) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-http_poller-common-options) for a list of options supported by all input plugins.
+
+
+
+### `automatic_retries` [plugins-inputs-http_poller-automatic_retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+How many times the client should retry a failing URL. We highly recommend NOT setting this value to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+
+### `connect_timeout` [plugins-inputs-http_poller-connect_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10s`
+
+
+### `cookies` [plugins-inputs-http_poller-cookies]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies across requests as a normal web browser would. Enabled by default.
+
+
+### `ecs_compatibility` [plugins-inputs-http_poller-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured data added at root level
+ * `v1`: uses `error`, `url` and `http` fields that are compatible with Elastic Common Schema
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-inputs-http_poller-ecs_metadata) for detailed information.
+
+Example output:
+
+**Sample output: ECS disabled**
+
+```text
+{
+ "http_poller_data" => {
+ "@version" => "1",
+ "@timestamp" => 2021-01-01T00:43:22.388Z,
+ "status" => "UP"
+ },
+ "@version" => "1",
+ "@timestamp" => 2021-01-01T00:43:22.389Z,
+}
+```
+
+**Sample output: ECS enabled**
+
+```text
+{
+ "http_poller_data" => {
+ "status" => "UP",
+ "@version" => "1",
+ "event" => {
+ "original" => "{\"status\":\"UP\"}"
+ },
+ "@timestamp" => 2021-01-01T00:40:59.558Z
+ },
+ "@version" => "1",
+ "@timestamp" => 2021-01-01T00:40:59.559Z
+}
+```
+
+**Sample error output: ECS enabled**
+
+```text
+{
+ "@timestamp" => 2021-07-09T09:53:48.721Z,
+ "@version" => "1",
+ "host" => {
+ "hostname" => "MacBook-Pro"
+ },
+ "http" => {
+ "request" => {
+ "method" => "get"
+ }
+ },
+ "event" => {
+ "duration" => 259019
+ },
+ "error" => {
+ "stack_trace" => nil,
+ "message" => "Connection refused (Connection refused)"
+ },
+ "url" => {
+ "full" => "http://localhost:8080/actuator/health"
+ },
+ "tags" => [
+ [0] "_http_request_failure"
+ ]
+}
+```
+
+
+### `follow_redirects` [plugins-inputs-http_poller-follow_redirects]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should redirects be followed? Defaults to `true`
+
+
+### `keepalive` [plugins-inputs-http_poller-keepalive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least one with this to fix interactions with broken keepalive implementations.
+
+
+### `metadata_target` [plugins-inputs-http_poller-metadata_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"@metadata"`
+
+If you’d like to work with the request/response metadata, set this value to the name of the field in which you’d like to store a nested hash of that metadata.
+
+
+### `password` [plugins-inputs-http_poller-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to be used in conjunction with [`user`](#plugins-inputs-http_poller-user) for HTTP authentication.
+
+
+### `pool_max` [plugins-inputs-http_poller-pool_max]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`
+
+
+### `pool_max_per_route` [plugins-inputs-http_poller-pool_max_per_route]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`
+
+
+### `proxy` [plugins-inputs-http_poller-proxy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If you’d like to use an HTTP proxy, this setting supports multiple configuration syntaxes (see the example after this list):
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
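+As an illustrative sketch (host, port, and credentials are placeholders), the hash form looks like this inside a pipeline:
+
+```ruby
+input {
+  http_poller {
+    urls     => { health => "http://localhost:9200/_cluster/health" }
+    schedule => { every => "30s" }
+    # Hash form of the proxy setting (syntax 2 above)
+    proxy => {
+      host     => "proxy.example.org"
+      port     => 3128
+      scheme   => "http"
+      user     => "proxyuser"
+      password => "proxypass"
+    }
+  }
+}
+```
+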
+
+### `request_timeout` [plugins-inputs-http_poller-request_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Timeout (in seconds) for the entire request.
+
+
+### `retry_non_idempotent` [plugins-inputs-http_poller-retry_non_idempotent]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+
+### `schedule` [plugins-inputs-http_poller-schedule]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Schedule of when to periodically poll from the urls.
+
+Format: a hash with
+
+* key: `"cron"` | `"every"` | `"in"` | `"at"`
+* value: string
+
+Examples:
+
+* `{ "every" => "1h" }`
+* `{ "cron" => "* * * * * UTC" }`
+
+See the rufus-scheduler documentation for details about the different schedule options and value string formats.
+
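+A hedged example that polls a placeholder URL once per hour using the `every` form:
+
+```ruby
+input {
+  http_poller {
+    urls     => { metrics => "http://localhost:9600/_node/stats" }  # placeholder URL
+    schedule => { every => "1h" }   # also accepts cron, in, and at forms
+    codec    => "json"
+  }
+}
+```
+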
+
+### `socket_timeout` [plugins-inputs-http_poller-socket_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10s`
+
+
+### `ssl_certificate` [plugins-inputs-http_poller-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-inputs-http_poller-ssl_key) is set.
+::::
+
+
+
+### `ssl_certificate_authorities` [plugins-inputs-http_poller-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem CA files to validate the server’s certificate.
+
+
+### `ssl_cipher_suites` [plugins-inputs-http_poller-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-inputs-http_poller-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable SSL/TLS secured communication. It must be `true` for other `ssl_` options to take effect.
+
+
+### `ssl_key` [plugins-inputs-http_poller-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+OpenSSL-style RSA private key that corresponds to the [`ssl_certificate`](#plugins-inputs-http_poller-ssl_certificate).
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-inputs-http_poller-ssl_certificate) is set.
+::::
+
+
+
+### `ssl_keystore_password` [plugins-inputs-http_poller-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password
+
+
+### `ssl_keystore_path` [plugins-inputs-http_poller-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`
+
+
+### `ssl_keystore_type` [plugins-inputs-http_poller-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-inputs-http_poller-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
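+As a sketch, a poller restricted to TLS 1.3 might look like the following; the URL and schedule are placeholders:
+
+```ruby
+input {
+  http_poller {
+    urls => { "health" => "https://example.com/health" }   # placeholder URL
+    schedule => { cron => "* * * * * UTC" }
+    ssl_supported_protocols => ["TLSv1.3"]                  # refuse anything older than TLS 1.3
+  }
+}
+```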
+
+
+### `ssl_truststore_password` [plugins-inputs-http_poller-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-inputs-http_poller-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+
+### `ssl_truststore_type` [plugins-inputs-http_poller-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-inputs-http_poller-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are: `full`, `none`
+* Default value is `full`
+
+Controls the verification of server certificates. The `full` option verifies that the provided certificate is signed by a trusted authority (CA) and also that the server’s hostname (or IP address) matches the names identified within the certificate.
+
+The `none` setting performs no verification of the server’s certificate. This mode disables many of the security benefits of SSL/TLS and should only be used after cautious consideration. It is primarily intended as a temporary diagnostic mechanism when attempting to resolve TLS errors. Using `none` in production environments is strongly discouraged.
+
+
+### `target` [plugins-inputs-http_poller-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+::::{tip}
+When ECS is enabled, set `target` in the codec (if the codec has a `target` option). Example: `codec => json { target => "TARGET_FIELD_NAME" }`
+::::
+
+
+
+### `urls` [plugins-inputs-http_poller-urls]
+
+* This is a required setting.
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+A hash of urls in this format: `"name" => "url"`. The name and the url will be passed in the output event.
+
+The values in urls can be either:
+
+* a string url (which will be issued as an HTTP GET).
+* a sub-hash containing many useful keys provided by the Manticore backend (see the sketch after this list):
+
+ * url: the String url
+ * method: (optional) the HTTP method to use (defaults to GET)
+ * user: (optional) the HTTP Basic Auth user. The user must be under an auth sub-hash for Manticore, but this plugin also accepts it either way.
+ * password: (optional) the HTTP Basic Auth password. The password must be under an auth sub-hash for Manticore, but this plugin accepts it either way.
+ * headers: a hash containing key-value pairs of headers.
+ * body: a string (supported only on POST and PUT requests)
+ * possibly other options mentioned in the [Manticore docs](https://www.rubydoc.info/github/cheald/manticore/Manticore/Client#http-instance_method). Note that Manticore options that are not explicitly documented above are not thoroughly tested and therefore liable to break in unexpected ways if we replace the backend.
+
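+For illustration, the sketch below combines both forms; the names, URLs, header values, and credentials are placeholders:
+
+```ruby
+input {
+  http_poller {
+    urls => {
+      # simple form: "name" => "url", issued as an HTTP GET
+      "health" => "https://example.com/health"
+      # sub-hash form passing extra Manticore options
+      "search" => {
+        method => post
+        url => "https://example.com/api/search"
+        headers => {
+          "Content-Type" => "application/json"
+        }
+        body => '{"query":"error"}'
+        user => "AzureDiamond"      # HTTP Basic Auth user
+        password => "hunter2"       # HTTP Basic Auth password
+      }
+    }
+    request_timeout => 60
+    schedule => { cron => "* * * * * UTC" }
+    codec => "json"
+  }
+}
+```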
+
+**Notes:**
+
+* Passwords specified as a part of `urls` are prone to exposure in plugin log output. The plugin does not declare them as passwords, and therefore doesn’t wrap them in the leak-reducing wrappers we use elsewhere.
+* We don’t guarantee that boolean-type options like Manticore’s `follow_redirects` are supported correctly. The strings `true` or `false` may get passed through, and in Ruby any string is "truthy."
+* Our implementation of this plugin precludes the ability to specify `auth[:eager]` as anything other than `true`.
+
+
+### `user` [plugins-inputs-http_poller-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. If you set this you must also set the [`password`](#plugins-inputs-http_poller-password) option.
+
+
+### `validate_after_inactivity` [plugins-inputs-http_poller-validate_after_inactivity]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `200`
+
+How long to wait before checking for a stale connection to determine if a keepalive request is needed. Consider setting this value lower than the default, possibly to 0, if you get connection errors regularly.
+
+This client is based on Apache Commons' HTTP implementation. Here’s how the [Apache Commons documentation](https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)) describes this option: "Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool."
+
+
+
+## HTTP Poller Input Obsolete Configuration Options [plugins-inputs-http_poller-obsolete-options]
+
+::::{warning}
+As of version `6.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](#plugins-inputs-http_poller-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](#plugins-inputs-http_poller-ssl_certificate) |
+| client_key | [`ssl_key`](#plugins-inputs-http_poller-ssl_key) |
+| keystore | [`ssl_keystore_path`](#plugins-inputs-http_poller-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-inputs-http_poller-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](#plugins-inputs-http_poller-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](#plugins-inputs-http_poller-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](#plugins-inputs-http_poller-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](#plugins-inputs-http_poller-ssl_truststore_type) |
+
+
+## Common options [plugins-inputs-http_poller-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-http_poller-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-http_poller-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-http_poller-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-http_poller-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-http_poller-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-http_poller-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-http_poller-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-http_poller-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-http_poller-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-http_poller-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 http_poller inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ http_poller {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-http_poller-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-http_poller-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-imap.md b/docs/reference/plugins-inputs-imap.md
new file mode 100644
index 000000000..8ffe27ab9
--- /dev/null
+++ b/docs/reference/plugins-inputs-imap.md
@@ -0,0 +1,303 @@
+---
+navigation_title: "imap"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-imap.html
+---
+
+# Imap input plugin [plugins-inputs-imap]
+
+
+* Plugin version: v3.2.1
+* Released on: 2023-10-03
+* [Changelog](https://github.com/logstash-plugins/logstash-input-imap/blob/v3.2.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-imap-index.md).
+
+## Getting help [_getting_help_28]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-imap). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_28]
+
+Read mail from an IMAP server.
+
+Periodically scan an IMAP folder (`INBOX` by default) and move any read messages to the trash.
+
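+A minimal sketch of an imap input follows; the host, credentials, and interval are placeholders, and the password is resolved from an environment variable:
+
+```ruby
+input {
+  imap {
+    host => "imap.example.com"        # placeholder mail server
+    port => 993                       # IMAPS port; `secure` defaults to true
+    user => "logstash@example.com"
+    password => "${IMAP_PASSWORD}"    # resolved from an environment variable
+    folder => "INBOX"
+    check_interval => 300             # poll every 5 minutes
+  }
+}
+```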
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-imap-ecs]
+
+The plugin includes sensible defaults that change based on [ECS compatibility mode](#plugins-inputs-imap-ecs_compatibility). When ECS compatibility is disabled, mail headers and attachments are targeted at the root level. When targeting an ECS version, headers and attachments target `@metadata` sub-fields unless configured otherwise in order to avoid conflict with ECS fields. See [`headers_target`](#plugins-inputs-imap-headers_target), and [`attachments_target`](#plugins-inputs-imap-attachments_target).
+
+
+## Imap Input Configuration Options [plugins-inputs-imap-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-imap-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`attachments_target`](#plugins-inputs-imap-attachments_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`check_interval`](#plugins-inputs-imap-check_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`content_type`](#plugins-inputs-imap-content_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`delete`](#plugins-inputs-imap-delete) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-inputs-imap-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`expunge`](#plugins-inputs-imap-expunge) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`fetch_count`](#plugins-inputs-imap-fetch_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`folder`](#plugins-inputs-imap-folder) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`headers_target`](#plugins-inputs-imap-headers_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-imap-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`lowercase_headers`](#plugins-inputs-imap-lowercase_headers) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-inputs-imap-password) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`port`](#plugins-inputs-imap-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`save_attachments`](#plugins-inputs-imap-save_attachments) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`secure`](#plugins-inputs-imap-secure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`sincedb_path`](#plugins-inputs-imap-sincedb_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`strip_attachments`](#plugins-inputs-imap-strip_attachments) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`uid_tracking`](#plugins-inputs-imap-uid_tracking) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`user`](#plugins-inputs-imap-user) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`verify_cert`](#plugins-inputs-imap-verify_cert) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-imap-common-options) for a list of options supported by all input plugins.
+
+
+
+### `attachments_target` [plugins-inputs-imap-attachments_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-imap-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"[attachments]"`
+    * ECS Compatibility enabled: `"[@metadata][input][imap][attachments]"`
+
+
+The name of the field under which mail attachments information will be added, if [`save_attachments`](#plugins-inputs-imap-save_attachments) is set.
+
+
+### `check_interval` [plugins-inputs-imap-check_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300`
+
+
+### `content_type` [plugins-inputs-imap-content_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"text/plain"`
+
+For multipart messages, use the first part that has this content-type as the event message.
+
+
+### `delete` [plugins-inputs-imap-delete]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+
+### `ecs_compatibility` [plugins-inputs-imap-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (for example, `From` header field is added to the event)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, the `From` header is added as metadata)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`headers_target`](#plugins-inputs-imap-headers_target) and [`attachments_target`](#plugins-inputs-imap-attachments_target).
+
+
+### `expunge` [plugins-inputs-imap-expunge]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+
+### `fetch_count` [plugins-inputs-imap-fetch_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+
+### `folder` [plugins-inputs-imap-folder]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"INBOX"`
+
+
+### `headers_target` [plugins-inputs-imap-headers_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-imap-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: no default value (for example, the subject header is stored under the `"subject"` name)
+ * ECS Compatibility enabled: `"[@metadata][input][imap][headers]"`
+
+
+The name of the field under which mail headers will be added.
+
+Setting `headers_target => ''` skips header processing, and no headers are added to the event, except the date header which, if present, is always used as the event’s `@timestamp`.
+
+
+### `host` [plugins-inputs-imap-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+
+### `lowercase_headers` [plugins-inputs-imap-lowercase_headers]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+
+### `password` [plugins-inputs-imap-password]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+
+### `port` [plugins-inputs-imap-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+
+### `save_attachments` [plugins-inputs-imap-save_attachments]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to true the content of attachments will be included in the `attachments.data` field.
+
+
+### `secure` [plugins-inputs-imap-secure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+
+### `sincedb_path` [plugins-inputs-imap-sincedb_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path of the sincedb database file (which keeps track of the UID of the last processed mail) that will be written to disk. By default, the sincedb file is written to the `/plugins/inputs/imap` directory. Note that this must be a file path, not a directory path.
+
+
+### `strip_attachments` [plugins-inputs-imap-strip_attachments]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+
+### `uid_tracking` [plugins-inputs-imap-uid_tracking]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When the IMAP input plugin connects to the mailbox for the first time and the UID of the last processed mail is not yet known, the unread mails are first downloaded and the UID of the last processed mail is saved. From this point on, if `uid_tracking` is set to `true`, all new mail will be downloaded regardless of whether it is marked as read or unread. This allows users or other services to use the mailbox simultaneously with the IMAP input plugin. The UID of the last processed mail is always saved regardless of the `uid_tracking` value, so you can switch its value as needed. When transitioning from the previous IMAP input plugin version, first process at least one mail with `uid_tracking` set to `false` to save the UID of the last processed mail, and then switch `uid_tracking` to `true`.
+
+
+### `user` [plugins-inputs-imap-user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+
+### `verify_cert` [plugins-inputs-imap-verify_cert]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+
+
+## Common options [plugins-inputs-imap-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-imap-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-imap-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-imap-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-imap-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-imap-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-imap-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-imap-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-imap-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-imap-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-imap-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 imap inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ imap {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-imap-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-imap-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-irc.md b/docs/reference/plugins-inputs-irc.md
new file mode 100644
index 000000000..09e5415f5
--- /dev/null
+++ b/docs/reference/plugins-inputs-irc.md
@@ -0,0 +1,230 @@
+---
+navigation_title: "irc"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-irc.html
+---
+
+# Irc input plugin [plugins-inputs-irc]
+
+
+* Plugin version: v3.0.7
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-irc-index.md).
+
+## Installation [_installation_4]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-irc`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_29]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-irc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_29]
+
+Read events from an IRC Server.
+
+
+## Irc Input Configuration Options [plugins-inputs-irc-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-irc-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`catch_all`](#plugins-inputs-irc-catch_all) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`channels`](#plugins-inputs-irc-channels) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`get_stats`](#plugins-inputs-irc-get_stats) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`host`](#plugins-inputs-irc-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`nick`](#plugins-inputs-irc-nick) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-irc-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-irc-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`real`](#plugins-inputs-irc-real) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secure`](#plugins-inputs-irc-secure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`stats_interval`](#plugins-inputs-irc-stats_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-inputs-irc-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-irc-common-options) for a list of options supported by all input plugins.
+
+
+
+### `catch_all` [plugins-inputs-irc-catch_all]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Catch all IRC channel/user events, not just channel messages.
+
+
+### `channels` [plugins-inputs-irc-channels]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Channels to join and read messages from.
+
+These should be full channel names including the *#* symbol, such as "#logstash".
+
+For passworded channels, add a space and the channel password, such as "#logstash password".
+
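+For example, the following sketch joins one open channel and one passworded channel; the server and channel names are placeholders:
+
+```ruby
+input {
+  irc {
+    host => "irc.example.net"                        # placeholder IRC server
+    port => 6667
+    nick => "logstash-bot"
+    channels => ["#logstash", "#private secret"]     # second entry joins #private with password "secret"
+  }
+}
+```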
+
+### `get_stats` [plugins-inputs-irc-get_stats]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Gather and send user counts for channels. This requires `catch_all` and will force it on.
+
+
+### `host` [plugins-inputs-irc-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Host of the IRC Server to connect to.
+
+
+### `nick` [plugins-inputs-irc-nick]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Nickname
+
+
+### `password` [plugins-inputs-irc-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+IRC Server password
+
+
+### `port` [plugins-inputs-irc-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `6667`
+
+Port for the IRC Server
+
+
+### `real` [plugins-inputs-irc-real]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Real name
+
+
+### `secure` [plugins-inputs-irc-secure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set this to true to enable SSL.
+
+
+### `stats_interval` [plugins-inputs-irc-stats_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How often, in minutes, to get the user count stats.
+
+
+### `user` [plugins-inputs-irc-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Username
+
+
+
+## Common options [plugins-inputs-irc-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-irc-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-irc-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-irc-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-irc-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-irc-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-irc-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-irc-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-irc-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-irc-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-irc-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 irc inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ irc {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-irc-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-irc-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-java_generator.md b/docs/reference/plugins-inputs-java_generator.md
new file mode 100644
index 000000000..2da31727a
--- /dev/null
+++ b/docs/reference/plugins-inputs-java_generator.md
@@ -0,0 +1,185 @@
+---
+navigation_title: "java_generator"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-java_generator.html
+---
+
+# Java_generator input plugin [plugins-inputs-java_generator]
+
+
+**{{ls}} Core Plugin.** The java_generator input plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_30]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_30]
+
+Generate synthetic log events.
+
+This plugin generates a stream of synthetic events that can be used to test the correctness or performance of a Logstash pipeline.
+
+
+## Java_generator Input Configuration Options [plugins-inputs-java_generator-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-java_generator-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`count`](#plugins-inputs-java_generator-count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`eps`](#plugins-inputs-java_generator-eps) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`lines`](#plugins-inputs-java_generator-lines) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`message`](#plugins-inputs-java_generator-message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-java_generator-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-java_generator-common-options) for a list of options supported by all input plugins.
+
+
+
+### `count` [plugins-inputs-java_generator-count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+Sets the number of events that should be generated.
+
+The default, `0`, means generate an unlimited number of events.
+
+
+### `eps` [plugins-inputs-java_generator-eps]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+Sets the rate at which events should be generated. Fractional values may be specified. For example, a rate of `0.25` means that one event will be generated every four seconds.
+
+The default, `0`, means generate events as fast as possible.
+
+
+### `lines` [plugins-inputs-java_generator-lines]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The lines to emit, in order. This option overrides the *message* setting if it has also been specified.
+
+Example:
+
+```ruby
+ input {
+ java_generator {
+ lines => [
+ "line 1",
+ "line 2",
+ "line 3"
+ ]
+ # Emit all lines 2 times.
+ count => 2
+ }
+ }
+```
+
+The above will emit the series of three events `line 1`, `line 2`, and `line 3` two times, for a total of 6 events.
+
+
+### `message` [plugins-inputs-java_generator-message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Hello world!"`
+
+The message string to use in the event.
+
+
+### `threads` [plugins-inputs-java_generator-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Increasing the number of generator threads up to about the number of CPU cores generally increases overall event throughput. The `count`, `eps`, and `lines` settings all apply on a per-thread basis. In other words, each thread will emit the number of events specified in the `count` setting for a total of `threads * count` events. Each thread will emit events at the `eps` rate for a total rate of `threads * eps`, and each thread will emit each line specified in the `lines` option.
+
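+For instance, with the illustrative values below, each of the four threads emits 1000 events at 10 events per second, for roughly 40 events per second and 4000 events overall:
+
+```ruby
+input {
+  java_generator {
+    message => "Hello world!"
+    threads => 4      # four generator threads
+    eps => 10         # each thread emits 10 events per second
+    count => 1000     # each thread emits 1000 events
+  }
+}
+```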
+
+
+## Common options [plugins-inputs-java_generator-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-java_generator-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-java_generator-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-java_generator-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-java_generator-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-java_generator-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-java_generator-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-java_generator-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-java_generator-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-java_generator-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-java_generator-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 java_generator inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ java_generator {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-java_generator-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-java_generator-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-java_stdin.md b/docs/reference/plugins-inputs-java_stdin.md
new file mode 100644
index 000000000..6a3a02722
--- /dev/null
+++ b/docs/reference/plugins-inputs-java_stdin.md
@@ -0,0 +1,111 @@
+---
+navigation_title: "java_stdin"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-java_stdin.html
+---
+
+# Java_stdin input plugin [plugins-inputs-java_stdin]
+
+
+**{{ls}} Core Plugin.** The java_stdin input plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_31]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_31]
+
+Read events from standard input.
+
+By default, each event is assumed to be terminated by end-of-line. If your events are delimited by a different method, you’ll need to use a codec that supports that encoding.
+
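+A minimal sketch follows; the `stdout` output and the config file name in the usage line are only for demonstration:
+
+```ruby
+input {
+  java_stdin {
+    codec => java_line   # default; swap in another codec for differently delimited events
+  }
+}
+output {
+  stdout { codec => rubydebug }
+}
+```
+
+Running something like `echo 'hello world' | bin/logstash -f stdin.conf` then produces one event per input line.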
+
+## Java_stdin Input Configuration Options [plugins-inputs-java_stdin-options]
+
+There are no special configuration options for this plugin, but it does support the [Common options](#plugins-inputs-java_stdin-common-options).
+
+
+## Common options [plugins-inputs-java_stdin-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-java_stdin-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-java_stdin-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-java_stdin-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-java_stdin-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-java_stdin-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-java_stdin-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-java_stdin-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-java_stdin-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"java_line"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-java_stdin-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-java_stdin-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 java_stdin inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ java_stdin {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-java_stdin-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-java_stdin-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-jdbc.md b/docs/reference/plugins-inputs-jdbc.md
new file mode 100644
index 000000000..285794a4e
--- /dev/null
+++ b/docs/reference/plugins-inputs-jdbc.md
@@ -0,0 +1,696 @@
+---
+navigation_title: "jdbc"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jdbc.html
+---
+
+# Jdbc input plugin [plugins-inputs-jdbc]
+
+
+* A component of the [jdbc integration plugin](/reference/plugins-integrations-jdbc.md)
+* Integration version: v5.5.2
+* Released on: 2024-12-23
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.5.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-jdbc-index.md).
+
+## Getting help [_getting_help_32]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-jdbc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_32]
+
+This plugin was created as a way to ingest data from any database with a JDBC interface into Logstash. You can periodically schedule ingestion using a cron syntax (see the `schedule` setting) or run the query one time to load data into Logstash. Each row in the resultset becomes a single event. Columns in the resultset are converted into fields in the event.
+
+
+## Drivers [_drivers]
+
+This plugin does not come packaged with JDBC driver libraries. The desired jdbc driver library must be explicitly passed in to the plugin using the `jdbc_driver_library` configuration option.
+
+See the [`jdbc_driver_library`](#plugins-inputs-jdbc-jdbc_driver_library) and [`jdbc_driver_class`](#plugins-inputs-jdbc-jdbc_driver_class) options for more info.
+
+
+## Scheduling [_scheduling_2]
+
+Input from this plugin can be scheduled to run periodically according to a specific schedule. This scheduling syntax is powered by [rufus-scheduler](https://github.com/jmettraux/rufus-scheduler). The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+| Schedule | Behavior |
+| --- | --- |
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. |
+| `0 * * * *` | will execute on the 0th minute of every hour every day. |
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. |
+
+Further documentation describing this syntax can be found [here](https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings).
+
+
+## State [_state]
+
+The plugin will persist the `sql_last_value` parameter in the form of a metadata file stored in the configured `last_run_metadata_path`. Upon query execution, this file will be updated with the current value of `sql_last_value`. Next time the pipeline starts up, this value will be updated by reading from the file. If `clean_run` is set to true, this value will be ignored and `sql_last_value` will be set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+
+## Dealing With Large Result-sets [_dealing_with_large_result_sets]
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many results are pre-fetched at a time from the cursor into the client’s cache before retrieving more results from the result-set. This is configured in this plugin using the `jdbc_fetch_size` configuration option. No fetch size is set by default in this plugin, so the specific driver’s default size will be used.
+
+
+## Usage: [_usage]
+
+Here is an example of setting up the plugin to fetch data from a MySQL database. First, we place the appropriate JDBC driver library in our current path (this can be placed anywhere on your filesystem). In this example, we connect to the *mydb* database using the user *mysql* and wish to input all rows in the *songs* table that match a specific artist. The following example demonstrates a possible Logstash configuration for this. The `schedule` option in this example will instruct the plugin to execute this input statement on the minute, every minute.
+
+```ruby
+input {
+ jdbc {
+ jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+ jdbc_driver_class => "com.mysql.jdbc.Driver"
+ jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+ jdbc_user => "mysql"
+ parameters => { "favorite_artist" => "Beethoven" }
+ schedule => "* * * * *"
+ statement => "SELECT * from songs where artist = :favorite_artist"
+ }
+}
+```
+
+
+## Configuring SQL statement [_configuring_sql_statement]
+
+An SQL statement is required for this input. It can be passed in via the `statement` option as a string, or read from a file (`statement_filepath`). The file option is typically used when the SQL statement is large or cumbersome to supply in the config, and it supports only one SQL statement. The plugin accepts only one of these options; it cannot read a statement from a file as well as from the `statement` configuration parameter.
+
+
+## Configuring multiple SQL statements [_configuring_multiple_sql_statements]
+
+Configuring multiple SQL statements is useful when there is a need to query and ingest data from different database tables or views. It is possible to define separate Logstash configuration files for each statement or to define multiple statements in a single configuration file. When using multiple statements in a single Logstash configuration file, each statement has to be defined as a separate jdbc input (including jdbc driver, connection string and other required parameters).
+
+Please note that if any of the statements use the `sql_last_value` parameter (e.g. for ingesting only data changed since last run), each input should define its own `last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as all inputs will store their state to the same (default) metadata file, effectively overwriting each other’s `sql_last_value`.
+
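+For illustration, the sketch below defines two statements in one pipeline, each with its own state file; the table names, connection details, and paths are placeholders:
+
+```ruby
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    schedule => "* * * * *"
+    statement => "SELECT * FROM orders WHERE updated_at > :sql_last_value"
+    last_run_metadata_path => "/var/lib/logstash/jdbc_orders_last_run"      # state file for this input
+  }
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    schedule => "* * * * *"
+    statement => "SELECT * FROM customers WHERE updated_at > :sql_last_value"
+    last_run_metadata_path => "/var/lib/logstash/jdbc_customers_last_run"   # separate state file
+  }
+}
+```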
+
+## Predefined Parameters [_predefined_parameters]
+
+Some parameters are built-in and can be used from within your queries. Here is the list:
+
+| Parameter | Description |
+| --- | --- |
+| `sql_last_value` | The value used to calculate which rows to query. Before any query is run, this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and `tracking_column` is set. It is updated accordingly after subsequent queries are run. |
+| `offset`, `size` | Values used with manual paging mode to explicitly implement the paging. Supported only if [`jdbc_paging_enabled`](#plugins-inputs-jdbc-jdbc_paging_enabled) is enabled and [`jdbc_paging_mode`](#plugins-inputs-jdbc-jdbc_paging_mode) has the `explicit` value. |
+
+Example:
+
+```ruby
+input {
+ jdbc {
+ statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value"
+ use_column_value => true
+ tracking_column => "id"
+ # ... other configuration bits
+ }
+}
+```
+
+
+## Prepared Statements [_prepared_statements]
+
+Using server side prepared statements can speed up execution times as the server optimises the query plan and execution.
+
+::::{note}
+Not all JDBC accessible technologies will support prepared statements.
+::::
+
+
+With the introduction of Prepared Statement support comes a different code execution path and some new settings. Most of the existing settings are still useful, but there are several new settings for Prepared Statements to read up on. Use the boolean setting `use_prepared_statements` to enable this execution mode. Use the `prepared_statement_name` setting to specify a name for the Prepared Statement; this identifies the prepared statement locally and remotely, and it should be unique in your config and on the database. Use the `prepared_statement_bind_values` array setting to specify the bind values, using the exact string `:sql_last_value` (multiple times if necessary) for the predefined parameter mentioned before. The `statement` (or `statement_filepath`) setting still holds the SQL statement, but to use bind variables you must use the `?` character as a placeholder in the exact order found in the `prepared_statement_bind_values` array.
+
+::::{note}
+Building count queries around a prepared statement is not supported at this time. Because jdbc paging uses count queries when `jdbc_paging_mode` has the value `auto`, jdbc paging is not supported with prepared statements at this time either. Therefore, the `jdbc_paging_enabled` and `jdbc_page_size` settings are ignored when using prepared statements.
+::::
+
+
+Example:
+
+```ruby
+input {
+ jdbc {
+ statement => "SELECT * FROM mgd.seq_sequence WHERE _sequence_key > ? AND _sequence_key < ? + ? ORDER BY _sequence_key ASC"
+ prepared_statement_bind_values => [":sql_last_value", ":sql_last_value", 4]
+ prepared_statement_name => "foobar"
+ use_prepared_statements => true
+ use_column_value => true
+ tracking_column_type => "numeric"
+ tracking_column => "_sequence_key"
+ last_run_metadata_path => "/elastic/tmp/testing/confs/test-jdbc-int-sql_last_value.yml"
+ # ... other configuration bits
+ }
+}
+```
+
+
+## Database-specific considerations [_database_specific_considerations]
+
+The JDBC input plugin leverages the [sequel](https://github.com/jeremyevans/sequel) library to query databases through their JDBC drivers. The implementation of drivers will vary, however, potentially leading to unexpected behavior.
+
+### Unable to reuse connections [_unable_to_reuse_connections]
+
+Some databases - such as Sybase or SQL Anywhere - may have issues with stale connections, timing out between scheduled runs and never reconnecting.
+
+To ensure connections are valid before queries are executed, enable [`jdbc_validate_connection`](#plugins-inputs-jdbc-jdbc_validate_connection) and set [`jdbc_validation_timeout`](#plugins-inputs-jdbc-jdbc_validation_timeout) to a shorter interval than the [`schedule`](#plugins-inputs-jdbc-schedule).
+
+```ruby
+input {
+ jdbc {
+ schedule => "* * * * *" # run every minute
+ jdbc_validate_connection => true
+ jdbc_validation_timeout => 50 # 50 seconds
+ }
+}
+```
+
+
+
+## Jdbc Input Configuration Options [plugins-inputs-jdbc-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-jdbc-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`clean_run`](#plugins-inputs-jdbc-clean_run) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`columns_charset`](#plugins-inputs-jdbc-columns_charset) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`connection_retry_attempts`](#plugins-inputs-jdbc-connection_retry_attempts) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connection_retry_attempts_wait_time`](#plugins-inputs-jdbc-connection_retry_attempts_wait_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jdbc_connection_string`](#plugins-inputs-jdbc-jdbc_connection_string) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_default_timezone`](#plugins-inputs-jdbc-jdbc_default_timezone) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`jdbc_driver_class`](#plugins-inputs-jdbc-jdbc_driver_class) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_driver_library`](#plugins-inputs-jdbc-jdbc_driver_library) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`jdbc_fetch_size`](#plugins-inputs-jdbc-jdbc_fetch_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jdbc_page_size`](#plugins-inputs-jdbc-jdbc_page_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jdbc_paging_enabled`](#plugins-inputs-jdbc-jdbc_paging_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`jdbc_paging_mode`](#plugins-inputs-jdbc-jdbc_paging_mode) | [string](/reference/configuration-file-structure.md#string), one of `["auto", "explicit"]` | No |
+| [`jdbc_password`](#plugins-inputs-jdbc-jdbc_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`jdbc_password_filepath`](#plugins-inputs-jdbc-jdbc_password_filepath) | a valid filesystem path | No |
+| [`jdbc_pool_timeout`](#plugins-inputs-jdbc-jdbc_pool_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jdbc_user`](#plugins-inputs-jdbc-jdbc_user) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`jdbc_validate_connection`](#plugins-inputs-jdbc-jdbc_validate_connection) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`jdbc_validation_timeout`](#plugins-inputs-jdbc-jdbc_validation_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`last_run_metadata_path`](#plugins-inputs-jdbc-last_run_metadata_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`lowercase_column_names`](#plugins-inputs-jdbc-lowercase_column_names) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`parameters`](#plugins-inputs-jdbc-parameters) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`plugin_timezone`](#plugins-inputs-jdbc-plugin_timezone) | [string](/reference/configuration-file-structure.md#string), one of `["local", "utc"]` | No |
+| [`prepared_statement_bind_values`](#plugins-inputs-jdbc-prepared_statement_bind_values) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`prepared_statement_name`](#plugins-inputs-jdbc-prepared_statement_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`record_last_run`](#plugins-inputs-jdbc-record_last_run) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`schedule`](#plugins-inputs-jdbc-schedule) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sequel_opts`](#plugins-inputs-jdbc-sequel_opts) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`sql_log_level`](#plugins-inputs-jdbc-sql_log_level) | [string](/reference/configuration-file-structure.md#string), one of `["fatal", "error", "warn", "info", "debug"]` | No |
+| [`statement`](#plugins-inputs-jdbc-statement) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`statement_filepath`](#plugins-inputs-jdbc-statement_filepath) | a valid filesystem path | No |
+| [`statement_retry_attempts`](#plugins-inputs-jdbc-statement_retry_attempts) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`statement_retry_attempts_wait_time`](#plugins-inputs-jdbc-statement_retry_attempts_wait_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`target`](#plugins-inputs-jdbc-target) | [field reference](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html) | No |
+| [`tracking_column`](#plugins-inputs-jdbc-tracking_column) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tracking_column_type`](#plugins-inputs-jdbc-tracking_column_type) | [string](/reference/configuration-file-structure.md#string), one of `["numeric", "timestamp"]` | No |
+| [`use_column_value`](#plugins-inputs-jdbc-use_column_value) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_prepared_statements`](#plugins-inputs-jdbc-use_prepared_statements) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-jdbc-common-options) for a list of options supported by all input plugins.
+
+
+
+### `clean_run` [plugins-inputs-jdbc-clean_run]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Whether the previous run state should be preserved
+
+
+### `columns_charset` [plugins-inputs-jdbc-columns_charset]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The character encoding for specific columns. This option will override the `:charset` option for the specified columns.
+
+Example:
+
+```ruby
+input {
+ jdbc {
+ ...
+ columns_charset => { "column0" => "ISO-8859-1" }
+ ...
+ }
+}
+```
+
+This will convert only `column0`, which has ISO-8859-1 as its original encoding.
+
+
+### `connection_retry_attempts` [plugins-inputs-jdbc-connection_retry_attempts]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Maximum number of times to try connecting to database
+
+
+### `connection_retry_attempts_wait_time` [plugins-inputs-jdbc-connection_retry_attempts_wait_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+
+### `jdbc_connection_string` [plugins-inputs-jdbc-jdbc_connection_string]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC connection string
+
+
+### `jdbc_default_timezone` [plugins-inputs-jdbc-jdbc_default_timezone]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+ * Value should be a canonical timezone or offset, such as `Europe/Paris` or `Etc/GMT+3`
+ * Value *may* include square-bracketed extensions, such as `America/Denver[dst_enabled_on_overlap:true]`
+
+* There is no default value for this setting.
+
+#### Timezone conversion [plugins-inputs-jdbc-jdbc_timezone_conv]
+
+Logstash and Elasticsearch expect timestamps to be expressed in UTC terms. If your database has recorded timestamps that are relative to another timezone (the database timezone, if you will), then set this setting to the timezone that the database is using. However, because SQL does not allow for timezone data in timestamp fields, we can’t figure this out on a record-by-record basis. This plugin automatically converts your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead of using the timezone setting of the local machine. You must use a canonical timezone, `America/Denver`, for example.
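+
+For example, if the database writes timestamps in Denver local time, a minimal sketch might look like this (connection settings omitted; the table and column names are hypothetical):
+
+```ruby
+input {
+  jdbc {
+    # timestamps in the database were recorded in America/Denver local time
+    jdbc_default_timezone => "America/Denver"
+    statement => "SELECT * FROM my_table WHERE updated_at > :sql_last_value"
+    # ... connection settings omitted
+  }
+}
+```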
+
+
+
+#### Ambiguous timestamps [plugins-inputs-jdbc-jdbc_ambiguous_timestamps]
+
+While it is common to store local times in SQL’s timestamp column type, many timezones change their offset during the course of a calendar year and therefore cannot be used with SQL’s timestamp type to represent an ordered, continuous timeline. For example in the `America/Chicago` zone when daylight saving time (DST) ends in the autumn, the clock rolls from `01:59:59` back to `01:00:00`, making any timestamp in the 2-hour period between `01:00:00CDT` and `02:00:00CST` on that day ambiguous.
+
+When encountering an ambiguous timestamp caused by a DST transition, the query will fail unless the timezone specified here includes a square-bracketed instruction for how to handle overlapping periods (such as: `America/Chicago[dst_enabled_on_overlap:true]` or `Australia/Melbourne[dst_enabled_on_overlap:false]`).
+
+
+### `plugin_timezone` [plugins-inputs-jdbc-plugin_timezone]
+
+* Value can be any of: `utc`, `local`
+* Default value is `"utc"`
+
+If you want this plugin to offset timestamps to a timezone other than UTC, you can set this setting to `local` and the plugin will use the OS timezone for offset adjustments.
+
+Note: when specifying `plugin_timezone` and/or `jdbc_default_timezone`, offset adjustments are made in two places. If `sql_last_value` is a timestamp and it is used as a parameter in the statement, the offset adjustment is done from the plugin timezone into the data timezone; while records are processed, timestamps are offset adjusted from the database timezone to the plugin timezone. If your database timezone is UTC then you do not need to set either of these settings.
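+
+As a sketch of how the two settings combine, assume the database stores Paris local time while events should carry the local OS offset:
+
+```ruby
+input {
+  jdbc {
+    # database column timestamps were recorded in Europe/Paris local time
+    jdbc_default_timezone => "Europe/Paris"
+    # emit event timestamps offset to the local OS timezone instead of UTC
+    plugin_timezone => "local"
+    # ... other configuration bits
+  }
+}
+```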
+
+
+### `jdbc_driver_class` [plugins-inputs-jdbc-jdbc_driver_class]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver"
+
+::::{note}
+Per [https://github.com/logstash-plugins/logstash-input-jdbc/issues/43](https://github.com/logstash-plugins/logstash-input-jdbc/issues/43), prepending `Java::` to the driver class may be required if the driver does not appear to be loading correctly despite the relevant jar(s) being provided either via the `jdbc_driver_library` setting or by being placed in the Logstash Java classpath. This is known to be the case for the Oracle JDBC driver (ojdbc6.jar), where the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`, and may also be the case for other JDBC drivers.
+::::
+
+
+
+### `jdbc_driver_library` [plugins-inputs-jdbc-jdbc_driver_library]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to the third-party JDBC driver library. If multiple libraries are required, you can pass them separated by a comma.
+
+::::{note}
+If not provided, the plugin will look for the driver class in the Logstash Java classpath. Additionally, if the library does not appear to load correctly via this setting, placing the relevant jar(s) in the Logstash Java classpath rather than using this setting may help. Please also make sure the path is readable by the Logstash process (e.g. the `logstash` user when running as a service).
+::::
+
+
+
+### `jdbc_fetch_size` [plugins-inputs-jdbc-jdbc_fetch_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+JDBC fetch size. If not provided, the respective driver’s default will be used.
+
+
+### `jdbc_page_size` [plugins-inputs-jdbc-jdbc_page_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100000`
+
+JDBC page size
+
+
+### `jdbc_paging_enabled` [plugins-inputs-jdbc-jdbc_paging_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries. Each query will use limits and offsets to collectively retrieve the full result set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
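+
+A minimal sketch (table name hypothetical) that lets the plugin split a large result set into pages of 25,000 rows:
+
+```ruby
+input {
+  jdbc {
+    statement => "SELECT * FROM my_table"
+    jdbc_paging_enabled => true
+    # each auto-generated paged query fetches at most 25000 rows
+    jdbc_page_size => 25000
+  }
+}
+```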
+
+
+### `jdbc_paging_mode` [plugins-inputs-jdbc-jdbc_paging_mode]
+
+* Value can be any of: `auto`, `explicit`
+* Default value is `"auto"`
+
+Whether to use `explicit` or `auto` mode during the JDBC paging
+
+If `auto`, your statement will be automatically surrounded by a count query and subsequent multiple paged queries (with `LIMIT` statement, etc.).
+
+If `explicit`, multiple queries (without a count query ahead) will be performed with your statement, until no more rows are retrieved. You have to write your own paging conditions in your statement configuration. The `offset` and `size` parameters can be used in your statement (`size` equal to `jdbc_page_size`, and `offset` incremented by `size` for each query). When the number of rows returned by the query is not equal to `size`, SQL paging will be ended. Example:
+
+```ruby
+input {
+ jdbc {
+ statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value LIMIT :size OFFSET :offset",
+ jdbc_paging_enabled => true,
+ jdbc_paging_mode => "explicit",
+ jdbc_page_size => 100000
+ }
+}
+```
+
+```ruby
+input {
+ jdbc {
+ statement => "CALL fetch_my_data(:sql_last_value, :offset, :size)",
+ jdbc_paging_enabled => true,
+ jdbc_paging_mode => "explicit",
+ jdbc_page_size => 100000
+ }
+}
+```
+
+This mode can be considered in the following situations:
+
+1. Performance issues encountered in default paging mode.
+2. Your SQL statement is complex, so simply surrounding it with paging statements is not what you want.
+3. Your statement is a stored procedure, and the actual paging statement is inside it.
+
+
+### `jdbc_password` [plugins-inputs-jdbc-jdbc_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+JDBC password
+
+
+### `jdbc_password_filepath` [plugins-inputs-jdbc-jdbc_password_filepath]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+JDBC password filename
+
+
+### `jdbc_pool_timeout` [plugins-inputs-jdbc-jdbc_pool_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Connection pool configuration. The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5).
+
+
+### `jdbc_user` [plugins-inputs-jdbc-jdbc_user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JDBC user
+
+
+### `jdbc_validate_connection` [plugins-inputs-jdbc-jdbc_validate_connection]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Connection pool configuration. Validate connection before use.
+
+
+### `jdbc_validation_timeout` [plugins-inputs-jdbc-jdbc_validation_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3600`
+
+Connection pool configuration. How often to validate a connection (in seconds)
+
+
+### `last_run_metadata_path` [plugins-inputs-jdbc-last_run_metadata_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/plugins/inputs/jdbc/logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+In versions prior to 5.2.6 the metadata file was written to `$HOME/.logstash_jdbc_last_run`. If during a Logstash upgrade the file is found in "$HOME" it will be moved to the default location under "path.data". If the path is defined by the user then no automatic move is performed.
+
+
+### `lowercase_column_names` [plugins-inputs-jdbc-lowercase_column_names]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+
+### `parameters` [plugins-inputs-jdbc-parameters]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+
+### `prepared_statement_bind_values` [plugins-inputs-jdbc-prepared_statement_bind_values]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Array of bind values for the prepared statement. `:sql_last_value` is a reserved predefined string
+
+
+### `prepared_statement_name` [plugins-inputs-jdbc-prepared_statement_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Name given to the prepared statement. It must be unique in your config and in the database
+
+
+### `record_last_run` [plugins-inputs-jdbc-record_last_run]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether to save state or not in [`last_run_metadata_path`](#plugins-inputs-jdbc-last_run_metadata_path)
+
+
+### `schedule` [plugins-inputs-jdbc-schedule]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Schedule of when to periodically run the statement, in Cron format, for example: "* * * * *" (execute query every minute, on the minute).
+
+There is no schedule by default. If no schedule is given, then the statement is run exactly once.
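+
+For instance, to run the statement at the top of every hour (a sketch; the cron expression is illustrative):
+
+```ruby
+input {
+  jdbc {
+    # run the statement once per hour, on the hour
+    schedule => "0 * * * *"
+    # ... other configuration bits
+  }
+}
+```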
+
+
+### `sequel_opts` [plugins-inputs-jdbc-sequel_opts]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration is `max_connections` - the maximum number of connections in the connection pool.
+
+Examples of vendor-specific options can be found in this documentation page: [https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc](https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc)
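+
+A minimal sketch capping the Sequel connection pool (the value is illustrative):
+
+```ruby
+input {
+  jdbc {
+    # limit the Sequel connection pool to 4 connections
+    sequel_opts => {
+      "max_connections" => 4
+    }
+    # ... other configuration bits
+  }
+}
+```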
+
+
+### `sql_log_level` [plugins-inputs-jdbc-sql_log_level]
+
+* Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+* Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn, info, and debug. The default value is info.
+
+
+### `statement` [plugins-inputs-jdbc-statement]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Statement to execute. If undefined, Logstash will complain, even if codec is unused.
+
+To use parameters, use named parameter syntax. For example:
+
+```ruby
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+```
+
+Here, `:target_id` is a named parameter. You can configure named parameters with the `parameters` setting.
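+
+Putting the two together, a minimal sketch (statement and value taken from the examples above, connection settings omitted):
+
+```ruby
+input {
+  jdbc {
+    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
+    # supplies the value bound to the :target_id named parameter
+    parameters => { "target_id" => "321" }
+    # ... connection settings omitted
+  }
+}
+```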
+
+
+### `statement_filepath` [plugins-inputs-jdbc-statement_filepath]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path of file containing statement to execute
+
+
+### `statement_retry_attempts` [plugins-inputs-jdbc-statement_retry_attempts]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Maximum number of times to try executing a statement.
+
+
+### `statement_retry_attempts_wait_time` [plugins-inputs-jdbc-statement_retry_attempts_wait_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0.5`
+
+Number of seconds to sleep between statement execution attempts.
+
+
+### `target` [plugins-inputs-jdbc-target]
+
+* Value type is [field reference](https://www.elastic.co/guide/en/logstash/current/field-references-deepdive.html)
+* There is no default value for this setting.
+
+Without a `target`, events are created from each row column at the root level. When the `target` is set to a field reference, the column of each row is placed in the target field instead.
+
+This option can be useful to avoid populating unknown fields when a downstream schema such as ECS is enforced.
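+
+A minimal sketch that nests row columns under a hypothetical `[sql]` field instead of the event root:
+
+```ruby
+input {
+  jdbc {
+    statement => "SELECT * FROM my_table"
+    # place each row's columns under the [sql] field
+    target => "[sql]"
+  }
+}
+```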
+
+
+### `tracking_column` [plugins-inputs-jdbc-tracking_column]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The column whose value is to be tracked if `use_column_value` is set to `true`
+
+
+### `tracking_column_type` [plugins-inputs-jdbc-tracking_column_type]
+
+* Value can be any of: `numeric`, `timestamp`
+* Default value is `"numeric"`
+
+Type of tracking column. Currently only "numeric" and "timestamp" are supported.
+
+
+### `use_column_value` [plugins-inputs-jdbc-use_column_value]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, uses the defined [`tracking_column`](#plugins-inputs-jdbc-tracking_column) value as the `:sql_last_value`. When set to `false`, `:sql_last_value` reflects the last time the query was executed.
+
+
+### `use_prepared_statements` [plugins-inputs-jdbc-use_prepared_statements]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, enables prepared statement usage.
+
+
+
+## Common options [plugins-inputs-jdbc-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-jdbc-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-jdbc-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-jdbc-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-jdbc-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-jdbc-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-jdbc-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-jdbc-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-jdbc-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-jdbc-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-jdbc-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 jdbc inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ jdbc {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-jdbc-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-jdbc-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-jms.md b/docs/reference/plugins-inputs-jms.md
new file mode 100644
index 000000000..7e732bc43
--- /dev/null
+++ b/docs/reference/plugins-inputs-jms.md
@@ -0,0 +1,821 @@
+---
+navigation_title: "jms"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jms.html
+---
+
+# Jms input plugin [plugins-inputs-jms]
+
+
+* Plugin version: v3.2.2
+* Released on: 2022-06-13
+* [Changelog](https://github.com/logstash-plugins/logstash-input-jms/blob/v3.2.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-jms-index.md).
+
+## Getting help [_getting_help_33]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-jms). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_33]
+
+Read events from a Jms Broker. Supports both Jms Queues and Topics.
+
+For more information about Jms, see [https://javaee.github.io/tutorial/jms-concepts.html](https://javaee.github.io/tutorial/jms-concepts.html). For more information about the Ruby Gem used, see [http://github.com/reidmorrison/jruby-jms](http://github.com/reidmorrison/jruby-jms).
+
+JMS configurations can be done either entirely in the Logstash configuration file, or in a mixture of the Logstash configuration file and a specified yaml file. Simple configurations that do not need to make calls to implementation-specific methods on the connection factory can be specified entirely in the Logstash configuration, whereas more complex configurations should also use the combination of yaml file and Logstash configuration.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-jms-ecs]
+
+JMS data is application specific. ECS compliance for fields depends on the use case. The plugin includes sensible defaults that change based on [ECS compatibility mode](#plugins-inputs-jms-ecs_compatibility). When ECS compatibility is disabled, headers, properties, and payload are targeted at the root level of the event to maintain compatibility with legacy usage of this plugin. When targeting an ECS version, headers and properties target `@metadata` sub-fields unless configured otherwise in order to avoid conflict with ECS fields. See [`headers_target`](#plugins-inputs-jms-headers_target), [`properties_target`](#plugins-inputs-jms-properties_target) and [`target`](#plugins-inputs-jms-target).
+
+
+## Sample Configuration using Logstash Configuration Only [_sample_configuration_using_logstash_configuration_only]
+
+Connections can be configured either entirely in the Logstash configuration, or via a combination of the Logstash configuration and a yaml file, which can be useful for sharing similar configurations across multiple inputs and outputs. The JMS plugin can also be configured using JNDI if desired.
+
+### Logstash Configuration for Non-JNDI Connection [_logstash_confuguration_for_non_jndi_connection]
+
+```ruby
+ input
+ {
+ jms {
+ broker_url => 'failover:(tcp://host1:61616,tcp://host2:61616)?initialReconnectDelay=100' <1>
+ destination => 'myqueue' <2>
+ factory => 'org.apache.activemq.ActiveMQConnectionFactory' <3>
+ pub_sub => false <4>
+ use_jms_timestamp => false <5>
+ # JMS provider credentials if needed <6>
+ username => 'username'
+ password => 'secret'
+ # JMS provider keystore and truststore details <7>
+ keystore => '/Users/logstash-user/security/keystore.jks'
+ keystore_password => 'another_secret'
+ truststore => '/Users/logstash-user/security/truststore.jks'
+ truststore_password => 'yet_another_secret'
+ # Parts of the JMS message to be included <8>
+ include_headers => false
+ include_properties => false
+ include_body => true
+ # Message selector
+ selector => "string_property = 'this' OR int_property < 3" <9>
+ # Connection factory specific settings
+ factory_settings => { <10>
+ exclusive_consumer => true
+ }
+ # Jar Files to include
+ require_jars => ['/usr/share/jms/activemq-all-5.15.9.jar'] <11>
+ }
+ }
+```
+
+1. Url of the broker to connect to. Please consult your JMS provider documentation for the exact syntax to use here, including how to enable failover.
+2. Name of the topic or queue that the plugin will listen to events from.
+3. Full name (including package name) of Java connection factory used to create a connection with your JMS provider.
+4. Determines whether the event source is a queue or a topic, set to `true` for topic, `false` for queue.
+5. Determines whether the JMSTimestamp header is used to populate the `@timestamp` field.
+6. Credentials to use when connecting to the JMS provider, if required.
+7. Keystore and Truststore to use when connecting to the JMS provider, if required.
+8. Parts of the JMS Message to include in the event - headers, properties and the message body can be included or excluded from the event.
+9. Message selector: Use this to filter messages to be processed. The whole selector query should be double-quoted, string property values should be single-quoted, and numeric property values should not be quoted. See JMS provider documentation for exact syntax.
+10. Additional settings that are set directly on the ConnectionFactory object can be added here.
+11. List of jars required by the JMS provider. Paths should be the fully qualified location of all jar files required by the JMS provider. This list may also include dependent jars as well as specific jars from the JMS provider.
+
+
+
+### Logstash Configuration for JNDI Connection [_logstash_configuration_for_jndi_connection]
+
+```ruby
+ input {
+ jms {
+ # Logstash Configuration Settings. <1>
+ include_headers => false
+ include_properties => false
+ include_body => true
+ use_jms_timestamp => false
+ destination => "myqueue"
+ pub_sub => false
+ # JNDI Settings
+ jndi_name => /jms/cf/default <2>
+ jndi_context => { <3>
+ 'java.naming.factory.initial' => com.solacesystems.jndi.SolJNDIInitialContextFactory
+ 'java.naming.security.principal' => solace-cloud-client@user
+ 'java.naming.provider.url' => tcp://address.messaging.solace.cloud:20608
+ 'java.naming.security.credentials' => verysecret
+ }
+ # Jar files to be imported
+ require_jars=> ['/usr/share/jms/commons-lang-2.6.jar', <4>
+ '/usr/share/jms/sol-jms-10.5.0.jar',
+ '/usr/share/jms/geronimo-jms_1.1_spec-1.1.1.jar',
+                     '/usr/share/jms/commons-lang-2.6.jar']
+ }
+ }
+```
+
+1. Configuration settings. Note that there is no `broker_url` or `username` or `password` defined here - these are defined in the `jndi_context` hash.
+2. JNDI name for this connection.
+3. JNDI context settings hash. Contains details of how to connect to JNDI server. See your JMS provider documentation for implementation specific details.
+4. List of jars required by the JMS provider. Paths should be the fully qualified location of all jar files required by the JMS provider. This list may also include dependent jars as well as specific jars from the JMS provider.
+
+
+
+
+## Sample Configuration using Logstash Configuration and Yaml File [_sample_configuration_using_logstash_configuration_and_yaml_file]
+
+### Non-JNDI Connection [_non_jndi_connection]
+
+This section contains sample configurations for connecting to a JMS provider that is not using JNDI, using a combination of the Logstash configuration and the yaml file.
+
+
+### Logstash Configuration for Non-JNDI Connection (for configs including yaml) [_logstash_configuration_for_non_jndi_connection_for_configs_including_yaml]
+
+```ruby
+ input {
+ jms {
+ # Logstash Configuration File Settings <1>
+ include_headers => false
+ include_properties => false
+ include_body => true
+ use_jms_timestamp => false
+ destination => "myqueue"
+ pub_sub => false
+ # JMS Provider credentials <2>
+ username => xxxx
+ password => xxxx
+ # Location of yaml file, and which section to use for configuration
+ yaml_file => "~/jms.yml" <3>
+ yaml_section => "mybroker" <4>
+ }
+ }
+```
+
+1. Configuration settings
+2. Username and password for the connection.
+3. Full path to a yaml file containing the definition for the ConnectionFactory.
+4. Section name in the yaml file of the ConnectionFactory for this plugin definition
+
+
+
+### Yaml File for Non-JNDI Connection [_yaml_file_for_non_jndi_connection]
+
+```yaml
+mybroker: <1>
+ :broker_url: 'ssl://localhost:61617' <2>
+ :factory: org.apache.activemq.ActiveMQConnectionFactory <3>
+ :exclusive_consumer: true <4>
+ :require_jars: <5>
+ - /usr/share/jms/activemq-all-5.15.9.jar
+ - /usr/share/jms/log4j-1.2.17.jar
+```
+
+1. Section name for this broker definition. This should be the value of `yaml_section` in the logstash configuration file. Note that multiple sections can co-exist in the same yaml file.
+2. Full url of the broker. See your JMS Provider documentation for details.
+3. Full name (including package name) of Java connection factory used to create a connection with your JMS provider.
+4. Implementation-specific configuration parameters to be used with the connection factory specified in <3>. Each JMS Provider will have its own set of parameters that can be used here. These parameters are mapped to `set` methods on the provided connection factory, and can be supplied in either *snake* or *camel* case. In <4> above, the `exclusive_consumer` property will call `setExclusiveConsumer` on the supplied connection factory. See your JMS provider documentation for implementation specific details.
+5. List of jars required by the JMS provider. Paths should be the fully qualified location of all jar files required by the JMS provider. This list may also include dependent jars as well as specific jars from the JMS provider.
+
+
+
+### JNDI Connection [_jndi_connection]
+
+This section contains sample configurations for connecting to a JMS provider that is using JNDI, using a combination of the Logstash configuration and the yaml file.
+
+
+### Logstash Configuration for JNDI Connection (for configs including yaml) [_logstash_configuration_for_jndi_connection_for_configs_including_yaml]
+
+```ruby
+ input {
+ jms {
+ # Logstash specific configuration settings <1>
+ include_headers => false
+ include_properties => false
+ include_body => true
+ use_jms_timestamp => false
+ destination => "myqueue"
+ pub_sub => false
+ # Location of yaml file, and which section to use for configuration
+ yaml_file => "~/jms.yml" <2>
+ yaml_section => "mybroker" <3>
+ }
+ }
+```
+
+1. Configuration settings
+2. Full path to a yaml file containing the definition for the ConnectionFactory.
+3. Section name in the yaml file of the ConnectionFactory for this plugin definition
+
+
+
+### Yaml File for JNDI Connection [_yaml_file_for_jndi_connection]
+
+```yaml
+solace: <1>
+ :jndi_name: /jms/cf/default <2>
+ :jndi_context: <3>
+ java.naming.factory.initial: com.solacesystems.jndi.SolJNDIInitialContextFactory
+ java.naming.security.principal: solace-cloud-client@user
+ java.naming.provider.url: tcp://address.messaging.solace.cloud:20608
+ java.naming.security.credentials: verysecret
+ :require_jars: <4>
+ - /usr/share/jms/commons-lang-2.6.jar
+ - /usr/share/jms/sol-jms-10.5.0.jar
+ - /usr/share/jms/geronimo-jms_1.1_spec-1.1.1.jar
+ - /usr/share/jms/commons-lang-2.6.jar
+```
+
+1. Section name for this broker definition. This should be the value of `yaml_section` in the Logstash configuration file.
+2. Name of JNDI entry at which the Factory can be found
+3. JNDI context settings. Contains details of how to connect to JNDI server. See your JMS provider documentation for implementation specific details.
+4. List of jars required by the JMS provider. Paths should be the fully qualified location of all jar files required by the JMS provider. This list may also include dependent jars as well as specific jars from the JMS provider.
+
+
+
+
+## Jar files [_jar_files]
+
+In order to communicate with a JMS broker, the plugin must load the jar files necessary for each client type. This can be set in the yaml file, or in the main configuration if a yaml file is not necessary. The `require_jars` setting should include the full path for each jar file required for the client. For example:
+
+### Logstash configuration [_logstash_configuration]
+
+```ruby
+ input {
+ jms {
+ :
+ [snip]
+ require_jars => ['/usr/share/jms/commons-lang-2.6.jar',
+ '/usr/share/jms/sol-jms-10.5.0.jar',
+ '/usr/share/jms/geronimo-jms_1.1_spec-1.1.1.jar',
+ '/usr/share/jms/commons-lang-2.6.jar']
+ }
+ }
+```
+
+
+
+## Troubleshooting [_troubleshooting]
+
+This section includes some common issues that a user may have when integrating this plugin with their JMS provider.
+
+### Missing Jar files [_missing_jar_files]
+
+The most common issue is missing jar files, which may be jar files provided by the JMS vendor, or jar files that the JMS vendor requires to run. This issue can manifest in different ways, depending on where the missing jar file is discovered.
+
+Example log output:
+
+```txt
+ Failed to load JMS Connection, likely because a JMS Provider is not on the Logstash classpath or correctly
+ specified by the plugin's `require_jars` directive
+ {:exception=>"cannot load Java class javax.jms.DeliveryMode"
+```
+
+```txt
+ JMS Consumer Died {:exception=>"Java::JavaxNaming::NoInitialContextException",
+ :exception_message=>"Cannot instantiate class:"
+```
+
+```txt
+ warning: thread "[main]"Java::JavaxJms::JMSException", :exception_message=>"io/netty/channel/epoll/Epoll",
+ :root_cause=>{:exception=>"Java::JavaLang::NoClassDefFoundError", :exception_message=>"io/netty/channel/epoll/Epoll"}
+```
+
+If any of these issues occur, check the list of `require_jars` in either the Logstash configuration or yaml configuration files.
+
+
+### Setting System Properties [_setting_system_properties]
+
+Many JMS providers allow or expect System properties to be set to configure certain properties when using JMS, for example, the Apache qpid JMS client allows the connection factory lookup to be stored there, and the Solace JMS client allows many properties, such as number of connection retries to be set as System properties. Any system properties that are set should be set in the Logstash `jvm.options` file.
+
+
+### Multiple JMS inputs/outputs in the same Logstash process [_multiple_jms_inputsoutputs_in_the_same_logstash_process]
+
+The use of multiple JMS consumers and producers in the same Logstash process is unsupported if:
+
+* System properties need to be different for any of the consumers/producers
+* Different keystores or truststores are required for any of the consumers/producers
+
+
+### Message Selectors unexpectedly filtering out all messages [_message_selectors_unexpectedly_filtering_out_all_messages]
+
+Incorrect message selector syntax can have two effects. Either the syntax is incorrect and the selector parser from the JMS provider will throw an exception, causing the plugin to fail, or the syntax will be accepted but the messages will be silently dropped - the latter can happen with incorrect quoting of string properties in the selector definition. All selector definitions must be double-quoted in the Logstash configuration file, string property values must be single-quoted, and numeric property values not quoted at all.
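+
+The quoting rules can be seen in this sketch, reusing the selector from the sample configuration above:
+
+```ruby
+input {
+  jms {
+    # whole selector double-quoted; string values single-quoted; numbers unquoted
+    selector => "string_property = 'this' OR int_property < 3"
+    # ... other configuration bits
+  }
+}
+```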
+
+
+### Failed to create Event with MissingConverterException [_failed_to_create_event_with_missingconverterexception]
+
+Messages from certain JMS providers may contain headers or properties that Logstash cannot interpret, which can lead to error messages such as:
+
+```txt
+[2019-11-25T08:04:28,769][ERROR][logstash.inputs.jms ] Failed to create event {:message=>Java::ComSolacesystemsJmsMessage::SolTextMessage: ...
+Attributes: {:jms_correlation_id=>"xxxx", :jms_delivery_mode_sym=>:non_persistent, :jms_destination=>"destination", :jms_expiration=>0, :jms_message_id=>"xxxxxx", :jms_priority=>0, :jms_redelivered=>false, :jms_reply_to=>#, :jms_timestamp=>1574669008862, :jms_type=>nil}
+Properties: nil, :exception=>org.logstash.MissingConverterException: Missing Converter handling for full class name=com.solacesystems.jms.impl.SolTopicImpl, simple name=SolTopicImpl, :backtrace=>["org.logstash.Valuefier.fallbackConvert(Valuefier.java:98)..."]}
+:exception=>org.logstash.MissingConverterException: Missing Converter handling for full class name=com.solacesystems.jms.impl.SolTopicImpl
+```
+
+To get around this, use the `skip_headers` or `skip_properties` configuration setting to avoid attempting to process the offending header or property in the message.
+
+In the example shown above, this attribute is causing the `MissingConverterException`:
+
+`jms_reply_to=>#`
+
+To avoid this error, the configuration should include the following line:
+
+```ruby
+skip_headers => ["jms_reply_to"]
+```
+
+
+
+## Jms Input Configuration Options [plugins-inputs-jms-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-jms-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`broker_url`](#plugins-inputs-jms-broker_url) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`destination`](#plugins-inputs-jms-destination) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`durable_subscriber`](#plugins-inputs-jms-durable_subscriber) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`durable_subscriber_client_id`](#plugins-inputs-jms-durable_subscriber_client_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`durable_subscriber_name`](#plugins-inputs-jms-durable_subscriber_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-inputs-jms-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`factory`](#plugins-inputs-jms-factory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`factory_settings`](#plugins-inputs-jms-factory_settings) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`headers_target`](#plugins-inputs-jms-headers_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_body`](#plugins-inputs-jms-include_body) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_header`](#plugins-inputs-jms-include_header) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_headers`](#plugins-inputs-jms-include_headers) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_properties`](#plugins-inputs-jms-include_properties) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`interval`](#plugins-inputs-jms-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jndi_context`](#plugins-inputs-jms-jndi_context) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`jndi_name`](#plugins-inputs-jms-jndi_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`keystore`](#plugins-inputs-jms-keystore) | a valid filesystem path | No |
+| [`keystore_password`](#plugins-inputs-jms-keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`oracle_aq_buffered_messages`](#plugins-inputs-jms-oracle_aq_buffered_messages) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-inputs-jms-password) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`properties_target`](#plugins-inputs-jms-properties_target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pub_sub`](#plugins-inputs-jms-pub_sub) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`require_jars`](#plugins-inputs-jms-require_jars) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`runner`](#plugins-inputs-jms-runner) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`selector`](#plugins-inputs-jms-selector) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`skip_headers`](#plugins-inputs-jms-skip_headers) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`skip_properties`](#plugins-inputs-jms-skip_properties) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`system_properties`](#plugins-inputs-jms-system_properties) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`target`](#plugins-inputs-jms-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-jms-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeout`](#plugins-inputs-jms-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`truststore`](#plugins-inputs-jms-truststore) | a valid filesystem path | No |
+| [`truststore_password`](#plugins-inputs-jms-truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`use_jms_timestamp`](#plugins-inputs-jms-use_jms_timestamp) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`username`](#plugins-inputs-jms-username) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`yaml_file`](#plugins-inputs-jms-yaml_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`yaml_section`](#plugins-inputs-jms-yaml_section) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-jms-common-options) for a list of options supported by all input plugins.
+
+
+
+### `broker_url` [plugins-inputs-jms-broker_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Url to use when connecting to the JMS provider. This is only relevant for non-JNDI configurations.
+
+
+### `destination` [plugins-inputs-jms-destination]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Name of the destination queue or topic to use.
+
+::::{tip}
+If the destination setting doesn’t appear to be working properly, try this format: `!`.
+::::
+
+
+
+### `durable_subscriber` [plugins-inputs-jms-durable_subscriber]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* This is `false` by default
+* Requires `pub_sub` to be true
+
+Setting this value to `true` makes subscriptions to topics "durable", which allows messages that arrived on the specified topic while Logstash was not running to still be read. Without setting this value, any messages sent to a topic while Logstash is not actively listening will be lost. A durable subscriber specifies a unique identity consisting of the topic (`destination`), the client id (`durable_subscriber_client_id`) and subscriber name (`durable_subscriber_name`). See your JMS Provider documentation for any further requirements/limitations around these settings. A configuration sketch follows the notes below.
+
+* Note that a durable subscription can only have one active subscriber at a time.
+* Note that this setting is only permitted when `pub_sub` is set to true, and will generate a configuration error otherwise
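+
+A minimal sketch of a durable topic subscription (the client id and subscriber name are hypothetical; broker connection settings omitted):
+
+```ruby
+input {
+  jms {
+    destination => "mytopic"
+    pub_sub => true                                       # durable subscriptions require topics
+    durable_subscriber => true
+    durable_subscriber_client_id => "logstash-indexer"    # hypothetical client id
+    durable_subscriber_name => "mytopic-subscription"     # hypothetical subscriber name
+    # ... broker connection settings omitted
+  }
+}
+```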
+
+
+### `durable_subscriber_client_id` [plugins-inputs-jms-durable_subscriber_client_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* If `durable_subscriber` is set, the default value for this setting is *Logstash*, otherwise this setting has no effect
+
+This represents the value of the client ID for a durable subscription, and is only used if `durable_subscriber` is set to `true`.
+
+
+### `durable_subscriber_name` [plugins-inputs-jms-durable_subscriber_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* If `durable_subscriber` is set, the default value for this setting will be the same value as the `destination` setting, otherwise this setting has no effect.
+
+This represents the value of the subscriber name for a durable subscription, and is only used if `durable_subscriber` is set to `true`. Please consult your JMS Provider documentation for constraints and requirements for this setting.
+
+
+### `ecs_compatibility` [plugins-inputs-jms-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (fields might be set at the root of the event)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, JMS specific properties)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the *default* value of [`headers_target`](#plugins-inputs-jms-headers_target) and [`properties_target`](#plugins-inputs-jms-properties_target).
+
+
+### `factory` [plugins-inputs-jms-factory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Full name (including package name) of Java connection factory used to create a connection with your JMS provider.
+
+
+### `factory_settings` [plugins-inputs-jms-factory_settings]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Hash of implementation specific configuration values to set on the connection factory of the JMS provider. Each JMS Provider will have its own set of parameters that can be used here. These parameters are mapped to `set` methods on the provided connection factory, and can be supplied in either *snake* or *camel* case. For example, a hash including `exclusive_consumer => true` would call `setExclusiveConsumer(true)` on the supplied connection factory. See your JMS provider documentation for implementation specific details.
+
+
+### `headers_target` [plugins-inputs-jms-headers_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-jms-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: no default value for this setting
+    * ECS Compatibility enabled: `"[@metadata][input][jms][headers]"`
+
+
+The name of the field under which JMS headers will be added, if [`include_headers`](#plugins-inputs-jms-include_headers) is set.
+
+
+### `include_body` [plugins-inputs-jms-include_body]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Include JMS Message Body in the event. Supports TextMessage, MapMessage and BytesMessage.
+
+If the JMS Message is a TextMessage or BytesMessage, then the value will be in the "message" field of the event. If the JMS Message is a MapMessage, then all the key/value pairs will be added at the top-level of the event by default. To avoid pollution of the top-level namespace, when receiving a MapMessage, use the [`target`](#plugins-inputs-jms-target).
+
+StreamMessage and ObjectMessage are not supported.
+
+
+### `include_header` [plugins-inputs-jms-include_header]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* This option is deprecated
+
+Note: This option is deprecated and it will be removed in the next major version of Logstash. Use `include_headers` instead.
+
+
+### `include_headers` [plugins-inputs-jms-include_headers]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+A JMS message has three parts:
+
+* Message Headers (required)
+* Message Properties (optional)
+* Message Body (optional)
+
+You can tell the input plugin which parts should be included in the event produced by Logstash.
+
+Include standard JMS message header field values in the event. Example headers:
+
+```ruby
+ {
+ "jms_message_id" => "ID:amqhost-39547-1636977297920-71:1:1:1:1",
+ "jms_timestamp" => 1636977329102,
+ "jms_expiration" => 0,
+ "jms_delivery_mode" => "persistent",
+ "jms_redelivered" => false,
+ "jms_destination" => "topic://41ad5342149901ad",
+ "jms_priority" => 4,
+ "jms_type" => "sample",
+ "jms_correlation_id" => "28d975cb-14ff-4285-841e-05ef1e0a7ab2"
+ }
+```
+
+
+### `include_properties` [plugins-inputs-jms-include_properties]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Include JMS Message Properties Field values in the event.
+
+
+### `interval` [plugins-inputs-jms-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Polling interval in seconds. This is the time to sleep between polls of a consumed Queue. This parameter has no influence in the case of a subscribed Topic.
+
+
+### `jndi_context` [plugins-inputs-jms-jndi_context]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Only used if using JNDI lookup. Key value pairs to determine how to connect the JMS message brokers if JNDI is being used. Consult your JMS provider documentation for the correct values to use here.
+
+
+### `jndi_name` [plugins-inputs-jms-jndi_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Only used if using JNDI lookup. Name of JNDI entry at which the Factory can be found.
+
+
+### `keystore` [plugins-inputs-jms-keystore]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify it here. This does not work with .pem keys.
+
+
+### `keystore_password` [plugins-inputs-jms-keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Specify the keystore password here. Note, most .jks files created with keytool require a password
+
+
+### `oracle_aq_buffered_messages` [plugins-inputs-jms-oracle_aq_buffered_messages]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Receive Oracle AQ buffered messages. In this mode persistent Oracle AQ JMS messages will not be received. Only for use with Oracle AQ
+
+
+### `password` [plugins-inputs-jms-password]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Password to use when connecting to the JMS provider.
+
+
+### `properties_target` [plugins-inputs-jms-properties_target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-jms-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: no default value for this setting
+    * ECS Compatibility enabled: `"[@metadata][input][jms][properties]"`
+
+
+The name of the field under which JMS properties will be added, if [`include_properties`](#plugins-inputs-jms-include_properties) is set.
+
+
+### `pub_sub` [plugins-inputs-jms-pub_sub]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If pub-sub (topic) style should be used. Note that if `pub_sub` is set to true, `threads` must be set to 1.
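+
+A minimal sketch of topic (pub-sub) mode, showing the `threads` constraint:
+
+```ruby
+input {
+  jms {
+    destination => "mytopic"
+    pub_sub => true   # read from a topic rather than a queue
+    threads => 1      # must be 1 when pub_sub is true
+  }
+}
+```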
+
+
+### `require_jars` [plugins-inputs-jms-require_jars]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+If you do not use a yaml configuration, use either the factory or jndi_name. An optional array of Jar file names to load for the specified JMS provider. By using this option it is not necessary to put all the JMS Provider specific jar files into the java CLASSPATH prior to starting Logstash.
+
+
+### `runner` [plugins-inputs-jms-runner]
+
+* DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+
+
+### `selector` [plugins-inputs-jms-selector]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JMS message selector. Use in conjunction with message headers or properties to filter the messages to be processed. Only messages that match the query specified here will be processed. Check with your JMS provider for the correct JMS message selector syntax.
+
+
+### `skip_headers` [plugins-inputs-jms-skip_headers]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+If `include_headers` is set, a list of headers to skip processing on can be specified here.
+
+
+### `skip_properties` [plugins-inputs-jms-skip_properties]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+If `include_properties` is set, a list of properties to skip processing on can be specified here.
+
+
+### `system_properties` [plugins-inputs-jms-system_properties]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Any system properties that the JMS provider requires can be set either in a hash here, or in `jvm.options`.
+
+
+### `target` [plugins-inputs-jms-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the field to assign MapMessage data into. If not specified, data will be stored in the root of the event.
+
+::::{note}
+For TextMessage and BytesMessage the `target` has no effect. Use the codec setting instead e.g. `codec => json { target => "[jms]" }`.
+::::
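+
+As a sketch, a `jms` input that nests decoded JSON TextMessage payloads under a `[jms]` field via the codec could look like the following; the queue name is a placeholder:
+
+```ruby
+input {
+  jms {
+    destination => "myQueue"
+    # For TextMessage and BytesMessage payloads, nest decoded data via the codec.
+    codec => json { target => "[jms]" }
+  }
+}
+```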
+
+
+
+### `threads` [plugins-inputs-jms-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+::::{note}
+If pub_sub is set to `true`, this value **must** be `1`. A configuration error will be thrown otherwise!
+::::
+
+
+
+### `timeout` [plugins-inputs-jms-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Initial connection timeout in seconds.
+
+
+### `truststore` [plugins-inputs-jms-truststore]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify it here. This does not work with .pem certs.
+
+
+### `truststore_password` [plugins-inputs-jms-truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Specify the truststore password here. Note that most `.jks` files created with keytool require a password.
+
+
+### `use_jms_timestamp` [plugins-inputs-jms-use_jms_timestamp]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Convert the `JMSTimestamp` header field to the `@timestamp` value of the event.
+
+
+### `username` [plugins-inputs-jms-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username to use for connecting to JMS provider.
+
+
+### `yaml_file` [plugins-inputs-jms-yaml_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to a YAML configuration file containing the JMS connection settings.
+
+
+### `yaml_section` [plugins-inputs-jms-yaml_section]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+YAML config file section name. For some known examples, see [jms.yml examples](https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml).
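+
+Putting the two YAML settings together, a sketch with a hypothetical file path, section name, and destination:
+
+```ruby
+input {
+  jms {
+    # Read the broker connection settings from one section of a shared YAML file.
+    yaml_file => "/etc/logstash/jms.yml"
+    yaml_section => "activemq"
+    destination => "myQueue"
+  }
+}
+```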
+
+
+
+## Common options [plugins-inputs-jms-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-jms-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-jms-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-jms-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-jms-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-jms-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-jms-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-jms-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-jms-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-jms-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-jms-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 jms inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ jms {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-jms-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-jms-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-jmx.md b/docs/reference/plugins-inputs-jmx.md
new file mode 100644
index 000000000..4b485a290
--- /dev/null
+++ b/docs/reference/plugins-inputs-jmx.md
@@ -0,0 +1,237 @@
+---
+navigation_title: "jmx"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-jmx.html
+---
+
+# Jmx input plugin [plugins-inputs-jmx]
+
+
+* Plugin version: v3.0.7
+* Released on: 2018-08-13
+* [Changelog](https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-jmx-index.md).
+
+## Installation [_installation_5]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-jmx`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_34]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-jmx). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_34]
+
+This input plugin retrieves metrics from remote Java applications using JMX. Every `polling_frequency`, it scans a folder containing JSON configuration files that describe the JVMs to monitor and the metrics to retrieve. A pool of threads then retrieves the metrics and creates events.
+
+
+## The configuration [_the_configuration]
+
+In the Logstash configuration, you must set the polling frequency, the number of threads used to poll metrics, and the absolute path of a directory containing JSON files with the per-JVM configuration of metrics to retrieve. Logstash input configuration example:
+
+```ruby
+  jmx {
+    # Required
+    path => "/apps/logstash_conf/jmxconf"
+    # Optional, default 60s
+    polling_frequency => 15
+    type => "jmx"
+    # Optional, default 4
+    nb_thread => 4
+  }
+```
+
+Json JMX configuration example:
+
+```js
+ {
+ //Required, JMX listening host/ip
+ "host" : "192.168.1.2",
+ //Required, JMX listening port
+ "port" : 1335,
+ //Optional, the username to connect to JMX
+ "username" : "user",
+ //Optional, the password to connect to JMX
+ "password": "pass",
+    //Optional, use this alias as a prefix in the metric name. If not set use <host>_<port>
+ "alias" : "test.homeserver.elasticsearch",
+ //Required, list of JMX metrics to retrieve
+ "queries" : [
+ {
+ //Required, the object name of Mbean to request
+ "object_name" : "java.lang:type=Memory",
+ //Optional, use this alias in the metrics value instead of the object_name
+ "object_alias" : "Memory"
+ }, {
+ "object_name" : "java.lang:type=Runtime",
+ //Optional, set of attributes to retrieve. If not set retrieve
+ //all metrics available on the configured object_name.
+ "attributes" : [ "Uptime", "StartTime" ],
+ "object_alias" : "Runtime"
+ }, {
+ //object_name can be configured with * to retrieve all matching Mbeans
+ "object_name" : "java.lang:type=GarbageCollector,name=*",
+ "attributes" : [ "CollectionCount", "CollectionTime" ],
+ //object_alias can be based on specific value from the object_name thanks to ${}.
+ //In this case ${type} will be replaced by GarbageCollector...
+ "object_alias" : "${type}.${name}"
+ }, {
+ "object_name" : "java.nio:type=BufferPool,name=*",
+ "object_alias" : "${type}.${name}"
+ } ]
+ }
+```
+
+Here are examples of generated events. When the returned metric value type is number/boolean, it is stored in the `metric_value_number` event field; otherwise it is stored in the `metric_value_string` event field.
+
+```ruby
+ {
+ "@version" => "1",
+ "@timestamp" => "2014-02-18T20:57:27.688Z",
+ "host" => "192.168.1.2",
+ "path" => "/apps/logstash_conf/jmxconf",
+ "type" => "jmx",
+ "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount",
+ "metric_value_number" => 2212
+ }
+```
+
+```ruby
+ {
+ "@version" => "1",
+ "@timestamp" => "2014-02-18T20:58:06.376Z",
+ "host" => "localhost",
+ "path" => "/apps/logstash_conf/jmxconf",
+ "type" => "jmx",
+ "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName",
+ "metric_value_string" => "java.nio:type=BufferPool,name=mapped"
+ }
+```
+
+
+## Jmx Input Configuration Options [plugins-inputs-jmx-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-jmx-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`nb_thread`](#plugins-inputs-jmx-nb_thread) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`path`](#plugins-inputs-jmx-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`polling_frequency`](#plugins-inputs-jmx-polling_frequency) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-jmx-common-options) for a list of options supported by all input plugins.
+
+
+
+### `nb_thread` [plugins-inputs-jmx-nb_thread]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4`
+
+Number of threads launched to retrieve metrics.
+
+
+### `path` [plugins-inputs-jmx-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to the directory where the JSON configuration files are stored.
+
+
+### `polling_frequency` [plugins-inputs-jmx-polling_frequency]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Interval between two JMX metrics retrievals, in seconds.
+
+
+
+## Common options [plugins-inputs-jmx-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-jmx-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-jmx-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-jmx-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-jmx-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-jmx-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-jmx-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-jmx-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-jmx-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-jmx-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-jmx-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 jmx inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ jmx {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-jmx-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-jmx-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-kafka.md b/docs/reference/plugins-inputs-kafka.md
new file mode 100644
index 000000000..c733ee6f6
--- /dev/null
+++ b/docs/reference/plugins-inputs-kafka.md
@@ -0,0 +1,883 @@
+---
+navigation_title: "kafka"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kafka.html
+---
+
+# Kafka input plugin [plugins-inputs-kafka]
+
+
+* A component of the [kafka integration plugin](/reference/plugins-integrations-kafka.md)
+* Integration version: v11.6.0
+* Released on: 2025-01-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-kafka-index.md).
+
+## Getting help [_getting_help_35]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-kafka). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_35]
+
+This input will read events from a Kafka topic.
+
+This plugin uses Kafka Client 3.8.1. For broker compatibility, see the official [Kafka compatibility reference](https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix). If the linked compatibility wiki is not up-to-date, please contact Kafka support/community to confirm compatibility.
+
+If you require features not yet available in this plugin (including client version upgrades), please file an issue with details about what you need.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+::::{note}
+This plugin does not support using a proxy when communicating to the Kafka broker.
+
+This plugin does support using a proxy when communicating to the Schema Registry using the [`schema_registry_proxy`](#plugins-inputs-kafka-schema_registry_proxy) option.
+
+::::
+
+
+The Logstash Kafka consumer handles group management and uses the default offset management strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics. Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, you could run multiple Logstash instances with the same `group_id` to spread the load across physical machines. Messages in a topic will be distributed to all Logstash instances with the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance. More threads than partitions means that some threads will be idle.
+
+For more information see [https://kafka.apache.org/38/documentation.html#theconsumer](https://kafka.apache.org/38/documentation.html#theconsumer)
+
+Kafka consumer configuration: [https://kafka.apache.org/38/documentation.html#consumerconfigs](https://kafka.apache.org/38/documentation.html#consumerconfigs)
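+
+As a quick orientation, a minimal consumer sketch might look like the following; the broker addresses, topic, and group name are placeholders:
+
+```ruby
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9092,kafka2:9092"
+    topics => ["web-logs"]
+    group_id => "logstash-web-logs"
+    # Ideally matches the partition count of the topic.
+    consumer_threads => 4
+  }
+}
+```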
+
+
+## Metadata fields [_metadata_fields]
+
+The following metadata from the Kafka broker is added under the `[@metadata]` field:
+
+* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed.
+* `[@metadata][kafka][consumer_group]`: Consumer group
+* `[@metadata][kafka][partition]`: Partition info for this message.
+* `[@metadata][kafka][offset]`: Original record offset for this message.
+* `[@metadata][kafka][key]`: Record key, if any.
+* `[@metadata][kafka][timestamp]`: Timestamp in the Record. Depending on your broker configuration, this can be either when the record was created (default) or when it was received by the broker. See more about the `log.message.timestamp.type` property at [https://kafka.apache.org/38/documentation.html#brokerconfigs](https://kafka.apache.org/38/documentation.html#brokerconfigs)
+
+Metadata is only added to the event if the `decorate_events` option is set to `basic` or `extended` (it defaults to `none`).
+
+Please note that `@metadata` fields are not part of any of your events at output time. If you need this information to be inserted into your original event, you’ll have to use the `mutate` filter to manually copy the required fields into your `event`.
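+
+For example, a sketch that enables basic metadata and copies the topic and offset into the event body; the target field names are arbitrary:
+
+```ruby
+input {
+  kafka {
+    topics => ["web-logs"]
+    decorate_events => "basic"
+  }
+}
+
+filter {
+  # @metadata is dropped at output time, so copy what you need into the event.
+  mutate {
+    add_field => {
+      "kafka_topic"  => "%{[@metadata][kafka][topic]}"
+      "kafka_offset" => "%{[@metadata][kafka][offset]}"
+    }
+  }
+}
+```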
+
+
+## Kafka Input Configuration Options [plugins-inputs-kafka-options]
+
+This plugin supports these configuration options plus the [Common options](#plugins-inputs-kafka-common-options) described later.
+
+::::{note}
+Some of these options map to a Kafka option. Defaults usually reflect the Kafka default setting, and might change if Kafka’s consumer defaults change. See the [https://kafka.apache.org/38/documentation](https://kafka.apache.org/38/documentation) for more details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`auto_commit_interval_ms`](#plugins-inputs-kafka-auto_commit_interval_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`auto_create_topics`](#plugins-inputs-kafka-auto_create_topics) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`auto_offset_reset`](#plugins-inputs-kafka-auto_offset_reset) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`bootstrap_servers`](#plugins-inputs-kafka-bootstrap_servers) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`check_crcs`](#plugins-inputs-kafka-check_crcs) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`client_dns_lookup`](#plugins-inputs-kafka-client_dns_lookup) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`client_id`](#plugins-inputs-kafka-client_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`client_rack`](#plugins-inputs-kafka-client_rack) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`connections_max_idle_ms`](#plugins-inputs-kafka-connections_max_idle_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`consumer_threads`](#plugins-inputs-kafka-consumer_threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`decorate_events`](#plugins-inputs-kafka-decorate_events) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`enable_auto_commit`](#plugins-inputs-kafka-enable_auto_commit) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`exclude_internal_topics`](#plugins-inputs-kafka-exclude_internal_topics) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`fetch_max_bytes`](#plugins-inputs-kafka-fetch_max_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`fetch_max_wait_ms`](#plugins-inputs-kafka-fetch_max_wait_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`fetch_min_bytes`](#plugins-inputs-kafka-fetch_min_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`group_id`](#plugins-inputs-kafka-group_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`group_instance_id`](#plugins-inputs-kafka-group_instance_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`heartbeat_interval_ms`](#plugins-inputs-kafka-heartbeat_interval_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`isolation_level`](#plugins-inputs-kafka-isolation_level) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`jaas_path`](#plugins-inputs-kafka-jaas_path) | a valid filesystem path | No |
+| [`kerberos_config`](#plugins-inputs-kafka-kerberos_config) | a valid filesystem path | No |
+| [`key_deserializer_class`](#plugins-inputs-kafka-key_deserializer_class) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`max_partition_fetch_bytes`](#plugins-inputs-kafka-max_partition_fetch_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_poll_interval_ms`](#plugins-inputs-kafka-max_poll_interval_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_poll_records`](#plugins-inputs-kafka-max_poll_records) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`metadata_max_age_ms`](#plugins-inputs-kafka-metadata_max_age_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`partition_assignment_strategy`](#plugins-inputs-kafka-partition_assignment_strategy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`poll_timeout_ms`](#plugins-inputs-kafka-poll_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`receive_buffer_bytes`](#plugins-inputs-kafka-receive_buffer_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect_backoff_ms`](#plugins-inputs-kafka-reconnect_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`request_timeout_ms`](#plugins-inputs-kafka-request_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_backoff_ms`](#plugins-inputs-kafka-retry_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_client_callback_handler_class`](#plugins-inputs-kafka-sasl_client_callback_handler_class) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_oauthbearer_token_endpoint_url`](#plugins-inputs-kafka-sasl_oauthbearer_token_endpoint_url) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_oauthbearer_scope_claim_name`](#plugins-inputs-kafka-sasl_oauthbearer_scope_claim_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_login_callback_handler_class`](#plugins-inputs-kafka-sasl_login_callback_handler_class) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_login_connect_timeout_ms`](#plugins-inputs-kafka-sasl_login_connect_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_read_timeout_ms`](#plugins-inputs-kafka-sasl_login_read_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_retry_backoff_ms`](#plugins-inputs-kafka-sasl_login_retry_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_retry_backoff_max_ms`](#plugins-inputs-kafka-sasl_login_retry_backoff_max_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_jaas_config`](#plugins-inputs-kafka-sasl_jaas_config) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_kerberos_service_name`](#plugins-inputs-kafka-sasl_kerberos_service_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_mechanism`](#plugins-inputs-kafka-sasl_mechanism) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`schema_registry_key`](#plugins-inputs-kafka-schema_registry_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`schema_registry_proxy`](#plugins-inputs-kafka-schema_registry_proxy) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`schema_registry_secret`](#plugins-inputs-kafka-schema_registry_secret) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`schema_registry_ssl_keystore_location`](#plugins-inputs-kafka-schema_registry_ssl_keystore_location) | a valid filesystem path | No |
+| [`schema_registry_ssl_keystore_password`](#plugins-inputs-kafka-schema_registry_ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`schema_registry_ssl_keystore_type`](#plugins-inputs-kafka-schema_registry_ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string), one of `["jks", "PKCS12"]` | No |
+| [`schema_registry_ssl_truststore_location`](#plugins-inputs-kafka-schema_registry_ssl_truststore_location) | a valid filesystem path | No |
+| [`schema_registry_ssl_truststore_password`](#plugins-inputs-kafka-schema_registry_ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`schema_registry_ssl_truststore_type`](#plugins-inputs-kafka-schema_registry_ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string), one of `["jks", "PKCS12"]` | No |
+| [`schema_registry_url`](#plugins-inputs-kafka-schema_registry_url) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`schema_registry_validation`](#plugins-inputs-kafka-schema_registry_validation) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`security_protocol`](#plugins-inputs-kafka-security_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]` | No |
+| [`send_buffer_bytes`](#plugins-inputs-kafka-send_buffer_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`session_timeout_ms`](#plugins-inputs-kafka-session_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_endpoint_identification_algorithm`](#plugins-inputs-kafka-ssl_endpoint_identification_algorithm) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_key_password`](#plugins-inputs-kafka-ssl_key_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_location`](#plugins-inputs-kafka-ssl_keystore_location) | a valid filesystem path | No |
+| [`ssl_keystore_password`](#plugins-inputs-kafka-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_type`](#plugins-inputs-kafka-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string), one of `["jks", "PKCS12"]` | No |
+| [`ssl_truststore_location`](#plugins-inputs-kafka-ssl_truststore_location) | a valid filesystem path | No |
+| [`ssl_truststore_password`](#plugins-inputs-kafka-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_type`](#plugins-inputs-kafka-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string), one of `["jks", "PKCS12"]` | No |
+| [`topics`](#plugins-inputs-kafka-topics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`topics_pattern`](#plugins-inputs-kafka-topics_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`value_deserializer_class`](#plugins-inputs-kafka-value_deserializer_class) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-kafka-common-options) for a list of options supported by all input plugins.
+
+
+
+### `auto_commit_interval_ms` [plugins-inputs-kafka-auto_commit_interval_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5000`.
+
+The frequency in milliseconds that the consumer offsets are committed to Kafka.
+
+
+### `auto_offset_reset` [plugins-inputs-kafka-auto_offset_reset]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw exception to the consumer if no previous offset is found for the consumer’s group
+* anything else: throw exception to the consumer.
+
+
+### `bootstrap_servers` [plugins-inputs-kafka-bootstrap_servers]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. This list should be in the form of `host1:port1,host2:port2`. These URLs are just used for the initial connection to discover the full cluster membership (which may change dynamically), so this list need not contain the full set of servers (you may want more than one, though, in case a server is down).
+
+
+### `check_crcs` [plugins-inputs-kafka-check_crcs]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk corruption to the messages occurred. This check adds some overhead, so it may be disabled in cases seeking extreme performance.
+
+
+### `client_dns_lookup` [plugins-inputs-kafka-client_dns_lookup]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"default"`
+
+How DNS lookups should be done. If set to `use_all_dns_ips`, when the lookup returns multiple IP addresses for a hostname, all of them will be attempted before the connection fails. If the value is `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names.
+
+::::{note}
+Starting from Kafka 3, the `default` value for `client.dns.lookup` has been removed. If explicitly configured, it falls back to `use_all_dns_ips`.
+
+::::
+
+
+
+### `client_id` [plugins-inputs-kafka-client_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included.
+
+
+### `client_rack` [plugins-inputs-kafka-client_rack]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A rack identifier for the Kafka consumer. Used to select the physically closest rack for the consumer to read from. The setting corresponds with Kafka’s `broker.rack` configuration.
+
+::::{note}
+Available only for Kafka 2.4.0 and higher. See [KIP-392](https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica).
+::::
+
+
+
+### `connections_max_idle_ms` [plugins-inputs-kafka-connections_max_idle_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `540000` milliseconds (9 minutes).
+
+Close idle connections after the number of milliseconds specified by this config.
+
+
+### `consumer_threads` [plugins-inputs-kafka-consumer_threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect balance. More threads than partitions means that some threads will be idle.
+
+
+### `decorate_events` [plugins-inputs-kafka-decorate_events]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Accepted values are:
+
+ * `none`: no metadata is added
+ * `basic`: record’s attributes are added
+ * `extended`: record’s attributes, headers are added (limited to headers with values using UTF-8 encoding)
+ * `false`: deprecated alias for `none`
+ * `true`: deprecated alias for `basic`
+
+* Default value is `none`
+
+Option to add Kafka metadata like topic, message size and header key values to the event. This will add a field named `kafka` to the logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A ByteBuffer containing the message key
+
+
+### `auto_create_topics` [plugins-inputs-kafka-auto_create_topics]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Controls whether the topic is automatically created when subscribing to a non-existent topic. A topic will be auto-created only if this configuration is set to `true` and auto-topic creation is enabled on the broker using `auto.create.topics.enable`; otherwise auto-topic creation is not permitted.
+
+
+### `enable_auto_commit` [plugins-inputs-kafka-enable_auto_commit]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+If `true`, periodically commit to Kafka the offsets of messages already returned by the consumer. If `false`, the offset is committed every time the consumer writes data fetched from the topic to the in-memory or persistent queue. This committed offset will be used when the process fails as the position from which consumption will begin.
+
+
+### `exclude_internal_topics` [plugins-inputs-kafka-exclude_internal_topics]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer. If set to `true`, the only way to receive records from an internal topic is subscribing to it.
+
+
+### `fetch_max_bytes` [plugins-inputs-kafka-fetch_max_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `52428800` (50MB)
+
+The maximum amount of data the server should return for a fetch request. This is not an absolute maximum; if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned to ensure that the consumer can make progress.
+
+
+### `fetch_max_wait_ms` [plugins-inputs-kafka-fetch_max_wait_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `500` milliseconds.
+
+The maximum amount of time the server will block before answering the fetch request if there isn’t sufficient data to immediately satisfy `fetch_min_bytes`. This should be less than or equal to the timeout used in `poll_timeout_ms`
+
+
+### `fetch_min_bytes` [plugins-inputs-kafka-fetch_min_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.
+
+
+### `group_id` [plugins-inputs-kafka-group_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber that happens to be made up of multiple processors. Messages in a topic will be distributed to all Logstash instances with the same `group_id`.
+
+::::{note}
+In cases when multiple inputs are being used in a single pipeline, reading from different topics, it’s essential to set a different `group_id => ...` for each input. Setting a unique `client_id => ...` is also recommended.
+::::
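+
+A sketch of two inputs in one pipeline reading different topics under distinct group and client ids; all names are placeholders:
+
+```ruby
+input {
+  kafka {
+    topics => ["orders"]
+    group_id => "logstash-orders"
+    client_id => "logstash-orders-0"
+  }
+  kafka {
+    topics => ["payments"]
+    group_id => "logstash-payments"
+    client_id => "logstash-payments-0"
+  }
+}
+```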
+
+
+
+### `group_instance_id` [plugins-inputs-kafka-group_instance_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The static membership identifier for this Logstash Kafka consumer. Static membership feature was introduced in [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances), available under Kafka property `group.instance.id`. Its purpose is to avoid rebalances in situations in which a lot of data has to be forwarded after a consumer goes offline. This feature mitigates cases where the service state is heavy and the rebalance of one topic partition from instance A to B would cause a huge amount of data to be transferred. A client that goes offline/online frequently can avoid frequent and heavy rebalances by using this option.
+
+::::{note}
+The `group_instance_id` setting must be unique across all the clients belonging to the same [`group_id`](#plugins-inputs-kafka-group_id). Otherwise, another client connecting with the same `group.instance.id` value would cause the oldest instance to be disconnected. You can set this value to use information such as a hostname, an IP, or anything that uniquely identifies the client application.
+::::
+
+
+::::{note}
+In cases when multiple threads are configured and `consumer_threads` is greater than one, a suffix is appended to the `group_instance_id` to avoid collisions.
+::::
+
+
+
+### `heartbeat_interval_ms` [plugins-inputs-kafka-heartbeat_interval_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3000` milliseconds (3 seconds).
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure that the consumer’s session stays active and to facilitate rebalancing when new consumers join or leave the group. The value must be set lower than `session.timeout.ms`, but typically should be set no higher than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.
+
+
+### `isolation_level` [plugins-inputs-kafka-isolation_level]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"read_uncommitted"`
+
+Controls how to read messages written transactionally. If set to `read_committed`, polling messages will only return transactional messages which have been committed. If set to `read_uncommitted` (the default), polling messages will return all messages, even transactional messages which have been aborted. Non-transactional messages will be returned unconditionally in either mode.
+
+
+### `jaas_path` [plugins-inputs-kafka-jaas_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+
+```java
+KafkaClient {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useTicketCache=true
+ renewTicket=true
+ serviceName="kafka";
+ };
+```
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same `jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on different JVM instances.
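+
+As an illustration only, a Kerberos-secured consumer might combine these settings roughly as follows; the file paths and service name are hypothetical and depend on your environment:
+
+```ruby
+input {
+  kafka {
+    security_protocol => "SASL_SSL"
+    sasl_mechanism => "GSSAPI"
+    sasl_kerberos_service_name => "kafka"
+    # These two paths become global JVM properties shared by all Kafka plugins in this Logstash process.
+    jaas_path => "/etc/logstash/kafka_client_jaas.conf"
+    kerberos_config => "/etc/krb5.conf"
+  }
+}
+```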
+
+
+### `kerberos_config` [plugins-inputs-kafka-kerberos_config]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Optional path to kerberos config file. This is krb5.conf style as detailed in [https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html](https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
+
+
+### `key_deserializer_class` [plugins-inputs-kafka-key_deserializer_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java Class used to deserialize the record’s key
+
+
+### `max_partition_fetch_bytes` [plugins-inputs-kafka-max_partition_fetch_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1048576` (1MB).
+
+The maximum amount of data per-partition the server will return. The maximum total memory used for a request will be `#partitions * max.partition.fetch.bytes`. This size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying to fetch a large message on a certain partition.
+
+
+### `max_poll_interval_ms` [plugins-inputs-kafka-max_poll_interval_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300000` milliseconds (5 minutes).
+
+The maximum delay between invocations of poll() when using consumer group management. This places an upper bound on the amount of time that the consumer can be idle before fetching more records. If poll() is not called before expiration of this timeout, then the consumer is considered failed and the group will rebalance in order to reassign the partitions to another member.
+
+
+### `max_poll_records` [plugins-inputs-kafka-max_poll_records]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `500`.
+
+The maximum number of records returned in a single call to poll().
+
+
+### `metadata_max_age_ms` [plugins-inputs-kafka-metadata_max_age_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300000` milliseconds (5 minutes).
+
+The period of time in milliseconds after which we force a refresh of metadata, even if we haven’t seen any partition leadership changes, to proactively discover any new brokers or partitions.
+
+
+### `partition_assignment_strategy` [plugins-inputs-kafka-partition_assignment_strategy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the partition assignment strategy that the client uses to distribute partition ownership amongst consumer instances. Supported options are:
+
+* `range`
+* `round_robin`
+* `sticky`
+* `cooperative_sticky`
+
+These map to Kafka’s corresponding [`ConsumerPartitionAssignor`](https://kafka.apache.org/38/javadoc/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.html) implementations.
+
+
+### `poll_timeout_ms` [plugins-inputs-kafka-poll_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100` milliseconds.
+
+Time Kafka consumer will wait to receive new messages from topics.
+
+After subscribing to a set of topics, the Kafka consumer automatically joins the group when polling. The plugin polling in a loop ensures consumer liveness. Under the covers, the Kafka client sends periodic heartbeats to the server. The timeout specifies the time to block while waiting for input on each poll.
+
+
+### `receive_buffer_bytes` [plugins-inputs-kafka-receive_buffer_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `32768` (32KB).
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+
+### `reconnect_backoff_ms` [plugins-inputs-kafka-reconnect_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50` milliseconds.
+
+The amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all requests sent by the consumer to the broker.
+
+
+### `request_timeout_ms` [plugins-inputs-kafka-request_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `40000` milliseconds (40 seconds).
+
+The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary, or fail the request if retries are exhausted.
+
+
+### `retry_backoff_ms` [plugins-inputs-kafka-retry_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100` milliseconds.
+
+The amount of time to wait before attempting to retry a failed fetch request to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+
+### `sasl_client_callback_handler_class` [plugins-inputs-kafka-sasl_client_callback_handler_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The SASL client callback handler class the specified SASL mechanism should use.
+
+
+### `sasl_oauthbearer_token_endpoint_url` [plugins-inputs-kafka-sasl_oauthbearer_token_endpoint_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The URL for the OAuth 2.0 issuer token endpoint.
+
+
+### `sasl_oauthbearer_scope_claim_name` [plugins-inputs-kafka-sasl_oauthbearer_scope_claim_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"scope"`
+
+(optional) The override name of the scope claim.
+
+
+### `sasl_login_callback_handler_class` [plugins-inputs-kafka-sasl_login_callback_handler_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The SASL login callback handler class the specified SASL mechanism should use.
+
+
+### `sasl_login_connect_timeout_ms` [plugins-inputs-kafka-sasl_login_connect_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+(optional) The duration, in milliseconds, for HTTPS connect timeout
+
+
+### `sasl_login_read_timeout_ms` [plugins-inputs-kafka-sasl_login_read_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+(optional) The duration, in milliseconds, for HTTPS read timeout.
+
+
+### `sasl_login_retry_backoff_ms` [plugins-inputs-kafka-sasl_login_retry_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100` milliseconds.
+
+(optional) The duration, in milliseconds, to wait between HTTPS call attempts.
+
+
+### `sasl_login_retry_backoff_max_ms` [plugins-inputs-kafka-sasl_login_retry_backoff_max_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000` milliseconds.
+
+(optional) The maximum duration, in milliseconds, for HTTPS call attempts.
+
+
+### `sasl_jaas_config` [plugins-inputs-kafka-sasl_jaas_config]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JAAS configuration setting local to this plugin instance, as opposed to settings in a config file specified with `jaas_path`, which are shared across the JVM. This allows each plugin instance to have its own configuration.
+
+If both `sasl_jaas_config` and `jaas_path` configurations are set, the setting here takes precedence.
+
+Example (setting for Azure Event Hub):
+
+```ruby
+ input {
+ kafka {
+ sasl_jaas_config => "org.apache.kafka.common.security.plain.PlainLoginModule required username='auser' password='apassword';"
+ }
+ }
+```
+
+
+### `sasl_kerberos_service_name` [plugins-inputs-kafka-sasl_kerberos_service_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The Kerberos principal name that Kafka broker runs as. This can be defined either in Kafka’s JAAS config or in Kafka’s config.
+
+
+### `sasl_mechanism` [plugins-inputs-kafka-sasl_mechanism]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"GSSAPI"`
+
+[SASL mechanism](http://kafka.apache.org/documentation.html#security_sasl) used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.
+
+
+### `schema_registry_key` [plugins-inputs-kafka-schema_registry_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the username for basic authorization to access remote Schema Registry.
+
+
+### `schema_registry_proxy` [plugins-inputs-kafka-schema_registry_proxy]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+* There is no default value for this setting.
+
+Set the address of a forward HTTP proxy. An empty string is treated as if proxy was not set.
+
+
+### `schema_registry_secret` [plugins-inputs-kafka-schema_registry_secret]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set the password for basic authorization to access remote Schema Registry.
+
+
+### `schema_registry_ssl_keystore_location` [plugins-inputs-kafka-schema_registry_ssl_keystore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If schema registry client authentication is required, this setting stores the keystore path.
+
+
+### `schema_registry_ssl_keystore_password` [plugins-inputs-kafka-schema_registry_ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+If schema registry authentication is required, this setting stores the keystore password.
+
+
+### `schema_registry_ssl_keystore_type` [plugins-inputs-kafka-schema_registry_ssl_keystore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The format of the keystore file. It must be either `jks` or `PKCS12`.
+
+
+### `schema_registry_ssl_truststore_location` [plugins-inputs-kafka-schema_registry_ssl_truststore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore path to validate the schema registry’s certificate.
+
+
+### `schema_registry_ssl_truststore_password` [plugins-inputs-kafka-schema_registry_ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The schema registry truststore password.
+
+
+### `schema_registry_ssl_truststore_type` [plugins-inputs-kafka-schema_registry_ssl_truststore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The format of the schema registry’s truststore file. It must be either `jks` or `PKCS12`.
+
+
+### `schema_registry_url` [plugins-inputs-kafka-schema_registry_url]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+
+The URI that points to an instance of the [Schema Registry](https://docs.confluent.io/current/schema-registry/index.md) service, used to manage Avro schemas. Be sure that the Avro schemas for deserializing the data from the specified topics have been uploaded to the Schema Registry service. The schemas must follow a naming convention with the pattern `<topic name>-value`.
+
+Use either the Schema Registry config option or the [`value_deserializer_class`](#plugins-inputs-kafka-value_deserializer_class) config option, but not both.
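+
+A sketch of consuming records through a Schema Registry; the URL and topic are placeholders, and basic-auth credentials would go in `schema_registry_key` and `schema_registry_secret` if required:
+
+```ruby
+input {
+  kafka {
+    topics => ["avro-events"]
+    # Deserialization is driven by the registry, so leave value_deserializer_class unset.
+    schema_registry_url => "https://schema-registry.example.com:8081"
+  }
+}
+```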
+
+
+### `schema_registry_validation` [plugins-inputs-kafka-schema_registry_validation]
+
+* Value can be either of: `auto`, `skip`
+* Default value is `"auto"`
+
+::::{note}
+Under most circumstances, the default setting of `auto` should not need to be changed.
+::::
+
+
+When using the schema registry, by default the plugin checks connectivity and validates the schema registry, during plugin registration, before events are processed. In some circumstances, this process may fail when it tries to validate an authenticated schema registry, causing the plugin to crash. This setting allows the plugin to skip validation during registration, which allows the plugin to continue and events to be processed. Note that an incorrectly configured schema registry will still stop the plugin from processing events.
+
+
+### `security_protocol` [plugins-inputs-kafka-security_protocol]
+
+* Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+* Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be any of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+
+### `send_buffer_bytes` [plugins-inputs-kafka-send_buffer_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `131072` (128KB).
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+
+### `session_timeout_ms` [plugins-inputs-kafka-session_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000` milliseconds (10 seconds).
+
+The timeout after which, if no poll is invoked, the consumer is marked dead and a rebalance operation is triggered for the group identified by `group_id`.
+
+
+### `ssl_endpoint_identification_algorithm` [plugins-inputs-kafka-ssl_endpoint_identification_algorithm]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"https"`
+
+The endpoint identification algorithm. Defaults to `"https"`. Set to an empty string (`""`) to disable endpoint verification.
+
+
+### `ssl_key_password` [plugins-inputs-kafka-ssl_key_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+
+### `ssl_keystore_location` [plugins-inputs-kafka-ssl_keystore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+
+### `ssl_keystore_password` [plugins-inputs-kafka-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+
+### `ssl_keystore_type` [plugins-inputs-kafka-ssl_keystore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The format of the keystore file. It must be either `jks` or `PKCS12`.
+
+
+### `ssl_truststore_location` [plugins-inputs-kafka-ssl_truststore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker’s certificate.
+
+
+### `ssl_truststore_password` [plugins-inputs-kafka-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The truststore password.
+
+
+### `ssl_truststore_type` [plugins-inputs-kafka-ssl_truststore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The format of the truststore file. It must be either `jks` or `PKCS12`.
+
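+As a hedged sketch of how the broker-facing SSL settings above fit together (the hostnames, paths, and passwords are placeholders):
+
+```ruby
+input {
+  kafka {
+    bootstrap_servers       => "kafka1:9093"
+    security_protocol       => "SSL"
+    ssl_truststore_location => "/etc/logstash/kafka.truststore.jks"
+    ssl_truststore_password => "${TRUSTSTORE_PASS}"
+    # Only needed when the brokers require client authentication
+    ssl_keystore_location   => "/etc/logstash/kafka.keystore.jks"
+    ssl_keystore_password   => "${KEYSTORE_PASS}"
+  }
+}
+```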
+
+### `topics` [plugins-inputs-kafka-topics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+
+### `topics_pattern` [plugins-inputs-kafka-topics_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A topic regular expression pattern to subscribe to.
+
+Filtering by a regular expression is done by retrieving the full list of topic names from the broker and applying the pattern locally. When used with brokers with a lot of topics this operation could be very slow, especially if there are a lot of consumers.
+
+::::{note}
+When the broker has some topics configured with ACL rules and they lack the DESCRIBE permission, the subscription still happens, but on the broker side it is logged that the subscription to those topics was denied for the configured user.
+::::
+
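+For example, a hedged sketch of subscribing by pattern rather than by a fixed topic list (the pattern and group name are placeholders):
+
+```ruby
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9092"
+    topics_pattern    => "app-.*-logs"
+    group_id          => "logstash-pattern-consumers"
+  }
+}
+```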
+
+
+### `value_deserializer_class` [plugins-inputs-kafka-value_deserializer_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record’s value. A custom value deserializer can be used only if you are not using a Schema Registry. Use either the `value_deserializer_class` config option or the [`schema_registry_url`](#plugins-inputs-kafka-schema_registry_url) config option, but not both.
+
+
+
+## Common options [plugins-inputs-kafka-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-kafka-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-kafka-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-kafka-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-kafka-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-kafka-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-kafka-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-kafka-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-kafka-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-kafka-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-kafka-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 kafka inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ kafka {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-kafka-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-kafka-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-kinesis.md b/docs/reference/plugins-inputs-kinesis.md
new file mode 100644
index 000000000..afd0d113e
--- /dev/null
+++ b/docs/reference/plugins-inputs-kinesis.md
@@ -0,0 +1,282 @@
+---
+navigation_title: "kinesis"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-kinesis.html
+---
+
+# Kinesis input plugin [plugins-inputs-kinesis]
+
+
+* Plugin version: v2.3.0
+* Released on: 2023-08-28
+* [Changelog](https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.3.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-kinesis-index.md).
+
+## Installation [_installation_6]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-kinesis`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_36]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-kinesis). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_36]
+
+You can use this plugin to receive events through [AWS Kinesis](http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html). This plugin uses the [Java Kinesis Client Library](http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-implementation-app-java.html). The documentation at [https://github.com/awslabs/amazon-kinesis-client](https://github.com/awslabs/amazon-kinesis-client) will be useful.
+
+AWS credentials can be specified either through environment variables, or an IAM instance role. The library uses a DynamoDB table for worker coordination, so you’ll need to grant access to that as well as to the Kinesis stream. The DynamoDB table has the same name as the `application_name` configuration option, which defaults to "logstash".
+
+The library can optionally also send worker statistics to CloudWatch.
+
+
+## Usage [plugins-inputs-kinesis-usage]
+
+```ruby
+input {
+ kinesis {
+ kinesis_stream_name => "my-logging-stream"
+ codec => json { }
+ }
+}
+```
+
+
+## Using with CloudWatch Logs [plugins-inputs-kinesis-cloudwatch]
+
+If you want to read a CloudWatch Logs subscription stream, you’ll also need to install and configure the [CloudWatch Logs Codec](https://github.com/threadwaste/logstash-codec-cloudwatch_logs).
+
+
+## Authentication [plugins-inputs-kinesis-authentication]
+
+This plugin uses the default AWS SDK auth chain, [DefaultAWSCredentialsProviderChain](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html), to determine which credentials the client will use, unless `profile` is set, in which case [ProfileCredentialsProvider](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/profile/ProfileCredentialsProvider.html) is used.
+
+The default chain reads the credentials in this order:
+
+* `AWS_ACCESS_KEY_ID` / `AWS_SECRET_KEY` environment variables
+* `~/.aws/credentials` credentials file
+* EC2 instance profile
+
+The credentials need access to the following services:
+
+* AWS Kinesis
+* AWS DynamoDB. The client library stores information for worker coordination in DynamoDB (offsets and active worker per partition)
+* AWS CloudWatch. If the metrics are enabled the credentials need CloudWatch update permissions granted.
+
+See the [AWS documentation](https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/DefaultAWSCredentialsProviderChain.html) for more information on the default chain.
+
+
+## Kinesis Input Configuration Options [plugins-inputs-kinesis-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-kinesis-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`application_name`](#plugins-inputs-kinesis-application_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`checkpoint_interval_seconds`](#plugins-inputs-kinesis-checkpoint_interval_seconds) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`http_proxy`](#plugins-inputs-kinesis-http_proxy) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`initial_position_in_stream`](#plugins-inputs-kinesis-initial_position_in_stream) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`kinesis_stream_name`](#plugins-inputs-kinesis-kinesis_stream_name) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`metrics`](#plugins-inputs-kinesis-metrics) | [string](/reference/configuration-file-structure.md#string), one of `[nil, "cloudwatch"]` | No |
+| [`non_proxy_hosts`](#plugins-inputs-kinesis-non_proxy_hosts) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`profile`](#plugins-inputs-kinesis-profile) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-inputs-kinesis-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_arn`](#plugins-inputs-kinesis-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-inputs-kinesis-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`additional_settings`](#plugins-inputs-kinesis-additional_settings) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-kinesis-common-options) for a list of options supported by all input plugins.
+
+
+
+### `application_name` [plugins-inputs-kinesis-application_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The application name used for the DynamoDB coordination table. Must be unique for this Kinesis stream.
+
+
+### `checkpoint_interval_seconds` [plugins-inputs-kinesis-checkpoint_interval_seconds]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+How many seconds between worker checkpoints to DynamoDB.
+
+
+### `http_proxy` [plugins-inputs-kinesis-http_proxy]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Proxy support for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+
+### `initial_position_in_stream` [plugins-inputs-kinesis-initial_position_in_stream]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"TRIM_HORIZON"`
+
+The value for initialPositionInStream. Accepts "TRIM_HORIZON" or "LATEST".
+
+
+### `kinesis_stream_name` [plugins-inputs-kinesis-kinesis_stream_name]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The Kinesis stream name.
+
+
+### `metrics` [plugins-inputs-kinesis-metrics]
+
+* Value can be any of: `nil`, `cloudwatch`
+* Default value is `nil`
+
+Worker metric tracking. By default this is disabled; set it to `"cloudwatch"` to enable the CloudWatch integration in the Kinesis Client Library.
+
+
+### `non_proxy_hosts` [plugins-inputs-kinesis-non_proxy_hosts]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Hosts that should be excluded from proxying, separated by the "|" (pipe) character.
+
+
+### `profile` [plugins-inputs-kinesis-profile]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS profile name for authentication. This ensures that the `~/.aws/credentials` AWS auth provider is used. By default this is empty and the default chain will be used.
+
+
+### `region` [plugins-inputs-kinesis-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+
+### `role_arn` [plugins-inputs-kinesis-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS role to assume. This can be used, for example, to access a Kinesis stream in a different AWS account. This role will be assumed after the default credentials or profile credentials are created. By default this is empty and a role will not be assumed.
+
+
+### `role_session_name` [plugins-inputs-kinesis-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `logstash`
+
+Session name to use when assuming an IAM role. This is recorded in CloudTrail logs, for example.
+
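+A hedged sketch of reading a stream in another AWS account using these settings (the stream name, region, and role ARN are placeholders):
+
+```ruby
+input {
+  kinesis {
+    kinesis_stream_name => "my-logging-stream"
+    region              => "eu-west-1"
+    role_arn            => "arn:aws:iam::123456789012:role/logstash-kinesis-reader"
+    role_session_name   => "logstash-ingest"
+    codec               => json { }
+  }
+}
+```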
+
+### `additional_settings` [plugins-inputs-kinesis-additional_settings]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The KCL provides several configuration options which can be set in [KinesisClientLibConfiguration](https://github.com/awslabs/amazon-kinesis-client/blob/master/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java). These options are configured via various function calls that all begin with `with`. Some of these functions take complex types, which are not supported. However, you may invoke any one of the `withX()` functions that take a primitive by providing key-value pairs in `snake_case`.
+
+Example:
+
+To set the DynamoDB read and write capacity values, use these functions: `withInitialLeaseTableReadCapacity` and `withInitialLeaseTableWriteCapacity`.
+
+```text
+additional_settings => {"initial_lease_table_read_capacity" => 25 "initial_lease_table_write_capacity" => 100}
+```
+
+
+
+## Common options [plugins-inputs-kinesis-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-kinesis-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-kinesis-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-kinesis-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-kinesis-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-kinesis-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-kinesis-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-kinesis-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-kinesis-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-kinesis-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-kinesis-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 kinesis inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ kinesis {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-kinesis-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-kinesis-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-log4j.md b/docs/reference/plugins-inputs-log4j.md
new file mode 100644
index 000000000..c7ad41129
--- /dev/null
+++ b/docs/reference/plugins-inputs-log4j.md
@@ -0,0 +1,248 @@
+---
+navigation_title: "log4j"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-log4j.html
+---
+
+# Log4j input plugin [plugins-inputs-log4j]
+
+
+* Plugin version: v3.1.3
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-log4j-index.md).
+
+## Installation [_installation_7]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-log4j`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_38]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-log4j). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Deprecation Notice [_deprecation_notice]
+
+::::{note}
+This plugin is deprecated. It is recommended that you use Filebeat to collect logs from log4j.
+::::
+
+
+The following section is a guide for migrating from SocketAppender to Filebeat.
+
+To migrate away from the log4j SocketAppender to Filebeat, you will need to make these changes:
+
+* Configure your log4j.properties (in your app) to write to a local file.
+* Install and configure Filebeat to collect those logs and ship them to Logstash.
+* Configure Logstash to use the beats input.
+
+### Configuring log4j for writing to local files [_configuring_log4j_for_writing_to_local_files]
+
+In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
+
+For example, you can use the following log4j.properties configuration to write daily log files.
+
+```
+# Your app's log4j.properties (log4j 1.2 only)
+log4j.rootLogger=daily
+log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
+log4j.appender.daily.layout = org.apache.log4j.PatternLayout
+log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
+```
+
+Configuring log4j.properties in more detail is outside the scope of this migration guide.
+
+
+### Configuring filebeat [_configuring_filebeat]
+
+Next, [install Filebeat](beats://reference/filebeat/filebeat-installation-configuration.md). Based on the above log4j.properties, we can use this filebeat configuration:
+
+```
+# filebeat.yml
+filebeat:
+ prospectors:
+ -
+ paths:
+ - /var/log/your-app/app.*.log
+ input_type: log
+output:
+ logstash:
+ hosts: ["your-logstash-host:5000"]
+```
+
+For more details on configuring Filebeat, see [Configure Filebeat](beats://reference/filebeat/configuring-howto-filebeat.md).
+
+
+### Configuring Logstash to receive from filebeat [_configuring_logstash_to_receive_from_filebeat]
+
+Finally, configure Logstash with a beats input:
+
+```
+# logstash configuration
+input {
+ beats {
+ port => 5000
+ }
+}
+```
+
+It is strongly recommended that you also enable TLS in Filebeat and the Logstash beats input to protect your log data in transit.
+
+For more details on configuring the beats input, see [the logstash beats input documentation](https://www.elastic.co/guide/en/logstash/master/plugins-inputs-beats.html).
+
+
+
+## Description [_description_38]
+
+Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
+
+Can either accept connections from clients or connect to a server, depending on `mode`. Depending on which `mode` is configured, you need a matching SocketAppender or a SocketHubAppender on the remote side.
+
+One event is created per received log4j LoggingEvent with the following schema:
+
+* `timestamp` ⇒ the number of milliseconds elapsed from 1/1/1970 until the logging event was created.
+* `path` ⇒ the name of the logger
+* `priority` ⇒ the level of this event
+* `logger_name` ⇒ the name of the logger
+* `thread` ⇒ the thread name making the logging request
+* `class` ⇒ the fully qualified class name of the caller making the logging request.
+* `file` ⇒ the source file name and line number of the caller making the logging request in a colon-separated format "fileName:lineNumber".
+* `method` ⇒ the method name of the caller making the logging request.
+* `NDC` ⇒ the NDC string
+* `stack_trace` ⇒ the multi-line stack-trace
+
+Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
+
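+As a minimal, hedged sketch, a server-mode configuration that a log4j 1.x SocketAppender could connect to (the port shown is this plugin's default):
+
+```ruby
+input {
+  log4j {
+    mode => "server"
+    host => "0.0.0.0"
+    port => 4560
+  }
+}
+```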
+
+## Log4j Input Configuration Options [plugins-inputs-log4j-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-log4j-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-log4j-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`mode`](#plugins-inputs-log4j-mode) | [string](/reference/configuration-file-structure.md#string), one of `["server", "client"]` | No |
+| [`port`](#plugins-inputs-log4j-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy_protocol`](#plugins-inputs-log4j-proxy_protocol) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-log4j-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-log4j-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on. When mode is `client`, the address to connect to.
+
+
+### `mode` [plugins-inputs-log4j-mode]
+
+* Value can be any of: `server`, `client`
+* Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections, `client` connects to a server.
+
+
+### `port` [plugins-inputs-log4j-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4560`
+
+When mode is `server`, the port to listen on. When mode is `client`, the port to connect to.
+
+
+### `proxy_protocol` [plugins-inputs-log4j-proxy_protocol]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Proxy protocol support. Only v1 is supported at this time: [http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt).
+
+
+
+## Common options [plugins-inputs-log4j-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-log4j-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-log4j-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-log4j-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-log4j-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-log4j-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-log4j-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-log4j-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-log4j-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-log4j-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-log4j-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 log4j inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ log4j {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-log4j-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-log4j-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-logstash.md b/docs/reference/plugins-inputs-logstash.md
new file mode 100644
index 000000000..4d6ecd967
--- /dev/null
+++ b/docs/reference/plugins-inputs-logstash.md
@@ -0,0 +1,293 @@
+---
+navigation_title: "logstash"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-logstash.html
+---
+
+# Logstash input plugin [plugins-inputs-logstash]
+
+
+* A component of the [logstash integration plugin](/reference/plugins-integrations-logstash.md)
+* Integration version: v1.0.4
+* Released on: 2024-12-10
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-logstash/blob/v1.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-logstash-index.md).
+
+## Getting help [_getting_help_37]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-logstash). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_37]
+
+Listen for events that are sent by a [Logstash output plugin](/reference/plugins-outputs-logstash.md) in a pipeline that may be in another process or on another host. The upstream output must have a TCP route to the port (defaults to 9800) on an interface that this plugin is bound to.
+
+::::{note}
+Sending events to this input by *any* means other than `plugins-outputs-logstash` is neither advised nor supported. We will maintain cross-compatibility between any two supported versions of the output/input pair, and we reserve the right to change details such as protocol and encoding.
+::::
+
+
+### Minimum Configuration [plugins-inputs-logstash-minimum-config]
+
+| SSL Enabled | SSL Disabled |
+| --- | --- |
+| ``` input { logstash { ssl_keystore_path => "/path/to/logstash.p12" ssl_keystore_password => "${PASS}" } } ``` | ``` input { logstash { ssl_enabled => false } } ``` |
+
+
+### Configuration Concepts [plugins-inputs-logstash-config-binding]
+
+This input plugin needs to be configured to bind to a TCP [`port`](#plugins-inputs-logstash-port), and can be constrained to bind to a particular interface by providing the IP to [`host`](#plugins-inputs-logstash-host).
+
+
+### Security: SSL Identity [plugins-inputs-logstash-config-ssl-identity]
+
+Unless SSL is disabled, this plugin needs to be configured with identity material:
+
+* JKS- or PKCS12-formatted Keystore (see [`ssl_keystore_path`](#plugins-inputs-logstash-ssl_keystore_path))
+* PKCS8-formatted Certificate/Key pair (see [`ssl_certificate`](#plugins-inputs-logstash-ssl_certificate))
+
+
+### Security: SSL Trust [plugins-inputs-logstash-config-ssl-trust]
+
+When communicating over SSL, this plugin can be configured to either request or require that connecting clients present their own identity claims with [`ssl_client_authentication`](#plugins-inputs-logstash-ssl_client_authentication).
+
+Certificates that are presented by clients are validated by default using the system trust store to ensure that they are currently-valid and trusted, and that the client can prove possession of its associated private key. You can provide an *alternate* source of trust with:
+
+* A PEM-formatted list of trusted certificate authorities (see [`ssl_certificate_authorities`](#plugins-inputs-logstash-ssl_certificate_authorities))
+
+::::{note}
+Client-certificate verification does *not* verify identity claims on the presented certificate, such as whether the certificate includes a Subject Alt Name matching the IP address from which the client is connecting.
+::::
+
+
+
+### Security: Credentials [plugins-inputs-logstash-config-credentials]
+
+You can also configure this plugin to require a specific username/password be provided by configuring [`username`](#plugins-inputs-logstash-username) and [`password`](#plugins-inputs-logstash-password). Doing so requires any connecting `logstash-output` plugin client to provide a matching `username` and `password`.
+
+::::{note}
+When SSL is disabled, data and credentials will be received in clear text.
+::::
+
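+A hedged sketch combining SSL identity with credential enforcement (the keystore path and environment variables are placeholders); any upstream `logstash` output would then need to be configured with the same `username`/`password` pair:
+
+```ruby
+input {
+  logstash {
+    port                  => 9800
+    ssl_keystore_path     => "/path/to/logstash.p12"
+    ssl_keystore_password => "${KEYSTORE_PASS}"
+    username              => "logstash-ingest"
+    password              => "${INGEST_PASS}"
+  }
+}
+```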
+
+
+
+## Logstash Input Configuration Options [plugins-inputs-logstash-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-logstash-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-logstash-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-logstash-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-logstash-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-inputs-logstash-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-logstash-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_client_authentication`](#plugins-inputs-logstash-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-inputs-logstash-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-inputs-logstash-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-inputs-logstash-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-inputs-logstash-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_key_passphrase`](#plugins-inputs-logstash-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`username`](#plugins-inputs-logstash-username) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-logstash-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-logstash-host]
+
+* Value type is a [string](/reference/configuration-file-structure.md#string) IP address
+* Default value is `0.0.0.0` (all interfaces)
+
+Specify which interface to listen on by providing its IP address. By default, this input listens on all available interfaces.
+
+
+### `password` [plugins-inputs-logstash-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password for password-based authentication. Requires [`username`](#plugins-inputs-logstash-username).
+
+
+### `port` [plugins-inputs-logstash-port]
+
+* Value type is a [number](/reference/configuration-file-structure.md#number) port
+* Default value is 9800
+
+Specify which port to listen on.
+
+
+### `ssl_certificate` [plugins-inputs-logstash-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_key`](#plugins-inputs-logstash-ssl_key) and [`ssl_key_passphrase`](#plugins-inputs-logstash-ssl_key_passphrase) are also required.
+* Cannot be combined with configurations that disable SSL.
+
+Path to a PEM-encoded certificate or certificate chain with which to identify this plugin to connecting clients. The certificate *SHOULD* include identity claims about the IP address or hostname that clients use to establish a connection.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-logstash-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)s
+* There is no default value for this setting.
+* Cannot be combined with configurations that disable SSL.
+* Cannot be combined with [`ssl_client_authentication => none`](#plugins-inputs-logstash-ssl_client_authentication).
+
+One or more PEM-encoded files defining certificate authorities for use in client authentication. This setting can be used to *override* the system trust store for verifying the SSL certificate presented by clients.
+
+
+### `ssl_client_authentication` [plugins-inputs-logstash-ssl_client_authentication]
+
+* Value can be any of:
+
+ * `none`: do not request client’s certificate, or validate certificates that are presented
+ * `optional`: request client’s certificate, and validate it against our trust authorities *if-and-only-if* it is presented
+ * `required`: require a valid certificate from the client that is signed by a trusted certificate authority
+
+* Default value is `"none"`
+
+By default the server doesn’t do any client authentication. This means that connections from clients are *private* when SSL is enabled, but that this input will allow SSL connections from *any* client. If you wish to configure this plugin to reject connections from untrusted hosts, you will need to configure this plugin to authenticate clients, and may also need to configure its [source of trust](#plugins-inputs-logstash-config-ssl-trust).
+
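+For instance, a hedged sketch that requires mutually-authenticated clients while trusting only a specific CA (the paths are placeholders):
+
+```ruby
+input {
+  logstash {
+    ssl_keystore_path           => "/path/to/logstash.p12"
+    ssl_keystore_password       => "${KEYSTORE_PASS}"
+    ssl_client_authentication   => "required"
+    ssl_certificate_authorities => ["/path/to/client-ca.pem"]
+  }
+}
+```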
+
+### `ssl_enabled` [plugins-inputs-logstash-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+SSL is enabled by default, which requires configuring this plugin to present its [identity](#plugins-inputs-logstash-config-ssl-identity).
+
+You can disable SSL with `ssl_enabled => false`. When disabled, setting any `ssl_*` configuration causes configuration failure.
+
+
+### `ssl_key` [plugins-inputs-logstash-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_certificate`](#plugins-inputs-logstash-ssl_certificate).
+* Cannot be combined with configurations that disable SSL.
+
+A path to a PEM-encoded *encrypted* PKCS8 SSL certificate key.
+
+
+### `ssl_keystore_password` [plugins-inputs-logstash-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_keystore_path`](#plugins-inputs-logstash-ssl_keystore_path).
+* Cannot be combined with configurations that disable SSL.
+
+Password for the [`ssl_keystore_path`](#plugins-inputs-logstash-ssl_keystore_path)
+
+
+### `ssl_keystore_path` [plugins-inputs-logstash-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_keystore_password`](#plugins-inputs-logstash-ssl_keystore_password) is also required.
+* Cannot be combined with configurations that disable SSL.
+
+A path to a JKS- or PKCS12-formatted keystore with which to identify this plugin to connecting clients.
+
+
+### `ssl_key_passphrase` [plugins-inputs-logstash-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_certificate`](#plugins-inputs-logstash-ssl_certificate).
+* Cannot be combined with configurations that disable SSL.
+
+A password or passphrase of the [`ssl_key`](#plugins-inputs-logstash-ssl_key).
+
+
+### `username` [plugins-inputs-logstash-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username for password-based authentication. When this input plugin is configured with a `username`, it also requires a `password`, and any upstream `logstash-output` plugin must also be configured with a matching `username`/`password` pair.
+
+::::{note}
+When SSL is disabled, credentials will be transmitted in clear text.
+::::
+
+
+
+
+## Common options [plugins-inputs-logstash-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-logstash-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`enable_metric`](#plugins-inputs-logstash-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-logstash-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-logstash-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-logstash-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-logstash-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `enable_metric` [plugins-inputs-logstash-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-logstash-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 logstash inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ logstash {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-logstash-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-logstash-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-lumberjack.md b/docs/reference/plugins-inputs-lumberjack.md
new file mode 100644
index 000000000..af02154c6
--- /dev/null
+++ b/docs/reference/plugins-inputs-lumberjack.md
@@ -0,0 +1,191 @@
+---
+navigation_title: "lumberjack"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-lumberjack.html
+---
+
+# Lumberjack input plugin [plugins-inputs-lumberjack]
+
+
+* Plugin version: v3.1.6
+* Released on: 2019-04-15
+* [Changelog](https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-lumberjack-index.md).
+
+## Installation [_installation_8]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-lumberjack`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_39]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-lumberjack). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_39]
+
+Receive events using the Lumberjack protocol.
+
+This input can be used to reliably and securely transport events between Logstash instances. To do so, use the [lumberjack output plugin](/reference/plugins-outputs-lumberjack.md) in the sending Logstash instance(s).
+
+It can also be used to receive events from the deprecated [logstash-forwarder](https://github.com/elastic/logstash-forwarder) tool that has been replaced by [Filebeat](https://github.com/elastic/beats/tree/master/filebeat).
+
+::::{note}
+Consider using the [Beats input plugin](/reference/plugins-inputs-beats.md) instead. The Beats input implements the Lumberjack protocol v1 and v2.
+::::
+
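+A minimal, hedged sketch of this input with the required SSL material (the port and file paths are placeholders):
+
+```ruby
+input {
+  lumberjack {
+    port            => 5044
+    ssl_certificate => "/etc/logstash/lumberjack.crt"
+    ssl_key         => "/etc/logstash/lumberjack.key"
+  }
+}
+```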
+
+
+## Lumberjack Input Configuration Options [plugins-inputs-lumberjack-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-lumberjack-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`congestion_threshold`](#plugins-inputs-lumberjack-congestion_threshold) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-lumberjack-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-lumberjack-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl_certificate`](#plugins-inputs-lumberjack-ssl_certificate) | a valid filesystem path | Yes |
+| [`ssl_key`](#plugins-inputs-lumberjack-ssl_key) | a valid filesystem path | Yes |
+| [`ssl_key_passphrase`](#plugins-inputs-lumberjack-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+
+Also see [Common options](#plugins-inputs-lumberjack-common-options) for a list of options supported by all input plugins.
+
+
+
+### `congestion_threshold` [plugins-inputs-lumberjack-congestion_threshold]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+The number of seconds before we raise a timeout. This option is useful to control how much time to wait if something is blocking the pipeline.
+
+
+### `host` [plugins-inputs-lumberjack-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+
+### `port` [plugins-inputs-lumberjack-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to listen on.
+
+
+### `ssl_certificate` [plugins-inputs-lumberjack-ssl_certificate]
+
+* This is a required setting.
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use.
+
+
+### `ssl_key` [plugins-inputs-lumberjack-ssl_key]
+
+* This is a required setting.
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use.
+
+
+### `ssl_key_passphrase` [plugins-inputs-lumberjack-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+
+## Common options [plugins-inputs-lumberjack-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-lumberjack-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-lumberjack-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-lumberjack-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-lumberjack-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-lumberjack-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-lumberjack-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-lumberjack-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-lumberjack-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-lumberjack-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-lumberjack-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 lumberjack inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ lumberjack {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-lumberjack-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-lumberjack-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-meetup.md b/docs/reference/plugins-inputs-meetup.md
new file mode 100644
index 000000000..59b79e4d8
--- /dev/null
+++ b/docs/reference/plugins-inputs-meetup.md
@@ -0,0 +1,190 @@
+---
+navigation_title: "meetup"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-meetup.html
+---
+
+# Meetup input plugin [plugins-inputs-meetup]
+
+
+* Plugin version: v3.1.1
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-meetup-index.md).
+
+## Installation [_installation_9]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-meetup`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_40]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-meetup). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_40]
+
+Periodically query meetup.com regarding updates on events for the given Meetup key.
+
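+A hedged sketch of polling a single group every hour (the API key and URL name are placeholders; `interval` is in minutes):
+
+```ruby
+input {
+  meetup {
+    meetupkey => "${MEETUP_API_KEY}"
+    urlname   => "ElasticSearch-Oklahoma-City"
+    interval  => 60
+  }
+}
+```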
+
+## Meetup Input Configuration Options [plugins-inputs-meetup-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-meetup-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`eventstatus`](#plugins-inputs-meetup-eventstatus) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`groupid`](#plugins-inputs-meetup-groupid) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`interval`](#plugins-inputs-meetup-interval) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`meetupkey`](#plugins-inputs-meetup-meetupkey) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`urlname`](#plugins-inputs-meetup-urlname) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`venueid`](#plugins-inputs-meetup-venueid) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`text`](#plugins-inputs-meetup-text) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-meetup-common-options) for a list of options supported by all input plugins.
+
+
+
+### `eventstatus` [plugins-inputs-meetup-eventstatus]
+
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* Default value is `"upcoming,past"`.
+
+Event Status can be one of `"upcoming"`, `"past"`, or `"upcoming,past"`. Default is `"upcoming,past"`.
+
+
+### `groupid` [plugins-inputs-meetup-groupid]
+
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* There is no default value for this setting.
+
+The Group ID. Multiple IDs may be specified, separated by commas. Must have one of `urlname`, `venueid`, `groupid`, `text`.
+
+
+### `interval` [plugins-inputs-meetup-interval]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number).
+* There is no default value for this setting.
+
+Interval to run the command. Value is in minutes.
+
+
+### `meetupkey` [plugins-inputs-meetup-meetupkey]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* There is no default value for this setting.
+
+Meetup key, also known as your personal token.
+
+
+### `urlname` [plugins-inputs-meetup-urlname]
+
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* There is no default value for this setting.
+
+URLName: the URL name of the group, for example `ElasticSearch-Oklahoma-City`. Must have one of `urlname`, `venueid`, `groupid`, `text`.
+
+
+### `venueid` [plugins-inputs-meetup-venueid]
+
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* There is no default value for this setting.
+
+The venue ID. Must have one of `urlname`, `venueid`, `groupid`, `text`.
+
+
+### `text` [plugins-inputs-meetup-text]
+
+* Value type is [string](/reference/configuration-file-structure.md#string).
+* There is no default value for this setting.
+
+A text string to search Meetup events by. Must have one of `urlname`, `venueid`, `groupid`, `text`.
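+
+Putting these requirements together, here is a minimal sketch of a meetup input that polls one group by URL name. The API key and polling interval are placeholders, and the group name is the example used above:
+
+```ruby
+input {
+  meetup {
+    meetupkey => "MY_MEETUP_API_KEY"             # placeholder personal token
+    interval  => 60                              # poll every 60 minutes
+    urlname   => "ElasticSearch-Oklahoma-City"   # one of urlname, venueid, groupid, text
+  }
+}
+```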
+
+
+
+## Common options [plugins-inputs-meetup-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-meetup-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-meetup-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-meetup-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-meetup-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-meetup-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-meetup-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-meetup-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-meetup-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-meetup-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-meetup-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 meetup inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ meetup {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-meetup-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-meetup-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-pipe.md b/docs/reference/plugins-inputs-pipe.md
new file mode 100644
index 000000000..abdb389fe
--- /dev/null
+++ b/docs/reference/plugins-inputs-pipe.md
@@ -0,0 +1,190 @@
+---
+navigation_title: "pipe"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-pipe.html
+---
+
+# Pipe input plugin [plugins-inputs-pipe]
+
+
+* Plugin version: v3.1.0
+* Released on: 2021-11-18
+* [Changelog](https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-pipe-index.md).
+
+## Getting help [_getting_help_41]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-pipe). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_41]
+
+Stream events from a long running command pipe.
+
+By default, each event is assumed to be one line. If you want to join lines, you’ll want to use the multiline codec.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-pipe-ecs]
+
+This plugin adds extra fields about the event’s source. Configure the [`ecs_compatibility`](#plugins-inputs-pipe-ecs_compatibility) option if you want to ensure that these fields are compatible with [ECS](ecs://reference/index.md).
+
+These fields are added after the event has been decoded by the appropriate codec, and will not overwrite existing values.
+
+| ECS Disabled | ECS v1, v8 | Description |
+| --- | --- | --- |
+| `host` | `[host][name]` | The name of the {{ls}} host that processed the event |
+| `command` | `[process][command_line]` | The command run by the plugin |
+
+
+## Pipe Input Configuration Options [plugins-inputs-pipe-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-pipe-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`command`](#plugins-inputs-pipe-command) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`ecs_compatibility`](#plugins-inputs-pipe-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-pipe-common-options) for a list of options supported by all input plugins.
+
+
+
+### `command` [plugins-inputs-pipe-command]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Command to run and read events from, one line at a time.
+
+Example:
+
+```ruby
+input {
+ pipe {
+ command => "echo ¡Hola!"
+ }
+}
+```
+
+
+### `ecs_compatibility` [plugins-inputs-pipe-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: uses backwards compatible field names, such as `[host]`
+ * `v1`, `v8`: uses fields that are compatible with ECS, such as `[host][name]`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Compatibility with the Elastic Common Schema (ECS)](#plugins-inputs-pipe-ecs) for detailed information.
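+
+For example, to opt in to the ECS field names shown in the sample output below, you might set the option explicitly. This is a minimal sketch that reuses the placeholder command from the earlier example:
+
+```ruby
+input {
+  pipe {
+    command           => "echo ¡Hola!"
+    ecs_compatibility => "v8"   # emit [host][name] and [process][command_line]
+  }
+}
+```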
+
+**Sample output: ECS enabled**
+
+```ruby
+{
+ "@timestamp"=>2021-11-16T09:18:45.306Z,
+ "message" => "¡Hola!",
+ "process" => {
+ "command_line" => "echo '¡Hola!'"
+ },
+ "host" => {
+ "name" => "deus-ex-machina"
+ }
+}
+```
+
+**Sample output: ECS disabled**
+
+```ruby
+{
+ "@timestamp"=>2021-11-16T09:18:45.306Z,
+ "message" => "¡Hola!",
+ "command" => "echo '¡Hola!'",
+ "host" => "deus-ex-machina"
+}
+```
+
+
+
+## Common options [plugins-inputs-pipe-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-pipe-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-pipe-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-pipe-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-pipe-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-pipe-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-pipe-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-pipe-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-pipe-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-pipe-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-pipe-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 pipe inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ pipe {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-pipe-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-pipe-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-puppet_facter.md b/docs/reference/plugins-inputs-puppet_facter.md
new file mode 100644
index 000000000..61e0e9892
--- /dev/null
+++ b/docs/reference/plugins-inputs-puppet_facter.md
@@ -0,0 +1,174 @@
+---
+navigation_title: "puppet_facter"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-puppet_facter.html
+---
+
+# Puppet_facter input plugin [plugins-inputs-puppet_facter]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-puppet_facter-index.md).
+
+## Installation [_installation_10]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-puppet_facter`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_42]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-puppet_facter). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_42]
+
+Connects to a Puppet server and requests facts.
+
+
+## Puppet_facter Input Configuration Options [plugins-inputs-puppet_facter-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-puppet_facter-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`environment`](#plugins-inputs-puppet_facter-environment) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-puppet_facter-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`interval`](#plugins-inputs-puppet_facter-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`port`](#plugins-inputs-puppet_facter-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`private_key`](#plugins-inputs-puppet_facter-private_key) | a valid filesystem path | No |
+| [`public_key`](#plugins-inputs-puppet_facter-public_key) | a valid filesystem path | No |
+| [`ssl`](#plugins-inputs-puppet_facter-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-puppet_facter-common-options) for a list of options supported by all input plugins.
+
+
+
+### `environment` [plugins-inputs-puppet_facter-environment]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"production"`
+
+
+### `host` [plugins-inputs-puppet_facter-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+
+### `interval` [plugins-inputs-puppet_facter-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `600`
+
+
+### `port` [plugins-inputs-puppet_facter-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8140`
+
+
+### `private_key` [plugins-inputs-puppet_facter-private_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+
+### `public_key` [plugins-inputs-puppet_facter-public_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+
+### `ssl` [plugins-inputs-puppet_facter-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
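+
+The options above all have defaults, so a working configuration can stay small. The following is a minimal sketch that polls a Puppet server over SSL; the hostname and key paths are placeholders:
+
+```ruby
+input {
+  puppet_facter {
+    host        => "puppet.example.com"                      # placeholder Puppet server
+    port        => 8140
+    interval    => 600                                       # poll every 10 minutes
+    ssl         => true
+    private_key => "/etc/logstash/puppet/private_key.pem"    # placeholder path
+    public_key  => "/etc/logstash/puppet/public_key.pem"     # placeholder path
+  }
+}
+```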
+
+
+
+## Common options [plugins-inputs-puppet_facter-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-puppet_facter-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-puppet_facter-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-puppet_facter-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-puppet_facter-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-puppet_facter-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-puppet_facter-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-puppet_facter-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-puppet_facter-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-puppet_facter-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-puppet_facter-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 puppet_facter inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ puppet_facter {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-puppet_facter-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-puppet_facter-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-rabbitmq.md b/docs/reference/plugins-inputs-rabbitmq.md
new file mode 100644
index 000000000..7d875b924
--- /dev/null
+++ b/docs/reference/plugins-inputs-rabbitmq.md
@@ -0,0 +1,438 @@
+---
+navigation_title: "rabbitmq"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-rabbitmq.html
+---
+
+# Rabbitmq input plugin [plugins-inputs-rabbitmq]
+
+
+* A component of the [rabbitmq integration plugin](/reference/plugins-integrations-rabbitmq.md)
+* Integration version: v7.4.0
+* Released on: 2024-09-16
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-rabbitmq/blob/v7.4.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-rabbitmq-index.md).
+
+## Getting help [_getting_help_43]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-rabbitmq). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_43]
+
+Pull events from a [RabbitMQ](http://www.rabbitmq.com/) queue.
+
+The default settings create an entirely transient queue and listen for all messages. If you need durability or other advanced settings, set the appropriate options.
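+
+For example, here is a sketch of a non-transient setup that consumes from a named, durable queue bound to an existing exchange. The queue and exchange names are placeholders:
+
+```ruby
+input {
+  rabbitmq {
+    host     => "localhost"
+    queue    => "logstash-events"   # placeholder queue name
+    durable  => true                # must match the setting of an existing queue
+    exchange => "logs"              # placeholder exchange to bind the queue to
+    key      => "logstash"          # routing key used for the binding
+  }
+}
+```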
+
+This plugin uses the [March Hare](http://rubymarchhare.info/) library for interacting with the RabbitMQ server. Most configuration options map directly to standard RabbitMQ and AMQP concepts. The [AMQP 0-9-1 reference guide](https://www.rabbitmq.com/amqp-0-9-1-reference.html) and other parts of the RabbitMQ documentation are useful for deeper understanding.
+
+The properties of messages received will be stored in the `[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled. Note that storing metadata may degrade performance. The following properties may be available (in most cases dependent on whether they were set by the sender):
+
+* app-id
+* cluster-id
+* consumer-tag
+* content-encoding
+* content-type
+* correlation-id
+* delivery-mode
+* exchange
+* expiration
+* message-id
+* priority
+* redeliver
+* reply-to
+* routing-key
+* timestamp
+* type
+* user-id
+
+For example, to get the RabbitMQ message’s timestamp property into the Logstash event’s `@timestamp` field, use the date filter to parse the `[@metadata][rabbitmq_properties][timestamp]` field:
+
+```ruby
+ filter {
+ if [@metadata][rabbitmq_properties][timestamp] {
+ date {
+ match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"]
+ }
+ }
+ }
+```
+
+Additionally, any message headers will be saved in the `[@metadata][rabbitmq_headers]` field.
+
+
+## Rabbitmq Input Configuration Options [plugins-inputs-rabbitmq-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-rabbitmq-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ack`](#plugins-inputs-rabbitmq-ack) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`arguments`](#plugins-inputs-rabbitmq-arguments) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`auto_delete`](#plugins-inputs-rabbitmq-auto_delete) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`automatic_recovery`](#plugins-inputs-rabbitmq-automatic_recovery) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`connect_retry_interval`](#plugins-inputs-rabbitmq-connect_retry_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connection_timeout`](#plugins-inputs-rabbitmq-connection_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`durable`](#plugins-inputs-rabbitmq-durable) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`exchange`](#plugins-inputs-rabbitmq-exchange) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exchange_type`](#plugins-inputs-rabbitmq-exchange_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exclusive`](#plugins-inputs-rabbitmq-exclusive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`heartbeat`](#plugins-inputs-rabbitmq-heartbeat) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-rabbitmq-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`key`](#plugins-inputs-rabbitmq-key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metadata_enabled`](#plugins-inputs-rabbitmq-metadata_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`passive`](#plugins-inputs-rabbitmq-passive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-inputs-rabbitmq-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-rabbitmq-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`prefetch_count`](#plugins-inputs-rabbitmq-prefetch_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`queue`](#plugins-inputs-rabbitmq-queue) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl`](#plugins-inputs-rabbitmq-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_certificate_password`](#plugins-inputs-rabbitmq-ssl_certificate_password) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_certificate_path`](#plugins-inputs-rabbitmq-ssl_certificate_path) | a valid filesystem path | No |
+| [`ssl_version`](#plugins-inputs-rabbitmq-ssl_version) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`subscription_retry_interval_seconds`](#plugins-inputs-rabbitmq-subscription_retry_interval_seconds) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`threads`](#plugins-inputs-rabbitmq-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-inputs-rabbitmq-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`vhost`](#plugins-inputs-rabbitmq-vhost) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-rabbitmq-common-options) for a list of options supported by all input plugins.
+
+
+
+### `ack` [plugins-inputs-rabbitmq-ack]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable message acknowledgements. With acknowledgements, messages fetched by Logstash but not yet sent into the Logstash pipeline will be requeued by the server if Logstash shuts down. Acknowledgements will, however, hurt message throughput.
+
+This will only send an ack back every `prefetch_count` messages. Working in batches provides a performance boost here.
+
+
+### `arguments` [plugins-inputs-rabbitmq-arguments]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `{}`
+
+Optional queue arguments as an array.
+
+Relevant RabbitMQ doc guides:
+
+* [Optional queue arguments](https://www.rabbitmq.com/queues.html#optional-arguments)
+* [Policies](https://www.rabbitmq.com/parameters.html#policies)
+* [Quorum Queues](https://www.rabbitmq.com/quorum-queues.html)
+
+
+### `auto_delete` [plugins-inputs-rabbitmq-auto_delete]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Should the queue be deleted on the broker when the last consumer disconnects? Set this option to `false` if you want the queue to remain on the broker, queueing up messages until a consumer comes along to consume them.
+
+
+### `automatic_recovery` [plugins-inputs-rabbitmq-automatic_recovery]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Set this to [automatically recover](https://www.rabbitmq.com/connections.html#automatic-recovery) from a broken connection. You almost certainly don’t want to override this!
+
+
+### `connect_retry_interval` [plugins-inputs-rabbitmq-connect_retry_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Time in seconds to wait before retrying a connection
+
+
+### `connection_timeout` [plugins-inputs-rabbitmq-connection_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The default connection timeout in milliseconds. If not specified the timeout is infinite.
+
+
+### `durable` [plugins-inputs-rabbitmq-durable]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Is this queue durable? (That is, should it survive a broker restart?) If consuming directly from a queue, you must set this value to match the existing queue setting, otherwise the connection will fail with an inequivalent arg error.
+
+
+### `exchange` [plugins-inputs-rabbitmq-exchange]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the exchange to bind the queue to. Specify `exchange_type` as well to declare the exchange if it does not exist.
+
+
+### `exchange_type` [plugins-inputs-rabbitmq-exchange_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The type of the exchange to bind to. Specifying this will cause this plugin to declare the exchange if it does not exist.
+
+
+### `exclusive` [plugins-inputs-rabbitmq-exclusive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Is the queue exclusive? Exclusive queues can only be used by the connection that declared them and will be deleted when it is closed (e.g. due to a Logstash restart).
+
+
+### `heartbeat` [plugins-inputs-rabbitmq-heartbeat]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+[Heartbeat timeout](https://www.rabbitmq.com/heartbeats.html) in seconds. If unspecified, a heartbeat timeout of 60 seconds will be used.
+
+
+### `host` [plugins-inputs-rabbitmq-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+RabbitMQ server address(es). The `host` setting can be either a single host or a list of hosts, for example `host => "localhost"` or `host => ["host01", "host02"]`.
+
+If multiple hosts are provided, one host is chosen at random for the initial connection and for any subsequent recovery attempts. Note that only one host connection is active at a time.
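+
+For example, here is a sketch of a configuration pointing at several brokers; the hostnames and queue name are placeholders:
+
+```ruby
+input {
+  rabbitmq {
+    host  => ["rabbit01.example.com", "rabbit02.example.com"]   # one host is chosen at random
+    queue => "logstash-events"                                   # placeholder queue name
+  }
+}
+```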
+
+
+### `key` [plugins-inputs-rabbitmq-key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The routing key to use when binding a queue to the exchange. This is only relevant for direct or topic exchanges.
+
+* Routing keys are ignored on fanout exchanges.
+* Wildcards are not valid on direct exchanges.
+
+
+### `metadata_enabled` [plugins-inputs-rabbitmq-metadata_enabled]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Accepted values are:
+
+ * `none`: no metadata is added
+ * `basic`: headers and properties are added
+ * `extended`: headers, properties, and raw payload are added
+ * `false`: deprecated alias for `none`
+ * `true`: deprecated alias for `basic`
+
+* Default value is `none`
+
+Enable metadata about the RabbitMQ topic to be added to the event’s `@metadata` field, making it available during pipeline processing. In general, most output plugins and codecs do not include `@metadata` fields. Enabling metadata may impact memory usage and performance.
+
+#### Metadata mapping [plugins-inputs-rabbitmq-metadata_locations]
+
+| category | location | type |
+| --- | --- | --- |
+| headers | `[@metadata][rabbitmq_headers]` | key/value map |
+| properties | `[@metadata][rabbitmq_properties]` | key/value map |
+| raw payload | `[@metadata][rabbitmq_payload]` | byte sequence |
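+
+As a sketch, the following enables `basic` metadata and copies one assumed header, `x-request-id`, into a first-class field during filtering. The queue and header names are placeholders:
+
+```ruby
+input {
+  rabbitmq {
+    host             => "localhost"
+    queue            => "logstash-events"   # placeholder queue name
+    metadata_enabled => "basic"
+  }
+}
+filter {
+  if [@metadata][rabbitmq_headers][x-request-id] {
+    mutate {
+      add_field => { "request_id" => "%{[@metadata][rabbitmq_headers][x-request-id]}" }
+    }
+  }
+}
+```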
+
+
+
+### `passive` [plugins-inputs-rabbitmq-passive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If true, the queue will be passively declared, meaning it must already exist on the server. To have Logstash create the queue if necessary, leave this option as `false`. If actively declaring a queue that already exists, the queue options for this plugin (durable and so on) must match those of the existing queue.
+
+
+### `password` [plugins-inputs-rabbitmq-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `"guest"`
+
+RabbitMQ password
+
+
+### `port` [plugins-inputs-rabbitmq-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5672`
+
+RabbitMQ port to connect on
+
+
+### `prefetch_count` [plugins-inputs-rabbitmq-prefetch_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `256`
+
+Prefetch count. If acknowledgements are enabled with the `ack` option, specifies the number of outstanding unacknowledged messages allowed.
+
+
+### `queue` [plugins-inputs-rabbitmq-queue]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+The name of the queue Logstash will consume events from. If left empty, a transient queue with a randomly chosen name will be created.
+
+
+### `ssl` [plugins-inputs-rabbitmq-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Enable or disable SSL. Note that by default remote certificate verification is off. Specify `ssl_certificate_path` and `ssl_certificate_password` if you need certificate verification.
+
+
+### `ssl_certificate_password` [plugins-inputs-rabbitmq-ssl_certificate_password]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
+
+
+### `ssl_certificate_path` [plugins-inputs-rabbitmq-ssl_certificate_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
+
+
+### `ssl_version` [plugins-inputs-rabbitmq-ssl_version]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+
+### `subscription_retry_interval_seconds` [plugins-inputs-rabbitmq-subscription_retry_interval_seconds]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Amount of time in seconds to wait after a failed subscription request before retrying. Subscription requests can fail if the server goes away and then comes back.
+
+
+### `threads` [plugins-inputs-rabbitmq-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+
+### `user` [plugins-inputs-rabbitmq-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"guest"`
+
+RabbitMQ username
+
+
+### `vhost` [plugins-inputs-rabbitmq-vhost]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/"`
+
+The vhost (virtual host) to use. If you don’t know what this is, leave the default. With the exception of the default vhost ("/"), names of vhosts should not begin with a forward slash.
+
+
+
+## Common options [plugins-inputs-rabbitmq-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-rabbitmq-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-rabbitmq-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-rabbitmq-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-rabbitmq-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-rabbitmq-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-rabbitmq-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-rabbitmq-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-rabbitmq-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-rabbitmq-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-rabbitmq-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 rabbitmq inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ rabbitmq {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-rabbitmq-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-rabbitmq-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-redis.md b/docs/reference/plugins-inputs-redis.md
new file mode 100644
index 000000000..f6bf1ea6f
--- /dev/null
+++ b/docs/reference/plugins-inputs-redis.md
@@ -0,0 +1,241 @@
+---
+navigation_title: "redis"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-redis.html
+---
+
+# Redis input plugin [plugins-inputs-redis]
+
+
+* Plugin version: v3.7.1
+* Released on: 2024-08-01
+* [Changelog](https://github.com/logstash-plugins/logstash-input-redis/blob/v3.7.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-redis-index.md).
+
+## Getting help [_getting_help_44]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-redis). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_44]
+
+This input will read events from a Redis instance; it supports both Redis channels and lists. The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and the channel commands used by Logstash are found in Redis v1.3.8+. While you may be able to make these Redis versions work, the best performance and stability will be found in more recent stable versions. Versions 2.6.0+ are recommended.
+
+For more information about Redis, see [http://redis.io/](http://redis.io/)
+
+`batch_count` note: If you use the `batch_count` setting, you **must** use a Redis version 2.6.0 or newer. Anything older does not support the operations used by batching.
+
+
+## Redis Input Configuration Options [plugins-inputs-redis-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-redis-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`batch_count`](#plugins-inputs-redis-batch_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`command_map`](#plugins-inputs-redis-command_map) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`data_type`](#plugins-inputs-redis-data_type) | [string](/reference/configuration-file-structure.md#string), one of `["list", "channel", "pattern_channel"]` | Yes |
+| [`db`](#plugins-inputs-redis-db) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-inputs-redis-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`path`](#plugins-inputs-redis-path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key`](#plugins-inputs-redis-key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-inputs-redis-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-redis-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl`](#plugins-inputs-redis-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`threads`](#plugins-inputs-redis-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeout`](#plugins-inputs-redis-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-redis-common-options) for a list of options supported by all input plugins.
+
+
+
+### `batch_count` [plugins-inputs-redis-batch_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `125`
+
+The number of events to return from Redis using EVAL.
+
+
+### `command_map` [plugins-inputs-redis-command_map]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+* key is the default command name, value is the renamed command.
+
+Configure renamed redis commands in the form of "oldname" ⇒ "newname". Redis allows for the renaming or disabling of commands in its protocol, see: [https://redis.io/topics/security](https://redis.io/topics/security)
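+
+For example, if your Redis server has renamed `blpop` for security hardening, a sketch of the mapping might look like this; the renamed command and list name are placeholders:
+
+```ruby
+input {
+  redis {
+    data_type   => "list"
+    key         => "logstash"                       # placeholder list name
+    command_map => { "blpop" => "renamed_blpop" }   # placeholder renamed command
+  }
+}
+```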
+
+
+### `data_type` [plugins-inputs-redis-data_type]
+
+* This is a required setting.
+* Value can be any of: `list`, `channel`, `pattern_channel`
+* There is no default value for this setting.
+
+Specify `list`, `channel`, or `pattern_channel`. If `data_type` is `list`, then we will BLPOP the key. If `data_type` is `channel`, then we will SUBSCRIBE to the key. If `data_type` is `pattern_channel`, then we will PSUBSCRIBE to the key.
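+
+For example, a minimal sketch that consumes events from a Redis list named `logstash` on the local host:
+
+```ruby
+input {
+  redis {
+    host      => "127.0.0.1"
+    data_type => "list"       # BLPOP the key
+    key       => "logstash"   # placeholder list name
+  }
+}
+```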
+
+
+### `db` [plugins-inputs-redis-db]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The Redis database number.
+
+
+### `host` [plugins-inputs-redis-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"127.0.0.1"`
+
+The hostname of your Redis server.
+
+
+### `path` [plugins-inputs-redis-path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* The `path` setting will override the `host` configuration if both are specified.
+
+The unix socket path of your Redis server.
+
+
+### `key` [plugins-inputs-redis-key]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of a Redis list or channel.
+
+
+### `password` [plugins-inputs-redis-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to authenticate with. There is no authentication by default.
+
+
+### `port` [plugins-inputs-redis-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `6379`
+
+The port to connect on.
+
+
+### `ssl` [plugins-inputs-redis-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL support.
+
+
+### `threads` [plugins-inputs-redis-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Number of instances of the input to start, each on its own thread. Increase from one to improve concurrency in consuming messages from Redis.
+
+::::{note}
+Increasing the number of threads when consuming from a channel will result in duplicate messages since a `SUBSCRIBE` delivers each message to all subscribers.
+::::
+
+
+
+### `timeout` [plugins-inputs-redis-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Initial connection timeout in seconds.
+
+
+
+## Common options [plugins-inputs-redis-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-redis-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-redis-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-redis-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-redis-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-redis-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-redis-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-redis-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-redis-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-redis-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-redis-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 redis inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ redis {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-redis-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-redis-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-relp.md b/docs/reference/plugins-inputs-relp.md
new file mode 100644
index 000000000..cd04b1d3e
--- /dev/null
+++ b/docs/reference/plugins-inputs-relp.md
@@ -0,0 +1,204 @@
+---
+navigation_title: "relp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-relp.html
+---
+
+# Relp input plugin [plugins-inputs-relp]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-relp-index.md).
+
+## Installation [_installation_11]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-relp`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_45]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-relp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_45]
+
+Read RELP events over a TCP socket.
+
+For more information about RELP, see [http://www.rsyslog.com/doc/imrelp.html](http://www.rsyslog.com/doc/imrelp.html)
+
+This protocol implements application-level acknowledgements to help protect against message loss.
+
+Message acks only function as far as messages being put into the queue for filters; anything lost after that point will not be retransmitted
+
+
+## Relp Input Configuration Options [plugins-inputs-relp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-relp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-relp-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-relp-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl_cacert`](#plugins-inputs-relp-ssl_cacert) | a valid filesystem path | No |
+| [`ssl_cert`](#plugins-inputs-relp-ssl_cert) | a valid filesystem path | No |
+| [`ssl_enable`](#plugins-inputs-relp-ssl_enable) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-inputs-relp-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-relp-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_verify`](#plugins-inputs-relp-ssl_verify) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-relp-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-relp-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+
+### `port` [plugins-inputs-relp-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to listen on.
+
+
+### `ssl_cacert` [plugins-inputs-relp-ssl_cacert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
+
+
+### `ssl_cert` [plugins-inputs-relp-ssl_cert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate path
+
+
+### `ssl_enable` [plugins-inputs-relp-ssl_enable]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+
+### `ssl_key` [plugins-inputs-relp-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key path
+
+
+### `ssl_key_passphrase` [plugins-inputs-relp-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+SSL key passphrase
+
+
+### `ssl_verify` [plugins-inputs-relp-ssl_verify]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA. For input, sets the field `sslsubject` to that of the client certificate.
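+
+Taken together, here is a sketch of a TLS-enabled RELP listener; the port, certificate paths, and passphrase are placeholders:
+
+```ruby
+input {
+  relp {
+    port               => 2514                             # placeholder port
+    ssl_enable         => true
+    ssl_cert           => "/etc/logstash/relp/server.crt"  # placeholder certificate path
+    ssl_key            => "/etc/logstash/relp/server.key"  # placeholder key path
+    ssl_key_passphrase => "changeme"                       # placeholder passphrase
+    ssl_verify         => true                             # sets sslsubject from the client certificate
+  }
+}
+```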
+
+
+
+## Common options [plugins-inputs-relp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-relp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-relp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-relp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-relp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-relp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-relp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-relp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-relp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-relp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-relp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 relp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ relp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-relp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-relp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-rss.md b/docs/reference/plugins-inputs-rss.md
new file mode 100644
index 000000000..6f70673a0
--- /dev/null
+++ b/docs/reference/plugins-inputs-rss.md
@@ -0,0 +1,150 @@
+---
+navigation_title: "rss"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-rss.html
+---
+
+# Rss input plugin [plugins-inputs-rss]
+
+
+* Plugin version: v3.0.6
+* Released on: 2023-11-03
+* [Changelog](https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-rss-index.md).
+
+## Installation [_installation_12]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-rss`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_46]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-rss). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_46]
+
+Reads entries from an RSS/Atom feed and turns them into Logstash events.
+
+The configured [`url`](#plugins-inputs-rss-url) is polled on the configured [`interval`](#plugins-inputs-rss-interval), and each feed entry is emitted as a separate event.
+
+
+## Rss Input Configuration Options [plugins-inputs-rss-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-rss-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`interval`](#plugins-inputs-rss-interval) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`url`](#plugins-inputs-rss-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-inputs-rss-common-options) for a list of options supported by all input plugins.
+
+
+
+### `interval` [plugins-inputs-rss-interval]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Interval between feed checks. Value is in seconds.
+
+
+### `url` [plugins-inputs-rss-url]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+RSS/Atom feed URL
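+
+A minimal sketch of an rss input; the feed URL is a placeholder, and the interval polls the feed once per hour.
+
+```ruby
+input {
+  rss {
+    url => "https://example.com/feed.xml"   # placeholder feed URL
+    interval => 3600                        # poll the feed once per hour
+  }
+}
+```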
+
+
+
+## Common options [plugins-inputs-rss-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-rss-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-rss-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-rss-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-rss-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-rss-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-rss-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-rss-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-rss-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-rss-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-rss-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 rss inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ rss {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-rss-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-rss-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-s3-sns-sqs.md b/docs/reference/plugins-inputs-s3-sns-sqs.md
new file mode 100644
index 000000000..cd422bf2a
--- /dev/null
+++ b/docs/reference/plugins-inputs-s3-sns-sqs.md
@@ -0,0 +1,32 @@
+---
+navigation_title: "s3-sns-sqs"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-s3-sns-sqs.html
+---
+
+# S3 via SNS/SQS plugin [plugins-inputs-s3-sns-sqs]
+
+
+* This plugin was created and is maintained by a contributor.
+* [Change log](https://github.com/cherweg/logstash-input-s3-sns-sqs/blob/master/CHANGELOG.md)
+
+## Installation [_installation_13]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-s3-sns-sqs`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Description [_description_48]
+
+This plugin uses SQS to read logs from AWS S3 buckets in high-availability setups with multiple Logstash instances.
+
+
+## Documentation [_documentation]
+
+[Documentation](https://github.com/cherweg/logstash-input-s3-sns-sqs/blob/master/docs/index.asciidoc) for the logstash-input-s3-sns-sqs plugin is maintained by the creator.
+
+
+## Getting Help [_getting_help_48]
+
+This is a third-party plugin. For bugs or feature requests, open an issue in the [plugins-inputs-s3-sns-sqs Github repo](https://github.com/cherweg/logstash-input-s3-sns-sqs).
+
+
diff --git a/docs/reference/plugins-inputs-s3.md b/docs/reference/plugins-inputs-s3.md
new file mode 100644
index 000000000..bca072b0b
--- /dev/null
+++ b/docs/reference/plugins-inputs-s3.md
@@ -0,0 +1,409 @@
+---
+navigation_title: "s3"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-s3.html
+---
+
+# S3 input plugin [plugins-inputs-s3]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-s3-index.md).
+
+## Getting help [_getting_help_47]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_47]
+
+Stream events from files in an S3 bucket.
+
+::::{important}
+The S3 input plugin only supports AWS S3. Other S3 compatible storage solutions are not supported.
+::::
+
+
+Each line from each file generates an event. Files ending in `.gz` are handled as gzip’ed files.
+
+Files that are archived to AWS Glacier will be skipped.
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-s3-ecs_metadata]
+
+This plugin adds CloudFront metadata to the event. When ECS compatibility is disabled, the value is stored at the root level. When ECS is enabled, the value is stored under `@metadata`, where it can be used by other plugins in your pipeline.
+
+Here’s how ECS compatibility mode affects output.
+
+| ECS disabled | ECS v1 | Availability | Description |
+| --- | --- | --- | --- |
+| cloudfront_fields | [@metadata][s3][cloudfront][fields] | *available when the file is a CloudFront log* | *column names of log* |
+| cloudfront_version | [@metadata][s3][cloudfront][version] | *available when the file is a CloudFront log* | *version of log* |
+
+
+## S3 Input Configuration Options [plugins-inputs-s3-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-s3-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-inputs-s3-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`additional_settings`](#plugins-inputs-s3-additional_settings) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`aws_credentials_file`](#plugins-inputs-s3-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`backup_add_prefix`](#plugins-inputs-s3-backup_add_prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`backup_to_bucket`](#plugins-inputs-s3-backup_to_bucket) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`backup_to_dir`](#plugins-inputs-s3-backup_to_dir) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`bucket`](#plugins-inputs-s3-bucket) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`delete`](#plugins-inputs-s3-delete) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-inputs-s3-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`endpoint`](#plugins-inputs-s3-endpoint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exclude_pattern`](#plugins-inputs-s3-exclude_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`gzip_pattern`](#plugins-inputs-s3-gzip_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_object_properties`](#plugins-inputs-s3-include_object_properties) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`interval`](#plugins-inputs-s3-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`prefix`](#plugins-inputs-s3-prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_uri`](#plugins-inputs-s3-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-inputs-s3-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_arn`](#plugins-inputs-s3-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-inputs-s3-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secret_access_key`](#plugins-inputs-s3-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-inputs-s3-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sincedb_path`](#plugins-inputs-s3-sincedb_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`temporary_directory`](#plugins-inputs-s3-temporary_directory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_aws_bundled_ca`](#plugins-inputs-s3-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`watch_for_new_files`](#plugins-inputs-s3-watch_for_new_files) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-s3-common-options) for a list of options supported by all input plugins.
+
+
+
+### `access_key_id` [plugins-inputs-s3-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `additional_settings` [plugins-inputs-s3-additional_settings]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Key-value pairs of settings and corresponding values used to parameterize the connection to S3. See the full list in [the AWS SDK documentation](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html). Example:
+
+```ruby
+ input {
+ s3 {
+ access_key_id => "1234"
+ secret_access_key => "secret"
+ bucket => "logstash-test"
+ additional_settings => {
+ force_path_style => true
+ follow_redirects => false
+ }
+ }
+ }
+```
+
+
+### `aws_credentials_file` [plugins-inputs-s3-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `backup_add_prefix` [plugins-inputs-s3-backup_add_prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Append a prefix to the key (the full path, including the file name, in S3) after processing. If backing up to another (or the same) bucket, this effectively lets you choose a new *folder* to place the files in.
+
+
+### `backup_to_bucket` [plugins-inputs-s3-backup_to_bucket]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Name of an S3 bucket to back up processed files to.
+
+
+### `backup_to_dir` [plugins-inputs-s3-backup_to_dir]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Path of a local directory to back up processed files to.
+
+
+### `bucket` [plugins-inputs-s3-bucket]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the S3 bucket.
+
+
+### `delete` [plugins-inputs-s3-delete]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Whether to delete processed files from the original bucket.
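+
+As a hedged sketch with hypothetical bucket names, `delete` can be combined with the backup options so that processed files are copied to a backup bucket under a chosen prefix and then removed from the original bucket.
+
+```ruby
+input {
+  s3 {
+    bucket => "logstash-test"                 # hypothetical source bucket
+    backup_to_bucket => "logstash-archive"    # hypothetical backup bucket
+    backup_add_prefix => "processed/"         # backed-up keys get this prefix
+    delete => true                            # remove processed files from the source bucket
+  }
+}
+```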
+
+
+### `ecs_compatibility` [plugins-inputs-s3-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names
+  * `v1`, `v8`: uses metadata fields that are compatible with Elastic Common Schema
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-inputs-s3-ecs_metadata) for detailed information.
+
+
+### `endpoint` [plugins-inputs-s3-endpoint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The endpoint to connect to. By default it is constructed using the value of `region`. This is useful when connecting to S3 compatible services, but beware that these aren’t guaranteed to work correctly with the AWS SDK.
+
+
+### `exclude_pattern` [plugins-inputs-s3-exclude_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Ruby style regexp of keys to exclude from the bucket.
+
+Note that files matching the pattern are skipped *after* they have been listed. Consider using [`prefix`](#plugins-inputs-s3-prefix) instead where possible.
+
+Example:
+
+```ruby
+"exclude_pattern" => "\/2020\/04\/"
+```
+
+This pattern excludes all logs containing "/2020/04/" in the path.
+
+
+### `gzip_pattern` [plugins-inputs-s3-gzip_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"\.gz(ip)?$"`
+
+Regular expression used to determine whether an input file is in gzip format.
+
+
+### `include_object_properties` [plugins-inputs-s3-include_object_properties]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Whether or not to include the S3 object’s properties (last_modified, content_type, metadata) into each Event at `[@metadata][s3]`. Regardless of this setting, `[@metadata][s3][key]` will always be present.
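+
+For illustration, a sketch that turns on object properties so downstream filters can read them from `[@metadata][s3]`; the bucket name is a placeholder.
+
+```ruby
+input {
+  s3 {
+    bucket => "logstash-test"            # hypothetical bucket name
+    include_object_properties => true    # exposes last_modified, content_type, and metadata under [@metadata][s3]
+  }
+}
+```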
+
+
+### `interval` [plugins-inputs-s3-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Interval to wait before checking the file list again after a run is finished. Value is in seconds.
+
+
+### `prefix` [plugins-inputs-s3-prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+If specified, only objects whose keys start with this prefix are processed (this is a literal prefix, not a regexp).
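+
+For example, a sketch that limits the listing to a literal key prefix (placeholder values):
+
+```ruby
+input {
+  s3 {
+    bucket => "logstash-test"    # hypothetical bucket name
+    prefix => "logs/2024/"       # only keys beginning with this literal prefix are processed
+  }
+}
+```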
+
+
+### `proxy_uri` [plugins-inputs-s3-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+
+### `region` [plugins-inputs-s3-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `role_arn` [plugins-inputs-s3-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS IAM Role to assume, if any. This is used to generate temporary credentials, typically for cross-account access. See the [AssumeRole API documentation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) for more information.
+
+
+### `role_session_name` [plugins-inputs-s3-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Session name to use when assuming an IAM role.
+
+
+### `secret_access_key` [plugins-inputs-s3-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `session_token` [plugins-inputs-s3-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `sincedb_path` [plugins-inputs-s3-sincedb_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Where to write the since database (keeps track of the date the last handled file was added to S3). By default, sincedb files are written to the directory *{path.data}/plugins/inputs/s3/*.
+
+If specified, this setting must be a filename path and not just a directory.
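+
+A sketch with an explicit sincedb file, assuming a hypothetical path that Logstash can write to:
+
+```ruby
+input {
+  s3 {
+    bucket => "logstash-test"                                      # hypothetical bucket name
+    sincedb_path => "/var/lib/logstash/sincedb-logstash-test-s3"   # must be a file path, not a directory
+  }
+}
+```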
+
+
+### `temporary_directory` [plugins-inputs-s3-temporary_directory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash stores temporary files before processing them.
+
+
+### `use_aws_bundled_ca` [plugins-inputs-s3-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use bundled CA certificates that ship with AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. Windows, you can set this to `true`.
+
+
+### `watch_for_new_files` [plugins-inputs-s3-watch_for_new_files]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Whether or not to watch for new files. Disabling this option causes the input to close itself after processing the files from a single listing.
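+
+For a one-shot run over the current bucket contents, the option can be disabled as in this sketch (the bucket name is a placeholder):
+
+```ruby
+input {
+  s3 {
+    bucket => "logstash-test"        # hypothetical bucket name
+    watch_for_new_files => false     # process the current listing once, then stop watching
+  }
+}
+```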
+
+
+
+## Common options [plugins-inputs-s3-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-s3-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-s3-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-s3-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-s3-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-s3-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-s3-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-s3-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-s3-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-s3-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-s3-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 s3 inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ s3 {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-s3-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-s3-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-salesforce.md b/docs/reference/plugins-inputs-salesforce.md
new file mode 100644
index 000000000..7e2ca7d8a
--- /dev/null
+++ b/docs/reference/plugins-inputs-salesforce.md
@@ -0,0 +1,292 @@
+---
+navigation_title: "salesforce"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-salesforce.html
+---
+
+# Salesforce input plugin [plugins-inputs-salesforce]
+
+
+* Plugin version: v3.2.1
+* Released on: 2023-05-30
+* [Changelog](https://github.com/logstash-plugins/logstash-input-salesforce/blob/v3.2.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-salesforce-index.md).
+
+## Installation [_installation_14]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-salesforce`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_49]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-salesforce). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_49]
+
+This Logstash input plugin allows you to query Salesforce using SOQL and load the results into Logstash, one event per returned row. You can configure it to pull entire sObjects or only specific fields.
+
+::::{note}
+This input plugin will stop after all the results of the query are processed and will need to be re-run to fetch new results. It does not utilize the streaming API.
+::::
+
+
+In order to use this plugin, you will need to create a new SFDC application using OAuth. More details can be found here: [https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm](https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm)
+
+You will also need a username, password, and security token for your Salesforce instance. More details for generating a token can be found here: [https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm](https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm)
+
+In addition to specifying an sObject, you can also supply a list of API fields that will be used in the SOQL query.
+
+
+## HTTP proxy [_http_proxy]
+
+If your infrastructure uses an HTTP proxy, you can set the `SALESFORCE_PROXY_URI` environment variable to the desired URI value (e.g. `export SALESFORCE_PROXY_URI="http://proxy.example.com:123"`).
+
+
+## Example [_example_2]
+
+This example prints all Salesforce Opportunities to standard output:
+
+```ruby
+input {
+ salesforce {
+ client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
+ client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
+ username => 'email@example.com'
+ password => 'super-secret'
+ security_token => 'SECURITY TOKEN FOR THIS USER'
+ sfdc_object_name => 'Opportunity'
+ }
+}
+
+output {
+ stdout {
+ codec => rubydebug
+ }
+}
+```
+
+
+## Salesforce Input Configuration Options [plugins-inputs-salesforce-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-salesforce-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_version`](#plugins-inputs-salesforce-api_version) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`client_id`](#plugins-inputs-salesforce-client_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`client_secret`](#plugins-inputs-salesforce-client_secret) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`password`](#plugins-inputs-salesforce-password) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`security_token`](#plugins-inputs-salesforce-security_token) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`sfdc_fields`](#plugins-inputs-salesforce-sfdc_fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`sfdc_filters`](#plugins-inputs-salesforce-sfdc_filters) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sfdc_instance_url`](#plugins-inputs-salesforce-sfdc_instance_url) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sfdc_object_name`](#plugins-inputs-salesforce-sfdc_object_name) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`to_underscores`](#plugins-inputs-salesforce-to_underscores) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_test_sandbox`](#plugins-inputs-salesforce-use_test_sandbox) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_tooling_api`](#plugins-inputs-salesforce-use_tooling_api) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`username`](#plugins-inputs-salesforce-username) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-inputs-salesforce-common-options) for a list of options supported by all input plugins.
+
+
+
+### `api_version` [plugins-inputs-salesforce-api_version]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+By default, this uses the default Restforce API version. To override it, set this option to a specific version, for example `"32.0"`.
+
+
+### `client_id` [plugins-inputs-salesforce-client_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Consumer Key for authentication. You must set up a new SFDC connected app with OAuth to use this input. More information can be found here: [https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm](https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm)
+
+
+### `client_secret` [plugins-inputs-salesforce-client_secret]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Consumer Secret from your OAuth-enabled connected app.
+
+
+### `password` [plugins-inputs-salesforce-password]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The password used to log in to Salesforce.
+
+
+### `security_token` [plugins-inputs-salesforce-security_token]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The security token for this account. For more information about generating a security token, see: [https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm](https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm)
+
+
+### `sfdc_fields` [plugins-inputs-salesforce-sfdc_fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+These are the field names to return in the Salesforce query. If this is empty, all fields are returned.
+
+
+### `sfdc_filters` [plugins-inputs-salesforce-sfdc_filters]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+This value is added to the WHERE clause of the SOQL statement. You can filter on additional fields by chaining conditions, for example `field1 = value1 AND field2 = value2`.
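+
+Building on the earlier example, this sketch narrows the SOQL query with `sfdc_fields` and `sfdc_filters`; the field list and filter are illustrative and the credentials are placeholders.
+
+```ruby
+input {
+  salesforce {
+    client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
+    client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
+    username => 'email@example.com'
+    password => 'super-secret'
+    security_token => 'SECURITY TOKEN FOR THIS USER'
+    sfdc_object_name => 'Opportunity'
+    sfdc_fields => ['Id', 'Name', 'StageName', 'Amount']   # illustrative field list
+    sfdc_filters => "StageName = 'Closed Won'"             # illustrative WHERE clause fragment
+  }
+}
+```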
+
+
+### `sfdc_instance_url` [plugins-inputs-salesforce-sfdc_instance_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The URL of a Salesforce instance. Provide the URL if you want to connect to your own Salesforce instance instead of `login.salesforce.com` or `test.salesforce.com` at login.
+
+Use either this option or `use_test_sandbox`, but not both, to configure the URL to which the plugin connects.
+
+
+### `sfdc_object_name` [plugins-inputs-salesforce-sfdc_object_name]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the Salesforce object to query.
+
+
+### `to_underscores` [plugins-inputs-salesforce-to_underscores]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Setting this to true will convert SFDC’s `NamedFields__c` to `named_fields__c`.
+
+
+### `use_test_sandbox` [plugins-inputs-salesforce-use_test_sandbox]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set this to true to connect to a sandbox SFDC instance, logging in through test.salesforce.com.
+
+Use either this option or `sfdc_instance_url`, but not both, to configure the URL to which the plugin connects.
+
+
+### `use_tooling_api` [plugins-inputs-salesforce-use_tooling_api]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set this to true to connect to the SFDC Tooling API instead of the regular SFDC REST API. See [https://developer.salesforce.com/docs/atlas.en-us.api_tooling.meta/api_tooling](https://developer.salesforce.com/docs/atlas.en-us.api_tooling.meta/api_tooling) for details about the Tooling API. Use cases include reading Apex unit test results, flow coverage results (e.g. coverage of elements of SFDC flows), and security health check risks.
+
+
+### `username` [plugins-inputs-salesforce-username]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A valid Salesforce username, usually your email address. Used for authentication; queries are run as this user.
+
+
+
+## Common options [plugins-inputs-salesforce-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-salesforce-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-salesforce-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-salesforce-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-salesforce-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-salesforce-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-salesforce-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-salesforce-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-salesforce-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-salesforce-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-salesforce-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 salesforce inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ salesforce {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-salesforce-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-salesforce-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-snmp.md b/docs/reference/plugins-inputs-snmp.md
new file mode 100644
index 000000000..563bafddd
--- /dev/null
+++ b/docs/reference/plugins-inputs-snmp.md
@@ -0,0 +1,543 @@
+---
+navigation_title: "snmp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-snmp.html
+---
+
+# SNMP input plugin [plugins-inputs-snmp]
+
+
+* A component of the [snmp integration plugin](/reference/plugins-integrations-snmp.md)
+* Integration version: v4.0.5
+* Released on: 2025-01-06
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-snmp-index.md).
+
+## Getting help [_getting_help_50]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-snmp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+::::{admonition} Migrating to `logstash-integration-snmp` from stand-alone `input-snmp`
+The `logstash-input-snmp` plugin is now a component of the `logstash-integration-snmp` plugin which is bundled with {{ls}} 8.15.0 by default. This integrated plugin package provides better alignment in snmp processing, better resource management, easier package maintenance, and a smaller installation footprint.
+
+Before you upgrade to {{ls}} 8.15.0, be aware of [behavioral and mapping differences](/reference/plugins-integrations-snmp.md#plugins-integrations-snmp-migration) between current stand-alone plugins and the new versions included in `integration-snmp`.
+
+::::
+
+
+
+## Description [_description_50]
+
+The SNMP input polls network devices using Simple Network Management Protocol (SNMP) to gather information related to the current state of the devices’ operation.
+
+The SNMP input plugin supports SNMP v1, v2c, and v3 over UDP and TCP transport protocols.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-snmp-ecs]
+
+Because SNMP data has specific field names based on OIDs, we recommend setting a [`target`](#plugins-inputs-snmp-target). Metadata fields follow a specific naming convention when [ECS compatibility mode](#plugins-inputs-snmp-ecs_compatibility) is enabled.
+
+| ECS disabled | ECS v1, v8 | Description |
+| --- | --- | --- |
+| [@metadata][host_protocol] | [@metadata][input][snmp][host][protocol] | The protocol used to retrieve data, e.g. "udp" |
+| [@metadata][host_address] | [@metadata][input][snmp][host][address] | The host IP, e.g. "192.168.1.1" |
+| [@metadata][host_port] | [@metadata][input][snmp][host][port] | The host’s port, e.g. "161" |
+| [@metadata][host_community] | [@metadata][input][snmp][host][community] | The configured community, e.g. "public" |
+| [host] | | |
+
+
+## Importing MIBs [plugins-inputs-snmp-import-mibs]
+
+This plugin already includes the IETF MIBs (management information bases), and you do not need to import them. If you need additional MIBs, you need to import them. Check out [Importing MIBs](/reference/plugins-integrations-snmp.md#plugins-integrations-snmp-import-mibs) for info.
+
+
+## SNMP Input Configuration Options [plugins-inputs-snmp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-snmp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-inputs-snmp-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`get`](#plugins-inputs-snmp-get) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`hosts`](#plugins-inputs-snmp-hosts) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`interval`](#plugins-inputs-snmp-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`local_engine_id`](#plugins-inputs-snmp-local_engine_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`mib_paths`](#plugins-inputs-snmp-mib_paths) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`oid_mapping_format`](#plugins-inputs-snmp-oid_mapping_format) | [string](/reference/configuration-file-structure.md#string), one of `["default", "ruby_snmp", "dotted_string"]` | No |
+| [`oid_map_field_values`](#plugins-inputs-snmp-oid_map_field_values) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`oid_path_length`](#plugins-inputs-snmp-oid_path_length) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`oid_root_skip`](#plugins-inputs-snmp-oid_root_skip) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`poll_hosts_timeout`](#plugins-inputs-snmp-poll_hosts_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`walk`](#plugins-inputs-snmp-walk) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`tables`](#plugins-inputs-snmp-tables) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`target`](#plugins-inputs-snmp-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-snmp-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_provided_mibs`](#plugins-inputs-snmp-use_provided_mibs) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+
+## SNMPv3 Authentication Options [_snmpv3_authentication_options]
+
+This plugin supports the following SNMPv3 authentication options.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`auth_pass`](#plugins-inputs-snmp-auth_pass) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`auth_protocol`](#plugins-inputs-snmp-auth_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["md5", "sha", "sha2", "hmac128sha224", "hmac192sha256", "hmac256sha384", "hmac384sha512"]` | No |
+| [`priv_pass`](#plugins-inputs-snmp-priv_pass) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`priv_protocol`](#plugins-inputs-snmp-priv_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["des", "3des", "aes", "aes128", "aes192", "aes256"]` | No |
+| [`security_level`](#plugins-inputs-snmp-security_level) | [string](/reference/configuration-file-structure.md#string), one of `["noAuthNoPriv", "authNoPriv", "authPriv"]` | No |
+| [`security_name`](#plugins-inputs-snmp-security_name) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+## SNMP Input Configuration Options [_snmp_input_configuration_options]
+
+Also see [Common options](#plugins-inputs-snmp-common-options) for a list of options supported by all input plugins.
+
+### `ecs_compatibility` [plugins-inputs-snmp-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (fields might be set at the root of the event)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, the `host` field)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `get` [plugins-inputs-snmp-get]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting
+
+Use the `get` option to query for scalar values for the given OID(s). One or more OIDs are specified as an array of strings.
+
+Example
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.1.3.0", "1.3.6.1.2.1.1.5.0"]
+ hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+ }
+}
+```
+
+
+### `hosts` [plugins-inputs-snmp-hosts]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting
+
+The `hosts` option specifies the list of hosts to query the configured `get` and `walk` options.
+
+Each host definition is a hash and must define the `host` key and value. `host` must use the format `{tcp|udp}:{ip address}/{port}`, for example `host => "udp:127.0.0.1/161"`
+
+Each host definition can optionally include the following keys and values:
+
+* `community` the community string, default is `public`.
+* `version` `1`, `2c` or `3`, default is `2c`.
+* `retries` is the number of retries in case of failure, default is `2`.
+* `timeout` is the timeout in milliseconds with a default value of `1000`.
+
+**Specifying all hosts options**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ hosts => [{host => "udp:127.0.0.1/161" community => "public" version => "2c" retries => 2 timeout => 1000}]
+ }
+}
+```
+
+**Specifying multiple hosts**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ hosts => [{host => "udp:127.0.0.1/161" community => "public"}, {host => "udp:192.168.0.1/161" community => "private"}]
+ }
+}
+```
+
+**Specifying IPv6 hosts**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ hosts => [{host => "udp:[::1]/161" community => "public"}, {host => "udp:[2001:db8::2:1]/161" community => "private"}]
+ }
+}
+```
+
+
+### `interval` [plugins-inputs-snmp-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `30`
+
+The `interval` option specifies the polling interval in seconds. If polling all configured hosts takes longer than this interval, a warning will be emitted to the logs.
+
+
+### `local_engine_id` [plugins-inputs-snmp-local_engine_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The SNMPv3 local engine ID. Its length must be greater than or equal to 5 and less than or equal to 32. If not provided, a default ID is generated based on the local IP address and four additional random bytes.
+
+
+### `mib_paths` [plugins-inputs-snmp-mib_paths]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The `mib_paths` option specifies the location of one or more imported MIB files. The value can be either a directory path containing the imported MIB (`.dic`, `.yaml`) files, or a file path to a single MIB file.
+
+This plugin includes the IETF MIBs. If you require other MIBs, you need to import them. See [Importing MIBs](#plugins-inputs-snmp-import-mibs).
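+
+A sketch pointing the plugin at a directory of imported MIB files; the directory path is a placeholder.
+
+```ruby
+input {
+  snmp {
+    get => ["1.3.6.1.2.1.1.1.0"]
+    hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+    mib_paths => ["/etc/logstash/mibs"]   # placeholder directory containing imported .dic/.yaml files
+  }
+}
+```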
+
+
+### `oid_mapping_format` [plugins-inputs-snmp-oid_mapping_format]
+
+* Value can be any of: `default`, `ruby_snmp`, `dotted_string`
+* Default value is `"default"`
+
+Defines the textual representation used to map an OID into a Logstash event field name:
+
+* `default` translates every identifier, using the MIB-resolved names, separated by dots. Example: `1.3.6.1.2.1.1.2.0` is mapped as `iso.org.dod.internet.mgmt.mib-2.system.sysObjectID.0`
+* `ruby_snmp` produces field names prefixed by the MIB module name, followed by the latest resolved identifier name and any unknown values. Example: `1.3.6.1.2.1.1.2.0` is mapped as `SNMPv2-MIB::sysObjectID.0`.
+* `dotted_string` maps fields using the standard dotted string representation. Example: `1.3.6.1.2.1.1.2.0` is mapped as `1.3.6.1.2.1.1.2.0`
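+
+As a sketch, selecting the `ruby_snmp` format produces shorter, module-prefixed field names:
+
+```ruby
+input {
+  snmp {
+    get => ["1.3.6.1.2.1.1.2.0"]
+    hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+    oid_mapping_format => "ruby_snmp"   # e.g. fields named like "SNMPv2-MIB::sysObjectID.0"
+  }
+}
+```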
+
+
+### `oid_map_field_values` [plugins-inputs-snmp-oid_map_field_values]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Defines whether Logstash event field values whose type is `OID` are mapped using the textual representation configured by the [`oid_mapping_format`](#plugins-inputs-snmp-oid_mapping_format) option.
+
+
+### `oid_root_skip` [plugins-inputs-snmp-oid_root_skip]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The `oid_root_skip` option specifies the number of OID root digits to ignore in the event field name. For example, in a numeric OID like "1.3.6.1.2.1.1.1.0" the first 5 digits could be ignored by setting `oid_root_skip => 5`, which would result in a field name "1.1.1.0". Similarly, when a MIB is used, an OID such as "1.3.6.1.2.mib-2.system.sysDescr.0" would become "mib-2.system.sysDescr.0".
+
+* You can use this setting or [`oid_path_length`](#plugins-inputs-snmp-oid_path_length), but not both at the same time.
+* Use this setting only if [`oid_mapping_format`](#plugins-inputs-snmp-oid_mapping_format) is set to `default`.
+
+
+### `oid_path_length` [plugins-inputs-snmp-oid_path_length]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The `oid_path_length` option specifies the number of OID root digits to retain in the event field name. For example, in a numeric OID like "1.3.6.1.2.1.1.1.0" the last 2 digits could be retained by setting `oid_path_length => 2`, which would result in a field name "1.0". Similarly, when a MIB is used, an OID such as "1.3.6.1.2.mib-2.system.sysDescr.0" would become "sysDescr.0".
+
+* You can use this setting or [`oid_root_skip`](#plugins-inputs-snmp-oid_root_skip), but not both at the same time.
+* This setting can be used only if [`oid_mapping_format`](#plugins-inputs-snmp-oid_mapping_format) is set to `default`.
+
+
+### `poll_hosts_timeout` [plugins-inputs-snmp-poll_hosts_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting
+
+Specifies the maximum amount of time in milliseconds that the polling client waits for all [`hosts`](#plugins-inputs-snmp-hosts) responses. If not all responses are received before the timeout elapses, the client fails and some hosts might not get polled during the current cycle.
+
+By default, it uses the highest of `1 hour`, the maximum `timeout` configured across [`hosts`](#plugins-inputs-snmp-hosts), and the [`interval`](#plugins-inputs-snmp-interval) value.
+
+
+### `walk` [plugins-inputs-snmp-walk]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting
+
+Use the `walk` option to query the subtree of information starting at the given OID(s). One or more OIDs are specified as an array of strings.
+
+Example
+
+```ruby
+input {
+  snmp {
+ walk => ["1.3.6.1.2.1.1"]
+ hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+ }
+}
+```
+
+
+### `tables` [plugins-inputs-snmp-tables]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting
+* Results are returned under a field using the table name
+
+The `tables` option is used to query for tabular values for the given column OID(s).
+
+Each table definition is a hash that must define the `name` key (used as the result field name) and the `columns` to return.
+
+**Specifying a single table**
+
+```ruby
+input {
+ snmp {
+ hosts => [{host => "udp:127.0.0.1/161" community => "public" version => "2c" retries => 2 timeout => 1000}]
+ tables => [ {"name" => "interfaces" "columns" => ["1.3.6.1.2.1.2.2.1.1", "1.3.6.1.2.1.2.2.1.2", "1.3.6.1.2.1.2.2.1.5"]} ]
+ }
+}
+```
+
+**Specifying multiple tables**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ tables => [ {"name" => "interfaces" "columns" => ["1.3.6.1.2.1.2.2.1.1", "1.3.6.1.2.1.2.2.1.2", "1.3.6.1.2.1.2.2.1.5"]}, {"name" => "ltmPoolStatTable" "columns" => ["1.3.6.1.4.1.3375.2.2.5.2.3.1.1", "1.3.6.1.4.1.3375.2.2.5.2.3.1.6"]} ]
+ }
+}
+```
+
+
+### `target` [plugins-inputs-snmp-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The name of the field under which SNMP payloads are assigned. If not specified, data is stored in the root of the event.
+
+Setting a target is recommended when [`ecs_compatibility`](#plugins-inputs-snmp-ecs_compatibility) is enabled.
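+
+For example, a configuration along these lines (placeholder host address) nests the SNMP payload under a `snmp` field instead of the event root:
+
+```ruby
+input {
+  snmp {
+    get => ["1.3.6.1.2.1.1.1.0"]
+    hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+    ecs_compatibility => "v8"
+    target => "snmp"
+  }
+}
+```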
+
+
+### `threads` [plugins-inputs-snmp-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is the number of CPU cores
+
+The number of threads to use for executing the hosts SNMP requests.
+
+
+### `use_provided_mibs` [plugins-inputs-snmp-use_provided_mibs]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+This plugin provides all IETF MIBs (management information bases), publicly available in the [libsmi](https://www.ibr.cs.tu-bs.de/projects/libsmi) version `0.5.0`. When enabled, it automatically loads the bundled MIBs and provides mapping of the numeric OIDs to MIB field names in the resulting event.
+
+
+
+## SNMPv3 Authentication Options [_snmpv3_authentication_options_2]
+
+A **single user** can be configured and will be used for all defined SNMPv3 hosts. Multiple snmp input declarations will be needed if multiple SNMPv3 users are required. These options are required only if you are using SNMPv3.
+
+### `auth_pass` [plugins-inputs-snmp-auth_pass]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting
+
+The `auth_pass` option specifies the SNMPv3 authentication passphrase or password.
+
+
+### `auth_protocol` [plugins-inputs-snmp-auth_protocol]
+
+The `auth_protocol` option specifies the SNMPv3 authentication protocol or type.
+
+* Value can be any of: `md5`, `sha`, `sha2`, `hmac128sha224`, `hmac192sha256`, `hmac256sha384`, `hmac384sha512`
+* Note that `sha2` and `hmac192sha256` are equivalent
+* There is no default value for this setting
+
+
+### `priv_pass` [plugins-inputs-snmp-priv_pass]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting
+
+The `priv_pass` option specifies the SNMPv3 encryption password.
+
+
+### `priv_protocol` [plugins-inputs-snmp-priv_protocol]
+
+* Value can be any of: `des`, `3des`, `aes`, `aes128`, `aes192`, `aes256`
+* Note that `aes` and `aes128` are equivalent
+* There is no default value for this setting
+
+The `priv_protocol` option specifies the SNMPv3 privacy/encryption protocol.
+
+
+### `security_level` [plugins-inputs-snmp-security_level]
+
+* Value can be any of: `noAuthNoPriv`, `authNoPriv`, `authPriv`
+* There is no default value for this setting
+
+The `security_level` option specifies the SNMPv3 security level: no authentication and no privacy (`noAuthNoPriv`), authentication without privacy (`authNoPriv`), or authentication with privacy (`authPriv`).
+
+
+### `security_name` [plugins-inputs-snmp-security_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The `security_name` option specifies the SNMPv3 security name or user name.
+
+
+
+## Configuration examples [plugins-inputs-snmp-examples]
+
+**Specifying SNMPv3 settings**
+
+```ruby
+input {
+ snmp {
+ hosts => [{host => "udp:127.0.0.1/161" version => "3"}]
+ get => ["1.3.6.1.2.1.1.1.0"]
+ security_name => "mySecurityName"
+ auth_protocol => "sha"
+ auth_pass => "ShaPassword"
+ priv_protocol => "aes"
+    priv_pass => "AesPassword"
+ security_level => "authPriv"
+ }
+}
+```
+
+**Using both `get` and `walk` in the same poll cycle for each host**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.1.3.0", "1.3.6.1.2.1.1.5.0"]
+ walk => ["1.3.6.1.2.1.1"]
+ hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+ }
+}
+```
+
+**Specifying all global options**
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ hosts => [{host => "udp:127.0.0.1/161"}]
+
+ mib_paths => ["path/to/converted/mibfile.dic"]
+ oid_root_skip => 0
+ interval => 30
+ }
+}
+```
+
+
+## Polled host information [_polled_host_information]
+
+All the polled host information is stored in the event `@metadata`:
+
+* `[@metadata][host_protocol]` : `udp` or `tcp`
+* `[@metadata][host_address]` : host address (for example, `127.0.0.1`)
+* `[@metadata][host_port]` : host port (for example, `161`)
+* `[@metadata][host_community]` : community string (for example, `public`)
+
+By default, a `host` field is added to the event with the `[@metadata][host_address]` value.
+
+```ruby
+config :add_field, :validate => :hash, :default => { "host" => "%{[@metadata][host_address]}" }
+```
+
+You can customize the format and content of the `host` field by specifying an alternate `add_field`.
+
+Example
+
+```ruby
+input {
+ snmp {
+ get => ["1.3.6.1.2.1.1.1.0"]
+ hosts => [{host => "udp:127.0.0.1/161"}]
+
+ add_field => {host => "%{[@metadata][host_protocol]}:%{[@metadata][host_address]}/%{[@metadata][host_port]},%{[@metadata][host_community]}"}
+ }
+}
+```
+
+
+## Common options [plugins-inputs-snmp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-snmp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`enable_metric`](#plugins-inputs-snmp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-snmp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-snmp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-snmp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-snmp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `enable_metric` [plugins-inputs-snmp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-snmp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 snmp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ snmp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-snmp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-snmp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-snmptrap.md b/docs/reference/plugins-inputs-snmptrap.md
new file mode 100644
index 000000000..23a94677e
--- /dev/null
+++ b/docs/reference/plugins-inputs-snmptrap.md
@@ -0,0 +1,440 @@
+---
+navigation_title: "snmptrap"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-snmptrap.html
+---
+
+# SNMP trap input plugin [plugins-inputs-snmptrap]
+
+
+* A component of the [snmp integration plugin](/reference/plugins-integrations-snmp.md)
+* Integration version: v4.0.5
+* Released on: 2025-01-06
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-snmptrap-index.md).
+
+## Getting help [_getting_help_51]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-snmp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+::::{admonition} Migrating to `logstash-integration-snmp` from stand-alone `input-snmptrap`
+The `logstash-input-snmptrap` plugin is now a component of the `logstash-integration-snmp` plugin which is bundled with {{ls}} 8.15.0 by default. This integrated plugin package provides better alignment in snmp processing, better resource management, easier package maintenance, and a smaller installation footprint.
+
+Before you upgrade to {{ls}} 8.15.0, be aware of [behavioral and mapping differences](/reference/plugins-integrations-snmp.md#plugins-integrations-snmp-migration) between current stand-alone plugins and the new versions included in `integration-snmp`. If you need to maintain current mappings for the `input-snmptrap` plugin, you have options to [preserve existing behavior](/reference/plugins-integrations-snmp.md#plugins-integrations-snmp-input-snmptrap-compat).
+
+::::
+
+
+
+## Description [_description_51]
+
+The `logstash-input-snmptrap` plugin reads SNMP trap messages as events.
+
+The resulting `message` field resembles:
+
+```json
+{"agent_addr":"192.168.1.40", "generic_trap":6, "specific_trap":15511, "enterprise":"1.3.6.1.2.1.1.1", "variable_bindings":{"1.3.6.1.2.1.1.2.0":"test one", "1.3.6.1.2.1.1.1.0":"test two"}, "type":"V1TRAP", "community":"public", "version":1, "timestamp":1500}
+```
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-snmptrap-ecs]
+
+Because SNMP data has specific field names based on OIDs, we recommend setting a [`target`](#plugins-inputs-snmptrap-target). The source host field changes based on [`ecs_compatibility`](#plugins-inputs-snmptrap-ecs_compatibility).
+
+| ECS disabled | ECS v1, v8 | Availability | Description |
+| --- | --- | --- | --- |
+| [host] | [host][ip] | *Always* | *IP address of the host e.g. "192.168.1.11"* |
+
+This plugin also adds the trap PDU metadata to each event. The value is stored in the `@metadata` where it can be used by other plugins in the pipeline.
+
+| ECS disabled, v1, v8 | Availability | Description |
+| --- | --- | --- |
+| [@metadata][input][snmptrap][pdu][agent_addr] | *`SNMPv1`* | *Network address of the object generating the trap* |
+| [@metadata][input][snmptrap][pdu][community] | *`SNMPv1` `SNMPv2c`* | *SNMP community* |
+| [@metadata][input][snmptrap][pdu][enterprise] | *`SNMPv1`* | *Type of object generating the trap* |
+| [@metadata][input][snmptrap][pdu][error_index] | *`SNMPv2c` `SNMPv3`* | *Provides additional information by identifying which variable binding in the list caused the error* |
+| [@metadata][input][snmptrap][pdu][error_status] | *`SNMPv2c` `SNMPv3`* | *Error status code* |
+| [@metadata][input][snmptrap][pdu][error_status_text] | *`SNMPv2c` `SNMPv3`* | *Error status code description* |
+| [@metadata][input][snmptrap][pdu][generic_trap] | *`SNMPv1`* | *Generic trap type* |
+| [@metadata][input][snmptrap][pdu][request_id] | *`SNMPv2c` `SNMPv3`* | *Request ID* |
+| [@metadata][input][snmptrap][pdu][specific_trap] | *`SNMPv1`* | *Specific code, presented even if the generic_trap is not enterprise specific* |
+| [@metadata][input][snmptrap][pdu][timestamp] | *`SNMPv1`* | *Time elapsed between the last (re)initialization of the network entity and the generation of the trap* |
+| [@metadata][input][snmptrap][pdu][type] | *Always* | *PDU type* |
+| [@metadata][input][snmptrap][pdu][variable_bindings] | *Always* | *SNMP variable bindings values* |
+| [@metadata][input][snmptrap][pdu][version] | *Always* | *SNMP version* |
+
+
+## Importing MIBs [plugins-inputs-snmptrap-import-mibs]
+
+This plugin already includes the IETF MIBs (management information bases), so you do not need to import them. If you need additional MIBs, you must import them yourself. See [Importing MIBs](/reference/plugins-integrations-snmp.md#plugins-integrations-snmp-import-mibs) for details.
+
+
+## SNMP Trap Input Configuration Options [plugins-inputs-snmptrap-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-snmptrap-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`community`](#plugins-inputs-snmptrap-community) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ecs_compatibility`](#plugins-inputs-snmptrap-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-snmptrap-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`mib_paths`](#plugins-inputs-snmptrap-mib_paths) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`oid_mapping_format`](#plugins-inputs-snmptrap-oid_mapping_format) | [string](/reference/configuration-file-structure.md#string), one of `["default", "ruby_snmp", "dotted_string"]` | No |
+| [`oid_map_field_values`](#plugins-inputs-snmptrap-oid_map_field_values) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`oid_path_length`](#plugins-inputs-snmptrap-oid_path_length) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`oid_root_skip`](#plugins-inputs-snmptrap-oid_root_skip) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`port`](#plugins-inputs-snmptrap-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`supported_transports`](#plugins-inputs-snmptrap-supported_transports) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`supported_versions`](#plugins-inputs-snmptrap-supported_versions) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`target`](#plugins-inputs-snmptrap-target) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-snmptrap-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_provided_mibs`](#plugins-inputs-snmptrap-use_provided_mibs) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`yamlmibdir`](#plugins-inputs-snmptrap-yamlmibdir) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+
+
+## SNMPv3 Authentication Options [_snmpv3_authentication_options_3]
+
+This plugin supports the following SNMPv3 authentication options.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`auth_pass`](#plugins-inputs-snmptrap-auth_pass) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`auth_protocol`](#plugins-inputs-snmptrap-auth_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["md5", "sha", "sha2", "hmac128sha224", "hmac192sha256", "hmac256sha384", "hmac384sha512"]` | No |
+| [`priv_pass`](#plugins-inputs-snmptrap-priv_pass) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`priv_protocol`](#plugins-inputs-snmptrap-priv_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["des", "3des", "aes", "aes128", "aes192", "aes256"]` | No |
+| [`security_level`](#plugins-inputs-snmptrap-security_level) | [string](/reference/configuration-file-structure.md#string), one of `["noAuthNoPriv", "authNoPriv", "authPriv"]` | No |
+| [`security_name`](#plugins-inputs-snmptrap-security_name) | [string](/reference/configuration-file-structure.md#string) | No |
+
+
+## SNMP Trap Input Configuration Options [_snmp_trap_input_configuration_options]
+
+Also see [Common options](#plugins-inputs-snmptrap-common-options) for a list of options supported by all input plugins.
+
+### `community` [plugins-inputs-snmptrap-community]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["public"]`
+
+The SNMPv1 and SNMPv2c communities to listen for. To allow any community, set this value to an empty array: `community => []`.
+
+Examples
+
+**Listen for `public` and `guest` communities**
+
+```ruby
+input {
+ snmptrap {
+ community => ["public", "guest"]
+ }
+}
+```
+
+**Listen for all communities**
+
+```ruby
+input {
+ snmptrap {
+ community => []
+ }
+}
+```
+
+
+### `ecs_compatibility` [plugins-inputs-snmptrap-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (fields might be set at the root of the event)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, the `host` field)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `host` [plugins-inputs-snmptrap-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+
+### `mib_paths` [plugins-inputs-snmptrap-mib_paths]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The `mib_paths` option specifies the location of one or more imported MIB files. The value can be either a directory path containing the imported MIB (`.dic`, `.yaml`) files or a file path to a single MIB file.
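+
+For example, a configuration along these lines (the paths are placeholders for MIBs you have imported) loads both a single converted MIB file and a directory of MIB files:
+
+```ruby
+input {
+  snmptrap {
+    mib_paths => ["path/to/converted/mibfile.dic", "path/to/mibs/dir"]
+  }
+}
+```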
+
+
+### `oid_mapping_format` [plugins-inputs-snmptrap-oid_mapping_format]
+
+* Value can be any of: `default`, `ruby_snmp`, `dotted_string`
+* Default value is `"default"`
+
+Defines the mapping textual representation of an OID in the Logstash event:
+
+* `default` translates every identifier, using the MIBs resolved names, separated by dots. Example: `1.3.6.1.2.1.1.2.0` is mapped as `iso.org.dod.internet.mgmt.mib-2.system.sysObjectID.0`
+* `ruby_snmp` produces field names prefixed by the MIB module name, followed by the latest resolved identifier name and unknown values. Example: `1.3.6.1.2.1.1.2.0` is mapped as `SNMPv2-MIB::sysObjectID.0`.
+* `dotted_string` maps fields using the standard dotted string representation. Example: `1.3.6.1.2.1.1.2.0` is mapped as `1.3.6.1.2.1.1.2.0`.
+
+
+### `oid_map_field_values` [plugins-inputs-snmptrap-oid_map_field_values]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Defines if the Logstash event fields values, which types are `OID`, are mapped using the configured OID textual representation set on the [`oid_mapping_format`](#plugins-inputs-snmptrap-oid_mapping_format) option.
+
+
+### `oid_root_skip` [plugins-inputs-snmptrap-oid_root_skip]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The `oid_root_skip` option specifies the number of OID root digits to ignore in the event field name. For example, in a numeric OID such as "1.3.6.1.2.1.1.1.0", the first 5 digits can be ignored by setting `oid_root_skip => 5`, which results in the field name "1.1.1.0". Similarly, when a MIB is used, an OID such as "1.3.6.1.2.mib-2.system.sysDescr.0" becomes "mib-2.system.sysDescr.0".
+
+* You can use this setting or [`oid_path_length`](#plugins-inputs-snmptrap-oid_path_length), but not both at the same time.
+* Use this setting only if [`oid_mapping_format`](#plugins-inputs-snmptrap-oid_mapping_format) is set to `default`.
+
+
+### `oid_path_length` [plugins-inputs-snmptrap-oid_path_length]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The `oid_path_length` option specifies the number of OID root digits to retain in the event field name. For example, in a numeric OID such as "1.3.6.1.2.1.1.1.0", the last 2 digits can be retained by setting `oid_path_length => 2`, which results in the field name "1.0". Similarly, when a MIB is used, an OID such as "1.3.6.1.2.mib-2.system.sysDescr.0" becomes "sysDescr.0".
+
+* You can use this setting or [`oid_root_skip`](#plugins-inputs-snmptrap-oid_root_skip), but not both at the same time.
+* Use this setting only if [`oid_mapping_format`](#plugins-inputs-snmptrap-oid_mapping_format) is set to `default`.
+
+
+### `port` [plugins-inputs-snmptrap-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1062`
+
+The port to listen on. Remember that ports less than 1024 (privileged ports) may require root privileges to use, hence the default of 1062.
+
+
+### `supported_transports` [plugins-inputs-snmptrap-supported_transports]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `tcp`, `udp`
+* Default value is `["udp"]`
+
+The supported transport protocols to listen on.
+
+SNMP was originally designed for use with UDP as the transport protocol, and UDP remains the official recommendation. TCP is an optional transport mapping and can be enabled if needed. For more details on SNMP over TCP, refer to [RFC 3430](https://datatracker.ietf.org/doc/html/rfc3430).
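+
+For example, this configuration sketch listens for traps over both UDP and TCP on the default port:
+
+```ruby
+input {
+  snmptrap {
+    supported_transports => ["udp", "tcp"]
+    port => 1062
+  }
+}
+```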
+
+
+### `supported_versions` [plugins-inputs-snmptrap-supported_versions]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `1`, `2c`, `3`
+* Default value is `["1", "2c"]`
+
+The supported SNMP protocol versions to listen on. SNMP messages for versions that are unsupported or disabled are automatically discarded.
+
+
+### `target` [plugins-inputs-snmptrap-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The name of the field under which SNMP payloads are assigned. If not specified, data is stored in the root of the event.
+
+Setting a target is recommended when [`ecs_compatibility`](#plugins-inputs-snmptrap-ecs_compatibility) is enabled.
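+
+For example, a configuration along these lines (the target field name is only an example) nests the trap payload under a `snmptrap` field instead of the event root:
+
+```ruby
+input {
+  snmptrap {
+    ecs_compatibility => "v8"
+    target => "snmptrap"
+  }
+}
+```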
+
+
+### `threads` [plugins-inputs-snmptrap-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is 75% of the number of CPU cores
+
+The number of threads to use for processing the received SNMP trap messages.
+
+
+### `use_provided_mibs` [plugins-inputs-snmptrap-use_provided_mibs]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+This plugin provides all IETF MIBs (management information bases), publicly available in the [libsmi](https://www.ibr.cs.tu-bs.de/projects/libsmi) version `0.5.0`. When enabled, it automatically loads the bundled MIBs and provides mapping of the numeric OIDs to MIB field names in the resulting event.
+
+
+### `yamlmibdir` [plugins-inputs-snmptrap-yamlmibdir]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Replaced by [`mib_paths`](#plugins-inputs-snmptrap-mib_paths)
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The directory of YAML MIB maps (same format ruby-snmp uses).
+
+
+
+## SNMPv3 Authentication Options [_snmpv3_authentication_options_4]
+
+A **single user** can be configured. Multiple snmptrap input declarations will be needed if multiple SNMPv3 users are required. These options are required only if you are using SNMPv3.
+
+### `auth_pass` [plugins-inputs-snmptrap-auth_pass]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting
+
+The `auth_pass` option specifies the SNMPv3 authentication passphrase or password.
+
+
+### `auth_protocol` [plugins-inputs-snmptrap-auth_protocol]
+
+The `auth_protocol` option specifies the SNMPv3 authentication protocol or type.
+
+* Value can be any of: `md5`, `sha`, `sha2`, `hmac128sha224`, `hmac192sha256`, `hmac256sha384`, `hmac384sha512`
+* Note that `sha2` and `hmac192sha256` are equivalent
+* There is no default value for this setting
+
+
+### `priv_pass` [plugins-inputs-snmptrap-priv_pass]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting
+
+The `priv_pass` option specifies the SNMPv3 encryption password.
+
+
+### `priv_protocol` [plugins-inputs-snmptrap-priv_protocol]
+
+* Value can be any of: `des`, `3des`, `aes`, `aes128`, `aes192`, `aes256`
+* Note that `aes` and `aes128` are equivalent
+* There is no default value for this setting
+
+The `priv_protocol` option specifies the SNMPv3 privacy/encryption protocol.
+
+
+### `security_level` [plugins-inputs-snmptrap-security_level]
+
+* Value can be any of: `noAuthNoPriv`, `authNoPriv`, `authPriv`
+* There is no default value for this setting
+
+The `security_level` option specifies the SNMPv3 security level: no authentication and no privacy (`noAuthNoPriv`), authentication without privacy (`authNoPriv`), or authentication with privacy (`authPriv`).
+
+
+### `security_name` [plugins-inputs-snmptrap-security_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The `security_name` option specifies the SNMPv3 security name or user name.
+
+
+
+## Configuration examples [_configuration_examples_2]
+
+**Specifying SNMPv3 traps settings**
+
+```ruby
+input {
+ snmptrap {
+ supported_versions => ['3']
+ security_name => "mySecurityName"
+ auth_protocol => "sha"
+ auth_pass => "ShaPassword"
+ priv_protocol => "aes"
+    priv_pass => "AesPassword"
+ security_level => "authPriv"
+ }
+}
+```
+
+
+## Common options [plugins-inputs-snmptrap-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-snmptrap-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-snmptrap-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-snmptrap-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-snmptrap-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-snmptrap-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-snmptrap-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-snmptrap-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-snmptrap-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-snmptrap-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-snmptrap-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 snmptrap inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ snmptrap {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-snmptrap-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-snmptrap-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-sqlite.md b/docs/reference/plugins-inputs-sqlite.md
new file mode 100644
index 000000000..9989dea76
--- /dev/null
+++ b/docs/reference/plugins-inputs-sqlite.md
@@ -0,0 +1,208 @@
+---
+navigation_title: "sqlite"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-sqlite.html
+---
+
+# Sqlite input plugin [plugins-inputs-sqlite]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-sqlite-index.md).
+
+## Installation [_installation_15]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-sqlite`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_52]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-sqlite). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_52]
+
+Read rows from an SQLite database.
+
+This is most useful in cases where you are logging directly to a table. Any tables being watched must have an `id` column that is monotonically increasing.
+
+All tables are read by default except:
+
+* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
+* `since_table` - this is used by this plugin to track state.
+
+Example
+
+```sql
+ % sqlite /tmp/example.db
+ sqlite> CREATE TABLE weblogs (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ ip STRING,
+ request STRING,
+ response INTEGER);
+ sqlite> INSERT INTO weblogs (ip, request, response)
+ VALUES ("1.2.3.4", "/index.html", 200);
+```
+
+Then, with this Logstash config:
+
+```ruby
+input {
+  sqlite {
+    path => "/tmp/example.db"
+    type => "weblogs"
+  }
+}
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+```
+
+Sample output:
+
+```ruby
+ {
+ "@source" => "sqlite://sadness/tmp/x.db",
+ "@tags" => [],
+ "@fields" => {
+ "ip" => "1.2.3.4",
+ "request" => "/index.html",
+ "response" => 200
+ },
+ "@timestamp" => "2013-05-29T06:16:30.850Z",
+ "@source_host" => "sadness",
+ "@source_path" => "/tmp/x.db",
+ "@message" => "",
+ "@type" => "foo"
+ }
+```
+
+
+## Sqlite Input Configuration Options [plugins-inputs-sqlite-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-sqlite-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`batch`](#plugins-inputs-sqlite-batch) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`exclude_tables`](#plugins-inputs-sqlite-exclude_tables) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`path`](#plugins-inputs-sqlite-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-inputs-sqlite-common-options) for a list of options supported by all input plugins.
+
+
+
+### `batch` [plugins-inputs-sqlite-batch]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How many rows to fetch at a time from each `SELECT` call.
+
+
+### `exclude_tables` [plugins-inputs-sqlite-exclude_tables]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Any tables to exclude by name. By default all tables are followed.
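+
+For example, a configuration along these lines (the table names are hypothetical) skips two bookkeeping tables while following everything else:
+
+```ruby
+input {
+  sqlite {
+    path => "/tmp/example.db"
+    exclude_tables => ["audit_log", "schema_migrations"]
+  }
+}
+```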
+
+
+### `path` [plugins-inputs-sqlite-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The path to the sqlite database file.
+
+
+
+## Common options [plugins-inputs-sqlite-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-sqlite-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-sqlite-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-sqlite-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-sqlite-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-sqlite-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-sqlite-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-sqlite-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-sqlite-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-sqlite-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-sqlite-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 sqlite inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ sqlite {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-sqlite-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-sqlite-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-sqs.md b/docs/reference/plugins-inputs-sqs.md
new file mode 100644
index 000000000..06cbbeae1
--- /dev/null
+++ b/docs/reference/plugins-inputs-sqs.md
@@ -0,0 +1,355 @@
+---
+navigation_title: "sqs"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-sqs.html
+---
+
+# Sqs input plugin [plugins-inputs-sqs]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-sqs-index.md).
+
+## Getting help [_getting_help_53]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_53]
+
+Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the Amazon Web Services suite of tools.
+
+Although SQS is similar to other queuing systems like AMQP, it uses a custom API and requires that you have an AWS account. See [http://aws.amazon.com/sqs/](http://aws.amazon.com/sqs/) for more details on how SQS works, what the pricing schedule looks like, and how to set up a queue.
+
+To use this plugin, you **must**:
+
+* Have an AWS account
+* Set up an SQS queue
+* Create an identity that has access to consume messages from the queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+* `sqs:ChangeMessageVisibility`
+* `sqs:ChangeMessageVisibilityBatch`
+* `sqs:DeleteMessage`
+* `sqs:DeleteMessageBatch`
+* `sqs:GetQueueAttributes`
+* `sqs:GetQueueUrl`
+* `sqs:ListQueues`
+* `sqs:ReceiveMessage`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user. A sample policy is as follows:
+
+```json
+ {
+ "Statement": [
+ {
+ "Action": [
+ "sqs:ChangeMessageVisibility",
+ "sqs:ChangeMessageVisibilityBatch",
+ "sqs:DeleteMessage",
+ "sqs:DeleteMessageBatch",
+ "sqs:GetQueueAttributes",
+ "sqs:GetQueueUrl",
+ "sqs:ListQueues",
+ "sqs:ReceiveMessage"
+ ],
+ "Effect": "Allow",
+ "Resource": [
+ "arn:aws:sqs:us-east-1:123456789012:Logstash"
+ ]
+ }
+ ]
+ }
+```
+
+See [http://aws.amazon.com/iam/](http://aws.amazon.com/iam/) for more details on setting up AWS identities.
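+
+A minimal configuration sketch (the queue name and region are placeholders, and credentials are assumed to come from the default AWS credential chain described under `access_key_id`) looks like this:
+
+```ruby
+input {
+  sqs {
+    queue => "Logstash"
+    region => "us-east-1"
+  }
+}
+```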
+
+
+## Sqs Input Configuration Options [plugins-inputs-sqs-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-sqs-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-inputs-sqs-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`additional_settings`](#plugins-inputs-sqs-additional_settings) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`aws_credentials_file`](#plugins-inputs-sqs-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`endpoint`](#plugins-inputs-sqs-endpoint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`id_field`](#plugins-inputs-sqs-id_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`md5_field`](#plugins-inputs-sqs-md5_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`polling_frequency`](#plugins-inputs-sqs-polling_frequency) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy_uri`](#plugins-inputs-sqs-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`queue`](#plugins-inputs-sqs-queue) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`queue_owner_aws_account_id`](#plugins-inputs-sqs-queue_owner_aws_account_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-inputs-sqs-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_arn`](#plugins-inputs-sqs-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-inputs-sqs-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secret_access_key`](#plugins-inputs-sqs-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sent_timestamp_field`](#plugins-inputs-sqs-sent_timestamp_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-inputs-sqs-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`threads`](#plugins-inputs-sqs-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_aws_bundled_ca`](#plugins-inputs-sqs-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-sqs-common-options) for a list of options supported by all input plugins.
+
+
+
+### `access_key_id` [plugins-inputs-sqs-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `additional_settings` [plugins-inputs-sqs-additional_settings]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Key-value pairs of settings and corresponding values used to parametrize the connection to SQS. See full list in [the AWS SDK documentation](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/SQS/Client.html). Example:
+
+```ruby
+ input {
+ sqs {
+ access_key_id => "1234"
+ secret_access_key => "secret"
+ queue => "logstash-test-queue"
+ additional_settings => {
+ force_path_style => true
+ follow_redirects => false
+ }
+ }
+ }
+```
+
+
+### `aws_credentials_file` [plugins-inputs-sqs-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `endpoint` [plugins-inputs-sqs-endpoint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The endpoint to connect to. By default it is constructed using the value of `region`. This is useful when connecting to SQS-compatible services, but beware that these aren’t guaranteed to work correctly with the AWS SDK.
+
+
+### `id_field` [plugins-inputs-sqs-id_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Name of the event field in which to store the SQS message ID
+
+
+### `md5_field` [plugins-inputs-sqs-md5_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Name of the event field in which to store the SQS message MD5 checksum
+
+
+### `polling_frequency` [plugins-inputs-sqs-polling_frequency]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `20`
+
+Polling frequency, in seconds. The default is 20 seconds.
+
+
+### `proxy_uri` [plugins-inputs-sqs-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+
+### `queue` [plugins-inputs-sqs-queue]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
+
+
+### `queue_owner_aws_account_id` [plugins-inputs-sqs-queue_owner_aws_account_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+ID of the AWS account owning the queue if you want to use a [cross-account queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-basic-examples-of-sqs-policies.html#grant-two-permissions-to-one-account) with embedded policy. Note that the AWS SDK only supports numerical account IDs, not account aliases.
+
+
+### `region` [plugins-inputs-sqs-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `role_arn` [plugins-inputs-sqs-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS IAM Role to assume, if any. This is used to generate temporary credentials, typically for cross-account access. See the [AssumeRole API documentation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) for more information.
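+
+For example, a configuration along these lines (the role ARN is a placeholder) assumes a role in another account before polling the queue:
+
+```ruby
+input {
+  sqs {
+    queue => "Logstash"
+    region => "us-east-1"
+    role_arn => "arn:aws:iam::123456789012:role/logstash-sqs-reader"
+    role_session_name => "logstash"
+  }
+}
+```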
+
+
+### `role_session_name` [plugins-inputs-sqs-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Session name to use when assuming an IAM role.
+
+
+### `secret_access_key` [plugins-inputs-sqs-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `sent_timestamp_field` [plugins-inputs-sqs-sent_timestamp_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Name of the event field in which to store the SQS message Sent Timestamp
+
+
+### `session_token` [plugins-inputs-sqs-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `threads` [plugins-inputs-sqs-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The number of threads to use for polling the SQS queue.
+
+
+### `use_aws_bundled_ca` [plugins-inputs-sqs-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use bundled CA certificates that ship with AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. Windows, you can set this to `true`.
+
+
+
+## Common options [plugins-inputs-sqs-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-sqs-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-sqs-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-sqs-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-sqs-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-sqs-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-sqs-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-sqs-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-sqs-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-sqs-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-sqs-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 sqs inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ sqs {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-sqs-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-sqs-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-stdin.md b/docs/reference/plugins-inputs-stdin.md
new file mode 100644
index 000000000..03a1a6071
--- /dev/null
+++ b/docs/reference/plugins-inputs-stdin.md
@@ -0,0 +1,140 @@
+---
+navigation_title: "stdin"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-stdin.html
+---
+
+# Stdin input plugin [plugins-inputs-stdin]
+
+
+* Plugin version: v3.4.0
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.4.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-stdin-index.md).
+
+## Getting help [_getting_help_54]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-stdin). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_54]
+
+Read events from standard input.
+
+By default, each event is assumed to be one line. If you want to join lines, you’ll want to use the multiline codec.
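+
+For example, a configuration sketch like this one (the pattern is only an example) uses the multiline codec to join indented continuation lines to the previous line:
+
+```ruby
+input {
+  stdin {
+    codec => multiline {
+      pattern => "^\s"
+      what => "previous"
+    }
+  }
+}
+```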
+
+
+## Stdin Input Configuration Options [plugins-inputs-stdin-options]
+
+This plugin supports the following configuration options.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-inputs-stdin-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-stdin-common-options) for a list of options supported by all input plugins.
+
+
+
+### `ecs_compatibility` [plugins-inputs-stdin-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (using `host` field to store host name)
+ * `v1`,`v8`: uses fields that are compatible with Elastic Common Schema (using `[host][hostname]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+
+## Common options [plugins-inputs-stdin-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-stdin-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-stdin-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-stdin-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-stdin-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-stdin-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-stdin-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-stdin-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-stdin-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-stdin-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-stdin-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 stdin inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ stdin {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-stdin-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-stdin-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-stomp.md b/docs/reference/plugins-inputs-stomp.md
new file mode 100644
index 000000000..042f16c81
--- /dev/null
+++ b/docs/reference/plugins-inputs-stomp.md
@@ -0,0 +1,199 @@
+---
+navigation_title: "stomp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-stomp.html
+---
+
+# Stomp input plugin [plugins-inputs-stomp]
+
+
+* Plugin version: v3.0.8
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-stomp-index.md).
+
+## Installation [_installation_16]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-stomp`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_55]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-stomp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_55]
+
+Creates events received with the STOMP protocol.
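+
+As an illustrative sketch only, a configuration that subscribes to a topic on a local broker might look like the following; the host, credentials, and destination are placeholders, and each setting is described below.
+
+```json
+input {
+  stomp {
+    host        => "localhost"
+    port        => 61613
+    user        => "logstash"
+    password    => "secret"
+    destination => "/topic/logstash"
+  }
+}
+```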
+
+
+## Stomp Input Configuration Options [plugins-inputs-stomp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-stomp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`destination`](#plugins-inputs-stomp-destination) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`host`](#plugins-inputs-stomp-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-inputs-stomp-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-inputs-stomp-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect`](#plugins-inputs-stomp-reconnect) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`reconnect_interval`](#plugins-inputs-stomp-reconnect_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-inputs-stomp-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`vhost`](#plugins-inputs-stomp-vhost) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-stomp-common-options) for a list of options supported by all input plugins.
+
+
+
+### `destination` [plugins-inputs-stomp-destination]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The destination to read events from.
+
+Example: `/topic/logstash`
+
+
+### `host` [plugins-inputs-stomp-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address of the STOMP server.
+
+
+### `password` [plugins-inputs-stomp-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `""`
+
+The password to authenticate with.
+
+
+### `port` [plugins-inputs-stomp-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `61613`
+
+The port to connect to on your STOMP server.
+
+
+### `reconnect` [plugins-inputs-stomp-reconnect]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Auto reconnect
+
+
+### `reconnect_interval` [plugins-inputs-stomp-reconnect_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `30`
+
+
+### `user` [plugins-inputs-stomp-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+The username to authenticate with.
+
+
+### `vhost` [plugins-inputs-stomp-vhost]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+The vhost to use
+
+
+
+## Common options [plugins-inputs-stomp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-stomp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-stomp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-stomp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-stomp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-stomp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-stomp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-stomp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-stomp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-stomp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-stomp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 stomp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ stomp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-stomp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-stomp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-syslog.md b/docs/reference/plugins-inputs-syslog.md
new file mode 100644
index 000000000..3cf0a4154
--- /dev/null
+++ b/docs/reference/plugins-inputs-syslog.md
@@ -0,0 +1,264 @@
+---
+navigation_title: "syslog"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-syslog.html
+---
+
+# Syslog input plugin [plugins-inputs-syslog]
+
+
+* Plugin version: v3.7.0
+* Released on: 2023-10-17
+* [Changelog](https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.7.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-syslog-index.md).
+
+## Getting help [_getting_help_56]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-syslog). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_56]
+
+Read syslog messages as events over the network.
+
+This input is a good choice if you already use syslog today. It is also a good choice if you want to receive logs from appliances and network devices where you cannot run your own log collector.
+
+Of course, *syslog* is a very muddy term. By default, this input only supports `RFC3164` syslog with some small modifications. However, some non-standard syslog formats can be read and parsed if a functional `grok_pattern` is provided. The date format is still only allowed to be `RFC3164` style or `ISO8601`.
+
+For more information see the [RFC3164 page](http://www.ietf.org/rfc/rfc3164.txt).
+
+Note: This input will start listeners on both TCP and UDP.
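+
+For example, a minimal sketch that reads syslog messages on a non-privileged port (the port number is illustrative):
+
+```json
+input {
+  syslog {
+    host => "0.0.0.0"
+    port => 5514
+  }
+}
+```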
+
+
+## Syslog Input Configuration Options [plugins-inputs-syslog-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-syslog-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`ecs_compatibility`](#plugins-inputs-syslog-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`facility_labels`](#plugins-inputs-syslog-facility_labels) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`grok_pattern`](#plugins-inputs-syslog-grok_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-syslog-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`locale`](#plugins-inputs-syslog-locale) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-syslog-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy_protocol`](#plugins-inputs-syslog-proxy_protocol) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`severity_labels`](#plugins-inputs-syslog-severity_labels) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`syslog_field`](#plugins-inputs-syslog-syslog_field) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timezone`](#plugins-inputs-syslog-timezone) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_labels`](#plugins-inputs-syslog-use_labels) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-syslog-common-options) for a list of options supported by all input plugins.
+
+
+
+### `ecs_compatibility` [plugins-inputs-syslog-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (for example, `priority` for syslog priority)
+ * `v1`,`v8`: uses fields that are compatible with Elastic Common Schema (for example, `[log][syslog][priority]`)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `facility_labels` [plugins-inputs-syslog-facility_labels]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
+
+Labels for facility levels defined in RFC3164.
+
+You can use this option to override the integer→label mapping for syslog inputs that behave differently than the RFCs.
+
+Provide a zero-indexed array with all of your facility labels *in order*. If a log message contains a facility number with no corresponding entry, the facility_label is not added to the event.
+
+
+### `grok_pattern` [plugins-inputs-syslog-grok_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-inputs-syslog-ecs_compatibility) is enabled:
+
+    * ECS Compatibility disabled: `"<%{POSINT:priority}>%{SYSLOGLINE}"`
+    * ECS Compatibility enabled: `"<%{POSINT:[log][syslog][priority]:int}>%{SYSLOGLINE}"`
+
+
+The default value should read and properly parse syslog lines which are fully compliant with [RFC3164](http://www.ietf.org/rfc/rfc3164.txt).
+
+You can override this value to parse non-standard lines with a valid grok pattern which will parse the received lines. If the line is unable to be parsed, the `_grokparsefailure_sysloginput` tag will be added.
+
+The grok pattern must provide a `timestamp` field. If the `timestamp` field is omitted, or is unable to be parsed as `RFC3164` style or `ISO8601`, a `_dateparsefailure` tag will be added.
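+
+As a sketch of such an override, the hypothetical pattern below keeps the priority and timestamp but captures the remainder of a non-standard line into `message`; note that it still provides the required `timestamp` field.
+
+```json
+input {
+  syslog {
+    port => 5514
+    grok_pattern => "<%{POSINT:priority}>%{SYSLOGTIMESTAMP:timestamp} %{GREEDYDATA:message}"
+  }
+}
+```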
+
+
+### `host` [plugins-inputs-syslog-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+
+### `locale` [plugins-inputs-syslog-locale]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. If not specified, the platform default will be used.
+
+The locale is mostly necessary to be set for parsing month names (pattern with MMM) and weekday names (pattern with EEE).
+
+
+### `port` [plugins-inputs-syslog-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `514`
+
+The port to listen on. Remember that ports less than 1024 (privileged ports) may require root to use.
+
+
+### `proxy_protocol` [plugins-inputs-syslog-proxy_protocol]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Proxy protocol support. Only v1 is supported at this time. For details, see [http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt).
+
+
+### `severity_labels` [plugins-inputs-syslog-severity_labels]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
+
+Labels for severity levels defined in RFC3164.
+
+Provide a zero-indexed array with all of your severity labels *in order*. If a log message contains a severity label with no corresponding entry, the severity_label is not added to the event.
+
+
+### `syslog_field` [plugins-inputs-syslog-syslog_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+Codecs process the data before the rest of the data is parsed. Some codecs, like CEF, put the syslog data into another field after pre-processing the data. Use this option in conjunction with the `grok_pattern` configuration to allow the syslog input plugin to fully parse the syslog data in this case.
+
+```sh
+input {
+ syslog {
+ port => 12345
+ codec => cef
+ syslog_field => "syslog"
+ grok_pattern => "<%{POSINT:priority}>%{SYSLOGTIMESTAMP:timestamp} CUSTOM GROK HERE"
+ }
+}
+```
+
+
+### `timezone` [plugins-inputs-syslog-timezone]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Specify a time zone canonical ID to be used for date parsing. The valid IDs are listed on the [Joda.org available time zones page](http://joda-time.sourceforge.net/timezones.html). This is useful in case the time zone cannot be extracted from the value, and is not the platform default. If this is not specified the platform default will be used. Canonical ID is good as it takes care of daylight saving time for you. For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
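+
+For example, a sketch that parses English month names and treats timestamps without a zone as Pacific time (both values are illustrative):
+
+```json
+input {
+  syslog {
+    port     => 5514
+    locale   => "en"
+    timezone => "America/Los_Angeles"
+  }
+}
+```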
+
+
+### `use_labels` [plugins-inputs-syslog-use_labels]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Use label parsing for severity and facility levels.
+
+
+
+## Common options [plugins-inputs-syslog-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-syslog-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-syslog-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-syslog-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-syslog-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-syslog-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-syslog-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-syslog-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-syslog-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-syslog-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-syslog-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 syslog inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ syslog {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-syslog-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-syslog-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-tcp.md b/docs/reference/plugins-inputs-tcp.md
new file mode 100644
index 000000000..c73a3edad
--- /dev/null
+++ b/docs/reference/plugins-inputs-tcp.md
@@ -0,0 +1,408 @@
+---
+navigation_title: "tcp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-tcp.html
+---
+
+# Tcp input plugin [plugins-inputs-tcp]
+
+
+* Plugin version: v7.0.0
+* Released on: 2025-01-10
+* [Changelog](https://github.com/logstash-plugins/logstash-input-tcp/blob/v7.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-tcp-index.md).
+
+## Getting help [_getting_help_57]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-tcp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_57]
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server, depending on `mode`.
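+
+As a brief sketch, the same plugin can play either role. The client-mode example below connects out to a hypothetical remote server instead of listening locally; the host and port are placeholders.
+
+```json
+input {
+  tcp {
+    mode => "client"
+    host => "logs.example.com"
+    port => 9000
+  }
+}
+```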
+
+### Accepting log4j2 logs [_accepting_log4j2_logs]
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a socket. A log4j2.xml along the following lines — a Socket appender writing a JSON layout, shown here as a representative sketch — accomplishes this task.
+
+Note, you will want to change the `host` and `port` settings in this configuration to match your needs.
+
+```xml
+<Configuration>
+  <Appenders>
+    <Socket name="Socket" host="localhost" port="12345">
+      <JsonLayout compact="true" eventEol="true" />
+    </Socket>
+  </Appenders>
+  <Loggers>
+    <Root level="info">
+      <AppenderRef ref="Socket"/>
+    </Root>
+  </Loggers>
+</Configuration>
+```
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+```
+input {
+ tcp {
+ port => 12345
+ codec => json
+ }
+}
+```
+and add a date filter to take log4j2’s `timeMillis` field and use it as the event timestamp:
+
+```
+filter {
+ date {
+ match => [ "timeMillis", "UNIX_MS" ]
+ }
+}
+```
+
+
+## Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-tcp-ecs_metadata]
+
+In addition to decoding the events, this input will add metadata about the TCP connection itself to each event. This can be helpful when applications are configured to send events directly to this input’s TCP listener without including information about themselves.
+
+Historically, this metadata was added to a variety of non-standard top-level fields, which had the potential to create confusion and schema conflicts downstream. With ECS compatibility mode, we can ensure a pipeline still has access to this metadata throughout the event’s lifecycle without polluting the top-level namespace.
+
+| Metadata Group | ecs: `v1`, `v8` | ecs: `disabled` |
+| --- | --- | --- |
+| Source metadata from the TCP connection on which events are being received, including the sender’s name, ip, and outbound port | `[@metadata][input][tcp][source][name]` | `[host]` |
+| | `[@metadata][input][tcp][source][ip]` | `[@metadata][ip_address]` |
+| | `[@metadata][input][tcp][source][port]` | `[port]` |
+| Proxy metadata from a proxied TCP connection. Available when receiving events by proxy and `proxy_protocol => true` | `[@metadata][input][tcp][proxy][ip]` | `[proxy_host]` |
+| | `[@metadata][input][tcp][proxy][port]` | `[proxy_port]` |
+| SSL subject metadata from a secured TCP connection. Available when `ssl_enabled => true` AND `ssl_client_authentication => 'optional' or 'required'` | `[@metadata][input][tcp][ssl][subject]` | `[sslsubject]` |
+For example, the Elastic Common Schema reserves the [top-level `host` field](ecs://reference/ecs-host.md) for information about the host on which the event happened. If an event is missing this metadata, it can be copied into place from the source TCP connection metadata that has been added to the event:
+
+```txt
+filter {
+ if [@metadata][input][tcp][source] and ![host] {
+ mutate {
+ copy => {
+ "[@metadata][input][tcp][source][name]" => "[host][name]"
+ "[@metadata][input][tcp][source][ip]" => "[host][ip]"
+ }
+ }
+ }
+}
+```
+
+
+## Tcp Input Configuration Options [plugins-inputs-tcp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-tcp-common-options) described later.
+
+::::{note}
+As of version `7.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please see the [TCP Input Obsolete Configuration Options](#plugins-inputs-tcp-obsolete-options) for more details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`dns_reverse_lookup_enabled`](#plugins-inputs-tcp-dns_reverse_lookup_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ecs_compatibility`](#plugins-inputs-tcp-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-tcp-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`mode`](#plugins-inputs-tcp-mode) | [string](/reference/configuration-file-structure.md#string), one of `["server", "client"]` | No |
+| [`port`](#plugins-inputs-tcp-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`proxy_protocol`](#plugins-inputs-tcp-proxy_protocol) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_certificate`](#plugins-inputs-tcp-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-inputs-tcp-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_cipher_suites`](#plugins-inputs-tcp-ssl_cipher_suites) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_client_authentication`](#plugins-inputs-tcp-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-inputs-tcp-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_extra_chain_certs`](#plugins-inputs-tcp-ssl_extra_chain_certs) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_key`](#plugins-inputs-tcp-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-inputs-tcp-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_supported_protocols`](#plugins-inputs-tcp-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-inputs-tcp-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`tcp_keep_alive`](#plugins-inputs-tcp-tcp_keep_alive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-inputs-tcp-common-options) for a list of options supported by all input plugins.
+
+
+
+### `dns_reverse_lookup_enabled` [plugins-inputs-tcp-dns_reverse_lookup_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+It is possible to avoid DNS reverse-lookups by disabling this setting. If disabled, the address metadata that is added to events will contain the source address as specified at the TCP layer, and IPs will not be resolved to hostnames.
+
+
+### `ecs_compatibility` [plugins-inputs-tcp-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured connection metadata added at root level
+ * `v1`,`v8`: structured connection metadata added under `[@metadata][input][tcp]`
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). The value of this setting affects the [placement of a TCP connection’s metadata](#plugins-inputs-tcp-ecs_metadata) on events.
+
+
+### `host` [plugins-inputs-tcp-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on. When mode is `client`, the address to connect to.
+
+
+### `mode` [plugins-inputs-tcp-mode]
+
+* Value can be any of: `server`, `client`
+* Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections, `client` connects to a server.
+
+
+### `port` [plugins-inputs-tcp-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+When mode is `server`, the port to listen on. When mode is `client`, the port to connect to.
+
+
+### `proxy_protocol` [plugins-inputs-tcp-proxy_protocol]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Proxy protocol support. Only v1 is supported at this time. For details, see [http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt](http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt).
+
+
+### `ssl_certificate` [plugins-inputs-tcp-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to certificate in PEM format. This certificate will be presented to the other party in the TLS connection.
+
+
+### `ssl_certificate_authorities` [plugins-inputs-tcp-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificate or certificate chain against these authorities. You can define multiple files or paths. All the certificates will be read and added to the trust store.
+
+
+### `ssl_cipher_suites` [plugins-inputs-tcp-ssl_cipher_suites]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value includes *all* cipher suites enabled by the JDK and depends on JDK configuration
+
+Supported cipher suites vary depending on Java version used, and entries look like `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`. For more information, see Oracle’s [JDK SunJSSE provider documentation](https://docs.oracle.com/en/java/javase/11/security/oracle-providers.html#GUID-7093246A-31A3-4304-AC5F-5FB6400405E2) and the table of supported [Java cipher suite names](https://docs.oracle.com/en/java/javase/11/docs/specs/security/standard-names.html#jsse-cipher-suite-names).
+
+::::{note}
+To check the supported cipher suites locally run the following script: `$LS_HOME/bin/ruby -e 'p javax.net.ssl.SSLServerSocketFactory.getDefault.getSupportedCipherSuites'`.
+::::
+
+
+
+### `ssl_client_authentication` [plugins-inputs-tcp-ssl_client_authentication]
+
+* Value can be any of: `none`, `optional`, `required`
+* Default value is `required`
+
+Controls the server’s behavior in regard to requesting a certificate from client connections: `none` disables client authentication; `required` forces a client to present a certificate; and `optional` requests a client certificate, but the client is not required to present one.
+
+When mutual TLS is enabled (`optional` or `required`), the certificate presented by the client must be signed by trusted [`ssl_certificate_authorities`](#plugins-inputs-tcp-ssl_certificate_authorities) (CAs). Please note that the server does not validate the client certificate CN (Common Name) or SAN (Subject Alternative Name).
+
+::::{note}
+This setting can be used only if [`mode`](#plugins-inputs-tcp-mode) is `server` and [`ssl_certificate_authorities`](#plugins-inputs-tcp-ssl_certificate_authorities) is set.
+::::
+
+
+
+### `ssl_enabled` [plugins-inputs-tcp-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
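+
+A minimal sketch of a TLS-enabled listener that also requires client certificates is shown below; the file paths are placeholders, and the individual settings are described in the sections that follow.
+
+```json
+input {
+  tcp {
+    port => 9000
+    ssl_enabled => true
+    ssl_certificate => "/etc/logstash/tls/server.crt"
+    ssl_key => "/etc/logstash/tls/server.key"
+    ssl_certificate_authorities => ["/etc/logstash/tls/ca.crt"]
+    ssl_client_authentication => "required"
+  }
+}
+```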
+
+
+### `ssl_extra_chain_certs` [plugins-inputs-tcp-ssl_extra_chain_certs]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An array of paths to extra X509 certificates. These are used together with the certificate to construct the certificate chain presented to the client.
+
+
+### `ssl_key` [plugins-inputs-tcp-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path to the private key corresponding to the specified certificate (PEM format).
+
+
+### `ssl_key_passphrase` [plugins-inputs-tcp-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+SSL key passphrase for the private key.
+
+
+### `ssl_supported_protocols` [plugins-inputs-tcp-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a secure connection.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_verification_mode` [plugins-inputs-tcp-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+This setting can be used only if [`mode`](#plugins-inputs-tcp-mode) is `client`.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `tcp_keep_alive` [plugins-inputs-tcp-tcp_keep_alive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Instruct the socket to use TCP keep alive. If it’s `true`, the underlying socket uses the OS default settings for keep alive. If it’s `false`, it doesn’t configure any keep alive setting for the underlying socket.
+
+
+
+## TCP Input Obsolete Configuration Options [plugins-inputs-tcp-obsolete-options]
+
+::::{warning}
+As of version `7.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl_cert | [`ssl_certificate`](#plugins-inputs-tcp-ssl_certificate) |
+| ssl_enable | [`ssl_enabled`](#plugins-inputs-tcp-ssl_enabled) |
+| ssl_verify | [`ssl_client_authentication`](#plugins-inputs-tcp-ssl_client_authentication) in `server` mode and [`ssl_verification_mode`](#plugins-inputs-tcp-ssl_verification_mode) in `client` mode |
+
+
+## Common options [plugins-inputs-tcp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-tcp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-tcp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-tcp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-tcp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-tcp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-tcp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-tcp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-tcp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-tcp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-tcp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 tcp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ tcp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-tcp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-tcp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-twitter.md b/docs/reference/plugins-inputs-twitter.md
new file mode 100644
index 000000000..3451ed5f4
--- /dev/null
+++ b/docs/reference/plugins-inputs-twitter.md
@@ -0,0 +1,341 @@
+---
+navigation_title: "twitter"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-twitter.html
+---
+
+# Twitter input plugin [plugins-inputs-twitter]
+
+
+* Plugin version: v4.1.1
+* Released on: 2023-11-16
+* [Changelog](https://github.com/logstash-plugins/logstash-input-twitter/blob/v4.1.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-twitter-index.md).
+
+## Getting help [_getting_help_58]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-twitter). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_58]
+
+Ingest events from the Twitter Streaming API.
+
+Example:
+
+```ruby
+ input {
+ twitter {
+ consumer_key => '...'
+ consumer_secret => '...'
+ oauth_token => '...'
+ oauth_token_secret => '...'
+ keywords => [ 'logstash' ]
+ }
+ }
+```
+
+Sample event fields generated:
+
+```ruby
+ {
+ "@timestamp" => 2019-09-23T16:41:53.000Z,
+ "message" => "I forgot how fun it is to write @logstash configs !!! Thank you @jordansissel and @elastic !!!"
+ "user" => "missnebun",
+ "in-reply-to" => nil,
+ "retweeted" => false,
+ "source" => "http://twitter.com/missnebun/status/1176174859833004037",
+ "user_mentions" => [
+ { "screen_name"=>"logstash", "name"=>"logstash", "id"=>217155915 },
+ { "screen_name"=>"jordansissel", "name"=>"@jordansissel", "id"=>15782607 },
+ { "screen_name"=>"elastic", "name"=>"Elastic", "id"=>84512601 }],
+ "symbols" => [],
+ "hashtags" => [],
+ "client" => "Twitter for iPhone "
+ }
+```
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-twitter-ecs]
+
+Twitter streams are very specific and do not map easily to the [Elastic Common Schema (ECS)](ecs://reference/index.md). We recommend setting a [`target`](#plugins-inputs-twitter-target) when [ECS compatibility mode](#plugins-inputs-twitter-ecs_compatibility) is enabled. The plugin issues a warning in the log when a `target` isn’t set.
+
+
+## Twitter Input Configuration Options [plugins-inputs-twitter-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-twitter-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`consumer_key`](#plugins-inputs-twitter-consumer_key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`consumer_secret`](#plugins-inputs-twitter-consumer_secret) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`ecs_compatibility`](#plugins-inputs-twitter-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`follows`](#plugins-inputs-twitter-follows) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`full_tweet`](#plugins-inputs-twitter-full_tweet) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ignore_retweets`](#plugins-inputs-twitter-ignore_retweets) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`keywords`](#plugins-inputs-twitter-keywords) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`languages`](#plugins-inputs-twitter-languages) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`locations`](#plugins-inputs-twitter-locations) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`oauth_token`](#plugins-inputs-twitter-oauth_token) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`oauth_token_secret`](#plugins-inputs-twitter-oauth_token_secret) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`proxy_address`](#plugins-inputs-twitter-proxy_address) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_port`](#plugins-inputs-twitter-proxy_port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`rate_limit_reset_in`](#plugins-inputs-twitter-rate_limit_reset_in) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_proxy`](#plugins-inputs-twitter-use_proxy) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_samples`](#plugins-inputs-twitter-use_samples) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`target`](#plugins-inputs-twitter-target) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-twitter-common-options) for a list of options supported by all input plugins.
+
+
+
+### `consumer_key` [plugins-inputs-twitter-consumer_key]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Twitter App’s consumer key
+
+Don’t know what this is? You need to create an "application" on Twitter, see this url: [https://dev.twitter.com/apps/new](https://dev.twitter.com/apps/new)
+
+
+### `consumer_secret` [plugins-inputs-twitter-consumer_secret]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your Twitter App’s consumer secret
+
+If you don’t have one of these, you can create one by registering a new application with Twitter: [https://dev.twitter.com/apps/new](https://dev.twitter.com/apps/new)
+
+
+### `ecs_compatibility` [plugins-inputs-twitter-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not use ECS-compatible field names (fields might be set at the root of the event)
+ * `v1`, `v8`: avoids field names that might conflict with Elastic Common Schema (for example, Twitter specific properties)
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+
+### `follows` [plugins-inputs-twitter-follows]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+A comma separated list of user IDs, indicating the users to return statuses for in the Twitter stream. See [https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/basic-stream-parameters](https://developer.twitter.com/en/docs/tweets/filter-realtime/guides/basic-stream-parameters) for more details.
+
+
+### `full_tweet` [plugins-inputs-twitter-full_tweet]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Record full tweet object as given to us by the Twitter Streaming API.
+
+
+### `ignore_retweets` [plugins-inputs-twitter-ignore_retweets]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Lets you ignore the retweets coming out of the Twitter API. Default ⇒ false
+
+
+### `keywords` [plugins-inputs-twitter-keywords]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Any keywords to track in the Twitter stream. For multiple keywords, use the syntax ["foo", "bar"]. There’s a logical OR between each keyword string listed and a logical AND between words separated by spaces per keyword string. See [https://dev.twitter.com/streaming/overview/request-parameters#track](https://dev.twitter.com/streaming/overview/request-parameters#track) for more details.
+
+The wildcard "*" option is not supported. To ingest a sample stream of all tweets, the use_samples option is recommended.
+
+
+### `languages` [plugins-inputs-twitter-languages]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+A list of BCP 47 language identifiers, corresponding to any of the languages listed on Twitter’s advanced search page. When set, the stream only returns tweets that have been detected as being written in the specified languages.
+
+
+### `locations` [plugins-inputs-twitter-locations]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A comma-separated list of longitude, latitude pairs specifying a set of bounding boxes to filter tweets by. See [https://dev.twitter.com/streaming/overview/request-parameters#locations](https://dev.twitter.com/streaming/overview/request-parameters#locations) for more details.
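+
+As an illustration, a single bounding box is written as two longitude,latitude pairs (southwest corner first, then northeast corner); the coordinates below are an arbitrary example, and the credentials are placeholders.
+
+```json
+input {
+  twitter {
+    consumer_key       => "..."
+    consumer_secret    => "..."
+    oauth_token        => "..."
+    oauth_token_secret => "..."
+    # Southwest corner, then northeast corner (illustrative coordinates)
+    locations          => "-122.75,36.8,-121.75,37.8"
+  }
+}
+```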
+
+
+### `oauth_token` [plugins-inputs-twitter-oauth_token]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your oauth token.
+
+To get this, login to Twitter with whatever account you want, then visit [https://dev.twitter.com/apps](https://dev.twitter.com/apps)
+
+Click on your app (used with the consumer_key and consumer_secret settings). Then, at the bottom of the page, click *Create my access token*, which will create an OAuth token and secret bound to your account and that application.
+
+
+### `oauth_token_secret` [plugins-inputs-twitter-oauth_token_secret]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your oauth token secret.
+
+To get this, login to Twitter with whatever account you want, then visit [https://dev.twitter.com/apps](https://dev.twitter.com/apps)
+
+Click on your app (used with the consumer_key and consumer_secret settings). Then, at the bottom of the page, click *Create my access token*, which will create an OAuth token and secret bound to your account and that application.
+
+
+### `proxy_address` [plugins-inputs-twitter-proxy_address]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"127.0.0.1"`
+
+Location of the proxy. By default, this is the same machine as the one running this Logstash instance.
+
+
+### `proxy_port` [plugins-inputs-twitter-proxy_port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3128`
+
+Port where the proxy is listening, by default 3128 (squid)
+
+
+### `rate_limit_reset_in` [plugins-inputs-twitter-rate_limit_reset_in]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300`
+
+Duration in seconds to wait before retrying a connection when Twitter responds with a 429 TooManyRequests. In some cases the *x-rate-limit-reset* header is not set in the response and `.rate_limit.reset_in` is nil. If this occurs, we use the integer specified here. The default is 5 minutes.
+
+
+### `use_proxy` [plugins-inputs-twitter-use_proxy]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Whether to use a proxy to handle the connections.
+
+
+### `use_samples` [plugins-inputs-twitter-use_samples]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Returns a small random sample of all public statuses. The tweets returned by the default access level are the same, so if two different clients connect to this endpoint, they will see the same tweets. If set to true, the keywords, follows, locations, and languages options will be ignored. Default ⇒ false
+
+
+### `target` [plugins-inputs-twitter-target]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Without a `target`, events are created from tweets at the root level. When the `target` is set to a field reference, the tweet data is placed in the target field instead.
+
+This option can be useful to avoid populating unknown fields when a downstream schema such as ECS is enforced.
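+
+For example, a sketch that nests all tweet data under a `[tweet]` field so that unknown fields do not land at the root of an ECS-constrained event; the target field name is arbitrary, and the credentials are placeholders.
+
+```json
+input {
+  twitter {
+    consumer_key       => "..."
+    consumer_secret    => "..."
+    oauth_token        => "..."
+    oauth_token_secret => "..."
+    keywords           => [ "logstash" ]
+    ecs_compatibility  => "v8"
+    target             => "[tweet]"
+  }
+}
+```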
+
+
+
+## Common options [plugins-inputs-twitter-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-twitter-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-twitter-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-twitter-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-twitter-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-twitter-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-twitter-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-twitter-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-twitter-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-twitter-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-twitter-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 twitter inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ twitter {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-twitter-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-twitter-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
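+
+As a minimal sketch of filter activation by type (the type value and tag are illustrative), an event typed at the input can be matched by a conditional later in the pipeline:
+
+```ruby
+input {
+  twitter {
+    type => "tweet"
+  }
+}
+
+filter {
+  if [type] == "tweet" {
+    mutate {
+      add_tag => ["from_twitter"]    # only events from this input are tagged
+    }
+  }
+}
+```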
+
+
+
diff --git a/docs/reference/plugins-inputs-udp.md b/docs/reference/plugins-inputs-udp.md
new file mode 100644
index 000000000..2c88d8fd1
--- /dev/null
+++ b/docs/reference/plugins-inputs-udp.md
@@ -0,0 +1,225 @@
+---
+navigation_title: "udp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-udp.html
+---
+
+# Udp input plugin [plugins-inputs-udp]
+
+
+* Plugin version: v3.5.0
+* Released on: 2021-08-04
+* [Changelog](https://github.com/logstash-plugins/logstash-input-udp/blob/v3.5.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-udp-index.md).
+
+## Getting help [_getting_help_59]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-udp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_59]
+
+Read messages as events over the network via UDP. The only required configuration item is `port`, which specifies the UDP port Logstash will listen on for event streams.
+
+### Event Metadata and the Elastic Common Schema (ECS) [plugins-inputs-udp-ecs_metadata]
+
+This plugin adds a field containing the source IP address of the UDP packet. By default, the IP address is stored in the `host` field. When [Elastic Common Schema (ECS)](ecs://reference/index.md) is enabled (via [`ecs_compatibility`](#plugins-inputs-udp-ecs_compatibility)), the source IP address is stored in the `[host][ip]` field.
+
+You can customize the field name using the [`source_ip_fieldname`](#plugins-inputs-udp-source_ip_fieldname) setting. See [`ecs_compatibility`](#plugins-inputs-udp-ecs_compatibility) for more information.
+
+
+
+## Udp Input Configuration Options [plugins-inputs-udp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-udp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`buffer_size`](#plugins-inputs-udp-buffer_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-udp-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-inputs-udp-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-inputs-udp-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`queue_size`](#plugins-inputs-udp-queue_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`receive_buffer_bytes`](#plugins-inputs-udp-receive_buffer_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`source_ip_fieldname`](#plugins-inputs-udp-source_ip_fieldname) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`workers`](#plugins-inputs-udp-workers) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-udp-common-options) for a list of options supported by all input plugins.
+
+
+
+### `buffer_size` [plugins-inputs-udp-buffer_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `65536`
+
+The maximum packet size to read from the network
+
+
+### `ecs_compatibility` [plugins-inputs-udp-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: unstructured connection metadata added at root level
+ * `v1`: structured connection metadata added under ECS compliant namespaces
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md).
+
+The value of this setting affects the placement of a UDP packet’s metadata on events.
+
+| `disabled` | `v1` | Availability | Description |
+| --- | --- | --- | --- |
+| `host` | `[host][ip]` | *Always* | *Source IP of UDP packet* |
+
+
+### `host` [plugins-inputs-udp-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address on which Logstash will listen.
+
+
+### `port` [plugins-inputs-udp-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port on which Logstash will listen. Remember that ports less than 1024 (privileged ports) may require root or elevated privileges to use.
+
+
+### `queue_size` [plugins-inputs-udp-queue_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2000`
+
+The number of unprocessed UDP packets that can be held in memory before new packets start to drop.
+
+
+### `receive_buffer_bytes` [plugins-inputs-udp-receive_buffer_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The socket receive buffer size in bytes. If this option is not set, the operating system default is used. The operating system will use the maximum allowed value if `receive_buffer_bytes` is larger than allowed. Consult your operating system documentation if you need to increase this maximum.
+
+
+### `source_ip_fieldname` [plugins-inputs-udp-source_ip_fieldname]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value could be `"host"` or `[host][ip]` depending on the value of [`ecs_compatibility`](#plugins-inputs-udp-ecs_compatibility)
+
+The name of the field where the source IP address will be stored. See [Event Metadata and the Elastic Common Schema (ECS)](#plugins-inputs-udp-ecs_metadata) for more information on how ECS compatibility settings affect these defaults.
+
+Example:
+
+```ruby
+ input {
+ udp {
+ source_ip_fieldname => "[appliance][monitoring][ip]"
+ }
+ }
+```
+
+
+### `workers` [plugins-inputs-udp-workers]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Number of threads processing packets
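+
+The sizing options above interact: `buffer_size` bounds the largest packet read, `queue_size` bounds how many packets wait in memory, and `workers` controls how many threads drain that queue. The sketch below is a starting point only; the port and numbers are illustrative, not recommendations:
+
+```ruby
+input {
+  udp {
+    port => 5144                       # illustrative; ports below 1024 need elevated privileges
+    workers => 4                       # threads processing packets from the queue
+    queue_size => 10000                # unprocessed packets held in memory before dropping
+    buffer_size => 65536               # maximum packet size to read (the default)
+    receive_buffer_bytes => 16777216   # request a larger OS socket receive buffer
+  }
+}
+```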
+
+
+
+## Common options [plugins-inputs-udp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-udp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-udp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-udp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-udp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-udp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-udp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-udp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-udp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-udp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-udp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 udp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ udp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-udp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-udp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-unix.md b/docs/reference/plugins-inputs-unix.md
new file mode 100644
index 000000000..e31618bac
--- /dev/null
+++ b/docs/reference/plugins-inputs-unix.md
@@ -0,0 +1,223 @@
+---
+navigation_title: "unix"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-unix.html
+---
+
+# Unix input plugin [plugins-inputs-unix]
+
+
+* Plugin version: v3.1.2
+* Released on: 2022-10-03
+* [Changelog](https://github.com/logstash-plugins/logstash-input-unix/blob/v3.1.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-unix-index.md).
+
+## Getting help [_getting_help_60]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-unix). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_60]
+
+Read events over a UNIX socket.
+
+Like `stdin` and `file` inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server, depending on `mode`.
+
+
+## Compatibility with the Elastic Common Schema (ECS) [plugins-inputs-unix-ecs]
+
+This plugin adds extra fields about the event’s source. Configure the [`ecs_compatibility`](#plugins-inputs-unix-ecs_compatibility) option if you want to ensure that these fields are compatible with [ECS](ecs://reference/index.md).
+
+These fields are added after the event has been decoded by the appropriate codec, and will not overwrite existing values.
+
+| ECS Disabled | ECS v1, v8 | Description |
+| --- | --- | --- |
+| `host` | `[host][name]` | The name of the {{ls}} host that processed the event |
+| `path` | `[file][path]` | The socket path configured in the plugin |
+
+
+## Unix Input Configuration Options [plugins-inputs-unix-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-unix-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`data_timeout`](#plugins-inputs-unix-data_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ecs_compatibility`](#plugins-inputs-unix-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`force_unlink`](#plugins-inputs-unix-force_unlink) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`mode`](#plugins-inputs-unix-mode) | [string](/reference/configuration-file-structure.md#string), one of `["server", "client"]` | No |
+| [`path`](#plugins-inputs-unix-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`socket_not_present_retry_interval_seconds`](#plugins-inputs-unix-socket_not_present_retry_interval_seconds) | [number](/reference/configuration-file-structure.md#number) | Yes |
+
+Also see [Common options](#plugins-inputs-unix-common-options) for a list of options supported by all input plugins.
+
+
+
+### `data_timeout` [plugins-inputs-unix-data_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+The *read* timeout in seconds. If a particular connection is idle for more than this timeout period, we will assume it is dead and close it.
+
+If you never want to time out, use `-1`.
+
+
+### `ecs_compatibility` [plugins-inputs-unix-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: uses backwards compatible field names, such as `[host]`
+ * `v1`, `v8`: uses fields that are compatible with ECS, such as `[host][name]`
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md). See [Compatibility with the Elastic Common Schema (ECS)](#plugins-inputs-unix-ecs) for detailed information.
+
+**Sample output: ECS enabled**
+
+```ruby
+{
+ "@timestamp" => 2021-11-16T13:20:06.308Z,
+ "file" => {
+ "path" => "/tmp/sock41299"
+ },
+ "host" => {
+ "name" => "deus-ex-machina"
+ },
+ "message" => "foo"
+}
+```
+
+**Sample output: ECS disabled**
+
+```ruby
+{
+ "@timestamp" => 2021-11-16T13:20:06.308Z,
+ "path" => "/tmp/sock41299",
+ "host" => "deus-ex-machina",
+ "message" => "foo"
+}
+```
+
+
+### `force_unlink` [plugins-inputs-unix-force_unlink]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Remove the socket file in case of an `EADDRINUSE` failure.
+
+
+### `mode` [plugins-inputs-unix-mode]
+
+* Value can be any of: `server`, `client`
+* Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections, `client` connects to a server.
+
+
+### `path` [plugins-inputs-unix-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+When mode is `server`, the path to listen on. When mode is `client`, the path to connect to.
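+
+For example, a minimal sketch of a listening (server-mode) configuration; the socket path is an illustrative assumption, not a convention required by the plugin:
+
+```ruby
+input {
+  unix {
+    mode => "server"                          # listen for client connections (the default)
+    path => "/var/run/logstash/events.sock"   # socket to create and listen on
+    force_unlink => true                      # remove a stale socket file from a previous run
+  }
+}
+```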
+
+
+### `socket_not_present_retry_interval_seconds` [plugins-inputs-unix-socket_not_present_retry_interval_seconds]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Amount of time in seconds to wait if the socket file is not present, before retrying. Only positive values are allowed.
+
+This setting is only used if `mode` is `client`.
+
+
+
+## Common options [plugins-inputs-unix-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-unix-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-unix-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-unix-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-unix-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-unix-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-unix-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-unix-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-unix-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-unix-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-unix-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 unix inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ unix {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-unix-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-unix-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
diff --git a/docs/reference/plugins-inputs-varnishlog.md b/docs/reference/plugins-inputs-varnishlog.md
new file mode 100644
index 000000000..5e2e584b8
--- /dev/null
+++ b/docs/reference/plugins-inputs-varnishlog.md
@@ -0,0 +1,132 @@
+---
+navigation_title: "varnishlog"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-varnishlog.html
+---
+
+# Varnishlog input plugin [plugins-inputs-varnishlog]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-varnishlog-index.md).
+
+## Installation [_installation_17]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-varnishlog`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_61]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-varnishlog). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_61]
+
+Reads from the Varnish Cache shared memory log.
+
+
+## Varnishlog Input Configuration Options [plugins-inputs-varnishlog-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-varnishlog-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`threads`](#plugins-inputs-varnishlog-threads) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-inputs-varnishlog-common-options) for a list of options supported by all input plugins.
+
+
+
+### `threads` [plugins-inputs-varnishlog-threads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
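+
+A minimal configuration sketch, assuming the plugin is installed and Logstash runs on the host where the Varnish shared memory log is accessible:
+
+```ruby
+input {
+  varnishlog {
+    threads => 2    # illustrative; the default is 1
+  }
+}
+```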
+
+
+
+## Common options [plugins-inputs-varnishlog-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-varnishlog-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-varnishlog-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-varnishlog-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-varnishlog-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-varnishlog-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-varnishlog-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-varnishlog-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-varnishlog-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-varnishlog-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-varnishlog-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 varnishlog inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ varnishlog {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-varnishlog-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-varnishlog-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-websocket.md b/docs/reference/plugins-inputs-websocket.md
new file mode 100644
index 000000000..e131e4511
--- /dev/null
+++ b/docs/reference/plugins-inputs-websocket.md
@@ -0,0 +1,144 @@
+---
+navigation_title: "websocket"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-websocket.html
+---
+
+# Websocket input plugin [plugins-inputs-websocket]
+
+
+* Plugin version: v4.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-websocket-index.md).
+
+## Installation [_installation_18]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-websocket`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_62]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-websocket). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_62]
+
+Read events over the websocket protocol.
+
+
+## Websocket Input Configuration Options [plugins-inputs-websocket-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-websocket-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`mode`](#plugins-inputs-websocket-mode) | [string](/reference/configuration-file-structure.md#string), one of `["client"]` | No |
+| [`url`](#plugins-inputs-websocket-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-inputs-websocket-common-options) for a list of options supported by all input plugins.
+
+
+
+### `mode` [plugins-inputs-websocket-mode]
+
+* Value can be any of: `client`
+* Default value is `"client"`
+
+Select the plugin’s mode of operation. Right now only client mode is supported, i.e. this plugin connects to a websocket server and receives events from the server as websocket messages.
+
+
+### `url` [plugins-inputs-websocket-url]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The URL to connect to.
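+
+For example, a minimal sketch (the URL is an illustrative placeholder) that connects to a websocket server and decodes each message with the default `json` codec:
+
+```ruby
+input {
+  websocket {
+    url => "ws://127.0.0.1:3000/"   # illustrative endpoint
+    mode => "client"                # currently the only supported mode
+  }
+}
+```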
+
+
+
+## Common options [plugins-inputs-websocket-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-websocket-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-websocket-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-websocket-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-websocket-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-websocket-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-websocket-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-websocket-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-websocket-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-websocket-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-websocket-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 websocket inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ websocket {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-websocket-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-websocket-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-wmi.md b/docs/reference/plugins-inputs-wmi.md
new file mode 100644
index 000000000..9a51b364e
--- /dev/null
+++ b/docs/reference/plugins-inputs-wmi.md
@@ -0,0 +1,202 @@
+---
+navigation_title: "wmi"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-wmi.html
+---
+
+# Wmi input plugin [plugins-inputs-wmi]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-wmi-index.md).
+
+## Installation [_installation_19]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-wmi`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_63]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-wmi). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_63]
+
+Collects data from a WMI query.
+
+This is useful for collecting performance metrics and other data that is accessible via WMI on a Windows host.
+
+Example:
+
+```ruby
+ input {
+ wmi {
+ query => "select * from Win32_Process"
+ interval => 10
+ }
+ wmi {
+ query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'"
+ }
+ wmi { # Connect to a remote host
+ query => "select * from Win32_Process"
+ host => "MyRemoteHost"
+ user => "mydomain\myuser"
+ password => "Password"
+ }
+ }
+```
+
+
+## Wmi Input Configuration Options [plugins-inputs-wmi-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-wmi-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-wmi-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`interval`](#plugins-inputs-wmi-interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`namespace`](#plugins-inputs-wmi-namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-wmi-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`query`](#plugins-inputs-wmi-query) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`user`](#plugins-inputs-wmi-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-inputs-wmi-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-wmi-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+Host to connect to. Defaults to `localhost`.
+
+
+### `interval` [plugins-inputs-wmi-interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Polling interval
+
+
+### `namespace` [plugins-inputs-wmi-namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"root\\cimv2"`
+
+Namespace when doing remote connections
+
+
+### `password` [plugins-inputs-wmi-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password when doing remote connections
+
+
+### `query` [plugins-inputs-wmi-query]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+WMI query
+
+
+### `user` [plugins-inputs-wmi-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username when doing remote connections
+
+
+
+## Common options [plugins-inputs-wmi-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-wmi-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-wmi-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-wmi-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-wmi-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-wmi-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-wmi-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-wmi-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-wmi-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-wmi-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-wmi-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 wmi inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ wmi {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-wmi-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-wmi-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-inputs-xmpp.md b/docs/reference/plugins-inputs-xmpp.md
new file mode 100644
index 000000000..c603bacf6
--- /dev/null
+++ b/docs/reference/plugins-inputs-xmpp.md
@@ -0,0 +1,165 @@
+---
+navigation_title: "xmpp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-inputs-xmpp.html
+---
+
+# Xmpp input plugin [plugins-inputs-xmpp]
+
+
+* Plugin version: v3.1.7
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/input-xmpp-index.md).
+
+## Installation [_installation_20]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-input-xmpp`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_64]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-input-xmpp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_64]
+
+This input allows you to receive events over XMPP/Jabber.
+
+This plugin can be used for accepting events from humans or applications over XMPP, or you can use it for PubSub or general message passing from one Logstash instance to another.
+
+
+## Xmpp Input Configuration Options [plugins-inputs-xmpp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-inputs-xmpp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-inputs-xmpp-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-inputs-xmpp-password) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`rooms`](#plugins-inputs-xmpp-rooms) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`user`](#plugins-inputs-xmpp-user) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-inputs-xmpp-common-options) for a list of options supported by all input plugins.
+
+
+
+### `host` [plugins-inputs-xmpp-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting, the host from the user/identity is used (`foo.com` for `user@foo.com`).
+
+
+### `password` [plugins-inputs-xmpp-password]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+
+### `rooms` [plugins-inputs-xmpp-rooms]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that you want to join: `room@conference.domain/nick`.
+
+
+### `user` [plugins-inputs-xmpp-user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
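+
+A configuration sketch; the identity, password variable, and room are illustrative placeholders:
+
+```ruby
+input {
+  xmpp {
+    user => "logstash@example.com"
+    password => "${XMPP_PASSWORD}"                       # resolved from an environment variable
+    rooms => ["logs@conference.example.com/logstash"]    # only needed for multi-user chat
+  }
+}
+```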
+
+
+
+## Common options [plugins-inputs-xmpp-common-options]
+
+These configuration options are supported by all input plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`add_field`](#plugins-inputs-xmpp-add_field) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`codec`](#plugins-inputs-xmpp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-inputs-xmpp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-inputs-xmpp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`tags`](#plugins-inputs-xmpp-tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`type`](#plugins-inputs-xmpp-type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `add_field` [plugins-inputs-xmpp-add_field]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add a field to an event
+
+
+### `codec` [plugins-inputs-xmpp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-inputs-xmpp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-inputs-xmpp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 xmpp inputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+input {
+ xmpp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+### `tags` [plugins-inputs-xmpp-tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Add any number of arbitrary tags to your event.
+
+This can help with processing later.
+
+
+### `type` [plugins-inputs-xmpp-type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a `type` field to all events handled by this input.
+
+Types are used mainly for filter activation.
+
+The type is stored as part of the event itself, so you can also use the type to search for it in Kibana.
+
+If you try to set a type on an event that already has one (for example when you send an event from a shipper to an indexer) then a new input will not override the existing type. A type set at the shipper stays with that event for its life even when sent to another Logstash server.
+
+
+
diff --git a/docs/reference/plugins-integrations-aws.md b/docs/reference/plugins-integrations-aws.md
new file mode 100644
index 000000000..ffa19cd60
--- /dev/null
+++ b/docs/reference/plugins-integrations-aws.md
@@ -0,0 +1,35 @@
+---
+navigation_title: "aws"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-aws.html
+---
+
+# AWS Integration Plugin [plugins-integrations-aws]
+
+
+* Plugin version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-aws-index.md).
+
+## Getting help [_getting_help]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_2]
+
+The AWS Integration Plugin provides integrated plugins for working with Amazon Web Services:
+
+* [Cloudfront Codec Plugin](/reference/plugins-codecs-cloudfront.md)
+* [Cloudtrail Codec Plugin](/reference/plugins-codecs-cloudtrail.md)
+* [Cloudwatch Input Plugin](/reference/plugins-inputs-cloudwatch.md)
+* [S3 Input Plugin](/reference/plugins-inputs-s3.md)
+* [Sqs Input Plugin](/reference/plugins-inputs-sqs.md)
+* [Cloudwatch Output Plugin](/reference/plugins-outputs-cloudwatch.md)
+* [S3 Output Plugin](/reference/plugins-outputs-s3.md)
+* [Sns Output Plugin](/reference/plugins-outputs-sns.md)
+* [Sqs Output Plugin](/reference/plugins-outputs-sqs.md)
+
+
diff --git a/docs/reference/plugins-integrations-elastic_enterprise_search.md b/docs/reference/plugins-integrations-elastic_enterprise_search.md
new file mode 100644
index 000000000..bd4a791f2
--- /dev/null
+++ b/docs/reference/plugins-integrations-elastic_enterprise_search.md
@@ -0,0 +1,27 @@
+---
+navigation_title: "elastic_enterprise_search"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-elastic_enterprise_search.html
+---
+
+# Elastic Enterprise Search integration plugin [plugins-integrations-elastic_enterprise_search]
+
+* Plugin version: v3.0.0
+* Released on: 2023-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search/blob/v3.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-elastic_enterprise_search-index.md).
+
+## Getting help [_getting_help_2]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_3]
+
+The Elastic Enterprise Search integration plugin provides integrated plugins for working with [Elastic App Search](https://www.elastic.co/app-search) and [Elastic Workplace Search](https://www.elastic.co/workplace-search) services:
+
+* [Elastic App Search output plugin](/reference/plugins-outputs-elastic_app_search.md)
+* [Elastic Workplace Search output plugin](/reference/plugins-outputs-elastic_workplace_search.md)
+
+
diff --git a/docs/reference/plugins-integrations-jdbc.md b/docs/reference/plugins-integrations-jdbc.md
new file mode 100644
index 000000000..c97ed7b71
--- /dev/null
+++ b/docs/reference/plugins-integrations-jdbc.md
@@ -0,0 +1,29 @@
+---
+navigation_title: "jdbc"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-jdbc.html
+---
+
+# JDBC Integration Plugin [plugins-integrations-jdbc]
+
+
+* Plugin version: v5.5.2
+* Released on: 2024-12-23
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-jdbc/blob/v5.5.2/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-jdbc-index.md).
+
+## Getting help [_getting_help_3]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-jdbc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_4]
+
+The JDBC Integration Plugin provides integrated plugins for working with databases that provide JDBC drivers:
+
+* [JDBC Input Plugin](/reference/plugins-inputs-jdbc.md)
+* [JDBC Static Filter Plugin](/reference/plugins-filters-jdbc_static.md)
+* [JDBC Streaming Filter Plugin](/reference/plugins-filters-jdbc_streaming.md)
+
+
diff --git a/docs/reference/plugins-integrations-kafka.md b/docs/reference/plugins-integrations-kafka.md
new file mode 100644
index 000000000..755388707
--- /dev/null
+++ b/docs/reference/plugins-integrations-kafka.md
@@ -0,0 +1,30 @@
+---
+navigation_title: "kafka"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-kafka.html
+---
+
+# Kafka Integration Plugin [plugins-integrations-kafka]
+
+
+* Plugin version: v11.6.0
+* Released on: 2025-01-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-kafka-index.md).
+
+## Getting help [_getting_help_4]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-kafka). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_5]
+
+The Kafka Integration Plugin provides integrated plugins for working with the [Kafka](https://kafka.apache.org/) distributed streaming platform.
+
+* [Kafka Input Plugin](/reference/plugins-inputs-kafka.md)
+* [Kafka Output Plugin](/reference/plugins-outputs-kafka.md)
+
+This plugin uses Kafka Client 3.8.1. For broker compatibility, see the official [Kafka compatibility reference](https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix). If the linked compatibility wiki is not up-to-date, please contact Kafka support/community to confirm compatibility.
+
+
diff --git a/docs/reference/plugins-integrations-logstash.md b/docs/reference/plugins-integrations-logstash.md
new file mode 100644
index 000000000..aba4ead1a
--- /dev/null
+++ b/docs/reference/plugins-integrations-logstash.md
@@ -0,0 +1,77 @@
+---
+navigation_title: "logstash"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-logstash.html
+---
+
+# Logstash Integration Plugin [plugins-integrations-logstash]
+
+
+* Plugin version: v1.0.4
+* Released on: 2024-12-10
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-logstash/blob/v1.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-logstash-index.md).
+
+## Getting help [_getting_help_5]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-logstash). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_6]
+
+The Logstash Integration Plugin provides integrated plugins for sending events from one Logstash instance to one or more other instances:
+
+* [Logstash output plugin](/reference/plugins-outputs-logstash.md)
+* [Logstash input plugin](/reference/plugins-inputs-logstash.md)
+
+### High-level concepts [plugins-integrations-logstash-concepts]
+
+You can configure a `logstash` output to send events to one or more `logstash` inputs, which are each in another pipeline that is running in different processes or on a different host.
+
+To do so, you should first configure the downstream pipeline with a `logstash` input plugin, bound to an available port so that it can listen for inbound connections. Security is enabled by default, so you will need to either provide identity material or disable SSL.
+
+::::{note}
+You will need a TCP route from the upstream pipeline to the interface that the downstream pipeline is bound to.
+::::
+
+
+```
+input {
+ logstash {
+ port => 9800
+
+ # SSL IDENTITY <1>
+ ssl_keystore_path => "/path/to/identity.p12"
+ ssl_keystore_password => "${SSL_IDENTITY_PASSWORD}"
+ }
+}
+```
+
+1. Identity material should typically include identity claims about the hostnames and IP addresses that will be used by upstream output plugins.
+
+
+Once the downstream pipeline is configured and running, you may send events from any number of upstream pipelines by adding a `logstash` output plugin that points to the downstream input. You may need to configure SSL to trust the certificates presented by the downstream input plugin.
+
+```
+output {
+ logstash {
+ hosts => ["10.0.0.123:9800", "10.0.0.125:9801"]
+
+ # SSL TRUST <1>
+ ssl_truststore_path => "/path/to/truststore.p12"
+ ssl_truststore_password => "${SSL_TRUST_PASSWORD}"
+ }
+}
+```
+
+1. Unless SSL is disabled or the downstream input is expected to present certificates signed by globally-trusted authorities, you will likely need to provide a source-of-trust.
+
+
+
+
+## Load Balancing [plugins-integrations-logstash-load-balancing]
+
+When a `logstash` output is configured to send to multiple `hosts`, it distributes events in batches to *all* of those downstream hosts fairly, favoring those without recent errors. This increases the likelihood of each batch being routed to a downstream that is up and has capacity to receive events.
+
+
diff --git a/docs/reference/plugins-integrations-rabbitmq.md b/docs/reference/plugins-integrations-rabbitmq.md
new file mode 100644
index 000000000..d17b46c21
--- /dev/null
+++ b/docs/reference/plugins-integrations-rabbitmq.md
@@ -0,0 +1,28 @@
+---
+navigation_title: "rabbitmq"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-rabbitmq.html
+---
+
+# RabbitMQ Integration Plugin [plugins-integrations-rabbitmq]
+
+
+* Plugin version: v7.4.0
+* Released on: 2024-09-16
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-rabbitmq/blob/v7.4.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-rabbitmq-index.md).
+
+## Getting help [_getting_help_6]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-rabbitmq). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_7]
+
+The RabbitMQ Integration Plugin provides integrated plugins for working with [RabbitMQ](http://www.rabbitmq.com/).
+
+* [RabbitMQ Input Plugin](/reference/plugins-inputs-rabbitmq.md)
+* [RabbitMQ Output Plugin](/reference/plugins-outputs-rabbitmq.md)
+
+
diff --git a/docs/reference/plugins-integrations-snmp.md b/docs/reference/plugins-integrations-snmp.md
new file mode 100644
index 000000000..6141e73b5
--- /dev/null
+++ b/docs/reference/plugins-integrations-snmp.md
@@ -0,0 +1,168 @@
+---
+navigation_title: "snmp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-integrations-snmp.html
+---
+
+# SNMP Integration Plugin [plugins-integrations-snmp]
+
+
+* Plugin version: v4.0.5
+* Released on: 2025-01-06
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-snmp/blob/v4.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/integration-snmp-index.md).
+
+## Getting help [_getting_help_7]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-snmp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+:::::{admonition} Announcing the new SNMP integration plugin
+The new `logstash-integration-snmp` plugin is available and bundled with {{ls}} 8.15.0 (and later) by default. This plugin combines our classic `logstash-input-snmp` and `logstash-input-snmptrap` plugins into a single Ruby gem at v4.0.0 and later. Earlier versions of the stand-alone plugins that were bundled with {{ls}} by default will be replaced by the 4.0.0+ version contained in this new integration.
+
+::::{important}
+Before you upgrade to {{ls}} 8.15.0, which includes this new integration by default, be aware of [behavioral and mapping differences](#plugins-integrations-snmp-migration) between the stand-alone plugins and the new versions included in `integration-snmp`. If you need to maintain current mappings for the `input-snmptrap` plugin, you have options to [preserve existing behavior](#plugins-integrations-snmp-input-snmptrap-compat).
+::::
+
+
+:::::
+
+
+
+## Description [_description_8]
+
+The SNMP integration plugin includes:
+
+* [SNMP input plugin](/reference/plugins-inputs-snmp.md)
+* [Snmptrap input plugin](/reference/plugins-inputs-snmptrap.md)
+
+The new `logstash-integration-snmp` plugin combines the `logstash-input-snmp` and `logstash-input-snmptrap` plugins into one integrated plugin that encompasses the capabilities of both. This integrated plugin package provides better alignment in SNMP processing, better resource management, easier package maintenance, and a smaller installation footprint.
+
+In this section, we’ll cover:
+
+* [Migrating to `logstash-integration-snmp` from individual plugins](#plugins-integrations-snmp-migration)
+* [Importing MIBs](#plugins-integrations-snmp-import-mibs)
+
+
+## Migrating to `logstash-integration-snmp` from individual plugins [plugins-integrations-snmp-migration]
+
+You’ll retain and expand the functionality of existing stand-alone plugins, but in a more compact, integrated package. In this section, we’ll note mapping and behavioral changes, and explain how to preserve current behavior if needed.
+
+### Migration notes: `logstash-input-snmp` [plugins-integrations-snmp-migration-input-snmp]
+
+As a component of the new `logstash-integration-snmp` plugin, the `logstash-input-snmp` plugin offers the same capabilities as the stand-alone [logstash-input-snmp](https://github.com/logstash-plugins/logstash-input-snmp).
+
+You might need to address some behavior changes depending on your use case and how the ingested data is being handled through the pipeline.
+
+#### Changes to mapping and error logging: `logstash-input-snmp` [plugins-integrations-snmp-input-snmp-mapping]
+
+* **No such instance errors** are mapped as `error: no such instance currently exists at this OID string` instead of `noSuchInstance`.
+* **No such object errors** are mapped as `error: no such object currently exists at this OID string` instead of `noSuchObject`.
+* **End of MIB view errors** are mapped as `error: end of MIB view` instead of `endOfMibView`.
+* An **unknown variable type** falls back to the `string` representation instead of logging an error as it did with the stand-alone `logstash-input-snmp`. This change should not affect existing pipelines, unless they have custom error handlers that rely on specific error messages.
+
+
+
+### Migration notes: `logstash-input-snmptrap` [plugins-integrations-snmp-migration-input-snmptrap]
+
+As a component of the new `logstash-integration-snmp` plugin, the `logstash-input-snmptrap` plugin offers *almost the same capabilities* as the stand-alone [logstash-input-snmptrap](https://github.com/logstash-plugins/logstash-input-snmptrap) plugin.
+
+You might need to address some behavior changes depending on your use case and how the ingested data is being handled through the pipeline.
+
+#### Changes to mapping and error logging: `logstash-input-snmptrap` [plugins-integrations-snmp-input-snmptrap-mapping]
+
+* The **PDU variable bindings** are mapped into the {{ls}} event using the defined data type. By default, the stand-alone `logstash-input-snmptrap` plugin converts all of the data to `string`, ignoring the original type. If this behavior is not what you want, you can use a filter to retain the original type.
+* **SNMP `TimeTicks` variables** are mapped as `Long` timestamps instead of a formatted date string (`%d days, %02d:%02d:%02d.%02d`).
+* **`null` variable values** are mapped using the string `null` instead of `Null` (upper-case N).
+* **No such instance errors** are mapped as `error: no such instance currently exists at this OID string` instead of `noSuchInstance`.
+* **No such object errors** are mapped as `error: no such object currently exists at this OID string` instead of `noSuchObject`.
+* **End of MIB view errors** are mapped as `error: end of MIB view` instead of `endOfMibView`.
+* The previous generation (stand-alone) input-snmptrap plugin formatted the **`message` field** as a ruby-snmp `SNMP::SNMPv1_Trap` object representation.
+
+ ```sh
+ ], @timestamp=#, @generic_trap=6, @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#, @specific_trap=99>
+ ```
+
+ The new integrated `input-snmptrap` plugin uses JSON to format **`message` field**.
+
+ ```json
+ {"error_index":0, "variable_bindings":{"1.3.6.1.6.3.1.1.4.1.0":"SNMPv2-MIB::coldStart", "1.3.6.1.2.1.1.3.0":0}, "error_status":0, "type":"TRAP", "error_status_text":"Success", "community":"public", "version":"2c", "request_id":1436216872}
+ ```
+
+
+
+#### Maintain maximum compatibility with previous implementation [plugins-integrations-snmp-input-snmptrap-compat]
+
+If needed, you can configure the new `logstash-integration-snmp` plugin to maintain maximum compatibility with the previous (stand-alone) version of the [input-snmptrap](https://github.com/logstash-plugins/logstash-input-snmptrap) plugin.
+
+```ruby
+input {
+ snmptrap {
+ use_provided_mibs => false
+ oid_mapping_format => 'ruby_snmp'
+ oid_map_field_values => true
+ }
+}
+```
+
+
+
+
+## Importing MIBs [plugins-integrations-snmp-import-mibs]
+
+The SNMP plugins already include the IETF MIBs (management information bases), which do not need to be imported. To disable the bundled MIBs, set the `use_provided_mibs` option to `false`.
+
+Any other MIB will need to be manually imported to provide mapping of the numeric OIDs to MIB field names in the resulting event.
+
+To import a MIB, the OSS [libsmi library](https://www.ibr.cs.tu-bs.de/projects/libsmi/) is required. libsmi is available and installable on most operating systems.
+
+To import a MIB, you need to first convert the ASN.1 MIB file into a `.dic` file using the libsmi `smidump` command line utility.
+
+**Example (using `RFC1213-MIB` file)**
+
+```sh
+$ smidump --level=1 -k -f python RFC1213-MIB > RFC1213-MIB.dic
+```
+
+Note that the resulting file as output by `smidump` must have the `.dic` extension.
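+
+Once converted, the `.dic` file can be loaded by the SNMP plugins. The following is a minimal, illustrative sketch that points the `snmp` input at a directory of converted MIBs through its `mib_paths` option; the host, community, and OID values are placeholders for your environment.
+
+```ruby
+input {
+  snmp {
+    # Load the converted .dic MIB files from this directory (placeholder path).
+    mib_paths => ["/path/to/converted/mibs/"]
+
+    # Placeholder target and OID; adjust for your environment.
+    hosts => [{host => "udp:127.0.0.1/161" community => "public"}]
+    get   => ["1.3.6.1.2.1.1.1.0"]
+  }
+}
+```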
+
+### Preventing a `failed to locate MIB module` error [plugins-integrations-snmp-locate-mibs]
+
+The `smidump` utility looks for MIB dependencies in its pre-configured paths list. To avoid the `failed to locate MIB module` error, you may need to provide the MIB locations for your particular environment.
+
+The recommended ways to provide the additional path configuration are:
+
+* an environment variable, or
+* a configuration file.
+
+See the "MODULE LOCATIONS" section of the [smi_config documentation](https://www.ibr.cs.tu-bs.de/projects/libsmi/smi_config.html#MODULE%20LOCATIONS) for more information.
+
+
+### Option 1: Use an environment variable [plugins-integrations-snmp-env-var]
+
+Set the `SMIPATH` env var with the path to your MIBs. Be sure to include a prepended colon (`:`) for the path.
+
+```sh
+$ SMIPATH=":/path/to/mibs/" smidump -k -f python CISCO-PROCESS-MIB.mib > CISCO-PROCESS-MIB_my.dic <1>
+```
+
+1. Notice the colon that precedes the path definition.
+
+
+
+### Option 2: Provide a configuration file [plugins-integrations-snmp-mib-config]
+
+The other approach is to create a configuration file with the `path` option. For example, you could create a file called `smi.conf`:
+
+```sh
+path :/path/to/mibs/
+```
+
+And use the config with smidump:
+
+```sh
+$ smidump -c smi.conf -k -f python CISCO-PROCESS-MIB.mib > CISCO-PROCESS-MIB_my.dic
+```
+
+
+
diff --git a/docs/reference/plugins-outputs-boundary.md b/docs/reference/plugins-outputs-boundary.md
new file mode 100644
index 000000000..e9ddada64
--- /dev/null
+++ b/docs/reference/plugins-outputs-boundary.md
@@ -0,0 +1,168 @@
+---
+navigation_title: "boundary"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-boundary.html
+---
+
+# Boundary output plugin [plugins-outputs-boundary]
+
+
+* Plugin version: v3.0.6
+* Released on: 2023-05-30
+* [Changelog](https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-boundary-index.md).
+
+## Installation [_installation_21]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-boundary`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_65]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-boundary). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_65]
+
+This output lets you send annotations to Boundary based on Logstash events.
+
+Note that since Logstash maintains no state, these will be one-shot events.
+
+By default, the start and stop time will be the event timestamp.
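+
+For illustration, here is a minimal sketch of a `boundary` output using only the two required settings. The values are placeholders; `${BOUNDARY_API_KEY}` assumes the key is supplied through an environment variable.
+
+```ruby
+output {
+  boundary {
+    # Both settings are required; the values below are placeholders.
+    api_key => "${BOUNDARY_API_KEY}"
+    org_id  => "my-boundary-org"
+  }
+}
+```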
+
+
+## Boundary Output Configuration Options [plugins-outputs-boundary-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-boundary-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-outputs-boundary-api_key) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`auto`](#plugins-outputs-boundary-auto) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`bsubtype`](#plugins-outputs-boundary-bsubtype) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`btags`](#plugins-outputs-boundary-btags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`btype`](#plugins-outputs-boundary-btype) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`end_time`](#plugins-outputs-boundary-end_time) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`org_id`](#plugins-outputs-boundary-org_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`start_time`](#plugins-outputs-boundary-start_time) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-boundary-common-options) for a list of options supported by all output plugins.
+
+
+
+### `api_key` [plugins-outputs-boundary-api_key]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your Boundary API key
+
+
+### `auto` [plugins-outputs-boundary-auto]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If set to `true`, Logstash will try to pull Boundary fields out of the event: `['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']`. Any field explicitly set by config options will override these.
+
+
+### `bsubtype` [plugins-outputs-boundary-bsubtype]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Sub-Type
+
+
+### `btags` [plugins-outputs-boundary-btags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Set any custom tags for this event. The default is the Logstash tags, if any.
+
+
+### `btype` [plugins-outputs-boundary-btype]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Type
+
+
+### `end_time` [plugins-outputs-boundary-end_time]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Override the stop time. Note that Boundary requires this to be seconds since epoch. If overriding, it is your responsibility to type this correctly. By default this is set to `event.get("@timestamp").to_i`.
+
+
+### `org_id` [plugins-outputs-boundary-org_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Boundary Org ID
+
+
+### `start_time` [plugins-outputs-boundary-start_time]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Override the start time. Note that Boundary requires this to be seconds since epoch. If overriding, it is your responsibility to type this correctly. By default this is set to `event.get("@timestamp").to_i`.
+
+
+
+## Common options [plugins-outputs-boundary-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-boundary-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-boundary-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-boundary-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-boundary-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-boundary-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-boundary-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 boundary outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ boundary {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-circonus.md b/docs/reference/plugins-outputs-circonus.md
new file mode 100644
index 000000000..02a757dce
--- /dev/null
+++ b/docs/reference/plugins-outputs-circonus.md
@@ -0,0 +1,134 @@
+---
+navigation_title: "circonus"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-circonus.html
+---
+
+# Circonus output plugin [plugins-outputs-circonus]
+
+
+* Plugin version: v3.0.7
+* Released on: 2023-05-30
+* [Changelog](https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-circonus-index.md).
+
+## Installation [_installation_22]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-circonus`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_66]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-circonus). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_66]
+
+This output sends annotations to Circonus based on Logstash events.
+
+
+## Circonus Output Configuration Options [plugins-outputs-circonus-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-circonus-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`annotation`](#plugins-outputs-circonus-annotation) | [hash](/reference/configuration-file-structure.md#hash) | Yes |
+| [`api_token`](#plugins-outputs-circonus-api_token) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`app_name`](#plugins-outputs-circonus-app_name) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-circonus-common-options) for a list of options supported by all output plugins.
+
+
+
+### `annotation` [plugins-outputs-circonus-annotation]
+
+* This is a required setting.
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Registers an annotation with Circonus. The only required fields are `title` and `description`. `start` and `stop` will be set to the event timestamp. You can add any other optional annotation values as well. All values will be passed through `event.sprintf`.
+
+Example:
+
+```ruby
+  { "title" => "Logstash event" "description" => "Logstash event for %{host}" }
+```
+
+or
+
+```ruby
+  { "title" => "Logstash event" "description" => "Logstash event for %{host}" "parent_id" => "1" }
+```
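+
+Putting these pieces together, a minimal, illustrative `circonus` output might look like the following sketch. The token, app name, and annotation values are placeholders.
+
+```ruby
+output {
+  circonus {
+    # Placeholder credentials and app name.
+    api_token => "${CIRCONUS_API_TOKEN}"
+    app_name  => "logstash"
+
+    # Registers an annotation; all values pass through event.sprintf.
+    annotation => {
+      "title"       => "Logstash event"
+      "description" => "Logstash event for %{host}"
+    }
+  }
+}
+```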
+
+
+### `api_token` [plugins-outputs-circonus-api_token]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your Circonus API Token
+
+
+### `app_name` [plugins-outputs-circonus-app_name]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Circonus App name. This will be passed through `event.sprintf`, so variables are allowed here:
+
+Example: `app_name => "%{{myappname}}"`
+
+
+
+## Common options [plugins-outputs-circonus-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-circonus-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-circonus-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-circonus-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-circonus-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-circonus-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-circonus-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 circonus outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ circonus {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-cloudwatch.md b/docs/reference/plugins-outputs-cloudwatch.md
new file mode 100644
index 000000000..bab8c918a
--- /dev/null
+++ b/docs/reference/plugins-outputs-cloudwatch.md
@@ -0,0 +1,307 @@
+---
+navigation_title: "cloudwatch"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-cloudwatch.html
+---
+
+# Cloudwatch output plugin [plugins-outputs-cloudwatch]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-cloudwatch-index.md).
+
+## Getting help [_getting_help_67]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_67]
+
+This output lets you aggregate and send metric data to AWS CloudWatch.
+
+
+## Summary: [_summary]
+
+This plugin is intended to be used on a logstash indexer agent (but that is not the only way, see below.) In the intended scenario, one cloudwatch output plugin is configured, on the logstash indexer node, with just AWS API credentials, and possibly a region and/or a namespace. The output looks for fields present in events, and when it finds them, it uses them to calculate aggregate statistics. If the `metricname` option is set in this output, then any events which pass through it will be aggregated & sent to CloudWatch, but that is not recommended. The intended use is to NOT set the metricname option here, and instead to add a `CW_metricname` field (and other fields) to only the events you want sent to CloudWatch.
+
+When events pass through this output they are queued for background aggregation and sending, which happens every minute by default. The queue has a maximum size, and when it is full aggregated statistics will be sent to CloudWatch ahead of schedule. Whenever this happens a warning message is written to logstash’s log. If you see this you should increase the `queue_size` configuration option to avoid the extra API calls. The queue is emptied every time we send data to CloudWatch.
+
+Note: when logstash is stopped the queue is destroyed before it can be processed. This is a known limitation of logstash and will hopefully be addressed in a future version.
+
+
+## Details: [_details]
+
+There are two ways to configure this plugin, and they can be used in combination: event fields and per-output defaults.
+
+Event Field configuration… You add fields to your events in inputs & filters and this output reads those fields to aggregate events. The names of the fields read are configurable via the `field_*` options.
+
+Per-output defaults… You set universal defaults in this output plugin’s configuration, and if an event does not have a field for that option then the default is used.
+
+Note that the event fields take precedence over the per-output defaults.
+
+At a minimum, events must have a "metric name" to be sent to CloudWatch. This can be achieved either by providing a default here OR by adding a `CW_metricname` field. By default, if no other configuration is provided besides a metric name, then events will be counted (Unit: Count, Value: 1) by their metric name (either a default or from their `CW_metricname` field).
+
+Other fields which can be added to events to modify the behavior of this plugin are, `CW_namespace`, `CW_unit`, `CW_value`, and `CW_dimensions`. All of these field names are configurable in this output. You can also set per-output defaults for any of them. See below for details.
+
+Read more about [AWS CloudWatch](http://aws.amazon.com/cloudwatch/), and the specifics of the API endpoint this output uses, [PutMetricData](http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html).
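+
+For illustration, here is a sketch of the event-field approach described above: a filter adds `CW_*` fields to only the events of interest, and a single `cloudwatch` output (configured with region and namespace only) aggregates and sends them. The conditional and the field values are placeholders; credentials are assumed to come from the environment, a credentials file, or an IAM instance profile.
+
+```ruby
+filter {
+  # Tag only the events you want reported to CloudWatch (placeholder condition).
+  if [type] == "apache-access" {
+    mutate {
+      add_field => [
+        "CW_metricname", "ApacheRequests",
+        "CW_unit", "Count",
+        "CW_value", "1",
+        "CW_dimensions", "Environment",
+        "CW_dimensions", "prod"
+      ]
+    }
+  }
+}
+
+output {
+  cloudwatch {
+    # AWS credentials are resolved from the environment, a credentials file,
+    # or an IAM instance profile (see `access_key_id` below).
+    region    => "us-east-1"
+    namespace => "Logstash"
+  }
+}
+```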
+
+
+## Cloudwatch Output Configuration Options [plugins-outputs-cloudwatch-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-cloudwatch-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-outputs-cloudwatch-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`aws_credentials_file`](#plugins-outputs-cloudwatch-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`batch_size`](#plugins-outputs-cloudwatch-batch_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`dimensions`](#plugins-outputs-cloudwatch-dimensions) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`field_dimensions`](#plugins-outputs-cloudwatch-field_dimensions) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field_metricname`](#plugins-outputs-cloudwatch-field_metricname) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field_namespace`](#plugins-outputs-cloudwatch-field_namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field_unit`](#plugins-outputs-cloudwatch-field_unit) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`field_value`](#plugins-outputs-cloudwatch-field_value) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metricname`](#plugins-outputs-cloudwatch-metricname) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`namespace`](#plugins-outputs-cloudwatch-namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_uri`](#plugins-outputs-cloudwatch-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`queue_size`](#plugins-outputs-cloudwatch-queue_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`region`](#plugins-outputs-cloudwatch-region) | [string](/reference/configuration-file-structure.md#string), one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]` | No |
+| [`secret_access_key`](#plugins-outputs-cloudwatch-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-outputs-cloudwatch-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`timeframe`](#plugins-outputs-cloudwatch-timeframe) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`unit`](#plugins-outputs-cloudwatch-unit) | [string](/reference/configuration-file-structure.md#string), one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]` | No |
+| [`use_aws_bundled_ca`](#plugins-outputs-cloudwatch-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`value`](#plugins-outputs-cloudwatch-value) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-cloudwatch-common-options) for a list of options supported by all output plugins.
+
+
+
+### `access_key_id` [plugins-outputs-cloudwatch-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `aws_credentials_file` [plugins-outputs-cloudwatch-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `batch_size` [plugins-outputs-cloudwatch-batch_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `20`
+
+How many data points can be given in one call to the CloudWatch API
+
+
+### `dimensions` [plugins-outputs-cloudwatch-dimensions]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The default dimensions [ name, value, … ] to use for events which do not have a `CW_dimensions` field
+
+
+### `field_dimensions` [plugins-outputs-cloudwatch-field_dimensions]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"CW_dimensions"`
+
+The name of the field used to set the dimensions on an event metric. The field named here, if present in an event, must have an array of one or more key & value pairs, for example… `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` or, equivalently… `add_field => [ "CW_dimensions", "Environment" ]` `add_field => [ "CW_dimensions", "prod" ]`
+
+
+### `field_metricname` [plugins-outputs-cloudwatch-field_metricname]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"CW_metricname"`
+
+The name of the field used to set the metric name on an event. The author of this plugin recommends adding this field to events in inputs and filters rather than using the per-output default setting, so that one output plugin on your logstash indexer can serve all events (which, of course, had fields set on your logstash shippers).
+
+
+### `field_namespace` [plugins-outputs-cloudwatch-field_namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"CW_namespace"`
+
+The name of the field used to set a different namespace per event. Note: Only one namespace can be sent to CloudWatch per API call, so setting different namespaces will increase the number of API calls, and those cost money.
+
+
+### `field_unit` [plugins-outputs-cloudwatch-field_unit]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"CW_unit"`
+
+The name of the field used to set the unit on an event metric
+
+
+### `field_value` [plugins-outputs-cloudwatch-field_value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"CW_value"`
+
+The name of the field used to set the value (float) on an event metric
+
+
+### `metricname` [plugins-outputs-cloudwatch-metricname]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The default metric name to use for events which do not have a `CW_metricname` field. Beware: If this is provided, then all events which pass through this output will be aggregated and sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you will probably want to also restrict events from passing through this output using event type, tag, and field matching.
+
+
+### `namespace` [plugins-outputs-cloudwatch-namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash"`
+
+The default namespace to use for events which do not have a `CW_namespace` field
+
+
+### `proxy_uri` [plugins-outputs-cloudwatch-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI to proxy server if required
+
+
+### `queue_size` [plugins-outputs-cloudwatch-queue_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule. Set this to the number of events per timeframe you will be sending to CloudWatch to avoid extra API calls.
+
+
+### `region` [plugins-outputs-cloudwatch-region]
+
+* Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `secret_access_key` [plugins-outputs-cloudwatch-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `session_token` [plugins-outputs-cloudwatch-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+
+### `timeframe` [plugins-outputs-cloudwatch-timeframe]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"1m"`
+
+How often to send data to CloudWatch. This does not affect the event timestamps; events will always have their actual timestamp (to-the-minute) sent to CloudWatch.
+
+We only call the API if there is data to send.
+
+See the Rufus Scheduler docs for an [explanation of allowed values](https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler)
+
+
+### `unit` [plugins-outputs-cloudwatch-unit]
+
+* Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
+* Default value is `"Count"`
+
+The default unit to use for events which do not have a `CW_unit` field. If you set this option, you should probably set the `value` option along with it.
+
+
+### `use_aws_bundled_ca` [plugins-outputs-cloudwatch-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use bundled CA certificates that ship with AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. Windows, you can set this to `true`.
+
+
+### `value` [plugins-outputs-cloudwatch-value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"1"`
+
+The default value to use for events which do not have a `CW_value` field. If provided, this must be a string which can be converted to a float, for example "1", "2.34", ".5", and "0.67". If you set this option, you should probably set the `unit` option along with it.
+
+
+
+## Common options [plugins-outputs-cloudwatch-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-cloudwatch-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-cloudwatch-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-cloudwatch-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-cloudwatch-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-cloudwatch-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-cloudwatch-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 cloudwatch outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ cloudwatch {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-csv.md b/docs/reference/plugins-outputs-csv.md
new file mode 100644
index 000000000..0daf2e53e
--- /dev/null
+++ b/docs/reference/plugins-outputs-csv.md
@@ -0,0 +1,190 @@
+---
+navigation_title: "csv"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-csv.html
+---
+
+# Csv output plugin [plugins-outputs-csv]
+
+
+* Plugin version: v3.0.10
+* Released on: 2023-12-19
+* [Changelog](https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.10/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-csv-index.md).
+
+## Getting help [_getting_help_68]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-csv). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_68]
+
+CSV output.
+
+Write events to disk in CSV or other delimited format. This plugin is based on the file output, so many config values are shared. It uses the Ruby CSV library internally.
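+
+For illustration, here is a minimal sketch that writes selected event fields to a tab-separated file. The path and field names are placeholders.
+
+```ruby
+output {
+  csv {
+    # Placeholder path; date-based rotation uses the event timestamp.
+    path        => "/var/log/logstash/export-%{+YYYY-MM-dd}.csv"
+    # Fields are written in the order given here.
+    fields      => ["host", "message"]
+    # Use a tab as the column separator.
+    csv_options => {"col_sep" => "\t"}
+  }
+}
+```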
+
+
+## Csv Output Configuration Options [plugins-outputs-csv-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-csv-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`create_if_deleted`](#plugins-outputs-csv-create_if_deleted) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`csv_options`](#plugins-outputs-csv-csv_options) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`dir_mode`](#plugins-outputs-csv-dir_mode) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`fields`](#plugins-outputs-csv-fields) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`file_mode`](#plugins-outputs-csv-file_mode) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`filename_failure`](#plugins-outputs-csv-filename_failure) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`flush_interval`](#plugins-outputs-csv-flush_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`gzip`](#plugins-outputs-csv-gzip) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`path`](#plugins-outputs-csv-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`spreadsheet_safe`](#plugins-outputs-csv-spreadsheet_safe) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-csv-common-options) for a list of options supported by all output plugins.
+
+
+
+### `create_if_deleted` [plugins-outputs-csv-create_if_deleted]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin, the plugin will recreate the file. Default ⇒ true
+
+
+### `csv_options` [plugins-outputs-csv-csv_options]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Options for CSV output. This is passed directly to the Ruby stdlib `to_csv` function. Full documentation is available on the [Ruby CSV documentation page](http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html). A typical use case would be to use alternative column or row separators, e.g. `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}`, which gives tab-separated data with Windows line endings.
+
+
+### `dir_mode` [plugins-outputs-csv-dir_mode]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+Directory access mode to use. Note that due to a bug in JRuby, the system umask is ignored on Linux: [https://github.com/jruby/jruby/issues/3426](https://github.com/jruby/jruby/issues/3426). Setting it to `-1` uses the default OS value. Example: `"dir_mode" => 0750`
+
+
+### `fields` [plugins-outputs-csv-fields]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The field names from the event that should be written to the CSV file. Fields are written to the CSV in the same order as the array. If a field does not exist on the event, an empty string will be written. Supports field reference syntax eg: `fields => ["field1", "[nested][field]"]`.
+
+
+### `file_mode` [plugins-outputs-csv-file_mode]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+File access mode to use. Note that due to a bug in JRuby, the system umask is ignored on Linux: [https://github.com/jruby/jruby/issues/3426](https://github.com/jruby/jruby/issues/3426). Setting it to `-1` uses the default OS value. Example: `"file_mode" => 0640`
+
+
+### `filename_failure` [plugins-outputs-csv-filename_failure]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved into this file, inside the defined path.
+
+
+### `flush_interval` [plugins-outputs-csv-flush_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files. 0 will flush on every message.
+
+
+### `gzip` [plugins-outputs-csv-gzip]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+
+### `path` [plugins-outputs-csv-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This output writes events to files on disk. You can use fields from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format. You can customise the line format using the `line` codec like:
+
+```ruby
+output {
+ file {
+ path => ...
+ codec => line { format => "custom format: %{message}"}
+ }
+}
+```
+
+The path to the file to write. Event fields can be used here, like `/var/log/logstash/%{{host}}/%{{application}}`. You can also use the path option for date-based log rotation via the Joda time format. This will use the event timestamp. E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create `./test-2013-05-29.txt`.
+
+If you use an absolute path you cannot start with a dynamic string. E.g: `/%{{myfield}}/`, `/test-%{{myfield}}/` are not valid paths
+
+
+### `spreadsheet_safe` [plugins-outputs-csv-spreadsheet_safe]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Option to not escape/munge string values. Please note that turning off this option may not make the values safe in your spreadsheet application.
+
+
+
+## Common options [plugins-outputs-csv-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-csv-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-csv-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-csv-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-csv-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-csv-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-csv-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 csv outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ csv {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-datadog.md b/docs/reference/plugins-outputs-datadog.md
new file mode 100644
index 000000000..e1da98a71
--- /dev/null
+++ b/docs/reference/plugins-outputs-datadog.md
@@ -0,0 +1,165 @@
+---
+navigation_title: "datadog"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-datadog.html
+---
+
+# Datadog output plugin [plugins-outputs-datadog]
+
+
+* Plugin version: v3.0.6
+* Released on: 2023-05-31
+* [Changelog](https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-datadog-index.md).
+
+## Installation [_installation_23]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-datadog`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_69]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-datadog). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_69]
+
+This output sends events to DataDogHQ based on Logstash events.
+
+Note that since Logstash maintains no state, these will be one-shot events.
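+
+For illustration, here is a minimal sketch of a `datadog` output. Only `api_key` is required; the `title` and `text` values shown simply restate the defaults.
+
+```ruby
+output {
+  datadog {
+    # Required; the value is a placeholder resolved from an environment variable.
+    api_key => "${DATADOG_API_KEY}"
+    # Optional; these match the plugin defaults.
+    title   => "Logstash event for %{host}"
+    text    => "%{message}"
+  }
+}
+```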
+
+
+## Datadog Output Configuration Options [plugins-outputs-datadog-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-datadog-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`alert_type`](#plugins-outputs-datadog-alert_type) | [string](/reference/configuration-file-structure.md#string), one of `["info", "error", "warning", "success"]` | No |
+| [`api_key`](#plugins-outputs-datadog-api_key) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`date_happened`](#plugins-outputs-datadog-date_happened) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`dd_tags`](#plugins-outputs-datadog-dd_tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`priority`](#plugins-outputs-datadog-priority) | [string](/reference/configuration-file-structure.md#string), one of `["normal", "low"]` | No |
+| [`source_type_name`](#plugins-outputs-datadog-source_type_name) | [string](/reference/configuration-file-structure.md#string), one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]` | No |
+| [`text`](#plugins-outputs-datadog-text) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`title`](#plugins-outputs-datadog-title) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-datadog-common-options) for a list of options supported by all output plugins.
+
+
+
+### `alert_type` [plugins-outputs-datadog-alert_type]
+
+* Value can be any of: `info`, `error`, `warning`, `success`
+* There is no default value for this setting.
+
+Alert type
+
+
+### `api_key` [plugins-outputs-datadog-api_key]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your DatadogHQ API key
+
+
+### `date_happened` [plugins-outputs-datadog-date_happened]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Date Happened
+
+
+### `dd_tags` [plugins-outputs-datadog-dd_tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Set any custom tags for this event. The default is the Logstash tags, if any.
+
+
+### `priority` [plugins-outputs-datadog-priority]
+
+* Value can be any of: `normal`, `low`
+* There is no default value for this setting.
+
+Priority
+
+
+### `source_type_name` [plugins-outputs-datadog-source_type_name]
+
+* Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano`
+* Default value is `"my apps"`
+
+Source type name
+
+
+### `text` [plugins-outputs-datadog-text]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{message}}"`
+
+Text
+
+
+### `title` [plugins-outputs-datadog-title]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash event for %{{host}}"`
+
+Title
+
+
+
+## Common options [plugins-outputs-datadog-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-datadog-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-datadog-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-datadog-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-datadog-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-datadog-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-datadog-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 datadog outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ datadog {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-datadog_metrics.md b/docs/reference/plugins-outputs-datadog_metrics.md
new file mode 100644
index 000000000..c62aecd8d
--- /dev/null
+++ b/docs/reference/plugins-outputs-datadog_metrics.md
@@ -0,0 +1,181 @@
+---
+navigation_title: "datadog_metrics"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-datadog_metrics.html
+---
+
+# Datadog_metrics output plugin [plugins-outputs-datadog_metrics]
+
+
+* Plugin version: v3.0.7
+* Released on: 2024-10-25
+* [Changelog](https://github.com/logstash-plugins/logstash-output-datadog_metrics/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-datadog_metrics-index.md).
+
+## Installation [_installation_24]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-datadog_metrics`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_70]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-datadog_metrics). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_70]
+
+This output lets you send metrics to DataDogHQ based on Logstash events. The default `queue_size` and `timeframe` are low in order to provide near real-time alerting. If you do not use Datadog for alerting, consider raising these thresholds.
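+
+As a rough illustration, the sketch below batches more aggressively for a pipeline that does not rely on Datadog for alerting. The API key, metric name, and flush values are placeholders, not recommendations:
+
+```ruby
+output {
+  datadog_metrics {
+    api_key     => "YOUR_DATADOG_API_KEY"   # placeholder value
+    metric_name => "logstash.events"        # placeholder metric name
+    queue_size  => 100                      # flush after 100 queued events...
+    timeframe   => 30                       # ...or every 30 seconds, whichever comes first
+  }
+}
+```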
+
+
+## Datadog_metrics Output Configuration Options [plugins-outputs-datadog_metrics-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-datadog_metrics-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-outputs-datadog_metrics-api_key) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`api_url`](#plugins-outputs-datadog_metrics-api_url) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`dd_tags`](#plugins-outputs-datadog_metrics-dd_tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`device`](#plugins-outputs-datadog_metrics-device) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-datadog_metrics-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metric_name`](#plugins-outputs-datadog_metrics-metric_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metric_type`](#plugins-outputs-datadog_metrics-metric_type) | [string](/reference/configuration-file-structure.md#string), one of `["gauge", "counter", "%{{metric_type}}"]` | No |
+| [`metric_value`](#plugins-outputs-datadog_metrics-metric_value) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`queue_size`](#plugins-outputs-datadog_metrics-queue_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeframe`](#plugins-outputs-datadog_metrics-timeframe) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-datadog_metrics-common-options) for a list of options supported by all output plugins.
+
+
+
+### `api_key` [plugins-outputs-datadog_metrics-api_key]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Your DatadogHQ API key. [https://app.datadoghq.com/account/settings#api](https://app.datadoghq.com/account/settings#api)
+
+
+### `api_url` [plugins-outputs-datadog_metrics-api_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"https://api.datadoghq.com/api/v1/series"`
+
+Set the API endpoint for Datadog EU site users.
+
+
+### `dd_tags` [plugins-outputs-datadog_metrics-dd_tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Set any custom tags for this event, default are the Logstash tags if any.
+
+
+### `device` [plugins-outputs-datadog_metrics-device]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{metric_device}}"`
+
+The name of the device that produced the metric.
+
+
+### `host` [plugins-outputs-datadog_metrics-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+The name of the host that produced the metric.
+
+
+### `metric_name` [plugins-outputs-datadog_metrics-metric_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{metric_name}}"`
+
+The name of the time series.
+
+
+### `metric_type` [plugins-outputs-datadog_metrics-metric_type]
+
+* Value can be any of: `gauge`, `counter`, `%{{metric_type}}`
+* Default value is `"%{{metric_type}}"`
+
+The type of the metric.
+
+
+### `metric_value` [plugins-outputs-datadog_metrics-metric_value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{metric_value}}"`
+
+The value.
+
+
+### `queue_size` [plugins-outputs-datadog_metrics-queue_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+The number of events to queue before flushing to Datadog, even if the interval set in [`timeframe`](#plugins-outputs-datadog_metrics-timeframe) has not yet elapsed.
+
+
+### `timeframe` [plugins-outputs-datadog_metrics-timeframe]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+How often (in seconds) to flush queued events to Datadog.
+
+
+
+## Common options [plugins-outputs-datadog_metrics-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-datadog_metrics-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-datadog_metrics-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-datadog_metrics-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-datadog_metrics-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-datadog_metrics-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-datadog_metrics-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 datadog_metrics outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ datadog_metrics {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-dynatrace.md b/docs/reference/plugins-outputs-dynatrace.md
new file mode 100644
index 000000000..dd4ea64dc
--- /dev/null
+++ b/docs/reference/plugins-outputs-dynatrace.md
@@ -0,0 +1,32 @@
+---
+navigation_title: "dynatrace"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-dynatrace.html
+---
+
+# Dynatrace plugin [plugins-outputs-dynatrace]
+
+
+* This plugin was created and is maintained by a contributor.
+* [Change log](https://github.com/dynatrace-oss/logstash-output-dynatrace/blob/master/CHANGELOG.md)
+
+## Installation [_installation_25]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-dynatrace`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Description [_description_71]
+
+This plugin sends Logstash events to the Dynatrace Generic log ingest API v2.
+
+
+## Documentation [_documentation_2]
+
+[Documentation](https://github.com/dynatrace-oss/logstash-output-dynatrace/blob/main/docs/index.asciidoc) for the logstash-output-dynatrace plugin is maintained by the creator.
+
+
+## Getting Help [_getting_help_71]
+
+This is a third-party plugin. For bugs or feature requests, open an issue in the [plugins-outputs-dynatrace Github repo](https://github.com/dynatrace-oss/logstash-output-dynatrace).
+
+
diff --git a/docs/reference/plugins-outputs-elastic_app_search.md b/docs/reference/plugins-outputs-elastic_app_search.md
new file mode 100644
index 000000000..da934082d
--- /dev/null
+++ b/docs/reference/plugins-outputs-elastic_app_search.md
@@ -0,0 +1,250 @@
+---
+navigation_title: "elastic_app_search"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elastic_app_search.html
+---
+
+# Elastic App Search output plugin [plugins-outputs-elastic_app_search]
+
+
+* A component of the [elastic_enterprise_search integration plugin](/reference/plugins-integrations-elastic_enterprise_search.md)
+* Integration version: v3.0.0
+* Released on: 2023-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search/blob/v3.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-elastic_app_search-index.md).
+
+## Getting help [_getting_help_72]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_72]
+
+This output lets you send events to the [Elastic App Search](https://www.elastic.co/app-search) solution, both the [self-managed](https://www.elastic.co/downloads/app-search) and the [managed](https://www.elastic.co/cloud/app-search-service) service. On receiving a batch of events from the Logstash pipeline, the plugin converts the events into documents and uses the App Search bulk API to index multiple events in one request.
+
+App Search doesn’t allow fields to begin with `@timestamp`. By default the `@timestamp` and `@version` fields will be removed from each event before the event is sent to App Search. If you want to keep the `@timestamp` field, you can use the [timestamp_destination](#plugins-outputs-elastic_app_search-timestamp_destination) option to store the timestamp in a different field.
+
+::::{note}
+This gem does not support codec customization.
+::::
+
+
+
+## AppSearch Output configuration options [plugins-outputs-elastic_app_search-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-elastic_app_search-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-outputs-elastic_app_search-api_key) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`document_id`](#plugins-outputs-elastic_app_search-document_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`engine`](#plugins-outputs-elastic_app_search-engine) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`ssl_certificate_authorities`](#plugins-outputs-elastic_app_search-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-elastic_app_search-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-elastic_app_search-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-outputs-elastic_app_search-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-outputs-elastic_app_search-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-outputs-elastic_app_search-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-elastic_app_search-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`timestamp_destination`](#plugins-outputs-elastic_app_search-timestamp_destination) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`url`](#plugins-outputs-elastic_app_search-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-elastic_app_search-common-options) for a list of options supported by all output plugins.
+
+
+
+### `api_key` [plugins-outputs-elastic_app_search-api_key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value
+
+The private API Key with write permissions. Visit the App Search API keys reference [page](https://www.elastic.co/guide/en/app-search/current/authentication.html#authentication-api-keys) for more information.
+
+
+### `document_id` [plugins-outputs-elastic_app_search-document_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+The id for app search documents. This can be an interpolated value like `myapp-%{{sequence_id}}`. Reusing ids will cause documents to be rewritten.
+
+
+### `engine` [plugins-outputs-elastic_app_search-engine]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+The name of the search engine you created in App Search, an information repository that includes the indexed document records. The `engine` field supports [sprintf format](/reference/event-dependent-configuration.md#sprintf) to allow the engine name to be derived from a field value from each event, for example `engine-%{{engine_name}}`.
+
+Invalid engine names cause ingestion to stop until the field value can be resolved into a valid engine name. This situation can happen if the interpolated field value resolves to a value without a matching engine, or if the field is missing from the event and cannot be resolved at all.
+
+::::{tip}
+Consider adding a "default" engine type in the configuration to catch errors if the field is missing from the event.
+::::
+
+
+Example:
+
+```ruby
+input {
+ stdin {
+ codec => json
+ }
+}
+
+filter {
+ if ![engine_name] {
+ mutate {
+ add_field => {"engine_name" => "default"}
+ }
+ }
+}
+
+output {
+ elastic_app_search {
+ engine => "engine_%{[engine_name]}"
+ }
+}
+```
+
+
+### `ssl_certificate_authorities` [plugins-outputs-elastic_app_search-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem files to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and [`ssl_truststore_path`](#plugins-outputs-elastic_app_search-ssl_truststore_path) at the same time.
+::::
+
+
+
+### `ssl_cipher_suites` [plugins-outputs-elastic_app_search-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_supported_protocols` [plugins-outputs-elastic_app_search-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-outputs-elastic_app_search-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-outputs-elastic_app_search-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-outputs-elastic_app_search-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-outputs-elastic_app_search-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-outputs-elastic_app_search-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `timestamp_destination` [plugins-outputs-elastic_app_search-timestamp_destination]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+Where to move the value from the `@timestamp` field.
+
+All Logstash events contain a `@timestamp` field. App Search doesn’t support fields starting with `@timestamp`, and by default, the `@timestamp` field will be deleted.
+
+To keep the timestamp field, set this value to the name of the field where you want `@timestamp` copied.
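+
+For example, here is a sketch that copies the event timestamp into a hypothetical `event_timestamp` field before the document is indexed; the key, engine name, and URL are placeholders:
+
+```ruby
+output {
+  elastic_app_search {
+    api_key => "private-api-key"                  # placeholder value
+    engine  => "my-engine"                        # placeholder engine name
+    url     => "http://localhost:3002"
+    timestamp_destination => "event_timestamp"    # @timestamp is copied here instead of being dropped
+  }
+}
+```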
+
+
+### `url` [plugins-outputs-elastic_app_search-url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `http://localhost:3002`
+
+The value of the API endpoint in the form of a URL.
+
+
+
+## Common options [plugins-outputs-elastic_app_search-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`enable_metric`](#plugins-outputs-elastic_app_search-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-elastic_app_search-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `enable_metric` [plugins-outputs-elastic_app_search-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-elastic_app_search-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 elastic_app_search outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ elastic_app_search {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-elastic_workplace_search.md b/docs/reference/plugins-outputs-elastic_workplace_search.md
new file mode 100644
index 000000000..976269af8
--- /dev/null
+++ b/docs/reference/plugins-outputs-elastic_workplace_search.md
@@ -0,0 +1,262 @@
+---
+navigation_title: "elastic_workplace_search"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elastic_workplace_search.html
+---
+
+# Elastic Workplace Search output plugin [plugins-outputs-elastic_workplace_search]
+
+
+* A component of the [elastic_enterprise_search integration plugin](/reference/plugins-integrations-elastic_enterprise_search.md)
+* Integration version: v3.0.0
+* Released on: 2023-11-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search/blob/v3.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-elastic_workplace_search-index.md).
+
+## Getting help [_getting_help_73]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-elastic_enterprise_search). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_73]
+
+This output lets you send events to the [Elastic Workplace Search](https://www.elastic.co/workplace-search) solution. On receiving a batch of events from the Logstash pipeline, the plugin converts the events into documents and uses the Workplace Search bulk API to index multiple events in one request.
+
+Workplace Search doesn’t allow fields to begin with `@timestamp`. By default the `@timestamp` and `@version` fields will be removed from each event before the event is sent to Workplace Search. If you want to keep the `@timestamp` field, you can use the [timestamp_destination](#plugins-outputs-elastic_workplace_search-timestamp_destination) option to store the timestamp in a different field.
+
+::::{note}
+This gem does not support codec customization.
+::::
+
+
+
+## Workplace Search Output Configuration Options [plugins-outputs-elastic_workplace_search-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-elastic_workplace_search-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_token`](#plugins-outputs-elastic_workplace_search-access_token) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`document_id`](#plugins-outputs-elastic_workplace_search-document_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`source`](#plugins-outputs-elastic_workplace_search-source) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`ssl_certificate_authorities`](#plugins-outputs-elastic_workplace_search-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-elastic_workplace_search-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-elastic_workplace_search-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-outputs-elastic_workplace_search-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-outputs-elastic_workplace_search-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-outputs-elastic_workplace_search-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-elastic_workplace_search-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`timestamp_destination`](#plugins-outputs-elastic_workplace_search-timestamp_destination) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`url`](#plugins-outputs-elastic_workplace_search-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-elastic_workplace_search-common-options) for a list of options supported by all output plugins.
+
+
+
+### `access_token` [plugins-outputs-elastic_workplace_search-access_token]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value
+
+The source access token. Visit the source overview page in the Workplace Search dashboard to find the token associated with your source.
+
+
+### `document_id` [plugins-outputs-elastic_workplace_search-document_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+The id for workplace search documents. This can be an interpolated value like `myapp-%{{sequence_id}}`. Reusing ids will cause documents to be rewritten.
+
+
+### `source` [plugins-outputs-elastic_workplace_search-source]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+The ID of the source you created in Workplace Search. The `source` field supports [sprintf format](/reference/event-dependent-configuration.md#sprintf) to allow the source ID to be derived from a field value from each event, for example `%{{source_id}}`.
+
+Invalid source IDs cause ingestion to stop until the field value can be resolved into a valid source ID. This situation can happen if the interpolated field value resolves to a value without a matching source, or if the field is missing from the event and cannot be resolved at all.
+
+::::{tip}
+Consider adding a "default" source type in the configuration to catch errors if the field is missing from the event.
+::::
+
+
+Example:
+
+```ruby
+input {
+ stdin {
+ codec => json
+ }
+}
+
+filter {
+ if ![source_id] {
+ mutate {
+ add_field => {"source_id" => "default"}
+ }
+ }
+}
+
+output {
+ elastic_workplace_search {
+ source => "%{[source_id]}"
+ access_token => "abracadabra"
+ url => "http://workplace.search.com:3002"
+ }
+}
+```
+
+
+### `ssl_certificate_authorities` [plugins-outputs-elastic_workplace_search-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem files to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and [`ssl_truststore_path`](#plugins-outputs-elastic_workplace_search-ssl_truststore_path) at the same time.
+::::
+
+
+
+### `ssl_cipher_suites` [plugins-outputs-elastic_workplace_search-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_supported_protocols` [plugins-outputs-elastic_workplace_search-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-outputs-elastic_workplace_search-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-outputs-elastic_workplace_search-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-outputs-elastic_workplace_search-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-outputs-elastic_workplace_search-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-outputs-elastic_workplace_search-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `timestamp_destination` [plugins-outputs-elastic_workplace_search-timestamp_destination]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value
+
+Where to move the value from the `@timestamp` field.
+
+All Logstash events contain a `@timestamp` field. Workplace Search doesn’t support fields starting with `@timestamp`, and by default, the `@timestamp` field will be deleted.
+
+To keep the timestamp field, set this value to the name of the field where you want `@timestamp` copied.
+
+
+### `url` [plugins-outputs-elastic_workplace_search-url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `http://localhost:3002`
+
+The value of the API endpoint in the form of a URL.
+
+**Examples**
+
+On premise instance:
+
+`http://workplace.company.com:3002`
+
+Elastic Cloud instance:
+
+`https://7c455f508468426cb53912be65548117.ent-search.eu-west-1.aws.cloud.es.io`
+
+
+
+## Common options [plugins-outputs-elastic_workplace_search-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`enable_metric`](#plugins-outputs-elastic_workplace_search-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-elastic_workplace_search-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `enable_metric` [plugins-outputs-elastic_workplace_search-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-elastic_workplace_search-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 elastic_workplace_search outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ elastic_workplace_search {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-elasticsearch.md b/docs/reference/plugins-outputs-elasticsearch.md
new file mode 100644
index 000000000..2daeaa32a
--- /dev/null
+++ b/docs/reference/plugins-outputs-elasticsearch.md
@@ -0,0 +1,1242 @@
+---
+navigation_title: "elasticsearch"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-elasticsearch.html
+---
+
+# Elasticsearch output plugin [plugins-outputs-elasticsearch]
+
+
+* Plugin version: v12.0.1
+* Released on: 2025-01-14
+* [Changelog](https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v12.0.1/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-elasticsearch-index.md).
+
+## Getting help [_getting_help_74]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-elasticsearch). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_74]
+
+Elasticsearch provides near real-time search and analytics for all types of data. The Elasticsearch output plugin can store both time series datasets (such as logs, events, and metrics) and non-time series data in Elasticsearch.
+
+You can [learn more about Elasticsearch](https://www.elastic.co/elasticsearch/) on the website landing page or in the [Elasticsearch documentation](docs-content://get-started/index.md).
+
+::::{admonition} Compatibility Note
+:class: note
+
+When connected to Elasticsearch 7.x, modern versions of this plugin don’t use the document-type when inserting documents, unless the user explicitly sets [`document_type`](#plugins-outputs-elasticsearch-document_type).
+
+If you are using an earlier version of Logstash and wish to connect to Elasticsearch 7.x, first upgrade Logstash to version 6.8 to ensure it picks up changes to the Elasticsearch index template.
+
+If you are using a custom [`template`](#plugins-outputs-elasticsearch-template), ensure your template uses the `_doc` document-type before connecting to Elasticsearch 7.x.
+
+::::
+
+
+
+## {{ls}} to {{es-serverless}} [plugins-outputs-elasticsearch-serverless]
+
+You can use this plugin to send your {{ls}} data to {{es-serverless}}. Some differences to note between {{es-serverless}} and self-managed {{es}}:
+
+* Use **API keys** to access {{serverless-full}} from {{ls}}. Any user-based security settings in your {{es}} output plugin configuration are ignored and may cause errors.
+* {{es-serverless}} uses **data streams** and [{{dlm}} ({{dlm-init}})](docs-content://manage-data/lifecycle/data-stream.md) instead of {{ilm}} ({{ilm-init}}). Any {{ilm-init}} settings in your {{es}} output plugin configuration are ignored and may cause errors.
+* **{{ls}} monitoring** is available through the [{{ls}} Integration](https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md) in [Elastic Observability](docs-content://solutions/observability.md) on {{serverless-full}}.
+
+::::{admonition} Known issue for {{ls}} to {{es-serverless}}
+On {{serverless-short}}, the logstash-output-elasticsearch `hosts` setting defaults to port 9200 when the port is omitted. Set the port to `:443` explicitly instead.
+
+::::
+
+
+For more info on sending data from {{ls}} to {{es-serverless}}, check out the [{{es-serverless}} docs](docs-content://solutions/search.md).
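+
+Putting those points together, a minimal sketch of a {{serverless-short}}-bound output might look like the following; the hostname and API key are placeholders:
+
+```ruby
+output {
+  elasticsearch {
+    hosts       => ["https://my-project.es.example.cloud:443"]  # note the explicit :443 port
+    api_key     => "id:api_key"                                 # placeholder credentials
+    data_stream => "true"                                       # serverless indexes into data streams
+  }
+}
+```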
+
+
+## Hosted {{es}} Service on Elastic Cloud [plugins-outputs-elasticsearch-ess]
+
+{ess-leadin}
+
+
+## Compatibility with the Elastic Common Schema (ECS) [_compatibility_with_the_elastic_common_schema_ecs]
+
+This plugin will persist events to Elasticsearch in the shape produced by your pipeline, and *cannot* be used to re-shape the event structure into one that complies with ECS. To produce events that fully comply with ECS, you will need to populate ECS-defined fields throughout your pipeline definition.
+
+However, the Elasticsearch Index Templates it manages can be configured to be ECS-compatible by setting [`ecs_compatibility`](#plugins-outputs-elasticsearch-ecs_compatibility). By having an ECS-compatible template in place, we can ensure that Elasticsearch is prepared to create and index fields in a way that is compatible with ECS, and will correctly reject events with fields that conflict and cannot be coerced.
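+
+For instance, pinning the managed template to an ECS major version is a one-line setting; the hostname below is a placeholder:
+
+```ruby
+output {
+  elasticsearch {
+    hosts             => ["https://es.example.internal:9200"]  # placeholder host
+    ecs_compatibility => "v8"                                  # install an ECS v8-compatible index template
+  }
+}
+```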
+
+
+## Data streams [plugins-outputs-elasticsearch-data-streams]
+
+The {{es}} output plugin can store both time series datasets (such as logs, events, and metrics) and non-time series data in Elasticsearch.
+
+Use the data stream options for indexing time series datasets (such as logs, metrics, and events) into {{es}} and {{es-serverless}}:
+
+* [`data_stream`](#plugins-outputs-elasticsearch-data_stream)
+* [`data_stream_auto_routing`](#plugins-outputs-elasticsearch-data_stream_auto_routing)
+* [`data_stream_dataset`](#plugins-outputs-elasticsearch-data_stream_dataset)
+* [`data_stream_namespace`](#plugins-outputs-elasticsearch-data_stream_namespace)
+* [`data_stream_sync_fields`](#plugins-outputs-elasticsearch-data_stream_sync_fields)
+* [`data_stream_type`](#plugins-outputs-elasticsearch-data_stream_type)
+
+::::{important}
+[ECS compatibility](#plugins-outputs-elasticsearch-ecs_compatibility) must be enabled (set to `v1` or `v8`) for data streams to work properly.
+::::
+
+
+### Data stream configuration examples [plugins-outputs-elasticsearch-ds-examples]
+
+**Example: Basic default configuration**
+
+```sh
+output {
+ elasticsearch {
+ hosts => "hostname"
+ data_stream => "true"
+ }
+}
+```
+
+This example shows the minimal settings for processing data streams. Events with `data_stream.*` fields are routed to the appropriate data streams. If the fields are missing, routing defaults to `logs-generic-default`.
+
+**Example: Customize data stream name**
+
+```sh
+output {
+ elasticsearch {
+ hosts => "hostname"
+ data_stream => "true"
+ data_stream_type => "metrics"
+ data_stream_dataset => "foo"
+ data_stream_namespace => "bar"
+ }
+}
+```
+
+
+
+## Writing to different indices: best practices [_writing_to_different_indices_best_practices]
+
+::::{note}
+You cannot use dynamic variable substitution when `ilm_enabled` is `true` and when using `ilm_rollover_alias`.
+::::
+
+
+If you’re sending events to the same Elasticsearch cluster, but you’re targeting different indices you can:
+
+* use different Elasticsearch outputs, each one with a different value for the `index` parameter
+* use one Elasticsearch output and use the dynamic variable substitution for the `index` parameter
+
+Each Elasticsearch output is a new client connected to the cluster:
+
+* it has to initialize the client and connect to Elasticsearch (restart time is longer if you have more clients)
+* it has an associated connection pool
+
+To minimize the number of open connections to Elasticsearch, maximize the bulk size, and reduce the number of "small" bulk requests (which could easily fill up the queue), it is usually more efficient to have a single Elasticsearch output.
+
+Example:
+
+```ruby
+ output {
+ elasticsearch {
+ index => "%{[some_field][sub_field]}-%{+YYYY.MM.dd}"
+ }
+ }
+```
+
+**What to do in case there is no field in the event containing the destination index prefix?**
+
+You can use the `mutate` filter and conditionals to add a [`[@metadata]`](/reference/event-dependent-configuration.md#metadata) field to set the destination index for each event. The `[@metadata]` fields will not be sent to Elasticsearch.
+
+Example:
+
+```ruby
+ filter {
+ if [log_type] in [ "test", "staging" ] {
+ mutate { add_field => { "[@metadata][target_index]" => "test-%{+YYYY.MM}" } }
+ } else if [log_type] == "production" {
+ mutate { add_field => { "[@metadata][target_index]" => "prod-%{+YYYY.MM.dd}" } }
+ } else {
+ mutate { add_field => { "[@metadata][target_index]" => "unknown-%{+YYYY}" } }
+ }
+ }
+ output {
+ elasticsearch {
+ index => "%{[@metadata][target_index]}"
+ }
+ }
+```
+
+
+## Retry Policy [_retry_policy]
+
+The retry policy has changed significantly in the 8.1.1 release. This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+The following document errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See [DLQ Policy](#plugins-outputs-elasticsearch-dlq-policy) for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
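+
+For example, here is a sketch of an update-heavy configuration that lets {{es}} retry version conflicts a few extra times; the field name and the value `5` are illustrative, not recommendations:
+
+```ruby
+output {
+  elasticsearch {
+    document_id       => "%{[some_id_field]}"   # hypothetical field holding the document id
+    action            => "update"
+    doc_as_upsert     => true
+    retry_on_conflict => 5
+  }
+}
+```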
+
+
+## DLQ Policy [plugins-outputs-elasticsearch-dlq-policy]
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately, mapping errors cannot be handled without human intervention and without looking at the field that caused the mapping mismatch. If the DLQ is enabled, the original events that caused the mapping errors are stored in a file that can be processed at a later time. Often, the offending field can be removed and the event re-indexed into Elasticsearch. If the DLQ is not enabled and a mapping error happens, the problem is logged as a warning, and the event is dropped. See [Dead letter queues (DLQ)](/reference/dead-letter-queues.md) for more information about processing events in the DLQ. The list of error codes accepted for the DLQ can be customized with [`dlq_custom_codes`](#plugins-outputs-elasticsearch-dlq_custom_codes), but this should be done only when there is a clear need.
+
+
+## {{ilm-cap}} ({{ilm-init}}) [plugins-outputs-elasticsearch-ilm]
+
+::::{note}
+* The {{ilm-cap}} ({{ilm-init}}) feature does not apply for {{es-serverless}}. Any {{ilm-init}} settings in your plugin configuration are ignored and may cause errors.
+* The {{ilm-init}} feature requires plugin version `9.3.1` or higher.
+* This feature requires an {{es}} instance of 6.6.0 or higher with at least a Basic license
+
+::::
+
+
+{{ls}} can use [{{ilm}}](docs-content://manage-data/lifecycle/index-lifecycle-management.md) to automate the management of indices over time.
+
+The use of {{ilm}} is controlled by the `ilm_enabled` setting. By default, this setting detects whether the Elasticsearch instance supports {{ilm-init}}, and uses it if it is available. `ilm_enabled` can also be set to `true` or `false` to override the automatic detection, or disable {{ilm-init}}.
+
+This will overwrite the index settings and adjust the {{ls}} template to write the necessary settings for the template to support {{ilm}}, including the index policy and rollover alias to be used.
+
+{{ls}} creates a rollover alias for the indices to be written to, including a pattern for how the actual indices will be named, and unless an existing {{ilm-init}} policy is specified, a default policy will also be created. The default policy is configured to roll over an index when it reaches either 50 gigabytes in size or 30 days of age, whichever happens first.
+
+The default rollover alias is called `logstash`, with a default pattern for the rollover index of `{now/d}-00001`, which will name indices on the date that the index is rolled over, followed by an incrementing number. Note that the pattern must end with a dash and a number that will be incremented.
+
+See the [Rollover API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover) for more details on naming.
+
+The rollover alias, {{ilm-init}} pattern, and policy can be modified.
+
+See config below for an example:
+
+```ruby
+ output {
+ elasticsearch {
+ ilm_rollover_alias => "custom"
+ ilm_pattern => "000001"
+ ilm_policy => "custom_policy"
+ }
+ }
+```
+
+::::{note}
+* Custom ILM policies must already exist on the {{es}} cluster before they can be used.
+* If the rollover alias or pattern is modified, the index template will need to be overwritten as the settings `index.lifecycle.name` and `index.lifecycle.rollover_alias` are automatically written to the template
+* If the index property is supplied in the output definition, it will be overwritten by the rollover alias.
+
+::::
+
+
+
+## Batch Sizes [_batch_sizes]
+
+This plugin attempts to send batches of events to the [{{es}} Bulk API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) as a single request. However, if a batch exceeds 20MB, it is broken up into multiple bulk requests. If a single document exceeds 20MB, it is sent as a single request.
+
+
+## DNS Caching [_dns_caching]
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of [networkaddress.cache.ttl](https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html), a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will not reevaluate its DNS value while the keepalive is in effect.
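+
+As a concrete sketch, the TTL can be passed through the `LS_JAVA_OPTS` environment variable before starting {{ls}}; the pipeline path is a placeholder:
+
+```sh
+export LS_JAVA_OPTS="-Dnetworkaddress.cache.ttl=1"
+bin/logstash -f my-pipeline.conf
+```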
+
+
+## HTTP Compression [_http_compression]
+
+This plugin always reads compressed responses from {{es}}. By default, it sends compressed bulk requests to {{es}}.
+
+If you are concerned about bandwidth, you can set a higher [`compression_level`](#plugins-outputs-elasticsearch-compression_level) to trade CPU capacity for a reduction in network IO.
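+
+For example, here is a sketch that trades more CPU for smaller bulk request bodies; `9` is the maximum level, not a general recommendation, and the host is a placeholder:
+
+```ruby
+output {
+  elasticsearch {
+    hosts             => ["https://es.example.internal:9200"]  # placeholder host
+    compression_level => 9                                     # 0 disables compression, 9 compresses hardest
+  }
+}
+```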
+
+
+## Authentication [_authentication_2]
+
+Authentication to a secure Elasticsearch cluster is possible using one of the `user`/`password`, `cloud_auth` or `api_key` options.
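+
+For illustration, a sketch using basic `user`/`password` credentials; the values are placeholders, and an `api_key` or `cloud_auth` option could be used instead:
+
+```ruby
+output {
+  elasticsearch {
+    hosts    => ["https://es.example.internal:9200"]  # placeholder host
+    user     => "logstash_writer"                     # placeholder user
+    password => "changeme"                            # placeholder password
+  }
+}
+```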
+
+
+## Authorization [plugins-outputs-elasticsearch-autz]
+
+Authorization to a secure Elasticsearch cluster requires `read` permission at index level and `monitoring` permissions at cluster level. The `monitoring` permission at cluster level is necessary to perform periodic connectivity checks.
+
+
+## Handling non UTF-8 data [plugins-outputs-elasticsearch-handling-non-utf-8]
+
+This plugin transmits events to Elasticsearch using a JSON API, and therefore requires all string values in events to be valid UTF-8. When a string value on an event contains one or more byte sequences that are not valid in UTF-8, each offending byte sequence is replaced with the UTF-8 replacement character (`\uFFFD`).
+
+
+## Elasticsearch Output Configuration Options [plugins-outputs-elasticsearch-options]
+
+This plugin supports these configuration options plus the [Common options](#plugins-outputs-elasticsearch-common-options) described later.
+
+::::{note}
+As of version 12.0.0 of this plugin, a number of previously deprecated SSL settings have been removed. Please check out [Elasticsearch Output Obsolete Configuration Options](#plugins-outputs-elasticsearch-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`action`](#plugins-outputs-elasticsearch-action) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`api_key`](#plugins-outputs-elasticsearch-api_key) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`bulk_path`](#plugins-outputs-elasticsearch-bulk_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ca_trusted_fingerprint`](#plugins-outputs-elasticsearch-ca_trusted_fingerprint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`cloud_auth`](#plugins-outputs-elasticsearch-cloud_auth) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`cloud_id`](#plugins-outputs-elasticsearch-cloud_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`compression_level`](#plugins-outputs-elasticsearch-compression_level) | [number](/reference/configuration-file-structure.md#number), one of `[0 ~ 9]` | No |
+| [`custom_headers`](#plugins-outputs-elasticsearch-custom_headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`data_stream`](#plugins-outputs-elasticsearch-data_stream) | [string](/reference/configuration-file-structure.md#string), one of `["true", "false", "auto"]` | No |
+| [`data_stream_auto_routing`](#plugins-outputs-elasticsearch-data_stream_auto_routing) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`data_stream_dataset`](#plugins-outputs-elasticsearch-data_stream_dataset) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`data_stream_namespace`](#plugins-outputs-elasticsearch-data_stream_namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`data_stream_sync_fields`](#plugins-outputs-elasticsearch-data_stream_sync_fields) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`data_stream_type`](#plugins-outputs-elasticsearch-data_stream_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`dlq_custom_codes`](#plugins-outputs-elasticsearch-dlq_custom_codes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`dlq_on_failed_indexname_interpolation`](#plugins-outputs-elasticsearch-dlq_on_failed_indexname_interpolation) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`doc_as_upsert`](#plugins-outputs-elasticsearch-doc_as_upsert) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`document_id`](#plugins-outputs-elasticsearch-document_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`document_type`](#plugins-outputs-elasticsearch-document_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ecs_compatibility`](#plugins-outputs-elasticsearch-ecs_compatibility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`failure_type_logging_whitelist`](#plugins-outputs-elasticsearch-failure_type_logging_whitelist) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`healthcheck_path`](#plugins-outputs-elasticsearch-healthcheck_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`hosts`](#plugins-outputs-elasticsearch-hosts) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`http_compression`](#plugins-outputs-elasticsearch-http_compression) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ilm_enabled`](#plugins-outputs-elasticsearch-ilm_enabled) | [string](/reference/configuration-file-structure.md#string), one of `["true", "false", "auto"]` | No |
+| [`ilm_pattern`](#plugins-outputs-elasticsearch-ilm_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ilm_policy`](#plugins-outputs-elasticsearch-ilm_policy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ilm_rollover_alias`](#plugins-outputs-elasticsearch-ilm_rollover_alias) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`index`](#plugins-outputs-elasticsearch-index) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`silence_errors_in_log`](#plugins-outputs-elasticsearch-silence_errors_in_log) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`manage_template`](#plugins-outputs-elasticsearch-manage_template) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`parameters`](#plugins-outputs-elasticsearch-parameters) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`parent`](#plugins-outputs-elasticsearch-parent) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-outputs-elasticsearch-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`path`](#plugins-outputs-elasticsearch-path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pipeline`](#plugins-outputs-elasticsearch-pipeline) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pool_max`](#plugins-outputs-elasticsearch-pool_max) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`pool_max_per_route`](#plugins-outputs-elasticsearch-pool_max_per_route) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy`](#plugins-outputs-elasticsearch-proxy) | [uri](/reference/configuration-file-structure.md#uri) | No |
+| [`resurrect_delay`](#plugins-outputs-elasticsearch-resurrect_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_initial_interval`](#plugins-outputs-elasticsearch-retry_initial_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_max_interval`](#plugins-outputs-elasticsearch-retry_max_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_on_conflict`](#plugins-outputs-elasticsearch-retry_on_conflict) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`routing`](#plugins-outputs-elasticsearch-routing) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`script`](#plugins-outputs-elasticsearch-script) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`script_lang`](#plugins-outputs-elasticsearch-script_lang) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`script_type`](#plugins-outputs-elasticsearch-script_type) | [string](/reference/configuration-file-structure.md#string), one of `["inline", "indexed", "file"]` | No |
+| [`script_var_name`](#plugins-outputs-elasticsearch-script_var_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`scripted_upsert`](#plugins-outputs-elasticsearch-scripted_upsert) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`silence_errors_in_log`](#plugins-outputs-elasticsearch-silence_errors_in_log) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`sniffing`](#plugins-outputs-elasticsearch-sniffing) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`sniffing_delay`](#plugins-outputs-elasticsearch-sniffing_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sniffing_path`](#plugins-outputs-elasticsearch-sniffing_path) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_certificate`](#plugins-outputs-elasticsearch-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-outputs-elasticsearch-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-elasticsearch-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-outputs-elasticsearch-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-outputs-elasticsearch-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-outputs-elasticsearch-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-outputs-elasticsearch-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-outputs-elasticsearch-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-elasticsearch-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-outputs-elasticsearch-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-outputs-elasticsearch-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-outputs-elasticsearch-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-elasticsearch-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`template`](#plugins-outputs-elasticsearch-template) | a valid filesystem path | No |
+| [`template_api`](#plugins-outputs-elasticsearch-template_api) | [string](/reference/configuration-file-structure.md#string), one of `["auto", "legacy", "composable"]` | No |
+| [`template_name`](#plugins-outputs-elasticsearch-template_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`template_overwrite`](#plugins-outputs-elasticsearch-template_overwrite) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`timeout`](#plugins-outputs-elasticsearch-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`upsert`](#plugins-outputs-elasticsearch-upsert) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`user`](#plugins-outputs-elasticsearch-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`validate_after_inactivity`](#plugins-outputs-elasticsearch-validate_after_inactivity) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`version`](#plugins-outputs-elasticsearch-version) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`version_type`](#plugins-outputs-elasticsearch-version_type) | [string](/reference/configuration-file-structure.md#string), one of `["internal", "external", "external_gt", "external_gte", "force"]` | No |
+
+Also see [Common options](#plugins-outputs-elasticsearch-common-options) for a list of options supported by all output plugins.
+
+
+
+### `action` [plugins-outputs-elasticsearch-action]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `create` for data streams, and `index` for non-time series data.
+
+The Elasticsearch action to perform. Valid actions are:
+
+* `index`: indexes a document (an event from Logstash).
+* `delete`: deletes a document by id (an id is required for this action).
+* `create`: indexes a document; fails if a document with that id already exists in the index.
+* `update`: updates a document by id. Update has a special case where you can upsert — update a document if not already present. See the `doc_as_upsert` option. NOTE: This does not work and is not supported in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+* A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` would use the foo field for the action. If resolved action is not in [`index`, `delete`, `create`, `update`], the event will not be sent to {{es}}. Instead the event will be sent to the pipeline’s [dead-letter-queue (DLQ)](/reference/dead-letter-queues.md) (if enabled), or it will be logged and dropped.
+
+For more details on actions, check out the [Elasticsearch bulk API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk).
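+
+For example, a minimal sketch (the metadata field is hypothetical) that picks the action per event:
+
+```ruby
+output {
+  elasticsearch {
+    hosts  => ["https://localhost:9200"]   # placeholder host
+    # [@metadata][action] is a hypothetical field set earlier in the pipeline; events that resolve
+    # to anything other than index/delete/create/update go to the DLQ (if enabled) or are dropped.
+    action => "%{[@metadata][action]}"
+  }
+}
+```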
+
+
+### `api_key` [plugins-outputs-elasticsearch-api_key]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Authenticate using Elasticsearch API key. Note that this option also requires SSL/TLS, which can be enabled by supplying a [`cloud_id`](#plugins-outputs-elasticsearch-cloud_id), a list of HTTPS [`hosts`](#plugins-outputs-elasticsearch-hosts), or by setting [`ssl_enabled => true`](#plugins-outputs-elasticsearch-ssl_enabled).
+
+Format is `id:api_key` where `id` and `api_key` are as returned by the Elasticsearch [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key).
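+
+For example, a minimal sketch (credentials are placeholders) of API key authentication over HTTPS:
+
+```ruby
+output {
+  elasticsearch {
+    hosts   => ["https://localhost:9200"]   # HTTPS host satisfies the SSL/TLS requirement
+    api_key => "my_key_id:my_key_secret"    # placeholder id:api_key pair from the Create API key API
+  }
+}
+```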
+
+
+### `bulk_path` [plugins-outputs-elasticsearch-bulk_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The default value for this setting is `/_bulk?filter_path=errors,items.*.error,items.*.status`
+
+HTTP Path to perform the _bulk requests to.
+
+* The default bulk path is the concatenation of the value of the `path` parameter and `/_bulk?filter_path=errors,items.*.error,items.*.status`.
+* The `filter_path` query parameter is appended to the bulk path to reduce the payload between Logstash and Elasticsearch. However, if a custom `filter_path` query parameter is included in the `bulk_path` setting, then that value is used.
+
+
+### `ca_trusted_fingerprint` [plugins-outputs-elasticsearch-ca_trusted_fingerprint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string), and must contain exactly 64 hexadecimal characters.
+* There is no default value for this setting.
+* Use of this option *requires* Logstash 8.3+
+
+The SHA-256 fingerprint of an SSL Certificate Authority to trust, such as the autogenerated self-signed CA for an Elasticsearch cluster.
+
+
+### `cloud_auth` [plugins-outputs-elasticsearch-cloud_auth]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Cloud authentication string (`<username>:<password>` format) is an alternative for the `user`/`password` pair.
+
+For more details, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
+
+
+### `cloud_id` [plugins-outputs-elasticsearch-cloud_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Cloud ID, from the Elastic Cloud web console. If set, `hosts` should not be used.
+
+For more details, check out the [Logstash-to-Cloud documentation](/reference/connecting-to-cloud.md).
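+
+For example, a minimal sketch (the Cloud ID and credentials are placeholders):
+
+```ruby
+output {
+  elasticsearch {
+    cloud_id   => "my_deployment:placeholder-cloud-id"   # placeholder Cloud ID from the Elastic Cloud console
+    cloud_auth => "elastic:placeholder-password"         # placeholder <username>:<password> pair
+  }
+}
+```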
+
+
+### `compression_level` [plugins-outputs-elasticsearch-compression_level]
+
+* Value can be any of: `0`, `1`, `2`, `3`, `4`, `5`, `6`, `7`, `8`, `9`
+* Default value is `1`
+
+The gzip compression level. Setting this value to `0` disables compression. The compression level must be in the range of `1` (best speed) to `9` (best compression).
+
+Increasing the compression level will reduce the network usage but will increase the CPU usage.
+
+
+### `data_stream` [plugins-outputs-elasticsearch-data_stream]
+
+* Value can be any of: `true`, `false` and `auto`
+* Default is `false` in Logstash 7.x and `auto` starting in Logstash 8.0.
+
+Defines whether data will be indexed into an Elasticsearch data stream. The other `data_stream_*` settings will be used only if this setting is enabled.
+
+Logstash handles the output as a data stream when the supplied configuration is compatible with data streams and this value is set to `auto`. Note that [ECS compatibility](#plugins-outputs-elasticsearch-ecs_compatibility) must be enabled (set to `v1` or `v8`) for data streams to work properly.
+
+
+### `data_stream_auto_routing` [plugins-outputs-elasticsearch-data_stream_auto_routing]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`.
+
+Automatically routes events by deriving the data stream name using specific event fields with the `%{[data_stream][type]}-%{[data_stream][dataset]}-%{[data_stream][namespace]}` format.
+
+If enabled, the `data_stream.*` event fields will take precedence over the `data_stream_type`, `data_stream_dataset`, and `data_stream_namespace` settings, but will fall back to them if any of the fields are missing from the event.
+
+
+### `data_stream_dataset` [plugins-outputs-elasticsearch-data_stream_dataset]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `generic`.
+
+The data stream dataset used to construct the data stream at index time.
+
+
+### `data_stream_namespace` [plugins-outputs-elasticsearch-data_stream_namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `default`.
+
+The data stream namespace used to construct the data stream at index time.
+
+
+### `data_stream_sync_fields` [plugins-outputs-elasticsearch-data_stream_sync_fields]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Automatically adds and syncs the `data_stream.*` event fields if they are missing from the event. This ensures that fields match the name of the data stream that is receiving events.
+
+::::{note}
+If existing `data_stream.*` event fields do not match the data stream name and `data_stream_auto_routing` is disabled, the event fields will be overwritten with a warning.
+::::
+
+
+
+### `data_stream_type` [plugins-outputs-elasticsearch-data_stream_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `logs`.
+
+The data stream type used to construct the data stream at index time. Currently, only `logs`, `metrics`, `synthetics` and `traces` are supported.
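+
+Putting the `data_stream_*` options together, a minimal sketch (host and dataset name are placeholders) that routes events to the `metrics-myapp-production` data stream:
+
+```ruby
+output {
+  elasticsearch {
+    hosts                 => ["https://localhost:9200"]  # placeholder host
+    data_stream           => "true"
+    data_stream_type      => "metrics"
+    data_stream_dataset   => "myapp"                     # hypothetical dataset name
+    data_stream_namespace => "production"
+  }
+}
+```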
+
+
+### `dlq_custom_codes` [plugins-outputs-elasticsearch-dlq_custom_codes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `[]`.
+
+Lists single-action error codes from Elasticsearch’s Bulk API that are considered valid for moving events into the dead letter queue. This list is an addition to the ordinary error codes considered for this feature, 400 and 404. It’s considered a configuration error to re-use the same predefined codes for success, DLQ, or conflict. The option accepts a list of natural numbers corresponding to HTTP error codes.
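+
+For example, a sketch (the chosen status code is hypothetical) that also sends events rejected with HTTP 403 to the dead letter queue:
+
+```ruby
+output {
+  elasticsearch {
+    hosts            => ["https://localhost:9200"]  # placeholder host
+    dlq_custom_codes => [403]                       # in addition to the predefined 400 and 404
+  }
+}
+```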
+
+
+### `dlq_on_failed_indexname_interpolation` [plugins-outputs-elasticsearch-dlq_on_failed_indexname_interpolation]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`.
+
+If enabled, events with a failed index name interpolation go into the dead letter queue.
+
+
+### `doc_as_upsert` [plugins-outputs-elasticsearch-doc_as_upsert]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable `doc_as_upsert` for update mode. Create a new document with source if `document_id` doesn’t exist in Elasticsearch.
+
+
+### `document_id` [plugins-outputs-elasticsearch-document_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in Elasticsearch with the same ID.
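+
+For example, a sketch (index and field names are hypothetical) that updates documents in place, creating them when they do not exist yet:
+
+```ruby
+output {
+  elasticsearch {
+    hosts         => ["https://localhost:9200"]  # placeholder host
+    index         => "users"                     # hypothetical index name
+    action        => "update"
+    document_id   => "%{[user][id]}"             # hypothetical event field carrying the ID
+    doc_as_upsert => true
+  }
+}
+```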
+
+
+### `document_type` [plugins-outputs-elasticsearch-document_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* This option is deprecated
+
+::::{note}
+This option is deprecated due to the [removal of types in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html). It will be removed in the next major version of Logstash.
+::::
+
+
+::::{note}
+This value is ignored and has no effect for Elasticsearch clusters `8.x`.
+::::
+
+
+This sets the document type to write events to. Generally you should try to write only similar events to the same *type*. String expansion `%{{foo}}` works here. If you don’t set a value for this option:
+
+* for Elasticsearch clusters 8.x: no value will be used;
+* for Elasticsearch clusters 7.x: the value of *_doc* will be used;
+* for Elasticsearch clusters 6.x: the value of *doc* will be used;
+* for Elasticsearch clusters 5.x and below: the event’s *type* field will be used; if the field is not present, the value of *doc* will be used.
+
+
+### `ecs_compatibility` [plugins-outputs-elasticsearch-ecs_compatibility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are:
+
+ * `disabled`: does not provide ECS-compatible templates
+ * `v1`,`v8`: Elastic Common Schema-compliant behavior
+
+* Default value depends on which version of Logstash is running:
+
+ * When Logstash provides a `pipeline.ecs_compatibility` setting, its value is used as the default
+ * Otherwise, the default value is `disabled`.
+
+
+Controls this plugin’s compatibility with the [Elastic Common Schema (ECS)](ecs://reference/index.md), including the installation of ECS-compatible index templates. The value of this setting affects the *default* values of:
+
+* [`index`](#plugins-outputs-elasticsearch-index)
+* [`template_name`](#plugins-outputs-elasticsearch-template_name)
+* [`ilm_rollover_alias`](#plugins-outputs-elasticsearch-ilm_rollover_alias)
+
+
+### `failure_type_logging_whitelist` [plugins-outputs-elasticsearch-failure_type_logging_whitelist]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+::::{note}
+Deprecated, refer to [`silence_errors_in_log`](#plugins-outputs-elasticsearch-silence_errors_in_log).
+::::
+
+
+
+### `custom_headers` [plugins-outputs-elasticsearch-custom_headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Pass a set of key-value pairs as the headers sent in each request to an Elasticsearch node. The headers will be used for any kind of request (_bulk request, template installation, health checks, and sniffing). These custom headers will be overridden by settings like `compression_level`.
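+
+For example, a minimal sketch (the header name and value are hypothetical):
+
+```ruby
+output {
+  elasticsearch {
+    hosts          => ["https://localhost:9200"]   # placeholder host
+    custom_headers => { "X-Tenant" => "team-a" }   # hypothetical header sent with every request
+  }
+}
+```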
+
+
+### `healthcheck_path` [plugins-outputs-elasticsearch-healthcheck_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down. The request is sent in the background to see if the backend has come back before it is once again eligible to service requests. If you have custom firewall rules, you may need to change this.
+
+
+### `hosts` [plugins-outputs-elasticsearch-hosts]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+* Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter. Remember the `http` protocol uses the [http](elasticsearch://reference/elasticsearch/configuration-reference/networking-settings.md) address (e.g. 9200, not 9300).
+
+Examples:
+
+```
+"127.0.0.1"
+["127.0.0.1:9200","127.0.0.2:9200"]
+["http://127.0.0.1"]
+["https://127.0.0.1:9200"]
+["https://127.0.0.1:9200/mypath"] (If using a proxy on a subpath)
+```
+
+Exclude [dedicated master nodes](elasticsearch://reference/elasticsearch/configuration-reference/node-settings.md) from the `hosts` list to prevent Logstash from sending bulk requests to the master nodes. This parameter should reference only data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+
+### `http_compression` [plugins-outputs-elasticsearch-http_compression]
+
+::::{admonition} Deprecated in 11.17.0.
+:class: warning
+
+Replaced by [`compression_level`](#plugins-outputs-elasticsearch-compression_level)
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Setting `true` enables gzip compression level 1 on requests.
+
+This setting allows you to reduce this plugin’s outbound network traffic by compressing each bulk *request* to {{es}}.
+
+::::{note}
+This output plugin reads compressed *responses* from {{es}} regardless of the value of this setting.
+::::
+
+
+
+### `ilm_enabled` [plugins-outputs-elasticsearch-ilm_enabled]
+
+* Value can be any of: `true`, `false`, `auto`
+* Default value is `auto`
+
+The default setting of `auto` will automatically enable [Index Lifecycle Management](docs-content://manage-data/lifecycle/index-lifecycle-management.md), if the Elasticsearch cluster is running Elasticsearch version `7.0.0` or higher with the ILM feature enabled, and disable it otherwise.
+
+Setting this flag to `false` will disable the Index Lifecycle Management feature, even if the Elasticsearch cluster supports ILM. Setting this flag to `true` will enable the Index Lifecycle Management feature, if the Elasticsearch cluster supports it. This is required to enable Index Lifecycle Management on a version of Elasticsearch earlier than version `7.0.0`.
+
+::::{note}
+This feature requires a Basic License or above to be installed on an Elasticsearch cluster version 6.6.0 or later.
+::::
+
+
+
+### `ilm_pattern` [plugins-outputs-elasticsearch-ilm_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `{now/d}-000001`
+
+Pattern used for generating indices managed by [Index Lifecycle Management](docs-content://manage-data/lifecycle/index-lifecycle-management.md). The value specified in the pattern will be appended to the write alias, and incremented automatically when a new index is created by ILM.
+
+Date Math can be used when specifying an ilm pattern, see [Rollover API docs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover) for details.
+
+::::{note}
+Updating the pattern will require the index template to be rewritten.
+::::
+
+
+::::{note}
+The pattern must finish with a dash and a number that will be automatically incremented when indices rollover.
+::::
+
+
+::::{note}
+The pattern is a 6-digit string padded by zeros, regardless of prior index name. Example: 000001. See [Rollover path parameters API docs](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-rollover) for details.
+::::
+
+
+
+### `ilm_policy` [plugins-outputs-elasticsearch-ilm_policy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `logstash-policy`
+
+Modify this setting to use a custom Index Lifecycle Management policy, rather than the default. If this value is not set, the default policy will be automatically installed into Elasticsearch.
+
+::::{note}
+If this setting is specified, the policy must already exist in the Elasticsearch cluster.
+::::
+
+
+
+### `ilm_rollover_alias` [plugins-outputs-elasticsearch-ilm_rollover_alias]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-outputs-elasticsearch-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `logstash`
+ * ECS Compatibility enabled: `ecs-logstash`
+
+
+The rollover alias is the alias where indices managed using Index Lifecycle Management will be written to.
+
+::::{note}
+If both `index` and `ilm_rollover_alias` are specified, `ilm_rollover_alias` takes precedence.
+::::
+
+
+::::{note}
+Updating the rollover alias will require the index template to be rewritten.
+::::
+
+
+::::{note}
+`ilm_rollover_alias` does NOT support dynamic variable substitution as `index` does.
+::::
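+
+Putting the ILM options together, a sketch (alias and policy names are placeholders) of writing through a custom rollover alias:
+
+```ruby
+output {
+  elasticsearch {
+    hosts              => ["https://localhost:9200"]  # placeholder host
+    ilm_enabled        => true
+    ilm_rollover_alias => "myapp-logs"                # hypothetical alias
+    ilm_pattern        => "{now/d}-000001"
+    ilm_policy         => "myapp-policy"              # must already exist if you specify it
+  }
+}
+```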
+
+
+
+### `index` [plugins-outputs-elasticsearch-index]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-outputs-elasticsearch-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `"logstash-%{+yyyy.MM.dd}"`
+ * ECS Compatibility enabled: `"ecs-logstash-%{+yyyy.MM.dd}"`
+
+
+The indexing target to write events to. Can point to an [index](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-mgmt.html), [alias](docs-content://manage-data/data-store/aliases.md), or [data stream](docs-content://manage-data/data-store/data-streams.md). This can be dynamic using the `%{{foo}}` syntax. The default value will partition your indices by day so you can more easily delete old data or only search specific date ranges. Indexes may not contain uppercase characters. For weekly indexes, ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`. Logstash uses [Joda formats](http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html), and the `@timestamp` field of each event is used as the source for the date.
+
+
+### `manage_template` [plugins-outputs-elasticsearch-manage_template]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true` for non-time series data, and `false` for data streams.
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during Logstash’s startup if one with the name [`template_name`](#plugins-outputs-elasticsearch-template_name) does not already exist. By default, the contents of this template are the default template for `logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern `logstash-*`. Should you require support for other index names, or would like to change the mappings in the template in general, a custom template can be specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more control over template creation (e.g. creating indices dynamically based on field names), you should set `manage_template` to false and use the REST API to apply your templates manually.
+
+
+### `parameters` [plugins-outputs-elasticsearch-parameters]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added to every host listed in the *hosts* configuration. If the *hosts* list contains URLs that already have query strings, the one specified here will be appended.
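+
+For example, a minimal sketch (the key/value pair is hypothetical):
+
+```ruby
+output {
+  elasticsearch {
+    hosts      => ["https://localhost:9200"]     # placeholder host
+    parameters => { "custom_param" => "value" }  # appended as ?custom_param=value to each host URL
+  }
+}
+```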
+
+
+### `parent` [plugins-outputs-elasticsearch-parent]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+For child documents, ID of the associated parent. This can be dynamic using the `%{{foo}}` syntax.
+
+
+### `password` [plugins-outputs-elasticsearch-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster
+
+
+### `path` [plugins-outputs-elasticsearch-path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps the root path where the Elasticsearch HTTP API lives. Note that if you use paths as components of URLs in the *hosts* field, you may not also set this field. That will raise an error at startup.
+
+
+### `pipeline` [plugins-outputs-elasticsearch-pipeline]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value.
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration here like `pipeline => "%{[@metadata][pipeline]}"`. The pipeline parameter won’t be set if the value resolves to empty string ("").
+
+
+### `pool_max` [plugins-outputs-elasticsearch-pool_max]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum. This sets the maximum number of open connections the output will create. Setting this too low may mean frequently closing and opening connections, which is bad.
+
+
+### `pool_max_per_route` [plugins-outputs-elasticsearch-pool_max_per_route]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint. This sets the maximum number of open connections per endpoint the output will create. Setting this too low may mean frequently closing and opening connections, which is bad.
+
+
+### `proxy` [plugins-outputs-elasticsearch-proxy]
+
+* Value type is [uri](/reference/configuration-file-structure.md#uri)
+* There is no default value for this setting.
+
+Set the address of a forward HTTP proxy. This setting accepts only URI arguments to prevent leaking credentials. An empty string is treated as if proxy was not set. This is useful when using environment variables e.g. `proxy => '${LS_PROXY:}'`.
+
+
+### `resurrect_delay` [plugins-outputs-elasticsearch-resurrect_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts. Resurrection is the process by which backend endpoints marked *down* are checked to see if they have come back to life.
+
+
+### `retry_initial_interval` [plugins-outputs-elasticsearch-retry_initial_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+
+### `retry_max_interval` [plugins-outputs-elasticsearch-retry_max_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `64`
+
+Set max interval in seconds between bulk retries.
+
+
+### `retry_on_conflict` [plugins-outputs-elasticsearch-retry_on_conflict]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+
+
+### `routing` [plugins-outputs-elasticsearch-routing]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A routing override to be applied to all processed events. This can be dynamic using the `%{{foo}}` syntax.
+
+
+### `script` [plugins-outputs-elasticsearch-script]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Set script name for scripted update mode
+
+Example:
+
+```ruby
+ output {
+ elasticsearch {
+ script => "ctx._source.message = params.event.get('message')"
+ }
+ }
+```
+
+
+### `script_lang` [plugins-outputs-elasticsearch-script_lang]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"painless"`
+
+Set the language of the used script. When using indexed (stored) scripts on Elasticsearch 6.0 and higher, you must set this parameter to `""` (empty string).
+
+
+### `script_type` [plugins-outputs-elasticsearch-script_type]
+
+* Value can be any of: `inline`, `indexed`, `file`
+* Default value is `"inline"`
+
+Define the type of script referenced by the `script` setting:
+
+* `inline`: `script` contains an inline script.
+* `indexed`: `script` contains the name of a script directly indexed in Elasticsearch.
+* `file`: `script` contains the name of a script stored in Elasticsearch’s config directory.
+
+
+### `script_var_name` [plugins-outputs-elasticsearch-script_var_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+
+### `scripted_upsert` [plugins-outputs-elasticsearch-scripted_upsert]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
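+
+Putting the scripted-update options together, a sketch (script name and fields are hypothetical) that runs a stored script and lets it create missing documents:
+
+```ruby
+output {
+  elasticsearch {
+    hosts           => ["https://localhost:9200"]  # placeholder host
+    action          => "update"
+    document_id     => "%{[user][id]}"             # hypothetical event field
+    script          => "increment-visits"          # hypothetical stored (indexed) script name
+    script_type     => "indexed"
+    script_lang     => ""                          # empty string for stored scripts on Elasticsearch 6.0+
+    scripted_upsert => true
+  }
+}
+```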
+
+
+### `silence_errors_in_log` [plugins-outputs-elasticsearch-silence_errors_in_log]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Defines the list of Elasticsearch errors that you don’t want to log. A useful example is when you want to skip all 409 errors, which are `version_conflict_engine_exception`.
+
+```ruby
+ output {
+ elasticsearch {
+ silence_errors_in_log => ["version_conflict_engine_exception"]
+ }
+ }
+```
+
+::::{note}
+Deprecates [`failure_type_logging_whitelist`](#plugins-outputs-elasticsearch-failure_type_logging_whitelist).
+::::
+
+
+
+### `sniffing` [plugins-outputs-elasticsearch-sniffing]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. For Elasticsearch 5.x and 6.x any nodes with `http.enabled` (on by default) will be added to the hosts list, excluding master-only nodes.
+
+
+### `sniffing_delay` [plugins-outputs-elasticsearch-sniffing_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts
+
+
+### `sniffing_path` [plugins-outputs-elasticsearch-sniffing_path]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests. The default value is computed by concatenating the `path` value and `"_nodes/http"`. If `sniffing_path` is set, it will be used as an absolute path. Do not use a full URL here, only paths, e.g. `"/sniff/_nodes/http"`.
+
+
+### `ssl_certificate` [plugins-outputs-elasticsearch-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-outputs-elasticsearch-ssl_key) is set.
+::::
+
+
+
+### `ssl_certificate_authorities` [plugins-outputs-elasticsearch-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem files to validate the server’s certificate.
+
+::::{note}
+You cannot use this setting and [`ssl_truststore_path`](#plugins-outputs-elasticsearch-ssl_truststore_path) at the same time.
+::::
+
+
+
+### `ssl_cipher_suites` [plugins-outputs-elasticsearch-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-outputs-elasticsearch-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme is specified in the URLs listed in [`hosts`](#plugins-outputs-elasticsearch-hosts) or extracted from the [`cloud_id`](#plugins-outputs-elasticsearch-cloud_id). If no explicit protocol is specified plain HTTP will be used.
+
+
+### `ssl_key` [plugins-outputs-elasticsearch-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key to use. This key must be in the PKCS8 format and PEM encoded. You can use the [openssl pkcs8](https://www.openssl.org/docs/man1.1.1/man1/openssl-pkcs8.html) command to complete the conversion. For example, the command to convert a PEM encoded PKCS1 private key to a PEM encoded, non-encrypted PKCS8 key is:
+
+```sh
+openssl pkcs8 -inform PEM -in path/to/logstash.key -topk8 -nocrypt -outform PEM -out path/to/logstash.pkcs8.key
+```
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-outputs-elasticsearch-ssl_certificate) is set.
+::::
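+
+For example, a sketch (paths are placeholders) of mutual TLS with a PEM client certificate and a PKCS8 key:
+
+```ruby
+output {
+  elasticsearch {
+    hosts                       => ["https://localhost:9200"]             # placeholder host
+    ssl_enabled                 => true
+    ssl_certificate_authorities => ["/etc/logstash/certs/ca.pem"]         # placeholder CA path
+    ssl_certificate             => "/etc/logstash/certs/client.pem"       # placeholder certificate path
+    ssl_key                     => "/etc/logstash/certs/client.pkcs8.key" # placeholder key path
+  }
+}
+```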
+
+
+
+### `ssl_keystore_password` [plugins-outputs-elasticsearch-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password
+
+
+### `ssl_keystore_path` [plugins-outputs-elasticsearch-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`
+
+::::{note}
+You cannot use this setting and [`ssl_certificate`](#plugins-outputs-elasticsearch-ssl_certificate) at the same time.
+::::
+
+
+
+### `ssl_keystore_type` [plugins-outputs-elasticsearch-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-outputs-elasticsearch-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the Elasticsearch cluster.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-outputs-elasticsearch-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-outputs-elasticsearch-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+::::{note}
+You cannot use this setting and [`ssl_certificate_authorities`](#plugins-outputs-elasticsearch-ssl_certificate_authorities) at the same time.
+::::
+
+
+
+### `ssl_truststore_type` [plugins-outputs-elasticsearch-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-outputs-elasticsearch-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by another party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the `not_before` and `not_after` dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{warning}
+Setting certificate verification to `none` disables many security benefits of SSL/TLS, which is very dangerous. For more information on disabling certificate verification please read [https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf](https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf)
+::::
+
+
+
+### `template` [plugins-outputs-elasticsearch-template]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire. If not set, the included template will be used.
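+
+For example, a sketch (path and name are placeholders) that installs a custom template and keeps it up to date:
+
+```ruby
+output {
+  elasticsearch {
+    hosts              => ["https://localhost:9200"]            # placeholder host
+    template           => "/etc/logstash/templates/myapp.json"  # placeholder template file
+    template_name      => "myapp"                               # hypothetical template name
+    template_overwrite => true
+  }
+}
+```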
+
+
+### `template_api` [plugins-outputs-elasticsearch-template_api]
+
+* Value can be any of: `auto`, `legacy`, `composable`
+* Default value is `auto`
+
+The default setting of `auto` will use [index template API](docs-content://manage-data/data-store/templates.md) to create index template, if the Elasticsearch cluster is running Elasticsearch version `8.0.0` or higher, and use [legacy template API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-indices-put-template) otherwise.
+
+Setting this flag to `legacy` will use legacy template API to create index template. Setting this flag to `composable` will use index template API to create index template.
+
+::::{note}
+The format of template provided to [`template`](#plugins-outputs-elasticsearch-template) needs to match the template API being used.
+::::
+
+
+
+### `template_name` [plugins-outputs-elasticsearch-template_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value depends on whether [`ecs_compatibility`](#plugins-outputs-elasticsearch-ecs_compatibility) is enabled:
+
+ * ECS Compatibility disabled: `logstash`
+ * ECS Compatibility enabled: `ecs-logstash`
+
+
+This configuration option defines how the template is named inside Elasticsearch. Note that if you have used the template management features and subsequently change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+
+### `template_overwrite` [plugins-outputs-elasticsearch-template_overwrite]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template in Elasticsearch with either the one indicated by template or the included one. This option is set to false by default. If you always want to stay up to date with the template provided by Logstash, this option could be very useful to you. Likewise, if you have your own template file managed by puppet, for example, and you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash template (logstash), setting this to true will make Logstash overwrite the "logstash" template (i.e. removing all customized settings).
+
+
+### `timeout` [plugins-outputs-elasticsearch-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If a timeout occurs, the request will be retried.
+
+
+### `upsert` [plugins-outputs-elasticsearch-upsert]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Set upsert content for update mode. Create a new document with this parameter as a JSON string if `document_id` doesn’t exist.
+
+
+### `user` [plugins-outputs-elasticsearch-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster
+
+
+### `validate_after_inactivity` [plugins-outputs-elasticsearch-validate_after_inactivity]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+How long to wait before checking for a stale connection to determine if a keepalive request is needed. Consider setting this value lower than the default, possibly to 0, if you get connection errors regularly.
+
+This client is based on Apache Commons. Here’s how the [Apache Commons documentation](https://hc.apache.org/httpcomponents-client-4.5.x/current/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)) describes this option: "Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool."
+
+
+### `version` [plugins-outputs-elasticsearch-version]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{{my_version}}` to use a field value here. See the [versioning support blog](https://www.elastic.co/blog/elasticsearch-versioning-support) for more information.
+
+
+### `version_type` [plugins-outputs-elasticsearch-version_type]
+
+* Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+* There is no default value for this setting.
+
+The version_type to use for indexing. See the [versioning support blog](https://www.elastic.co/blog/elasticsearch-versioning-support) and [Version types](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-create) in the Elasticsearch documentation.
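+
+For example, a sketch (field names are hypothetical) that uses external versioning driven by event fields:
+
+```ruby
+output {
+  elasticsearch {
+    hosts        => ["https://localhost:9200"]  # placeholder host
+    document_id  => "%{[doc][id]}"              # hypothetical event field
+    version      => "%{[doc][version]}"         # hypothetical event field carrying the version number
+    version_type => "external"
+  }
+}
+```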
+
+
+
+## Elasticsearch Output Obsolete Configuration Options [plugins-outputs-elasticsearch-obsolete-options]
+
+::::{warning}
+As of version `12.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](#plugins-outputs-elasticsearch-ssl_certificate_authorities) |
+| keystore | [`ssl_keystore_path`](#plugins-outputs-elasticsearch-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-outputs-elasticsearch-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](#plugins-outputs-elasticsearch-ssl_enabled) |
+| ssl_certificate_verification | [`ssl_verification_mode`](#plugins-outputs-elasticsearch-ssl_verification_mode) |
+| truststore | [`ssl_truststore_path`](#plugins-outputs-elasticsearch-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](#plugins-outputs-elasticsearch-ssl_truststore_password) |
+
+
+## Common options [plugins-outputs-elasticsearch-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`enable_metric`](#plugins-outputs-elasticsearch-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-elasticsearch-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `enable_metric` [plugins-outputs-elasticsearch-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-elasticsearch-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 elasticsearch outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ elasticsearch {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-email.md b/docs/reference/plugins-outputs-email.md
new file mode 100644
index 000000000..60d7869da
--- /dev/null
+++ b/docs/reference/plugins-outputs-email.md
@@ -0,0 +1,293 @@
+---
+navigation_title: "email"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-email.html
+---
+
+# Email output plugin [plugins-outputs-email]
+
+
+* Plugin version: v4.1.3
+* Released on: 2023-10-03
+* [Changelog](https://github.com/logstash-plugins/logstash-output-email/blob/v4.1.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-email-index.md).
+
+## Getting help [_getting_help_75]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-email). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_75]
+
+Sends an email when an event is received by this output. Alternatively, you may include or exclude the email output execution using conditionals.
+
+
+## Usage Example [_usage_example]
+
+```ruby
+output {
+ if "shouldmail" in [tags] {
+ email {
+ to => 'technical@example.com'
+ from => 'monitor@example.com'
+ subject => 'Alert - %{title}'
+ body => "Tags: %{tags}\\n\\Content:\\n%{message}"
+ template_file => "/tmp/email_template.mustache"
+ domain => 'mail.example.com'
+ port => 25
+ }
+ }
+}
+```
+
+
+## Email Output Configuration Options [plugins-outputs-email-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-email-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`address`](#plugins-outputs-email-address) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`attachments`](#plugins-outputs-email-attachments) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`authentication`](#plugins-outputs-email-authentication) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`body`](#plugins-outputs-email-body) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`cc`](#plugins-outputs-email-cc) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`bcc`](#plugins-outputs-email-bcc) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`contenttype`](#plugins-outputs-email-contenttype) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`debug`](#plugins-outputs-email-debug) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`domain`](#plugins-outputs-email-domain) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`from`](#plugins-outputs-email-from) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`htmlbody`](#plugins-outputs-email-htmlbody) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-outputs-email-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-email-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`replyto`](#plugins-outputs-email-replyto) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`subject`](#plugins-outputs-email-subject) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`to`](#plugins-outputs-email-to) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`use_tls`](#plugins-outputs-email-use_tls) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`username`](#plugins-outputs-email-username) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`via`](#plugins-outputs-email-via) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`template_file`](#plugins-outputs-email-template_file) | [path](/reference/configuration-file-structure.md#path) | No |
+
+Also see [Common options](#plugins-outputs-email-common-options) for a list of options supported by all output plugins.
+
+
+
+### `address` [plugins-outputs-email-address]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address used to connect to the mail server
+
+
+### `attachments` [plugins-outputs-email-attachments]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Attachments - specify the name(s) and location(s) of the files.
+
+
+### `authentication` [plugins-outputs-email-authentication]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Authentication method used when identifying with the server
+
+
+### `body` [plugins-outputs-email-body]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Body for the email - plain text only.
+
+
+### `cc` [plugins-outputs-email-cc]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The fully-qualified email address(es) to include as cc: address(es).
+
+This field also accepts a comma-separated string of addresses, for example: `"me@example.com, you@example.com"`
+
+
+### `bcc` [plugins-outputs-email-bcc]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The fully-qualified email address(es) to include as bcc: address(es).
+
+This field accepts several addresses like cc.
+
+
+### `contenttype` [plugins-outputs-email-contenttype]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"text/html; charset=UTF-8"`
+
+For multipart messages, set the content-type and/or charset of the HTML part. NOTE: this option may not be functional (KH).
+
+
+### `debug` [plugins-outputs-email-debug]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Run the mail relay in debug mode
+
+
+### `domain` [plugins-outputs-email-domain]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The HELO/EHLO domain name used in the greeting message when connecting to a remote SMTP server. Some servers require this name to match the actual hostname of the connecting client.
+
+
+### `from` [plugins-outputs-email-from]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash.alert@example.com"`
+
+The fully-qualified email address for the From: field in the email.
+
+
+### `htmlbody` [plugins-outputs-email-htmlbody]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+HTML Body for the email, which may contain HTML markup.
+
+
+### `password` [plugins-outputs-email-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to authenticate with the server
+
+
+### `port` [plugins-outputs-email-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `25`
+
+Port used to communicate with the mail server
+
+
+### `replyto` [plugins-outputs-email-replyto]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The fully qualified email address for the Reply-To: field.
+
+
+### `subject` [plugins-outputs-email-subject]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Subject: for the email.
+
+
+### `to` [plugins-outputs-email-to]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The fully-qualified email address to send the email to.
+
+This field also accepts a comma-separated string of addresses, for example: `"me@example.com, you@example.com"`
+
+You can also use dynamic fields from the event with the `%{{fieldname}}` syntax.
+
+
+### `use_tls` [plugins-outputs-email-use_tls]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enables TLS when communicating with the server
+
+
+### `username` [plugins-outputs-email-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Username to authenticate with the server
+
+
+### `via` [plugins-outputs-email-via]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"smtp"`
+
+How Logstash should send the email, either via SMTP or by invoking sendmail.
+
+
+### `template_file` [plugins-outputs-email-template_file]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path of a [Mustache templating](https://mustache.github.io/) file used for email templating. See the example in the test fixture. Can be used with `body` to send multi-part emails. Takes precedence over `htmlbody`.
+
+
+
+## Common options [plugins-outputs-email-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-email-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-email-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-email-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-email-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-email-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-email-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 email outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ email {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-exec.md b/docs/reference/plugins-outputs-exec.md
new file mode 100644
index 000000000..b71004419
--- /dev/null
+++ b/docs/reference/plugins-outputs-exec.md
@@ -0,0 +1,129 @@
+---
+navigation_title: "exec"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-exec.html
+---
+
+# Exec output plugin [plugins-outputs-exec]
+
+
+* Plugin version: v3.1.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-exec-index.md).
+
+## Installation [_installation_26]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-exec`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_76]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-exec). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_76]
+
+The exec output will run a command for each event received. Ruby’s `system()` function will be used, i.e. the command string will be passed to a shell. You can use `%{{name}}` and other dynamic strings in the command to pass select fields from the event to the child process. Example:
+
+```ruby
+ output {
+ if [type] == "abuse" {
+ exec {
+ command => "iptables -A INPUT -s %{clientip} -j DROP"
+ }
+ }
+ }
+```
+
+::::{warning}
+If you want it non-blocking you should use `&` or `dtach` or other such techniques. There is no timeout for the commands being run so misbehaving commands could otherwise stall the Logstash pipeline indefinitely.
+::::
+
+
+::::{warning}
+Exercise great caution with `%{{name}}` field placeholders. The contents of the field will be included verbatim without any sanitization, i.e. any shell metacharacters from the field values will be passed straight to the shell.
+::::
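+
+For example, a minimal sketch of a non-blocking invocation (the script path is hypothetical, and no event fields are interpolated, in line with the warning above):
+
+```ruby
+output {
+  exec {
+    # trailing "&" lets the shell return immediately so the pipeline is not blocked
+    command => "/usr/local/bin/notify-oncall.sh &"
+  }
+}
+```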
+
+
+
+## Exec Output Configuration Options [plugins-outputs-exec-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-exec-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`command`](#plugins-outputs-exec-command) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`quiet`](#plugins-outputs-exec-quiet) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-exec-common-options) for a list of options supported by all output plugins.
+
+
+
+### `command` [plugins-outputs-exec-command]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Command line to execute via subprocess. Use `dtach` or `screen` to make it non blocking. This value can include `%{{name}}` and other dynamic strings.
+
+
+### `quiet` [plugins-outputs-exec-quiet]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Display the result of the command to the terminal.
+
+
+
+## Common options [plugins-outputs-exec-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-exec-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-exec-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-exec-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-exec-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-exec-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-exec-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 exec outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ exec {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-file.md b/docs/reference/plugins-outputs-file.md
new file mode 100644
index 000000000..d687f78b1
--- /dev/null
+++ b/docs/reference/plugins-outputs-file.md
@@ -0,0 +1,176 @@
+---
+navigation_title: "file"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-file.html
+---
+
+# File output plugin [plugins-outputs-file]
+
+
+* Plugin version: v4.3.0
+* Released on: 2020-04-27
+* [Changelog](https://github.com/logstash-plugins/logstash-output-file/blob/v4.3.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-file-index.md).
+
+## Getting help [_getting_help_77]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-file). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_77]
+
+This output writes events to files on disk. You can use fields from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format. You can customise the line format using the `line` codec like
+
+```ruby
+output {
+ file {
+ path => ...
+ codec => line { format => "custom format: %{message}"}
+ }
+}
+```
+
+
+## File Output Configuration Options [plugins-outputs-file-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-file-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`create_if_deleted`](#plugins-outputs-file-create_if_deleted) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`dir_mode`](#plugins-outputs-file-dir_mode) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`file_mode`](#plugins-outputs-file-file_mode) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`filename_failure`](#plugins-outputs-file-filename_failure) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`flush_interval`](#plugins-outputs-file-flush_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`gzip`](#plugins-outputs-file-gzip) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`path`](#plugins-outputs-file-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`stale_cleanup_interval`](#plugins-outputs-file-stale_cleanup_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`write_behavior`](#plugins-outputs-file-write_behavior) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-file-common-options) for a list of options supported by all output plugins.
+
+
+
+### `create_if_deleted` [plugins-outputs-file-create_if_deleted]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin, the plugin will recreate the file.
+
+
+### `dir_mode` [plugins-outputs-file-dir_mode]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+Directory access mode to use. Note that due to a bug in JRuby, the system umask is ignored on Linux ([https://github.com/jruby/jruby/issues/3426](https://github.com/jruby/jruby/issues/3426)). Setting it to `-1` uses the default OS value. Example: `"dir_mode" => 0750`
+
+
+### `file_mode` [plugins-outputs-file-file_mode]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `-1`
+
+File access mode to use. Note that due to a bug in JRuby, the system umask is ignored on Linux ([https://github.com/jruby/jruby/issues/3426](https://github.com/jruby/jruby/issues/3426)). Setting it to `-1` uses the default OS value. Example: `"file_mode" => 0640`
+
+
+### `filename_failure` [plugins-outputs-file-filename_failure]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved into this file and inside the defined path.
+
+
+### `flush_interval` [plugins-outputs-file-flush_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files. 0 will flush on every message.
+
+
+### `gzip` [plugins-outputs-file-gzip]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Gzip the output stream before writing to disk.
+
+
+### `path` [plugins-outputs-file-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The path to the file to write. Event fields can be used here, like `/var/log/logstash/%{{host}}/%{{application}}`. You can also use the path option for date-based log rotation via the Joda time format; this uses the event timestamp. For example, `path => "./test-%{+YYYY-MM-dd}.txt"` creates `./test-2013-05-29.txt`.
+
+If you use an absolute path, it cannot start with a dynamic string. For example, `/%{{myfield}}/` and `/test-%{{myfield}}/` are not valid paths.
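+
+A minimal sketch of date-based rotation combined with gzip compression (the directory layout is illustrative):
+
+```ruby
+output {
+  file {
+    # one file per host per day, rotated by the event timestamp; gzip compresses the stream
+    path => "/var/log/logstash/%{host}/events-%{+YYYY-MM-dd}.log"
+    gzip => true
+  }
+}
+```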
+
+
+### `stale_cleanup_interval` [plugins-outputs-file-stale_cleanup_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Defines the interval, in seconds, between stale file cleanup runs. The cleanup cycle closes inactive files (i.e., files that have not been written to since the last cycle).
+
+
+### `write_behavior` [plugins-outputs-file-write_behavior]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"append"`
+
+If `append`, the file will be opened for appending and each new event will be written at the end of the file. If `overwrite`, the file will be truncated before writing and only the most recent event will appear in the file.
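+
+A short sketch of `overwrite` mode, which keeps only the most recent event (useful for "latest state" style files; the path is illustrative):
+
+```ruby
+output {
+  file {
+    path           => "/var/run/logstash/last-event.json"
+    write_behavior => "overwrite"
+    # with the default json_lines codec the file holds the latest event as JSON
+  }
+}
+```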
+
+
+
+## Common options [plugins-outputs-file-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-file-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-file-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-file-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-file-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json_lines"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-file-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-file-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 file outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ file {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-ganglia.md b/docs/reference/plugins-outputs-ganglia.md
new file mode 100644
index 000000000..68b6f66bd
--- /dev/null
+++ b/docs/reference/plugins-outputs-ganglia.md
@@ -0,0 +1,182 @@
+---
+navigation_title: "ganglia"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-ganglia.html
+---
+
+# Ganglia output plugin [plugins-outputs-ganglia]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-ganglia-index.md).
+
+## Installation [_installation_27]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-ganglia`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_78]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-ganglia). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_78]
+
+This output allows you to pull metrics from your logs and ship them to ganglia’s gmond. This is heavily based on the graphite output.
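+
+For example, a minimal sketch that ships a response-time metric (the host, metric name, and event field are hypothetical):
+
+```ruby
+output {
+  ganglia {
+    host        => "gmond.example.com"
+    metric      => "apache_response_time"
+    value       => "%{response_time_ms}"
+    metric_type => "float"
+    units       => "ms"
+  }
+}
+```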
+
+
+## Ganglia Output Configuration Options [plugins-outputs-ganglia-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-ganglia-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`group`](#plugins-outputs-ganglia-group) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-ganglia-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`lifetime`](#plugins-outputs-ganglia-lifetime) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_interval`](#plugins-outputs-ganglia-max_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`metric`](#plugins-outputs-ganglia-metric) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`metric_type`](#plugins-outputs-ganglia-metric_type) | [string](/reference/configuration-file-structure.md#string), one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]` | No |
+| [`port`](#plugins-outputs-ganglia-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`slope`](#plugins-outputs-ganglia-slope) | [string](/reference/configuration-file-structure.md#string), one of `["zero", "positive", "negative", "both", "unspecified"]` | No |
+| [`units`](#plugins-outputs-ganglia-units) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`value`](#plugins-outputs-ganglia-value) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-ganglia-common-options) for a list of options supported by all output plugins.
+
+
+
+### `group` [plugins-outputs-ganglia-group]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Metric group
+
+
+### `host` [plugins-outputs-ganglia-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address of the ganglia server.
+
+
+### `lifetime` [plugins-outputs-ganglia-lifetime]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300`
+
+Lifetime in seconds of this metric
+
+
+### `max_interval` [plugins-outputs-ganglia-max_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Maximum time in seconds between gmetric calls for this metric.
+
+
+### `metric` [plugins-outputs-ganglia-metric]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The metric to use. This supports dynamic strings like `%{{host}}`
+
+
+### `metric_type` [plugins-outputs-ganglia-metric_type]
+
+* Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double`
+* Default value is `"uint8"`
+
+The type of value for this metric.
+
+
+### `port` [plugins-outputs-ganglia-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8649`
+
+The port to connect to on your ganglia server.
+
+
+### `slope` [plugins-outputs-ganglia-slope]
+
+* Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified`
+* Default value is `"both"`
+
+Metric slope, which represents the metric's behavior.
+
+
+### `units` [plugins-outputs-ganglia-units]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit this metric uses.
+
+
+### `value` [plugins-outputs-ganglia-value]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The value to use. This supports dynamic strings like `%{{bytes}}`. The value will be coerced to a floating point value. Values which cannot be coerced default to zero (0).
+
+
+
+## Common options [plugins-outputs-ganglia-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-ganglia-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-ganglia-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-ganglia-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-ganglia-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-ganglia-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-ganglia-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 ganglia outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ ganglia {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-gelf.md b/docs/reference/plugins-outputs-gelf.md
new file mode 100644
index 000000000..ab33c4248
--- /dev/null
+++ b/docs/reference/plugins-outputs-gelf.md
@@ -0,0 +1,205 @@
+---
+navigation_title: "gelf"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-gelf.html
+---
+
+# Gelf output plugin [plugins-outputs-gelf]
+
+
+* Plugin version: v3.1.7
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-gelf/blob/v3.1.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-gelf-index.md).
+
+## Installation [_installation_28]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-gelf`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_79]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-gelf). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_79]
+
+This output generates messages in GELF format. This is most useful if you want to use Logstash to output events to Graylog2.
+
+More information is available on [the Graylog2 GELF specification page](http://docs.graylog.org/en/2.3/pages/gelf.html#gelf-payload-specification).
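+
+For example, a minimal sketch that sends events to a Graylog2 server (the hostname is hypothetical):
+
+```ruby
+output {
+  gelf {
+    host  => "graylog.example.com"
+    port  => 12201
+    # take the level from the event's severity field, falling back to INFO
+    level => ["%{severity}", "INFO"]
+  }
+}
+```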
+
+
+## Gelf Output Configuration Options [plugins-outputs-gelf-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-gelf-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`chunksize`](#plugins-outputs-gelf-chunksize) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`custom_fields`](#plugins-outputs-gelf-custom_fields) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`full_message`](#plugins-outputs-gelf-full_message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-gelf-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`ignore_metadata`](#plugins-outputs-gelf-ignore_metadata) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`level`](#plugins-outputs-gelf-level) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`port`](#plugins-outputs-gelf-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`protocol`](#plugins-outputs-gelf-protocol) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sender`](#plugins-outputs-gelf-sender) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ship_metadata`](#plugins-outputs-gelf-ship_metadata) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ship_tags`](#plugins-outputs-gelf-ship_tags) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`short_message`](#plugins-outputs-gelf-short_message) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-gelf-common-options) for a list of options supported by all output plugins.
+
+
+
+### `chunksize` [plugins-outputs-gelf-chunksize]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1420`
+
+The chunksize. You usually don’t need to change this.
+
+
+### `custom_fields` [plugins-outputs-gelf-custom_fields]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The GELF custom field mappings. GELF supports arbitrary attributes as custom fields. This exposes that. Exclude the `_` portion of the field name e.g. `custom_fields => ['foo_field', 'some_value']` sets `_foo_field` = `some_value`.
+
+
+### `full_message` [plugins-outputs-gelf-full_message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{message}}"`
+
+The GELF full message. Dynamic values like `%{{foo}}` are permitted here.
+
+
+### `host` [plugins-outputs-gelf-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Graylog2 server IP address or hostname.
+
+
+### `ignore_metadata` [plugins-outputs-gelf-ignore_metadata]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["@timestamp", "@version", "severity", "host", "source_host", "source_path", "short_message"]`
+
+Ignore these fields when `ship_metadata` is set. Typically this lists the fields used in dynamic values for GELF fields.
+
+
+### `level` [plugins-outputs-gelf-level]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["%{{severity}}", "INFO"]`
+
+The GELF message level. Dynamic values like `%{{level}}` are permitted here; useful if you want to parse the *log level* from an event and use that as the GELF level/severity.
+
+Values here can be integers [0..7] inclusive or any of "debug", "info", "warn", "error", "fatal" (case insensitive). Single-character versions of these are also valid: "d", "i", "w", "e", "f", "u". The following additional `severity_labels` from Logstash’s `syslog_pri` filter are also accepted: "emergency", "alert", "critical", "warning", "notice", and "informational".
+
+
+### `port` [plugins-outputs-gelf-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `12201`
+
+Graylog2 server port number.
+
+
+### `protocol` [plugins-outputs-gelf-protocol]
+
+By default, this plugin outputs via the UDP transfer protocol, but can be configured to use TCP instead.
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"UDP"`
+
+Values here can be either "TCP" or "UDP".
+
+
+### `sender` [plugins-outputs-gelf-sender]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{host}}"`
+
+Allow overriding of the GELF `sender` field. This is useful if you want to use something other than the event’s source host as the "sender" of an event. A common case for this is using the application name instead of the hostname.
+
+
+### `ship_metadata` [plugins-outputs-gelf-ship_metadata]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should Logstash ship metadata within the event object? This will cause Logstash to ship any fields in the event (such as those created by grok) in the GELF messages. These will be sent as underscored "additional fields".
+
+
+### `ship_tags` [plugins-outputs-gelf-ship_tags]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Ship tags within events. This will cause Logstash to ship the tags of an event as the field `_tags`.
+
+
+### `short_message` [plugins-outputs-gelf-short_message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"short_message"`
+
+The GELF short message field name. If the field does not exist or is empty, the event message is taken instead.
+
+
+
+## Common options [plugins-outputs-gelf-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-gelf-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-gelf-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-gelf-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-gelf-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-gelf-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-gelf-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 gelf outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ gelf {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-google_bigquery.md b/docs/reference/plugins-outputs-google_bigquery.md
new file mode 100644
index 000000000..d03cbf6e7
--- /dev/null
+++ b/docs/reference/plugins-outputs-google_bigquery.md
@@ -0,0 +1,423 @@
+---
+navigation_title: "google_bigquery"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-google_bigquery.html
+---
+
+# Google BigQuery output plugin [plugins-outputs-google_bigquery]
+
+
+* Plugin version: v4.6.0
+* Released on: 2024-09-16
+* [Changelog](https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v4.6.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-google_bigquery-index.md).
+
+## Installation [_installation_29]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-google_bigquery`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_80]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-google_bigquery). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_80]
+
+### Summary [_summary_2]
+
+This Logstash plugin uploads events to Google BigQuery using the streaming API so data can become available to query nearly immediately.
+
+You can configure it to flush periodically, after N events or after a certain amount of data is ingested.
+
+
+### Environment Configuration [_environment_configuration]
+
+You must enable BigQuery on your Google Cloud account and create a dataset to hold the tables this plugin generates.
+
+You must also grant the service account this plugin uses access to the dataset.
+
+You can use [Logstash conditionals](/reference/event-dependent-configuration.md) and multiple configuration blocks to upload events with different structures.
+
+
+### Usage [_usage_2]
+
+This is an example of Logstash config:
+
+```ruby
+output {
+ google_bigquery {
+ project_id => "folkloric-guru-278" (required)
+ dataset => "logs" (required)
+ csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1>
+ json_key_file => "/path/to/key.json" (optional) <2>
+ error_directory => "/tmp/bigquery-errors" (required)
+ date_pattern => "%Y-%m-%dT%H:00" (optional)
+ flush_interval_secs => 30 (optional)
+ }
+}
+```
+
+1. Specify either a csv_schema or a json_schema.
+2. If the key is not used, then the plugin tries to find [Application Default Credentials](https://cloud.google.com/docs/authentication/production)
+
+
+
+### Considerations [_considerations]
+
+* There is a small fee to insert data into BigQuery using the streaming API.
+* This plugin buffers events in-memory, so make sure the flush configurations are appropriate for your use-case and consider using [Logstash Persistent Queues](/reference/persistent-queues.md).
+* Events will be flushed when [`batch_size`](#plugins-outputs-google_bigquery-batch_size), [`batch_size_bytes`](#plugins-outputs-google_bigquery-batch_size_bytes), or [`flush_interval_secs`](#plugins-outputs-google_bigquery-flush_interval_secs) is met, whichever comes first. If you notice a delay in your processing or low throughput, try adjusting those settings.
+
+
+### Additional Resources [_additional_resources]
+
+* [Application Default Credentials (ADC) Overview](https://cloud.google.com/docs/authentication/production)
+* [BigQuery Introduction](https://cloud.google.com/bigquery/)
+* [BigQuery Quotas and Limits](https://cloud.google.com/bigquery/quotas)
+* [BigQuery Schema Formats and Types](https://cloud.google.com/bigquery/docs/schemas)
+
+
+
+## Google BigQuery Output Configuration Options [plugins-outputs-google_bigquery-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-google_bigquery-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`batch_size`](#plugins-outputs-google_bigquery-batch_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`batch_size_bytes`](#plugins-outputs-google_bigquery-batch_size_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`csv_schema`](#plugins-outputs-google_bigquery-csv_schema) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`dataset`](#plugins-outputs-google_bigquery-dataset) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`date_pattern`](#plugins-outputs-google_bigquery-date_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`deleter_interval_secs`](#plugins-outputs-google_bigquery-deleter_interval_secs) | [number](/reference/configuration-file-structure.md#number) | *Deprecated* |
+| [`error_directory`](#plugins-outputs-google_bigquery-error_directory) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`flush_interval_secs`](#plugins-outputs-google_bigquery-flush_interval_secs) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ignore_unknown_values`](#plugins-outputs-google_bigquery-ignore_unknown_values) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`json_key_file`](#plugins-outputs-google_bigquery-json_key_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`json_schema`](#plugins-outputs-google_bigquery-json_schema) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`key_password`](#plugins-outputs-google_bigquery-key_password) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`project_id`](#plugins-outputs-google_bigquery-project_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`service_account`](#plugins-outputs-google_bigquery-service_account) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`skip_invalid_rows`](#plugins-outputs-google_bigquery-skip_invalid_rows) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`table_prefix`](#plugins-outputs-google_bigquery-table_prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`table_separator`](#plugins-outputs-google_bigquery-table_separator) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`temp_directory`](#plugins-outputs-google_bigquery-temp_directory) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`temp_file_prefix`](#plugins-outputs-google_bigquery-temp_file_prefix) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`uploader_interval_secs`](#plugins-outputs-google_bigquery-uploader_interval_secs) | [number](/reference/configuration-file-structure.md#number) | *Deprecated* |
+
+Also see [Common options](#plugins-outputs-google_bigquery-common-options) for a list of options supported by all output plugins.
+
+
+
+### `batch_size` [plugins-outputs-google_bigquery-batch_size]
+
+::::{note}
+Added in 4.0.0.
+::::
+
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `128`
+
+The maximum number of messages to upload at a single time. This number must be < 10,000. Batching can increase performance and throughput to a point, but at the cost of per-request latency. Too few rows per request and the overhead of each request can make ingestion inefficient. Too many rows per request and the throughput may drop. BigQuery recommends using about 500 rows per request, but experimentation with representative data (schema and data sizes) will help you determine the ideal batch size.
+
+
+### `batch_size_bytes` [plugins-outputs-google_bigquery-batch_size_bytes]
+
+::::{note}
+Added in 4.0.0.
+::::
+
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1_000_000`
+
+An approximate number of bytes to upload as part of a batch. This number should be < 10MB or inserts may fail.
+
+
+### `csv_schema` [plugins-outputs-google_bigquery-csv_schema]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Schema for log data. It must follow the format `name1:type1(,name2:type2)*`. For example, `path:STRING,status:INTEGER,score:FLOAT`.
+
+
+### `dataset` [plugins-outputs-google_bigquery-dataset]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The BigQuery dataset the tables for the events will be added to.
+
+
+### `date_pattern` [plugins-outputs-google_bigquery-date_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%Y-%m-%dT%H:00"`
+
+Time pattern for the BigQuery table, defaulting to hourly tables. Must follow `Time.strftime` patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
+
+
+### `deleter_interval_secs` [plugins-outputs-google_bigquery-deleter_interval_secs]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Events are uploaded in real-time without being stored to disk.
+::::
+
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+
+
+### `error_directory` [plugins-outputs-google_bigquery-error_directory]
+
+::::{note}
+Added in 4.0.0.
+::::
+
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/tmp/bigquery"`.
+
+The location to store events that could not be uploaded due to errors. By default, if *any* message in an insert is invalid, the entire insert will fail. You can use [`skip_invalid_rows`](#plugins-outputs-google_bigquery-skip_invalid_rows) to allow partial inserts.
+
+Consider using an additional Logstash input to pipe the contents of these to an alert platform so you can manually fix the events.
+
+Or use [GCS FUSE](https://cloud.google.com/storage/docs/gcs-fuse) to transparently upload to a GCS bucket.
+
+File names follow the pattern `[table name]-[UNIX timestamp].log`.
+
+
+### `flush_interval_secs` [plugins-outputs-google_bigquery-flush_interval_secs]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Uploads all data this often even if other upload criteria aren’t met.
+
+
+### `ignore_unknown_values` [plugins-outputs-google_bigquery-ignore_unknown_values]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Indicates if BigQuery should ignore values that are not represented in the table schema. If true, the extra values are discarded. If false, BigQuery will reject the records with extra fields and the job will fail. The default value is false.
+
+::::{note}
+You may want to add a Logstash filter like the following to remove the common fields Logstash adds:
+::::
+
+
+```ruby
+mutate {
+ remove_field => ["@version","@timestamp","path","host","type", "message"]
+}
+```
+
+
+### `json_key_file` [plugins-outputs-google_bigquery-json_key_file]
+
+::::{admonition} Added in 4.0.0.
+:class: note
+
+Replaces [`key_password`](#plugins-outputs-google_bigquery-key_password) and [`service_account`](#plugins-outputs-google_bigquery-service_account).
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+If Logstash is running within Google Compute Engine, the plugin can use GCE’s Application Default Credentials. Outside of GCE, you will need to specify a Service Account JSON key file.
+
+
+### `json_schema` [plugins-outputs-google_bigquery-json_schema]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `nil`
+
+Schema for log data as a hash. These can include nested records, descriptions, and modes.
+
+Example:
+
+```ruby
+json_schema => {
+ fields => [{
+ name => "endpoint"
+ type => "STRING"
+ description => "Request route"
+ }, {
+ name => "status"
+ type => "INTEGER"
+ mode => "NULLABLE"
+ }, {
+ name => "params"
+ type => "RECORD"
+ mode => "REPEATED"
+ fields => [{
+ name => "key"
+ type => "STRING"
+ }, {
+ name => "value"
+ type => "STRING"
+ }]
+ }]
+}
+```
+
+
+### `key_password` [plugins-outputs-google_bigquery-key_password]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Replaced by `json_key_file` or by using ADC. See [`json_key_file`](#plugins-outputs-google_bigquery-json_key_file)
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+
+### `project_id` [plugins-outputs-google_bigquery-project_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Google Cloud Project ID (number, not Project Name!).
+
+
+### `service_account` [plugins-outputs-google_bigquery-service_account]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Replaced by `json_key_file` or by using ADC. See [`json_key_file`](#plugins-outputs-google_bigquery-json_key_file)
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+
+### `skip_invalid_rows` [plugins-outputs-google_bigquery-skip_invalid_rows]
+
+::::{note}
+Added in 4.1.0.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Insert all valid rows of a request, even if invalid rows exist. The default value is false, which causes the entire request to fail if any invalid rows exist.
+
+
+### `table_prefix` [plugins-outputs-google_bigquery-table_prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+BigQuery table ID prefix to be used when creating new tables for log data. The table name is built as `<table_prefix><table_separator><date>`.
+
+
+### `table_separator` [plugins-outputs-google_bigquery-table_separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"_"`
+
+BigQuery table separator to be added between the table_prefix and the date suffix.
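+
+For example, a hedged sketch of how the prefix, separator, and date pattern combine (the project, dataset, and schema values are illustrative):
+
+```ruby
+output {
+  google_bigquery {
+    project_id      => "my-project-id"
+    dataset         => "logs"
+    csv_schema      => "message:STRING"
+    error_directory => "/tmp/bigquery-errors"
+    table_prefix    => "logstash"
+    table_separator => "_"
+    date_pattern    => "%Y-%m-%d"
+    # daily tables named from the prefix, separator, and formatted date
+  }
+}
+```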
+
+
+### `temp_directory` [plugins-outputs-google_bigquery-temp_directory]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Events are uploaded in real-time without being stored to disk.
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+
+### `temp_file_prefix` [plugins-outputs-google_bigquery-temp_file_prefix]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+Events are uploaded in real-time without being stored to disk
+::::
+
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+
+### `uploader_interval_secs` [plugins-outputs-google_bigquery-uploader_interval_secs]
+
+::::{admonition} Deprecated in 4.0.0.
+:class: warning
+
+This field is no longer used
+::::
+
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Uploader interval when uploading new files to BigQuery. Adjust time based on your time pattern (for example, for hourly files, this interval can be around one hour).
+
+
+
+## Common options [plugins-outputs-google_bigquery-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-google_bigquery-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-google_bigquery-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-google_bigquery-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-google_bigquery-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-google_bigquery-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-google_bigquery-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 google_bigquery outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ google_bigquery {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-google_cloud_storage.md b/docs/reference/plugins-outputs-google_cloud_storage.md
new file mode 100644
index 000000000..c393ea1e2
--- /dev/null
+++ b/docs/reference/plugins-outputs-google_cloud_storage.md
@@ -0,0 +1,307 @@
+---
+navigation_title: "google_cloud_storage"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-google_cloud_storage.html
+---
+
+# Google Cloud Storage output plugin [plugins-outputs-google_cloud_storage]
+
+
+* Plugin version: v4.5.0
+* Released on: 2024-09-16
+* [Changelog](https://github.com/logstash-plugins/logstash-output-google_cloud_storage/blob/v4.5.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-google_cloud_storage-index.md).
+
+## Installation [_installation_30]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-google_cloud_storage`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_81]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-google_cloud_storage). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_81]
+
+A plugin to upload log events to Google Cloud Storage (GCS), rolling files based on the date pattern provided as a configuration setting. Events are written to files locally and, once a file is closed, this plugin uploads it to the configured bucket.
+
+For more info on Google Cloud Storage, please go to: [https://cloud.google.com/products/cloud-storage](https://cloud.google.com/products/cloud-storage)
+
+In order to use this plugin, a Google service account must be used. For more information, please refer to: [https://developers.google.com/storage/docs/authentication#service_accounts](https://developers.google.com/storage/docs/authentication#service_accounts)
+
+Recommendation: experiment with the settings depending on how much log data you generate, so the uploader can keep up with the generated logs. Using gzip output can be a good option to reduce network traffic when uploading the log files, and it also reduces storage costs.
+
+
+## Usage [_usage_3]
+
+This is an example of logstash config:
+
+```json
+output {
+ google_cloud_storage {
+ bucket => "my_bucket" (required)
+ json_key_file => "/path/to/privatekey.json" (optional)
+ temp_directory => "/tmp/logstash-gcs" (optional)
+ log_file_prefix => "logstash_gcs" (optional)
+ max_file_size_kbytes => 1024 (optional)
+ output_format => "plain" (optional)
+ date_pattern => "%Y-%m-%dT%H:00" (optional)
+ flush_interval_secs => 2 (optional)
+ gzip => false (optional)
+ gzip_content_encoding => false (optional)
+ uploader_interval_secs => 60 (optional)
+ include_uuid => true (optional)
+ include_hostname => true (optional)
+ }
+}
+```
+
+### Additional Resources [_additional_resources_2]
+
+* [Application Default Credentials (ADC) Overview](https://cloud.google.com/docs/authentication/production)
+* [Cloud Storage Introduction](https://cloud.google.com/storage/)
+* [Pricing Information](https://cloud.google.com/storage/pricing)
+
+
+
+## Improvements TODO List [_improvements_todo_list]
+
+* Support logstash event variables to determine filename.
+* Turn Google API code into a Plugin Mixin (like AwsConfig).
+* There’s no recovery mechanism, so if Logstash or the plugin crashes, files may not be uploaded to GCS.
+* Allow user to configure file name.
+* Allow parallel uploads for heavier loads (+ connection configuration if exposed by Ruby API client)
+
+
+## Google_cloud_storage Output Configuration Options [plugins-outputs-google_cloud_storage-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-google_cloud_storage-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`bucket`](#plugins-outputs-google_cloud_storage-bucket) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`date_pattern`](#plugins-outputs-google_cloud_storage-date_pattern) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`flush_interval_secs`](#plugins-outputs-google_cloud_storage-flush_interval_secs) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`gzip`](#plugins-outputs-google_cloud_storage-gzip) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`gzip_content_encoding`](#plugins-outputs-google_cloud_storage-gzip_content_encoding) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_hostname`](#plugins-outputs-google_cloud_storage-include_hostname) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`include_uuid`](#plugins-outputs-google_cloud_storage-include_uuid) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`json_key_file`](#plugins-outputs-google_cloud_storage-json_key_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key_password`](#plugins-outputs-google_cloud_storage-key_password) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`log_file_prefix`](#plugins-outputs-google_cloud_storage-log_file_prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`max_concurrent_uploads`](#plugins-outputs-google_cloud_storage-max_concurrent_uploads) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_file_size_kbytes`](#plugins-outputs-google_cloud_storage-max_file_size_kbytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`output_format`](#plugins-outputs-google_cloud_storage-output_format) | [string](/reference/configuration-file-structure.md#string), one of `["json", "plain", nil]` | *Deprecated* |
+| [`service_account`](#plugins-outputs-google_cloud_storage-service_account) | [string](/reference/configuration-file-structure.md#string) | *Deprecated* |
+| [`temp_directory`](#plugins-outputs-google_cloud_storage-temp_directory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`uploader_interval_secs`](#plugins-outputs-google_cloud_storage-uploader_interval_secs) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-google_cloud_storage-common-options) for a list of options supported by all output plugins.
+
+
+
+### `bucket` [plugins-outputs-google_cloud_storage-bucket]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+GCS bucket name, without "gs://" or any other prefix.
+
+
+### `date_pattern` [plugins-outputs-google_cloud_storage-date_pattern]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%Y-%m-%dT%H:00"`
+
+Time pattern for the log file, defaulting to hourly files. Must follow `Time.strftime` patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
+
+
+### `flush_interval_secs` [plugins-outputs-google_cloud_storage-flush_interval_secs]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Flush interval in seconds for flushing writes to log files. 0 will flush on every message.
+
+
+### `gzip` [plugins-outputs-google_cloud_storage-gzip]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Gzip output stream when writing events to log files, set `Content-Type` to `application/gzip` instead of `text/plain`, and use file suffix `.log.gz` instead of `.log`.
+
+
+### `gzip_content_encoding` [plugins-outputs-google_cloud_storage-gzip_content_encoding]
+
+::::{note}
+Added in 3.3.0.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Gzip output stream when writing events to log files and set `Content-Encoding` to `gzip`. This will upload your files as `gzip` saving network and storage costs, but they will be transparently decompressed when you read them from the storage bucket.
+
+See the Cloud Storage documentation on [metadata](https://cloud.google.com/storage/docs/metadata#content-encoding) and [transcoding](https://cloud.google.com/storage/docs/transcoding#content-type_vs_content-encoding) for more information.
+
+**Note**: Using both `gzip_content_encoding` and `gzip` is not recommended. Doing so compresses your files *twice*, increases the work your machine does, and makes the files larger than compressing once.
+
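+As a sketch of the recommendation above, the following configuration enables transparent compression via `gzip_content_encoding` while leaving `gzip` at its default of `false`, so files are compressed only once (the bucket name is a placeholder):
+
+```ruby
+output {
+  google_cloud_storage {
+    bucket => "my-logs-bucket"
+    gzip_content_encoding => true   # upload gzip-encoded objects, decompressed transparently on read
+    gzip => false                   # keep disabled to avoid compressing twice
+  }
+}
+```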
+
+### `include_hostname` [plugins-outputs-google_cloud_storage-include_hostname]
+
+::::{note}
+Added in 3.1.0.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should the hostname be included in the file name? You may want to turn this off for privacy reasons, or if you are running multiple instances of Logstash and need to match the files you create with a simple glob, such as when importing files to BigQuery.
+
+
+### `include_uuid` [plugins-outputs-google_cloud_storage-include_uuid]
+
+::::{note}
+Added in 3.1.0.
+::::
+
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Adds a UUID to the end of a file name. You may want to enable this feature so files don’t clobber one another if you’re running multiple instances of Logstash or if you expect frequent node restarts.
+
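+For example, if several Logstash nodes write files that will later be imported into BigQuery with a simple glob, one plausible combination (the bucket name is a placeholder) is to drop the hostname and add a UUID instead:
+
+```ruby
+output {
+  google_cloud_storage {
+    bucket => "my-logs-bucket"
+    include_hostname => false   # keep file names glob-friendly
+    include_uuid => true        # avoid clobbering between nodes and across restarts
+  }
+}
+```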
+
+### `json_key_file` [plugins-outputs-google_cloud_storage-json_key_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+The plugin can use [Application Default Credentials (ADC)](https://cloud.google.com/docs/authentication/production), if it’s running on Compute Engine, Kubernetes Engine, App Engine, or Cloud Functions.
+
+Outside of Google Cloud, you will need to create a Service Account JSON key file through the web interface or with the following command: `gcloud iam service-accounts keys create key.json --iam-account my-sa-123@my-project-123.iam.gserviceaccount.com`
+
+
+### `key_password` [plugins-outputs-google_cloud_storage-key_password]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"notasecret"`
+
+**Deprecated**: this feature is no longer used; the setting is now part of [`json_key_file`](#plugins-outputs-google_cloud_storage-json_key_file).
+
+
+### `log_file_prefix` [plugins-outputs-google_cloud_storage-log_file_prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash_gcs"`
+
+Log file prefix. Log files follow the format `<prefix>_hostname_date<.part?>.log`.
+
+
+### `max_concurrent_uploads` [plugins-outputs-google_cloud_storage-max_concurrent_uploads]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Sets the maximum number of concurrent uploads to Cloud Storage at a time. Uploads are I/O bound, so it makes sense to tune this parameter with regard to the available network bandwidth and the latency between your server and Cloud Storage.
+
+
+### `max_file_size_kbytes` [plugins-outputs-google_cloud_storage-max_file_size_kbytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000`
+
+Sets the maximum file size in kilobytes. A value of `0` disables the max file size check.
+
+
+### `output_format` [plugins-outputs-google_cloud_storage-output_format]
+
+* Value can be any of: `json`, `plain`, or no value
+* Default value is no value
+
+**Deprecated**, this feature will be removed in the next major release. Use codecs instead.
+
+* If you are using the `json` value today, switch to the `json_lines` codec.
+* If you are using the `plain` value today, switch to the `line` codec.
+
+The event format you want to store in files. Defaults to plain text.
+
+Note: if you want to use a codec, you must not set this value.
+
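+A sketch of the suggested migration, assuming you previously set `output_format => "json"` (the bucket name is a placeholder):
+
+```ruby
+output {
+  google_cloud_storage {
+    bucket => "my-logs-bucket"
+    codec => json_lines   # replaces output_format => "json"; use `line` to replace "plain"
+  }
+}
+```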
+
+### `service_account` [plugins-outputs-google_cloud_storage-service_account]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+**Deprecated**: this feature is no longer used; the setting is now part of [`json_key_file`](#plugins-outputs-google_cloud_storage-json_key_file).
+
+
+### `temp_directory` [plugins-outputs-google_cloud_storage-temp_directory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Directory where temporary files are stored. Defaults to /tmp/logstash-gcs-
+
+
+### `uploader_interval_secs` [plugins-outputs-google_cloud_storage-uploader_interval_secs]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Uploader interval when uploading new files to GCS. Adjust time based on your time pattern (for example, for hourly files, this interval can be around one hour).
+
+
+
+## Common options [plugins-outputs-google_cloud_storage-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-google_cloud_storage-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-google_cloud_storage-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-google_cloud_storage-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-google_cloud_storage-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-google_cloud_storage-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-google_cloud_storage-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 google_cloud_storage outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ google_cloud_storage {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-google_pubsub.md b/docs/reference/plugins-outputs-google_pubsub.md
new file mode 100644
index 000000000..330528727
--- /dev/null
+++ b/docs/reference/plugins-outputs-google_pubsub.md
@@ -0,0 +1,253 @@
+---
+navigation_title: "google_pubsub"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-google_pubsub.html
+---
+
+# Google Cloud Pub/Sub Output Plugin [plugins-outputs-google_pubsub]
+
+
+* Plugin version: v1.2.0
+* Released on: 2023-08-22
+* [Changelog](https://github.com/logstash-plugins/logstash-output-google_pubsub/blob/v1.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-google_pubsub-index.md).
+
+## Installation [_installation_31]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-google_pubsub`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_82]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-google_pubsub). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_82]
+
+A Logstash plugin to upload log events to [Google Cloud Pubsub](https://cloud.google.com/pubsub/). Events are batched and uploaded in the background for the sake of efficiency. Message payloads are serialized JSON representations of the events.
+
+Example use-cases:
+
+* Stream events to Dataproc via Pub/Sub for real-time analysis.
+* Forward events from an on-prem datacenter to Logstash running in the cloud.
+* Use Pub/Sub as a scalable buffer to even out event flow between processing steps.
+
+Note: While this project is partially maintained by Google, this is not an official Google product.
+
+
+## Environment Configuration [_environment_configuration_2]
+
+To use this plugin, you must create a [service account](https://developers.google.com/storage/docs/authentication#service_accounts) and grant it the publish permission on a topic. You MAY also use the [Application Default Credentials](https://cloud.google.com/docs/authentication/production) assigned to a compute instance.
+
+The Pub/Sub topic *must* exist before you run the plugin.
+
+
+## Example Configurations [_example_configurations]
+
+### Basic [_basic_2]
+
+A basic configuration which only includes a project, topic, and JSON key file:
+
+```ruby
+output {
+ google_pubsub {
+ # Required attributes
+ project_id => "my_project"
+ topic => "my_topic"
+
+ # Optional if you're using app default credentials
+ json_key_file => "service_account_key.json"
+ }
+}
+```
+
+
+### High Volume [_high_volume]
+
+If you find that uploads are going too slowly, you can increase the message batching:
+
+```ruby
+output {
+ google_pubsub {
+ project_id => "my_project"
+ topic => "my_topic"
+ json_key_file => "service_account_key.json"
+
+ # Options for configuring the upload
+ message_count_threshold => 1000
+ delay_threshold_secs => 10
+ request_byte_threshold => 5000000
+ }
+}
+```
+
+
+### Attributes [_attributes]
+
+You can attach additional attributes to each request. For example, you could attach a datacenter label to a log message to help with debugging:
+
+```ruby
+output {
+ google_pubsub {
+ project_id => "my_project"
+ topic => "my_topic"
+ json_key_file => "service_account_key.json"
+
+
+ attributes => {"origin" => "pacific-datacenter"}
+ }
+}
+```
+
+
+### Different Codecs [_different_codecs]
+
+You can use codecs with this plugin to change the body of the events:
+
+```ruby
+output {
+ google_pubsub {
+ project_id => "my_project"
+ topic => "my_topic"
+ json_key_file => "service_account_key.json"
+
+
+ codec => plain {format => "%{[time]}: %{[message]}"}
+ }
+}
+```
+
+
+
+## Additional Resources [_additional_resources_3]
+
+* [Cloud Pub/Sub Homepage](https://cloud.google.com/pubsub/)
+* [Cloud Pub/Sub Pricing](https://cloud.google.com/pubsub/pricing/)
+* [IAM Service Accounts](https://cloud.google.com/iam/docs/service-accounts)
+* [Application Default Credentials](https://cloud.google.com/docs/authentication/production)
+
+
+## Google Cloud Pub/Sub Output Configuration Options [plugins-outputs-google_pubsub-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-google_pubsub-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`project_id`](#plugins-outputs-google_pubsub-project_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`topic`](#plugins-outputs-google_pubsub-topic) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`json_key_file`](#plugins-outputs-google_pubsub-json_key_file) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`delay_threshold_secs`](#plugins-outputs-google_pubsub-delay_threshold_secs) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`message_count_threshold`](#plugins-outputs-google_pubsub-message_count_threshold) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`request_byte_threshold`](#plugins-outputs-google_pubsub-request_byte_threshold) | [bytes](/reference/configuration-file-structure.md#bytes) | No |
+| [`attributes`](#plugins-outputs-google_pubsub-attributes) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-outputs-google_pubsub-common-options) for a list of options supported by all output plugins.
+
+### `project_id` [plugins-outputs-google_pubsub-project_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Google Cloud Project ID (name, not number).
+
+
+### `topic` [plugins-outputs-google_pubsub-topic]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Google Cloud Pub/Sub Topic. You must create the topic manually before running this plugin.
+
+
+### `json_key_file` [plugins-outputs-google_pubsub-json_key_file]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path to the JSON key file used to authenticate to Pub/Sub. This service user *must* have the `pubsub.topics.publish` permission so it can publish to the topic.
+
+If Logstash is running within Google Compute Engine and no `json_key_file` is defined, the plugin will use GCE’s Application Default Credentials. Outside of GCE, you must specify a Service Account JSON key file.
+
+
+### `delay_threshold_secs` [plugins-outputs-google_pubsub-delay_threshold_secs]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default is: `5`
+
+Send the batch once this delay has passed, from the time the first message is queued. Must be greater than 0.
+
+
+### `message_count_threshold` [plugins-outputs-google_pubsub-message_count_threshold]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default is: `100`
+
+Once this many messages are queued, send all the messages in a single call, even if the delay threshold hasn’t elapsed yet. Must be < 1000. A value of 0 causes messages to be sent instantly, but reduces total throughput due to per-request overhead.
+
+
+### `request_byte_threshold` [plugins-outputs-google_pubsub-request_byte_threshold]
+
+* Value type is [bytes](/reference/configuration-file-structure.md#bytes)
+* Default is: `1000000`
+
+Once the number of bytes in the batched request reaches this threshold, send all of the messages in a single call, even if neither the delay nor the message count threshold has been exceeded yet. This threshold counts the full message payload size, including any attributes set.
+
+
+### `attributes` [plugins-outputs-google_pubsub-attributes]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default is: `{}`
+
+Attributes to add to the message, as key/value pairs. Keys and values MUST be strings.
+
+
+
+## Common options [plugins-outputs-google_pubsub-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-google_pubsub-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-google_pubsub-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-google_pubsub-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-google_pubsub-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-google_pubsub-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-google_pubsub-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 google_pubsub outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ google_pubsub {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-graphite.md b/docs/reference/plugins-outputs-graphite.md
new file mode 100644
index 000000000..148251326
--- /dev/null
+++ b/docs/reference/plugins-outputs-graphite.md
@@ -0,0 +1,203 @@
+---
+navigation_title: "graphite"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-graphite.html
+---
+
+# Graphite output plugin [plugins-outputs-graphite]
+
+
+* Plugin version: v3.1.6
+* Released on: 2018-07-11
+* [Changelog](https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-graphite-index.md).
+
+## Getting help [_getting_help_83]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-graphite). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_83]
+
+This output allows you to pull metrics from your logs and ship them to Graphite. Graphite is an open source tool for storing and graphing metrics.
+
+An example use case: Some applications emit aggregated stats in the logs every 10 seconds. Using the grok filter and this output, it is possible to capture the metric values from the logs and emit them to Graphite.
+
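+As a sketch of that use case, assuming the application logs contain a snippet such as `uptime=42.7` and that the `host` and `uptime_1m` field names are placeholders:
+
+```ruby
+filter {
+  grok {
+    # extract a numeric metric value from the log line
+    match => { "message" => "uptime=%{NUMBER:uptime_1m}" }
+  }
+}
+output {
+  graphite {
+    host    => "localhost"
+    port    => 2003
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+  }
+}
+```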
+
+## Graphite Output Configuration Options [plugins-outputs-graphite-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-graphite-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`exclude_metrics`](#plugins-outputs-graphite-exclude_metrics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`fields_are_metrics`](#plugins-outputs-graphite-fields_are_metrics) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`host`](#plugins-outputs-graphite-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`include_metrics`](#plugins-outputs-graphite-include_metrics) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`metrics`](#plugins-outputs-graphite-metrics) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`metrics_format`](#plugins-outputs-graphite-metrics_format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nested_object_separator`](#plugins-outputs-graphite-nested_object_separator) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-outputs-graphite-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect_interval`](#plugins-outputs-graphite-reconnect_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`resend_on_failure`](#plugins-outputs-graphite-resend_on_failure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`timestamp_field`](#plugins-outputs-graphite-timestamp_field) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-graphite-common-options) for a list of options supported by all output plugins.
+
+
+
+### `exclude_metrics` [plugins-outputs-graphite-exclude_metrics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["%{[^}]+}"]`
+
+Exclude metric names matching these regular expressions. By default, unresolved `%{{field}}` strings are excluded.
+
+
+### `fields_are_metrics` [plugins-outputs-graphite-fields_are_metrics]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When set to `true`, event fields are treated as metrics and sent verbatim to Graphite. You may use either `fields_are_metrics` or `metrics`, but not both.
+
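+A sketch, assuming the events carry numeric fields such as `uptime_1m` and `load_5m` (placeholder names) that should be shipped as-is:
+
+```ruby
+output {
+  graphite {
+    host => "localhost"
+    fields_are_metrics => true
+    include_metrics => ["^uptime_", "^load_"]   # only ship matching field names
+  }
+}
+```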
+
+### `host` [plugins-outputs-graphite-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The hostname or IP address of the Graphite server.
+
+
+### `include_metrics` [plugins-outputs-graphite-include_metrics]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[".*"]`
+
+Include only regex matched metric names.
+
+
+### `metrics` [plugins-outputs-graphite-metrics]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key being the metric name, value being the metric value. Example:
+
+```ruby
+ metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+```
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`, but not both.
+
+
+### `metrics_format` [plugins-outputs-graphite-metrics_format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be replaced with the name of the actual metric.
+
+```ruby
+ metrics_format => "foo.bar.*.sum"
+```
+
+::::{note}
+If no metrics_format is defined, the name of the metric will be used as fallback.
+::::
+
+
+
+### `nested_object_separator` [plugins-outputs-graphite-nested_object_separator]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"."`
+
+When hashes are passed in as values, they are broken out into dotted notation. For instance, if you configure this plugin with `metrics => "mymetrics"`, and `mymetrics` is a nested hash of `{a => 1, b => { c => 2 }}`, this plugin will generate two metrics: `a => 1` and `b.c => 2`. If you’ve specified a `metrics_format`, it will be respected, but you may still want control over the separator within these nested key names. This setting changes the separator from the default `.`.
+
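+A sketch of changing the separator, assuming `mymetrics` is an event field holding a nested hash as described above:
+
+```ruby
+output {
+  graphite {
+    metrics => "mymetrics"
+    nested_object_separator => "_"   # emits "b_c" instead of "b.c"
+  }
+}
+```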
+
+### `port` [plugins-outputs-graphite-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2003`
+
+The port to connect to on the Graphite server.
+
+
+### `reconnect_interval` [plugins-outputs-graphite-reconnect_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Interval between reconnect attempts to Carbon.
+
+
+### `resend_on_failure` [plugins-outputs-graphite-resend_on_failure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Should metrics be resent on failure?
+
+
+### `timestamp_field` [plugins-outputs-graphite-timestamp_field]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"@timestamp"`
+
+Use this field for the timestamp instead of the default `@timestamp`. Useful when backfilling, or for getting more accurate data into Graphite, since you probably have a cache layer in front of Logstash.
+
+
+
+## Common options [plugins-outputs-graphite-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-graphite-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-graphite-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-graphite-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-graphite-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-graphite-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-graphite-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 graphite outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ graphite {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-graphtastic.md b/docs/reference/plugins-outputs-graphtastic.md
new file mode 100644
index 000000000..a57cf0c0d
--- /dev/null
+++ b/docs/reference/plugins-outputs-graphtastic.md
@@ -0,0 +1,179 @@
+---
+navigation_title: "graphtastic"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-graphtastic.html
+---
+
+# Graphtastic output plugin [plugins-outputs-graphtastic]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-graphtastic-index.md).
+
+## Installation [_installation_32]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-graphtastic`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_84]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-graphtastic). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_84]
+
+A plugin for GraphTastic, a newly developed Java/Spring metrics application. I didn’t really want to code this project, but I couldn’t find a respectable alternative that would also run on any Windows machine, which is why I am not going with Graphite and statsd. This application provides multiple integration options to make it usable under your network requirements, including a REST option that is always enabled in case you want to write a small script to send the occasional metric data.
+
+Find GraphTastic here : [https://github.com/NickPadilla/GraphTastic](https://github.com/NickPadilla/GraphTastic)
+
+
+## Graphtastic Output Configuration Options [plugins-outputs-graphtastic-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-graphtastic-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`batch_number`](#plugins-outputs-graphtastic-batch_number) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`context`](#plugins-outputs-graphtastic-context) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`error_file`](#plugins-outputs-graphtastic-error_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-graphtastic-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`integration`](#plugins-outputs-graphtastic-integration) | [string](/reference/configuration-file-structure.md#string), one of `["udp", "tcp", "rmi", "rest"]` | No |
+| [`metrics`](#plugins-outputs-graphtastic-metrics) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`port`](#plugins-outputs-graphtastic-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retries`](#plugins-outputs-graphtastic-retries) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-graphtastic-common-options) for a list of options supported by all output plugins.
+
+
+
+### `batch_number` [plugins-outputs-graphtastic-batch_number]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+The number of metrics to send to GraphTastic at one time. 60 seems to be the perfect amount for UDP with the default packet size.
+
+
+### `context` [plugins-outputs-graphtastic-context]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"graphtastic"`
+
+If using REST as your endpoint, you also need to provide the application URL; it defaults to `localhost/graphtastic`. You can customize the application URL by changing the name of the .war file. There are other ways to change the application context, but they vary depending on the application server in use. Please consult your application server documentation for more on application contexts.
+
+
+### `error_file` [plugins-outputs-graphtastic-error_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+This setting allows you to specify where errored transactions are saved. How these errored metrics are reintegrated is still to be decided. NOT IMPLEMENTED!
+
+
+### `host` [plugins-outputs-graphtastic-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"127.0.0.1"`
+
+Host for the GraphTastic server. Defaults to `127.0.0.1`.
+
+
+### `integration` [plugins-outputs-graphtastic-integration]
+
+* Value can be any of: `udp`, `tcp`, `rmi`, `rest`
+* Default value is `"udp"`
+
+Options are `udp` (fastest, the default), `rmi` (faster), `rest` (fast), and `tcp` (don’t use TCP yet; it has some problems and errors out on Linux). See the sketch below for a REST-based example.
+
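+A sketch of a REST-based configuration; the host and metric field are placeholders:
+
+```ruby
+output {
+  graphtastic {
+    integration => "rest"
+    host => "metrics.example.com"
+    port => 8080
+    context => "graphtastic"   # matches the deployed .war name
+    metrics => { "Response" => "%{response}" }
+  }
+}
+```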
+
+### `metrics` [plugins-outputs-graphtastic-metrics]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Metrics hash: you provide a name for your metric and the metric data as key/value pairs. For example:
+
+```ruby
+metrics => { "Response" => "%{response}" }
+```
+
+Example for the Logstash config:
+
+```ruby
+metrics => [ "Response", "%{response}" ]
+```
+
+::::{note}
+You can also use dynamic fields for the key as well as for the actual value.
+::::
+
+
+
+### `port` [plugins-outputs-graphtastic-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Port for the GraphTastic instance. Defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST.
+
+
+### `retries` [plugins-outputs-graphtastic-retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Number of retry attempts after a send error. Currently this is the only way to handle errored transactions; ideally they should be saved to a file for later consumption, either by a GraphTastic utility or by this program, once connectivity is re-established.
+
+
+
+## Common options [plugins-outputs-graphtastic-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-graphtastic-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-graphtastic-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-graphtastic-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-graphtastic-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-graphtastic-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-graphtastic-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 graphtastic outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ graphtastic {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-http.md b/docs/reference/plugins-outputs-http.md
new file mode 100644
index 000000000..819b6bc84
--- /dev/null
+++ b/docs/reference/plugins-outputs-http.md
@@ -0,0 +1,513 @@
+---
+navigation_title: "http"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-http.html
+---
+
+# Http output plugin [plugins-outputs-http]
+
+
+* Plugin version: v6.0.0
+* Released on: 2024-11-21
+* [Changelog](https://github.com/logstash-plugins/logstash-output-http/blob/v6.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-http-index.md).
+
+## Getting help [_getting_help_85]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-http). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_85]
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to `pool_max` requests in parallel. Consider this when tuning the plugin for performance.
+
+Additionally, note that when parallel execution is used strict ordering of events is not guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the *format* option for now.
+
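+A minimal sketch of this output, assuming a hypothetical ingest endpoint; the URL and header value are placeholders:
+
+```ruby
+output {
+  http {
+    url => "https://logs.example.com/ingest"
+    http_method => "post"
+    format => "json"
+    headers => ["X-My-Header", "logstash"]
+  }
+}
+```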
+
+## Retry policy [plugins-outputs-http-retry_policy]
+
+This output has two levels of retry: library and plugin.
+
+### Library retry [plugins-outputs-http-library_retry]
+
+The library retry applies to IO-related failures. Non-retriable errors include SSL-related problems, unresolvable hosts, connection issues, and OS/JVM-level interruptions happening during a request.
+
+The options for library retry are:
+
+* [`automatic_retries`](#plugins-outputs-http-automatic_retries). Controls the number of times the plugin should retry after failures at the library level.
+* [`retry_non_idempotent`](#plugins-outputs-http-retry_non_idempotent). When set to `false`, GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+
+### Plugin retry [plugins-outputs-http-plugin_retry]
+
+The options for plugin level retry are:
+
+* [`retry_failed`](#plugins-outputs-http-retry_failed). When set to `true`, the plugin retries indefinitely for HTTP error response codes defined in the [`retryable_codes`](#plugins-outputs-http-retryable_codes) option (429, 500, 502, 503, 504) and retryable exceptions (socket timeout/ error, DNS resolution failure and client protocol exception).
+* [`retryable_codes`](#plugins-outputs-http-retryable_codes). Sets http response codes that trigger a retry.
+
+::::{note}
+The `retry_failed` option does not control the library level retry.
+::::
+
+
+
+
+## Http Output Configuration Options [plugins-outputs-http-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-http-common-options) described later.
+
+::::{note}
+As of version `6.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please check out [HTTP Output Obsolete Configuration Options](#plugins-outputs-http-obsolete-options) for details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`automatic_retries`](#plugins-outputs-http-automatic_retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connect_timeout`](#plugins-outputs-http-connect_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`content_type`](#plugins-outputs-http-content_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`cookies`](#plugins-outputs-http-cookies) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`follow_redirects`](#plugins-outputs-http-follow_redirects) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`format`](#plugins-outputs-http-format) | [string](/reference/configuration-file-structure.md#string), one of `["json", "json_batch", "form", "message"]` | No |
+| [`headers`](#plugins-outputs-http-headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`http_compression`](#plugins-outputs-http-http_compression) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`http_method`](#plugins-outputs-http-http_method) | [string](/reference/configuration-file-structure.md#string), one of `["put", "post", "patch", "delete", "get", "head"]` | Yes |
+| [`ignorable_codes`](#plugins-outputs-http-ignorable_codes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`keepalive`](#plugins-outputs-http-keepalive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`mapping`](#plugins-outputs-http-mapping) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`message`](#plugins-outputs-http-message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pool_max`](#plugins-outputs-http-pool_max) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`pool_max_per_route`](#plugins-outputs-http-pool_max_per_route) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy`](#plugins-outputs-http-proxy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`request_timeout`](#plugins-outputs-http-request_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_failed`](#plugins-outputs-http-retry_failed) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`retry_non_idempotent`](#plugins-outputs-http-retry_non_idempotent) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`retryable_codes`](#plugins-outputs-http-retryable_codes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`socket_timeout`](#plugins-outputs-http-socket_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-outputs-http-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-outputs-http-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-http-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-outputs-http-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_keystore_password`](#plugins-outputs-http-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_path`](#plugins-outputs-http-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_type`](#plugins-outputs-http-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-http-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_password`](#plugins-outputs-http-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-outputs-http-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_type`](#plugins-outputs-http-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-http-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`url`](#plugins-outputs-http-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`validate_after_inactivity`](#plugins-outputs-http-validate_after_inactivity) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-http-common-options) for a list of options supported by all output plugins.
+
+
+
+### `automatic_retries` [plugins-outputs-http-automatic_retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+How many times should the client retry a failing URL. We recommend setting this option to a value other than zero if the [`keepalive` option](#plugins-outputs-http-keepalive) is enabled. Some servers incorrectly end keepalives early, requiring a retry. See [Retry Policy](#plugins-outputs-http-retry_policy) for more information.
+
+
+### `connect_timeout` [plugins-outputs-http-connect_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10s`
+
+
+### `content_type` [plugins-outputs-http-content_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Content type
+
+If not specified, this defaults to the following:
+
+* if format is "json", "application/json"
+* if format is "json_batch", "application/json". Each Logstash batch of events will be concatenated into a single array and sent in one request.
+* if format is "form", "application/x-www-form-urlencoded"
+
+
+### `cookies` [plugins-outputs-http-cookies]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable cookie support. With this enabled, the client persists cookies across requests as a normal web browser would. Enabled by default.
+
+
+### `follow_redirects` [plugins-outputs-http-follow_redirects]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should redirects be followed? Defaults to `true`
+
+
+### `format` [plugins-outputs-http-format]
+
+* Value can be any of: `json`, `json_batch`, `form`, `message`
+* Default value is `"json"`
+
+Set the format of the http body.
+
+If `json_batch`, each batch of events received by this output will be placed into a single JSON array and sent in one request. This is particularly useful for high-throughput scenarios, such as sending data between Logstash instances.
+
+If `form`, then the body will be the mapping (or whole event) converted into a query parameter string, e.g. `foo=bar&baz=fizz...`
+
+If `message`, then the body will be the result of formatting the event according to [`message`](#plugins-outputs-http-message).
+
+Otherwise, the event is sent as JSON.
+
+
+### `headers` [plugins-outputs-http-headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Custom headers to use. The format is `headers => ["X-My-Header", "%{{host}}"]`.
+
+
+### `http_compression` [plugins-outputs-http-http_compression]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable request compression support. With this enabled the plugin will compress http requests using gzip.
+
+
+### `http_method` [plugins-outputs-http-http_method]
+
+* This is a required setting.
+* Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
+* There is no default value for this setting.
+
+The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head"
+
+
+### `ignorable_codes` [plugins-outputs-http-ignorable_codes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+If you would like to consider some non-2xx codes to be successes, enumerate them here. Responses returning these codes will be considered successes.
+
+
+### `keepalive` [plugins-outputs-http-keepalive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least one with this to fix interactions with broken keepalive implementations.
+
+
+### `mapping` [plugins-outputs-http-mapping]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+
+```ruby
+ mapping => {"foo" => "%{host}"
+ "bar" => "%{type}"}
+```
+
+
+### `message` [plugins-outputs-http-message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+
+### `pool_max` [plugins-outputs-http-pool_max]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`
+
+
+### `pool_max_per_route` [plugins-outputs-http-pool_max_per_route]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`
+
+
+### `proxy` [plugins-outputs-http-proxy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If you’d like to use an HTTP proxy, this setting supports multiple configuration syntaxes (see the sketch after this list):
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
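+A sketch using the simple URL form (the proxy address and endpoint are placeholders); the hash forms above additionally allow credentials:
+
+```ruby
+output {
+  http {
+    url => "https://logs.example.com/ingest"
+    http_method => "post"
+    proxy => "http://proxy.example.org:3128"
+  }
+}
+```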
+
+### `request_timeout` [plugins-outputs-http-request_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Timeout (in seconds) for the entire request. This plugin is based on [Manticore](https://github.com/cheald/manticore), which makes it easy to add a fully configured HTTP client to Logstash; for an example of its usage, see [https://github.com/logstash-plugins/logstash-input-http_poller](https://github.com/logstash-plugins/logstash-input-http_poller).
+
+
+### `retry_failed` [plugins-outputs-http-retry_failed]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Note that this option controls plugin-level retries only. It has no effect on library-level retries.
+
+Set this option to `false` if you want to disable infinite retries for HTTP error response codes defined in the [`retryable_codes`](#plugins-outputs-http-retryable_codes) or retryable exceptions (Timeout, SocketException, ClientProtocolException, ResolutionFailure and SocketTimeout). See [Retry policy](#plugins-outputs-http-retry_policy) for more information.
+
+
+### `retry_non_idempotent` [plugins-outputs-http-retry_non_idempotent]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+When this option is set to `false` and `automatic_retries` is enabled, GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+When set to `true` and `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried. See [Retry Policy](#plugins-outputs-http-retry_policy) for more information.
+
+
+### `retryable_codes` [plugins-outputs-http-retryable_codes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `[429, 500, 502, 503, 504]`
+
+If the plugin encounters these response codes, the plugin will retry indefinitely. See [Retry Policy](#plugins-outputs-http-retry_policy) for more information.
+
+
+### `socket_timeout` [plugins-outputs-http-socket_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10s`
+
+
+### `ssl_certificate` [plugins-outputs-http-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate to use to authenticate the client. This certificate should be an OpenSSL-style X.509 certificate file.
+
+::::{note}
+This setting can be used only if [`ssl_key`](#plugins-outputs-http-ssl_key) is set.
+::::
+
+
+
+### `ssl_certificate_authorities` [plugins-outputs-http-ssl_certificate_authorities]
+
+* Value type is a list of [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting
+
+The .cer or .pem CA files to validate the server’s certificate.
+
+
+### `ssl_cipher_suites` [plugins-outputs-http-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-outputs-http-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Enable SSL/TLS secured communication. It must be `true` for other `ssl_` options to take effect.
+
+
+### `ssl_key` [plugins-outputs-http-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+OpenSSL-style RSA private key that corresponds to the [`ssl_certificate`](#plugins-outputs-http-ssl_certificate).
+
+::::{note}
+This setting can be used only if [`ssl_certificate`](#plugins-outputs-http-ssl_certificate) is set.
+::::
+
+
+
+### `ssl_keystore_password` [plugins-outputs-http-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the keystore password
+
+
+### `ssl_keystore_path` [plugins-outputs-http-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The keystore used to present a certificate to the server. It can be either `.jks` or `.p12`
+
+
+### `ssl_keystore_type` [plugins-outputs-http-ssl_keystore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the keystore filename.
+
+The format of the keystore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_supported_protocols` [plugins-outputs-http-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a connection to the HTTP endpoint.
+
+For Java 8 `'TLSv1.3'` is supported only since **8u262** (AdoptOpenJDK), but requires that you set the `LS_JAVA_OPTS="-Djdk.tls.client.protocols=TLSv1.3"` system property in Logstash.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_truststore_password` [plugins-outputs-http-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Set the truststore password
+
+
+### `ssl_truststore_path` [plugins-outputs-http-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The truststore to validate the server’s certificate. It can be either `.jks` or `.p12`.
+
+
+### `ssl_truststore_type` [plugins-outputs-http-ssl_truststore_type]
+
+* Value can be any of: `jks`, `pkcs12`
+* If not provided, the value will be inferred from the truststore filename.
+
+The format of the truststore file. It must be either `jks` or `pkcs12`.
+
+
+### `ssl_verification_mode` [plugins-outputs-http-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Supported values are: `full`, `none`
+* Default value is `full`
+
+Controls the verification of server certificates. The `full` option verifies that the provided certificate is signed by a trusted authority (CA) and also that the server’s hostname (or IP address) matches the names identified within the certificate.
+
+The `none` setting performs no verification of the server’s certificate. This mode disables many of the security benefits of SSL/TLS and should only be used after cautious consideration. It is primarily intended as a temporary diagnostic mechanism when attempting to resolve TLS errors. Using `none` in production environments is strongly discouraged.
+
+
+### `url` [plugins-outputs-http-url]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URL to use
+
+
+### `validate_after_inactivity` [plugins-outputs-http-validate_after_inactivity]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `200`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. You may want to set this lower, possibly to `0`, if you get connection errors regularly. Quoting the Apache Commons docs (this client is based on Apache Commons): *Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.* See [these docs for more info](https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)).
+
+
+
+## HTTP Output Obsolete Configuration Options [plugins-outputs-http-obsolete-options]
+
+::::{warning}
+As of version `6.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](#plugins-outputs-http-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](#plugins-outputs-http-ssl_certificate) |
+| client_key | [`ssl_key`](#plugins-outputs-http-ssl_key) |
+| keystore | [`ssl_keystore_path`](#plugins-outputs-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](#plugins-outputs-http-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](#plugins-outputs-http-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](#plugins-outputs-http-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](#plugins-outputs-http-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](#plugins-outputs-http-ssl_truststore_type) |
+
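+For reference, a hedged sketch of a current-style TLS configuration, with comments noting the obsolete option each setting replaces (URL and paths are placeholders):
+
+```ruby
+  output {
+    http {
+      url => "https://example.com/ingest"                      # placeholder endpoint
+      http_method => "post"
+      ssl_certificate_authorities => ["/path/to/ca.pem"]       # replaces: cacert
+      ssl_certificate => "/path/to/client.crt"                 # replaces: client_cert
+      ssl_key => "/path/to/client.key"                         # replaces: client_key
+    }
+  }
+```
+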
+
+## Common options [plugins-outputs-http-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-http-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-http-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-http-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-http-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-http-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-http-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 http outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ http {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-influxdb.md b/docs/reference/plugins-outputs-influxdb.md
new file mode 100644
index 000000000..6dd24ec03
--- /dev/null
+++ b/docs/reference/plugins-outputs-influxdb.md
@@ -0,0 +1,291 @@
+---
+navigation_title: "influxdb"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-influxdb.html
+---
+
+# Influxdb output plugin [plugins-outputs-influxdb]
+
+
+* Plugin version: v5.0.6
+* Released on: 2021-06-07
+* [Changelog](https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-influxdb-index.md).
+
+## Installation [_installation_33]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-influxdb`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_86]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-influxdb). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_86]
+
+This output lets you send metrics to InfluxDB (>= 0.9.0-rc31).
+
+The configuration here attempts to be as friendly as possible and to minimize the need for multiple definitions to write to multiple measurements, while still being efficient.
+
+The InfluxDB API lets you do some semblance of bulk operation per HTTP call, but each call is database-specific.
+
+You can learn more at the [InfluxDB homepage](http://influxdb.com).
+
+
+## Influxdb Output Configuration Options [plugins-outputs-influxdb-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-influxdb-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`allow_time_override`](#plugins-outputs-influxdb-allow_time_override) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`coerce_values`](#plugins-outputs-influxdb-coerce_values) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`data_points`](#plugins-outputs-influxdb-data_points) | [hash](/reference/configuration-file-structure.md#hash) | Yes |
+| [`db`](#plugins-outputs-influxdb-db) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`exclude_fields`](#plugins-outputs-influxdb-exclude_fields) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`flush_size`](#plugins-outputs-influxdb-flush_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-outputs-influxdb-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`idle_flush_time`](#plugins-outputs-influxdb-idle_flush_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`initial_delay`](#plugins-outputs-influxdb-initial_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_retries`](#plugins-outputs-influxdb-max_retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`measurement`](#plugins-outputs-influxdb-measurement) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-outputs-influxdb-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-influxdb-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retention_policy`](#plugins-outputs-influxdb-retention_policy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`send_as_tags`](#plugins-outputs-influxdb-send_as_tags) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl`](#plugins-outputs-influxdb-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`time_precision`](#plugins-outputs-influxdb-time_precision) | [string](/reference/configuration-file-structure.md#string), one of `["n", "u", "ms", "s", "m", "h"]` | No |
+| [`use_event_fields_for_data_points`](#plugins-outputs-influxdb-use_event_fields_for_data_points) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`user`](#plugins-outputs-influxdb-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-influxdb-common-options) for a list of options supported by all output plugins.
+
+
+
+### `allow_time_override` [plugins-outputs-influxdb-allow_time_override]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Allow the override of the `time` column in the event?
+
+By default, any column with a name of `time` will be ignored and the time will be determined by the value of `@timestamp`.
+
+Setting this to `true` allows you to set the `time` column explicitly.
+
+Note: **`time` must be an epoch value in either seconds, milliseconds, or microseconds.**
+
+
+### `coerce_values` [plugins-outputs-influxdb-coerce_values]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Allow value coercion.
+
+This will attempt to convert data point values to the appropriate type before posting. Otherwise, sprintf-filtered numeric values could get sent as strings. The format is `{'column_name' => 'datatype'}`.
+
+The currently supported datatypes are `integer` and `float`.
+
+
+### `data_points` [plugins-outputs-influxdb-data_points]
+
+* This is a required setting.
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Hash of key/value pairs representing data points to send to the named database. Example: `{'column1' => 'value1', 'column2' => 'value2'}`
+
+Events for the same measurement will be batched together where possible. Both keys and values support sprintf formatting.
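+
+As a hedged sketch, assuming the event carries hypothetical `response_time` and `status` fields, data points can be built with sprintf references and coerced to numeric types:
+
+```ruby
+  output {
+    influxdb {
+      host => "influxdb.local"                    # placeholder host
+      db => "statistics"
+      measurement => "web"
+      data_points => {
+        "response_time" => "%{response_time}"     # sprintf-filtered values arrive as strings...
+        "status" => "%{status}"
+      }
+      coerce_values => {
+        "response_time" => "float"                # ...so coerce them to the appropriate types
+        "status" => "integer"
+      }
+    }
+  }
+```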
+
+
+### `db` [plugins-outputs-influxdb-db]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"statistics"`
+
+The database to write to. Supports sprintf formatting.
+
+
+### `exclude_fields` [plugins-outputs-influxdb-exclude_fields]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["@timestamp", "@version", "sequence", "message", "type"]`
+
+An array containing the names of fields from the event to exclude from the data points.
+
+Events, in general, contain the keys `@version` and `@timestamp`. Other plugins may add others that you’ll want to exclude (such as `command` from the exec plugin).
+
+This only applies when `use_event_fields_for_data_points` is `true`.
+
+
+### `flush_size` [plugins-outputs-influxdb-flush_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100`
+
+This setting controls how many events will be buffered before sending a batch of events. Note that these are only batched for the same measurement
+
+
+### `host` [plugins-outputs-influxdb-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The hostname or IP address to reach your InfluxDB instance
+
+
+### `idle_flush_time` [plugins-outputs-influxdb-idle_flush_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The amount of time since last flush before a flush is forced.
+
+This setting helps ensure slow event rates don’t get stuck in Logstash. For example, if your `flush_size` is 100, and you have received 10 events, and it has been more than `idle_flush_time` seconds since the last flush, logstash will flush those 10 events automatically.
+
+This helps keep both fast and slow log streams moving along in near-real-time.
+
+
+### `initial_delay` [plugins-outputs-influxdb-initial_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The amount of time in seconds to delay the initial retry on connection failure.
+
+The delay will increase exponentially for each retry attempt (up to max_retries).
+
+
+### `max_retries` [plugins-outputs-influxdb-max_retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3`
+
+The number of times to retry recoverable errors before dropping the events.
+
+A value of `-1` will cause the plugin to retry indefinitely. A value of `0` will cause the plugin to never retry. Otherwise, it will retry up to the specified number of times.
+
+
+### `measurement` [plugins-outputs-influxdb-measurement]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Measurement name - supports sprintf formatting
+
+
+### `password` [plugins-outputs-influxdb-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+The password for the user who has access to the named database
+
+
+### `port` [plugins-outputs-influxdb-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8086`
+
+The port for InfluxDB
+
+
+### `retention_policy` [plugins-outputs-influxdb-retention_policy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"autogen"`
+
+The retention policy to use
+
+
+### `send_as_tags` [plugins-outputs-influxdb-send_as_tags]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["host"]`
+
+An array containing the names of fields to send to Influxdb as tags instead of fields. Influxdb 0.9 convention is that values that do not change every request should be considered metadata and given as tags. Tags are only sent when present in `data_points` or if `use_event_fields_for_data_points` is `true`.
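+
+A brief sketch (not a complete configuration), assuming events carry `host` and a hypothetical `region` field and that `use_event_fields_for_data_points` is enabled so those fields are picked up automatically:
+
+```ruby
+  output {
+    influxdb {
+      host => "influxdb.local"                   # placeholder host
+      use_event_fields_for_data_points => true   # build data points from event fields
+      send_as_tags => ["host", "region"]         # send these fields as InfluxDB tags instead of fields
+    }
+  }
+```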
+
+
+### `ssl` [plugins-outputs-influxdb-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL/TLS secured communication to InfluxDB
+
+
+### `time_precision` [plugins-outputs-influxdb-time_precision]
+
+* Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h`
+* Default value is `"ms"`
+
+Set the level of precision of `time`.
+
+This is only useful when overriding the time value.
+
+
+### `use_event_fields_for_data_points` [plugins-outputs-influxdb-use_event_fields_for_data_points]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Automatically use fields from the event as the data points sent to Influxdb
+
+
+### `user` [plugins-outputs-influxdb-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+The user who has access to the named database
+
+
+
+## Common options [plugins-outputs-influxdb-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-influxdb-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-influxdb-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-influxdb-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-influxdb-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-influxdb-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-influxdb-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 influxdb outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ influxdb {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
diff --git a/docs/reference/plugins-outputs-irc.md b/docs/reference/plugins-outputs-irc.md
new file mode 100644
index 000000000..1dd4fbfa3
--- /dev/null
+++ b/docs/reference/plugins-outputs-irc.md
@@ -0,0 +1,202 @@
+---
+navigation_title: "irc"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-irc.html
+---
+
+# Irc output plugin [plugins-outputs-irc]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-irc-index.md).
+
+## Installation [_installation_34]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-irc`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_87]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-irc). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_87]
+
+Write events to IRC
+
+
+## Irc Output Configuration Options [plugins-outputs-irc-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-irc-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`channels`](#plugins-outputs-irc-channels) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`format`](#plugins-outputs-irc-format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-irc-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`messages_per_second`](#plugins-outputs-irc-messages_per_second) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`nick`](#plugins-outputs-irc-nick) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-outputs-irc-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-irc-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`post_string`](#plugins-outputs-irc-post_string) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pre_string`](#plugins-outputs-irc-pre_string) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`real`](#plugins-outputs-irc-real) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secure`](#plugins-outputs-irc-secure) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`user`](#plugins-outputs-irc-user) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-irc-common-options) for a list of options supported by all output plugins.
+
+
+
+### `channels` [plugins-outputs-irc-channels]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Channels to broadcast to.
+
+These should be full channel names including the *#* symbol, such as "#logstash".
+
+
+### `format` [plugins-outputs-irc-format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{message}}"`
+
+Message format to send. Event tokens are usable here.
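+
+As a hedged illustration, assuming the event has `host` and `message` fields (server name is a placeholder):
+
+```ruby
+  output {
+    irc {
+      host => "irc.example.org"          # placeholder server
+      channels => ["#logstash"]
+      format => "%{host}: %{message}"    # event tokens are interpolated per message
+    }
+  }
+```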
+
+
+### `host` [plugins-outputs-irc-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Address of the host to connect to
+
+
+### `messages_per_second` [plugins-outputs-irc-messages_per_second]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0.5`
+
+Limit the rate of messages sent to IRC in messages per second.
+
+
+### `nick` [plugins-outputs-irc-nick]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Nickname
+
+
+### `password` [plugins-outputs-irc-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+IRC server password
+
+
+### `port` [plugins-outputs-irc-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `6667`
+
+Port on host to connect to.
+
+
+### `post_string` [plugins-outputs-irc-post_string]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Static string after event
+
+
+### `pre_string` [plugins-outputs-irc-pre_string]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Static string before event
+
+
+### `real` [plugins-outputs-irc-real]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Real name
+
+
+### `secure` [plugins-outputs-irc-secure]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set this to true to enable SSL.
+
+
+### `user` [plugins-outputs-irc-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+IRC Username
+
+
+
+## Common options [plugins-outputs-irc-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-irc-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-irc-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-irc-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-irc-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-irc-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-irc-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 irc outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ irc {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-java_stdout.md b/docs/reference/plugins-outputs-java_stdout.md
new file mode 100644
index 000000000..b25dd30d6
--- /dev/null
+++ b/docs/reference/plugins-outputs-java_stdout.md
@@ -0,0 +1,92 @@
+---
+navigation_title: "java_stdout"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-java_stdout.html
+---
+
+# Java_stdout output plugin [plugins-outputs-java_stdout]
+
+
+**{{ls}} Core Plugin.** The java_stdout output plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_88]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_88]
+
+Prints events to the STDOUT of the shell running Logstash. This output is convenient for debugging plugin configurations by providing instant access to event data after it has passed through the inputs and filters.
+
+For example, the following output configuration, in conjunction with the Logstash `-e` command-line flag, will allow you to see the results of your event pipeline for quick iteration.
+
+```ruby
+ output {
+ java_stdout {}
+ }
+```
+
+Useful codecs include:
+
+`java_line`: outputs event data in JSON format followed by an end-of-line character. This is the default codec for `java_stdout`.
+
+```ruby
+  output {
+    java_stdout { codec => java_line }
+  }
+```
+
+
+## Java_stdout Output Configuration Options [plugins-outputs-java_stdout-options]
+
+There are no special configuration options for this plugin, but it does support the [Common options](#plugins-outputs-java_stdout-common-options).
+
+
+## Common options [plugins-outputs-java_stdout-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-java_stdout-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-java_stdout-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-java_stdout-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-java_stdout-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"java_line"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-java_stdout-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-java_stdout-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 java_stdout outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ java_stdout {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-juggernaut.md b/docs/reference/plugins-outputs-juggernaut.md
new file mode 100644
index 000000000..96819315c
--- /dev/null
+++ b/docs/reference/plugins-outputs-juggernaut.md
@@ -0,0 +1,158 @@
+---
+navigation_title: "juggernaut"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-juggernaut.html
+---
+
+# Juggernaut output plugin [plugins-outputs-juggernaut]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-juggernaut-index.md).
+
+## Installation [_installation_35]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-juggernaut`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_89]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-juggernaut). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_89]
+
+Push messages to the juggernaut websockets server:
+
+* [https://github.com/maccman/juggernaut](https://github.com/maccman/juggernaut)
+
+Wraps websockets and supports other methods (including xhr longpolling). This is basically just an extension of the redis output (Juggernaut pulls messages from redis), but it pushes messages to a particular channel and formats the messages in the way Juggernaut expects.
+
+
+## Juggernaut Output Configuration Options [plugins-outputs-juggernaut-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-juggernaut-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`channels`](#plugins-outputs-juggernaut-channels) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`db`](#plugins-outputs-juggernaut-db) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-outputs-juggernaut-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`message_format`](#plugins-outputs-juggernaut-message_format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`password`](#plugins-outputs-juggernaut-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-juggernaut-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timeout`](#plugins-outputs-juggernaut-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-juggernaut-common-options) for a list of options supported by all output plugins.
+
+
+
+### `channels` [plugins-outputs-juggernaut-channels]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+List of channels to which to publish. Dynamic names are valid here, for example `logstash-%{{type}}`.
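+
+A minimal sketch, assuming events carry a `type` field that is used to pick the channel:
+
+```ruby
+  output {
+    juggernaut {
+      host => "127.0.0.1"                 # redis host that Juggernaut reads from
+      port => 6379
+      channels => ["logstash-%{type}"]    # dynamic channel name per event type
+    }
+  }
+```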
+
+
+### `db` [plugins-outputs-juggernaut-db]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The redis database number.
+
+
+### `host` [plugins-outputs-juggernaut-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"127.0.0.1"`
+
+The hostname of the redis server that Juggernaut is listening to.
+
+
+### `message_format` [plugins-outputs-juggernaut-message_format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+How the message should be formatted before being pushed to the websocket.
+
+
+### `password` [plugins-outputs-juggernaut-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to authenticate with. There is no authentication by default.
+
+
+### `port` [plugins-outputs-juggernaut-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `6379`
+
+The port to connect on.
+
+
+### `timeout` [plugins-outputs-juggernaut-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Redis initial connection timeout in seconds.
+
+
+
+## Common options [plugins-outputs-juggernaut-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-juggernaut-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-juggernaut-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-juggernaut-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-juggernaut-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-juggernaut-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-juggernaut-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 juggernaut outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ juggernaut {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-kafka.md b/docs/reference/plugins-outputs-kafka.md
new file mode 100644
index 000000000..93944c676
--- /dev/null
+++ b/docs/reference/plugins-outputs-kafka.md
@@ -0,0 +1,600 @@
+---
+navigation_title: "kafka"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-kafka.html
+---
+
+# Kafka output plugin [plugins-outputs-kafka]
+
+
+* A component of the [kafka integration plugin](/reference/plugins-integrations-kafka.md)
+* Integration version: v11.6.0
+* Released on: 2025-01-07
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-kafka/blob/v11.6.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-kafka-index.md).
+
+## Getting help [_getting_help_90]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-kafka). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_90]
+
+Write events to a Kafka topic.
+
+This plugin uses Kafka Client 3.8.1. For broker compatibility, see the official [Kafka compatibility reference](https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix). If the linked compatibility wiki is not up-to-date, please contact Kafka support/community to confirm compatibility.
+
+If you require features not yet available in this plugin (including client version upgrades), please file an issue with details about what you need.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the `topic_id`.
+
+The default codec is `plain`. Logstash will encode your events not only with the message field but also with a timestamp and hostname.
+
+If you want the full content of your events to be sent as JSON, you should set the codec in the output configuration like this:
+
+```ruby
+ output {
+ kafka {
+ codec => json
+ topic_id => "mytopic"
+ }
+ }
+```
+
+For more information see [https://kafka.apache.org/38/documentation.html#theproducer](https://kafka.apache.org/38/documentation.html#theproducer)
+
+Kafka producer configuration: [https://kafka.apache.org/38/documentation.html#producerconfigs](https://kafka.apache.org/38/documentation.html#producerconfigs)
+
+::::{note}
+This plugin does not support using a proxy when communicating to the Kafka broker.
+::::
+
+
+
+## Kafka Output Configuration Options [plugins-outputs-kafka-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-kafka-common-options) described later.
+
+::::{note}
+Some of these options map to a Kafka option. Defaults usually reflect the Kafka default setting, and might change if Kafka’s producer defaults change. See the [https://kafka.apache.org/38/documentation](https://kafka.apache.org/38/documentation) for more details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`acks`](#plugins-outputs-kafka-acks) | [string](/reference/configuration-file-structure.md#string), one of `["0", "1", "all"]` | No |
+| [`batch_size`](#plugins-outputs-kafka-batch_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`bootstrap_servers`](#plugins-outputs-kafka-bootstrap_servers) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`buffer_memory`](#plugins-outputs-kafka-buffer_memory) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`client_dns_lookup`](#plugins-outputs-kafka-client_dns_lookup) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`client_id`](#plugins-outputs-kafka-client_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`compression_type`](#plugins-outputs-kafka-compression_type) | [string](/reference/configuration-file-structure.md#string), one of `["none", "gzip", "snappy", "lz4", "zstd"]` | No |
+| [`connections_max_idle_ms`](#plugins-outputs-kafka-connections_max_idle_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`jaas_path`](#plugins-outputs-kafka-jaas_path) | a valid filesystem path | No |
+| [`kerberos_config`](#plugins-outputs-kafka-kerberos_config) | a valid filesystem path | No |
+| [`key_serializer`](#plugins-outputs-kafka-key_serializer) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`linger_ms`](#plugins-outputs-kafka-linger_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`max_request_size`](#plugins-outputs-kafka-max_request_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`message_headers`](#plugins-outputs-kafka-message_headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`message_key`](#plugins-outputs-kafka-message_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metadata_fetch_timeout_ms`](#plugins-outputs-kafka-metadata_fetch_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`metadata_max_age_ms`](#plugins-outputs-kafka-metadata_max_age_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`partitioner`](#plugins-outputs-kafka-partitioner) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`receive_buffer_bytes`](#plugins-outputs-kafka-receive_buffer_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect_backoff_ms`](#plugins-outputs-kafka-reconnect_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`request_timeout_ms`](#plugins-outputs-kafka-request_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retries`](#plugins-outputs-kafka-retries) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_backoff_ms`](#plugins-outputs-kafka-retry_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_client_callback_handler_class`](#plugins-outputs-kafka-sasl_client_callback_handler_class) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_oauthbearer_token_endpoint_url`](#plugins-outputs-kafka-sasl_oauthbearer_token_endpoint_url) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_oauthbearer_scope_claim_name`](#plugins-outputs-kafka-sasl_oauthbearer_scope_claim_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_login_callback_handler_class`](#plugins-outputs-kafka-sasl_login_callback_handler_class) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_login_connect_timeout_ms`](#plugins-outputs-kafka-sasl_login_connect_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_read_timeout_ms`](#plugins-outputs-kafka-sasl_login_read_timeout_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_retry_backoff_ms`](#plugins-outputs-kafka-sasl_login_retry_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_login_retry_backoff_max_ms`](#plugins-outputs-kafka-sasl_login_retry_backoff_max_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sasl_jaas_config`](#plugins-outputs-kafka-sasl_jaas_config) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_kerberos_service_name`](#plugins-outputs-kafka-sasl_kerberos_service_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sasl_mechanism`](#plugins-outputs-kafka-sasl_mechanism) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`security_protocol`](#plugins-outputs-kafka-security_protocol) | [string](/reference/configuration-file-structure.md#string), one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]` | No |
+| [`send_buffer_bytes`](#plugins-outputs-kafka-send_buffer_bytes) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_endpoint_identification_algorithm`](#plugins-outputs-kafka-ssl_endpoint_identification_algorithm) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_key_password`](#plugins-outputs-kafka-ssl_key_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_location`](#plugins-outputs-kafka-ssl_keystore_location) | a valid filesystem path | No |
+| [`ssl_keystore_password`](#plugins-outputs-kafka-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_keystore_type`](#plugins-outputs-kafka-ssl_keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_truststore_location`](#plugins-outputs-kafka-ssl_truststore_location) | a valid filesystem path | No |
+| [`ssl_truststore_password`](#plugins-outputs-kafka-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_type`](#plugins-outputs-kafka-ssl_truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`topic_id`](#plugins-outputs-kafka-topic_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`value_serializer`](#plugins-outputs-kafka-value_serializer) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-kafka-common-options) for a list of options supported by all output plugins.
+
+
+
+### `acks` [plugins-outputs-kafka-acks]
+
+* Value can be any of: `0`, `1`, `all`
+* Default value is `"1"`
+
+The number of acknowledgments the producer requires the leader to have received before considering a request complete.
+
+`acks=0`. The producer will not wait for any acknowledgment from the server.
+
+`acks=1`. The leader will write the record to its local log, but will respond without waiting for full acknowledgement from all followers.
+
+`acks=all`. The leader will wait for the full set of in-sync replicas before acknowledging the record.
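+
+For example, a sketch that favors durability over latency by requiring acknowledgement from the full set of in-sync replicas (the topic name is a placeholder):
+
+```ruby
+  output {
+    kafka {
+      topic_id => "mytopic"   # placeholder topic
+      acks => "all"           # wait for all in-sync replicas to acknowledge each record
+    }
+  }
+```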
+
+
+### `batch_size` [plugins-outputs-kafka-batch_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `16384`.
+
+The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes.
+
+
+### `bootstrap_servers` [plugins-outputs-kafka-bootstrap_servers]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost:9092"`
+
+This is for bootstrapping and the producer will only use it for getting metadata (topics, partitions and replicas). The socket connections for sending the actual data will be established based on the broker information returned in the metadata. The format is `host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a subset of brokers.
+
+
+### `buffer_memory` [plugins-outputs-kafka-buffer_memory]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `33554432` (32MB).
+
+The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+
+
+### `client_dns_lookup` [plugins-outputs-kafka-client_dns_lookup]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Valid options are `use_all_dns_ips`, `resolve_canonical_bootstrap_servers_only`, `default`
+* Default value is `"default"`
+
+Controls how DNS lookups are done. If set to `use_all_dns_ips`, Logstash tries all IP addresses returned for a hostname before failing the connection. If set to `resolve_canonical_bootstrap_servers_only`, each entry will be resolved and expanded into a list of canonical names.
+
+::::{note}
+Starting with Kafka 3, the `default` value for `client.dns.lookup` has been removed. If not explicitly configured, it defaults to `use_all_dns_ips`.
+
+::::
+
+
+
+### `client_id` [plugins-outputs-kafka-client_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included with the request.
+
+
+### `compression_type` [plugins-outputs-kafka-compression_type]
+
+* Value can be any of: `none`, `gzip`, `snappy`, `lz4`, `zstd`
+* Default value is `"none"`
+
+The compression type for all data generated by the producer. The default is none (meaning no compression). Valid values are none, gzip, snappy, lz4, or zstd.
+
+
+### `connections_max_idle_ms` [plugins-outputs-kafka-connections_max_idle_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `540000` milliseconds (9 minutes).
+
+Close idle connections after the number of milliseconds specified by this config.
+
+
+### `jaas_path` [plugins-outputs-kafka-jaas_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+
+```java
+KafkaClient {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useTicketCache=true
+ renewTicket=true
+ serviceName="kafka";
+ };
+```
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same `jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on different JVM instances.
+
+
+### `kerberos_config` [plugins-outputs-kafka-kerberos_config]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Optional path to kerberos config file. This is krb5.conf style as detailed in [https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html](https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html)
+
+
+### `key_serializer` [plugins-outputs-kafka-key_serializer]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the key of the message
+
+
+### `linger_ms` [plugins-outputs-kafka-linger_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay—that is, rather than immediately sending out a record the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together.
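+
+As a hedged tuning sketch, trading a little latency for larger batches (the values are illustrative, not recommendations):
+
+```ruby
+  output {
+    kafka {
+      topic_id => "mytopic"   # placeholder topic
+      batch_size => 65536     # allow larger batches, in bytes (default is 16384)
+      linger_ms => 10         # wait up to 10 ms so more records can join a batch
+    }
+  }
+```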
+
+
+### `max_request_size` [plugins-outputs-kafka-max_request_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1048576` (1MB).
+
+The maximum size of a request
+
+
+### `message_headers` [plugins-outputs-kafka-message_headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+
+ * Keys are header names, and must be [string](/reference/configuration-file-structure.md#string)
+ * Values are header values, and must be [string](/reference/configuration-file-structure.md#string)
+ * Values support interpolation from event field values
+
+* There is no default value for this setting.
+
+A map of key value pairs, each corresponding to a header name and its value respectively. Example:
+
+```ruby
+ message_headers => { "event_timestamp" => "%{@timestamp}" }
+```
+
+
+### `message_key` [plugins-outputs-kafka-message_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The key for the message.
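+
+For instance, a sketch that keys messages by a hypothetical `user_id` event field so that events for the same user land on the same partition under the default partitioner:
+
+```ruby
+  output {
+    kafka {
+      topic_id => "mytopic"          # placeholder topic
+      message_key => "%{user_id}"    # hypothetical event field; hashed to pick the partition
+    }
+  }
+```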
+
+
+### `metadata_fetch_timeout_ms` [plugins-outputs-kafka-metadata_fetch_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60000` milliseconds (60 seconds).
+
+The timeout setting for initial metadata request to fetch topic metadata.
+
+
+### `metadata_max_age_ms` [plugins-outputs-kafka-metadata_max_age_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `300000` milliseconds (5 minutes).
+
+The max time in milliseconds before a metadata refresh is forced.
+
+
+### `partitioner` [plugins-outputs-kafka-partitioner]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The default behavior is to hash the `message_key` of an event to get the partition. When no message key is present, the plugin picks a partition in a round-robin fashion.
+
+Available options for choosing a partitioning strategy are as follows:
+
+* `default` use the default partitioner as described above
+* `round_robin` distributes writes to all partitions equally, regardless of `message_key`
+* `uniform_sticky` sticks to a partition for the duration of a batch, then randomly picks a new one
+
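+A brief sketch overriding the default strategy (the topic name is a placeholder):
+
+```ruby
+  output {
+    kafka {
+      topic_id => "mytopic"           # placeholder topic
+      partitioner => "round_robin"    # spread writes across all partitions, ignoring message_key
+    }
+  }
+```
+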
+
+### `receive_buffer_bytes` [plugins-outputs-kafka-receive_buffer_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `32768` (32KB).
+
+The size of the TCP receive buffer to use when reading data
+
+
+### `reconnect_backoff_ms` [plugins-outputs-kafka-reconnect_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`.
+
+The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+
+
+### `request_timeout_ms` [plugins-outputs-kafka-request_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `40000` milliseconds (40 seconds).
+
+The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.
+
+
+### `retries` [plugins-outputs-kafka-retries]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The default retry behavior is to retry until successful. To prevent data loss, changing this setting is discouraged.
+
+If you choose to set `retries`, a value greater than zero will cause the client to only retry a fixed number of times. This will result in data loss if a transport fault exists for longer than your retry count (network outage, Kafka down, etc).
+
+A value less than zero is a configuration error.
+
+Starting with version 10.5.0, this plugin will only retry exceptions that are a subclass of [RetriableException](https://kafka.apache.org/38/javadoc/org/apache/kafka/common/errors/RetriableException.html) and [InterruptException](https://kafka.apache.org/38/javadoc/org/apache/kafka/common/errors/InterruptException.html). If producing a message throws any other exception, an error is logged and the message is dropped without retrying. This prevents the Logstash pipeline from hanging indefinitely.
+
+In versions prior to 10.5.0, any exception is retried indefinitely unless the `retries` option is configured.
+
+
+### `retry_backoff_ms` [plugins-outputs-kafka-retry_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100` milliseconds.
+
+The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
+
+
+### `sasl_client_callback_handler_class` [plugins-outputs-kafka-sasl_client_callback_handler_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The SASL client callback handler class the specified SASL mechanism should use.
+
+
+### `sasl_oauthbearer_token_endpoint_url` [plugins-outputs-kafka-sasl_oauthbearer_token_endpoint_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The URL for the OAuth 2.0 issuer token endpoint.
+
+
+### `sasl_oauthbearer_scope_claim_name` [plugins-outputs-kafka-sasl_oauthbearer_scope_claim_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"scope"`
+
+(optional) The override name of the scope claim.
+
+
+### `sasl_login_callback_handler_class` [plugins-outputs-kafka-sasl_login_callback_handler_class]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The SASL login callback handler class the specified SASL mechanism should use.
+
+
+### `sasl_login_connect_timeout_ms` [plugins-outputs-kafka-sasl_login_connect_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+(optional) The duration, in milliseconds, for HTTPS connect timeout
+
+
+### `sasl_login_read_timeout_ms` [plugins-outputs-kafka-sasl_login_read_timeout_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+(optional) The duration, in milliseconds, for HTTPS read timeout.
+
+
+### `sasl_login_retry_backoff_ms` [plugins-outputs-kafka-sasl_login_retry_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100` milliseconds.
+
+(optional) The duration, in milliseconds, to wait between HTTPS call attempts.
+
+
+### `sasl_login_retry_backoff_max_ms` [plugins-outputs-kafka-sasl_login_retry_backoff_max_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10000` milliseconds.
+
+(optional) The maximum duration, in milliseconds, for HTTPS call attempts.
+
+
+### `sasl_jaas_config` [plugins-outputs-kafka-sasl_jaas_config]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+JAAS configuration setting local to this plugin instance, as opposed to the config-file settings configured through `jaas_path`, which are shared across the JVM. This allows each plugin instance to have its own configuration.
+
+If both `sasl_jaas_config` and `jaas_path` configurations are set, the setting here takes precedence.
+
+Example (setting for Azure Event Hub):
+
+```ruby
+ output {
+ kafka {
+ sasl_jaas_config => "org.apache.kafka.common.security.plain.PlainLoginModule required username='auser' password='apassword';"
+ }
+ }
+```
+
+
+### `sasl_kerberos_service_name` [plugins-outputs-kafka-sasl_kerberos_service_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The Kerberos principal name that Kafka broker runs as. This can be defined either in Kafka’s JAAS config or in Kafka’s config.
+
+
+### `sasl_mechanism` [plugins-outputs-kafka-sasl_mechanism]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"GSSAPI"`
+
+[SASL mechanism](http://kafka.apache.org/documentation.html#security_sasl) used for client connections. This may be any mechanism for which a security provider is available. GSSAPI is the default mechanism.
+
+
+### `security_protocol` [plugins-outputs-kafka-security_protocol]
+
+* Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+* Default value is `"PLAINTEXT"`
+
+Security protocol to use. Can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
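+
+For illustration, a SASL-over-TLS connection typically pairs `security_protocol` with [`sasl_mechanism`](#plugins-outputs-kafka-sasl_mechanism) and a JAAS configuration. The broker address, topic, and credentials below are placeholders:
+
+```ruby
+ output {
+   kafka {
+     bootstrap_servers => "kafka-broker:9093"   # placeholder broker address
+     topic_id          => "logstash-events"     # placeholder topic
+     security_protocol => "SASL_SSL"
+     sasl_mechanism    => "PLAIN"
+     sasl_jaas_config  => "org.apache.kafka.common.security.plain.PlainLoginModule required username='auser' password='apassword';"
+   }
+ }
+```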
+
+
+### `send_buffer_bytes` [plugins-outputs-kafka-send_buffer_bytes]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `131072` (128KB).
+
+The size of the TCP send buffer to use when sending data.
+
+
+### `ssl_endpoint_identification_algorithm` [plugins-outputs-kafka-ssl_endpoint_identification_algorithm]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"https"`
+
+The endpoint identification algorithm. Defaults to `"https"`. Set to an empty string (`""`) to disable endpoint verification.
+
+
+### `ssl_key_password` [plugins-outputs-kafka-ssl_key_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+
+### `ssl_keystore_location` [plugins-outputs-kafka-ssl_keystore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+
+### `ssl_keystore_password` [plugins-outputs-kafka-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password
+
+
+### `ssl_keystore_type` [plugins-outputs-kafka-ssl_keystore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The keystore type.
+
+
+### `ssl_truststore_location` [plugins-outputs-kafka-ssl_truststore_location]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker’s certificate.
+
+
+### `ssl_truststore_password` [plugins-outputs-kafka-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The truststore password
+
+
+### `ssl_truststore_type` [plugins-outputs-kafka-ssl_truststore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The truststore type.
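+
+Taken together, a TLS connection that validates the broker certificate against a JKS truststore could be sketched as follows; the broker address, topic, path, and password are placeholders:
+
+```ruby
+ output {
+   kafka {
+     bootstrap_servers       => "kafka-broker:9093"                   # placeholder broker address
+     topic_id                => "logstash-events"                     # placeholder topic
+     security_protocol       => "SSL"
+     ssl_truststore_type     => "JKS"
+     ssl_truststore_location => "/etc/logstash/kafka.truststore.jks"  # placeholder path
+     ssl_truststore_password => "changeit"                            # placeholder password
+   }
+ }
+```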
+
+
+### `topic_id` [plugins-outputs-kafka-topic_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The topic to produce messages to.
+
+
+### `value_serializer` [plugins-outputs-kafka-value_serializer]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the value of the message.
+
+
+
+## Common options [plugins-outputs-kafka-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-kafka-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-kafka-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-kafka-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-kafka-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-kafka-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-kafka-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 kafka outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ kafka {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-librato.md b/docs/reference/plugins-outputs-librato.md
new file mode 100644
index 000000000..9685d658a
--- /dev/null
+++ b/docs/reference/plugins-outputs-librato.md
@@ -0,0 +1,213 @@
+---
+navigation_title: "librato"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-librato.html
+---
+
+# Librato output plugin [plugins-outputs-librato]
+
+
+* Plugin version: v3.0.7
+* Released on: 2019-10-09
+* [Changelog](https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-librato-index.md).
+
+## Installation [_installation_36]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-librato`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_91]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-librato). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_91]
+
+This output lets you send metrics, annotations, and alerts to Librato based on Logstash events
+
+This is VERY experimental and inefficient right now.
+
+
+## Librato Output Configuration Options [plugins-outputs-librato-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-librato-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`account_id`](#plugins-outputs-librato-account_id) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`annotation`](#plugins-outputs-librato-annotation) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`api_token`](#plugins-outputs-librato-api_token) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`batch_size`](#plugins-outputs-librato-batch_size) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`counter`](#plugins-outputs-librato-counter) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`gauge`](#plugins-outputs-librato-gauge) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-outputs-librato-common-options) for a list of options supported by all output plugins.
+
+
+
+### `account_id` [plugins-outputs-librato-account_id]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Librato account, usually an email address.
+
+
+### `annotation` [plugins-outputs-librato-annotation]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Registers an annotation with Librato. The only required fields are `title` and `name`. `start_time` and `end_time` will be set to `event.get("@timestamp").to_i`. You can add any other optional annotation values as well. All values will be passed through `event.sprintf`.
+
+Example:
+
+```ruby
+ {
+ "title" => "Logstash event on %{host}"
+ "name" => "logstash_stream"
+ }
+```
+
+or
+
+```ruby
+ {
+ "title" => "Logstash event"
+ "description" => "%{message}"
+ "name" => "logstash_stream"
+ }
+```
+
+
+### `api_token` [plugins-outputs-librato-api_token]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Librato API Token
+
+
+### `batch_size` [plugins-outputs-librato-batch_size]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"10"`
+
+The number of events to batch up before sending to Librato.
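+
+As a rough sketch of how these settings fit together (the account, token, and metric definition are placeholders):
+
+```ruby
+ output {
+   librato {
+     account_id => "user@example.com"   # placeholder account (usually an email address)
+     api_token  => "SECRET_TOKEN"       # placeholder API token
+     batch_size => "20"
+     gauge      => {
+       "value"  => "%{bytes_received}"
+       "source" => "%{host}"
+       "name"   => "apache_bytes"
+     }
+   }
+ }
+```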
+
+
+### `counter` [plugins-outputs-librato-counter]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Send data to Librato as a counter.
+
+Example:
+
+```ruby
+ {
+ "value" => "1"
+ "source" => "%{host}"
+ "name" => "messages_received"
+ }
+```
+
+Additionally, you can override the `measure_time` for the event. Must be a unix timestamp:
+
+```ruby
+ {
+ "value" => "1"
+ "source" => "%{host}"
+ "name" => "messages_received"
+ "measure_time" => "%{my_unixtime_field}"
+ }
+```
+
+The default is to use the event’s timestamp.
+
+
+### `gauge` [plugins-outputs-librato-gauge]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Send data to Librato as a gauge.
+
+Example:
+
+```ruby
+ {
+ "value" => "%{bytes_received}"
+ "source" => "%{host}"
+ "name" => "apache_bytes"
+ }
+```
+
+Additionally, you can override the `measure_time` for the event. Must be a unix timestamp:
+
+```ruby
+ {
+ "value" => "%{bytes_received}"
+ "source" => "%{host}"
+ "name" => "apache_bytes"
+        "measure_time" => "%{my_unixtime_field}"
+ }
+```
+
+The default is to use the event’s timestamp.
+
+
+
+## Common options [plugins-outputs-librato-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-librato-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-librato-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-librato-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-librato-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-librato-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-librato-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 librato outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ librato {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-loggly.md b/docs/reference/plugins-outputs-loggly.md
new file mode 100644
index 000000000..3908e0c76
--- /dev/null
+++ b/docs/reference/plugins-outputs-loggly.md
@@ -0,0 +1,231 @@
+---
+navigation_title: "loggly"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-loggly.html
+---
+
+# Loggly output plugin [plugins-outputs-loggly]
+
+
+* Plugin version: v6.0.0
+* Released on: 2018-07-03
+* [Changelog](https://github.com/logstash-plugins/logstash-output-loggly/blob/v6.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-loggly-index.md).
+
+## Installation [_installation_37]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-loggly`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_93]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-loggly). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_93]
+
+Got a loggly account? Use logstash to ship logs to Loggly!
+
+This is most useful when you use Logstash to parse and structure your logs and then ship structured JSON events to your account at Loggly.
+
+To use this, you’ll need to use a Loggly input with type *http* and *json logging* enabled.
+
+
+## Loggly Output Configuration Options [plugins-outputs-loggly-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-loggly-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`can_retry`](#plugins-outputs-loggly-can_retry) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`convert_timestamp`](#plugins-outputs-loggly-convert_timestamp) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`host`](#plugins-outputs-loggly-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`key`](#plugins-outputs-loggly-key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`max_event_size`](#plugins-outputs-loggly-max_event_size) | [bytes](/reference/configuration-file-structure.md#bytes) | Yes |
+| [`max_payload_size`](#plugins-outputs-loggly-max_payload_size) | [bytes](/reference/configuration-file-structure.md#bytes) | Yes |
+| [`proto`](#plugins-outputs-loggly-proto) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_host`](#plugins-outputs-loggly-proxy_host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_password`](#plugins-outputs-loggly-proxy_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`proxy_port`](#plugins-outputs-loggly-proxy_port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy_user`](#plugins-outputs-loggly-proxy_user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`retry_count`](#plugins-outputs-loggly-retry_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`tag`](#plugins-outputs-loggly-tag) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-loggly-common-options) for a list of options supported by all output plugins.
+
+
+
+### `can_retry` [plugins-outputs-loggly-can_retry]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Can retry. When set to `true`, the plugin retries sending the request if the first attempt fails.
+
+
+### `convert_timestamp` [plugins-outputs-loggly-convert_timestamp]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+The plugin renames Logstash’s *@timestamp* field to *timestamp* before sending, so that Loggly recognizes it automatically.
+
+This will do nothing if your event doesn’t have a *@timestamp* field or if your event already has a *timestamp* field.
+
+Note that the actual Logstash event is not modified by the output. This modification only happens on a copy of the event, prior to sending.
+
+
+### `host` [plugins-outputs-loggly-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logs-01.loggly.com"`
+
+The hostname to send logs to. This should target the loggly http input server which is usually "logs-01.loggly.com" (Gen2 account). See the [Loggly HTTP endpoint documentation](https://www.loggly.com/docs/http-endpoint/).
+
+
+### `key` [plugins-outputs-loggly-key]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The loggly http customer token to use for sending. You can find yours in "Source Setup", under "Customer Tokens".
+
+You can use `%{{foo}}` field lookups here if you need to pull the api key from the event. This is mainly aimed at multitenant hosting providers who want to offer shipping a customer’s logs to that customer’s loggly account.
+
+
+### `max_event_size` [plugins-outputs-loggly-max_event_size]
+
+* This is a required setting.
+* Value type is [bytes](/reference/configuration-file-structure.md#bytes)
+* Default value is 1 MiB
+
+The Loggly API supports an event size of up to 1 MiB.
+
+You should only need to change this setting if the API limits have changed and you need to override the plugin’s behaviour.
+
+See the [Loggly bulk API documentation](https://www.loggly.com/docs/http-bulk-endpoint/)
+
+
+### `max_payload_size` [plugins-outputs-loggly-max_payload_size]
+
+* This is a required setting.
+* Value type is [bytes](/reference/configuration-file-structure.md#bytes)
+* Default value is 5 MiB
+
+The Loggly API supports API call payloads of up to 5 MiB.
+
+You should only need to change this setting if the API limits have changed and you need to override the plugin’s behaviour.
+
+See the [Loggly bulk API documentation](https://www.loggly.com/docs/http-bulk-endpoint/)
+
+
+### `proto` [plugins-outputs-loggly-proto]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"http"`
+
+Whether to send the log event over HTTPS instead of plain HTTP.
+
+
+### `proxy_host` [plugins-outputs-loggly-proxy_host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Proxy Host
+
+
+### `proxy_password` [plugins-outputs-loggly-proxy_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `""`
+
+Proxy Password
+
+
+### `proxy_port` [plugins-outputs-loggly-proxy_port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Proxy Port
+
+
+### `proxy_user` [plugins-outputs-loggly-proxy_user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Proxy Username
+
+
+### `retry_count` [plugins-outputs-loggly-retry_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Retry count. A request may time out, for example due to a slow Internet connection. If that happens, this setting controls how many times the plugin retries the request before giving up.
+
+
+### `tag` [plugins-outputs-loggly-tag]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+
+Loggly Tags help you to find your logs in the Loggly dashboard easily. You can search for a tag in Loggly, using `"tag:your_tag"`.
+
+If you need to specify multiple tags here on your events, specify them as outlined in [the tag documentation](https://www.loggly.com/docs/tags/). E.g. `"tag" => "foo,bar,myApp"`.
+
+You can also use `"tag" => "%{{somefield}},%{{another_field}}"` to take your tag values from `somefield` and `another_field` on your event. If the field doesn’t exist, no tag will be created. Helpful for leveraging [Loggly source groups](https://www.loggly.com/docs/source-groups/).
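+
+As an end-to-end sketch (the customer token and tag values are placeholders), a configuration that ships events over HTTPS with retries and a tag might look like this:
+
+```ruby
+ output {
+   loggly {
+     key         => "your-customer-token"    # placeholder customer token
+     proto       => "https"                  # send over HTTPS instead of plain HTTP
+     tag         => "logstash,%{type}"       # placeholder tags; the second tag comes from the event
+     can_retry   => true
+     retry_count => 3
+   }
+ }
+```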
+
+
+
+## Common options [plugins-outputs-loggly-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-loggly-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-loggly-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-loggly-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-loggly-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-loggly-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-loggly-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 loggly outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ loggly {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-logstash.md b/docs/reference/plugins-outputs-logstash.md
new file mode 100644
index 000000000..f8d5f592f
--- /dev/null
+++ b/docs/reference/plugins-outputs-logstash.md
@@ -0,0 +1,299 @@
+---
+navigation_title: "logstash"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-logstash.html
+---
+
+# Logstash output plugin [plugins-outputs-logstash]
+
+
+* A component of the [logstash integration plugin](/reference/plugins-integrations-logstash.md)
+* Integration version: v1.0.4
+* Released on: 2024-12-10
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-logstash/blob/v1.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-logstash-index.md).
+
+## Getting help [_getting_help_92]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-logstash). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_92]
+
+Send events to a [Logstash input plugin](/reference/plugins-inputs-logstash.md) in a pipeline that may be in another process or on another host. You must have a TCP route to the port (defaults to 9800) on an interface that the downstream input is bound to.
+
+::::{note}
+Sending events to *any* destination other than a `logstash-input` plugin is neither advised nor supported. We will maintain cross-compatibility between any two supported versions of the output/input pair and reserve the right to change details such as protocol and encoding.
+::::
+
+
+### Minimum Configuration [plugins-outputs-logstash-minimum-config]
+
+| SSL Enabled | SSL Disabled |
+| --- | --- |
+| ``` output { logstash { hosts => "10.0.0.123:9801" } } ``` | ``` output { logstash { hosts => "10.0.0.123:9801" ssl_enabled => false } } ``` |
+
+
+### Configuration Concepts [plugins-outputs-logstash-config-connecting]
+
+Configure this output plugin to connect to a [Logstash input plugin](/reference/plugins-inputs-logstash.md) by specifying its `hosts`. Depending on the downstream plugin’s configuration, you may need to also configure the target port, SSL, and/or credentials.
+
+
+### Security: SSL Trust [plugins-outputs-logstash-config-ssl-trust]
+
+When communicating over SSL, this plugin establishes trust of the server it connects to before transmitting credentials or events.
+
+It does so by ensuring that the responding server presents a currently-valid certificate with identity claims matching the host it is connecting to, signed by a trusted signing authority, along with proof-of-possession of the associated private key material.
+
+The system trust store is used by default. You can provide an *alternate* source of trust with *ONE OF*:
+
+* A PEM-formatted list of trusted certificate authorities (see [`ssl_certificate_authorities`](#plugins-outputs-logstash-ssl_certificate_authorities))
+* A PKCS12- or JKS-formatted truststore (see [`ssl_truststore_path`](#plugins-outputs-logstash-ssl_truststore_path))
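+
+For instance, trusting a private certificate authority instead of the system store could be sketched like this (the host and CA path are placeholders):
+
+```ruby
+ output {
+   logstash {
+     hosts => "10.0.0.123:9801"                                          # placeholder downstream host
+     ssl_certificate_authorities => "/etc/logstash/downstream-ca.pem"    # placeholder CA bundle
+   }
+ }
+```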
+
+
+### Security: SSL Identity [plugins-outputs-logstash-config-ssl-identity]
+
+If the downstream input plugin is configured to request or require client authentication, you can configure this plugin to provide its proof-of-identity with *ONE OF*:
+
+* JKS- or PKCS12-formatted Keystore (see [`ssl_keystore_path`](#plugins-outputs-logstash-ssl_keystore_path))
+* PKCS8-formatted Certificate/Key pair (see [`ssl_certificate`](#plugins-outputs-logstash-ssl_certificate))
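+
+A certificate/key pair, for example, could be supplied as sketched below; the paths are placeholders and the key must be an *unencrypted* PKCS8 key:
+
+```ruby
+ output {
+   logstash {
+     hosts           => "10.0.0.123:9801"                   # placeholder downstream host
+     ssl_certificate => "/etc/logstash/upstream.crt"        # placeholder certificate path
+     ssl_key         => "/etc/logstash/upstream.pkcs8.key"  # placeholder key path
+   }
+ }
+```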
+
+
+### Security: Credentials [plugins-outputs-logstash-config-credentials]
+
+If the downstream `logstash-input` plugin is configured to require `username` and `password`, you will need to configure this output with a matching [`username`](#plugins-outputs-logstash-username) and [`password`](#plugins-outputs-logstash-password).
+
+::::{note}
+When SSL is disabled, data and credentials are transmitted in clear text.
+::::
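+
+A sketch with placeholder credentials (SSL is left at its secure default so the credentials are not sent in clear text):
+
+```ruby
+ output {
+   logstash {
+     hosts    => "10.0.0.123:9801"   # placeholder downstream host
+     username => "relay_user"        # placeholder username
+     password => "relay_password"    # placeholder password
+   }
+ }
+```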
+
+
+
+
+## Logstash Output Configuration Options [plugins-outputs-logstash-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-logstash-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`hosts`](#plugins-outputs-logstash-hosts) | list of [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-outputs-logstash-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_enabled`](#plugins-outputs-logstash-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_certificate`](#plugins-outputs-logstash-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-outputs-logstash-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_key`](#plugins-outputs-logstash-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_path`](#plugins-outputs-logstash-ssl_keystore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_keystore_password`](#plugins-outputs-logstash-ssl_keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_truststore_path`](#plugins-outputs-logstash-ssl_truststore_path) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_truststore_password`](#plugins-outputs-logstash-ssl_truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_verification_mode`](#plugins-outputs-logstash-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`username`](#plugins-outputs-logstash-username) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-logstash-common-options) for a list of options supported by all output plugins.
+
+
+
+### `hosts` [plugins-outputs-logstash-hosts]
+
+* Value type is list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* Constraints:
+
+  * When using IPv6, the IP address must be enclosed in brackets.
+ * When a port is not provided, the default `9800` is used.
+
+
+The addresses of one or more downstream `input`s to connect to.
+
+A host can be an IPv4 address, an IPv6 address (enclosed in brackets), or a hostname. Examples:
+
+* `"127.0.0.1"`
+* `"127.0.0.1:9801"`
+* `"ds.example.com"`
+* `"ds.example.com:9802"`
+* `"[::1]"`
+* `"[::1]:9803"`
+* `"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]"`
+* `"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:9804"`
+
+The plugin balances load among the `hosts`. For more information, see the *Load Balancing* section of the [Logstash integration plugin](/reference/plugins-integrations-logstash.md).
+
+When connecting, communication to downstream input {{ls}} is secured with SSL unless configured otherwise.
+
+::::{admonition} Disabling SSL is dangerous
+:class: warning
+
+The security of this plugin relies on SSL to avoid leaking credentials and to avoid running illegitimate ingest pipeline definitions.
+
+::::
+
+
+::::{note}
+When using SSL, the server that responds must present a certificate with an identity claim matching this host name or IP address.
+::::
+
+
+
+### `password` [plugins-outputs-logstash-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when [`username`](#plugins-outputs-logstash-username) is configured.
+
+Password for password-based authentication.
+
+When the downstream input plugin is configured with a `username` and `password`, you must also configure upstream outputs with a matching `username`/`password` pair.
+
+
+### `ssl_enabled` [plugins-outputs-logstash-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Logstash-to-Logstash communication is secured by default. When the downstream input plugin disables SSL, it must also be disabled here.
+
+You can disable SSL with `ssl_enabled => false`. When disabled, setting any `ssl_*` configuration causes a configuration failure.
+
+
+### `ssl_certificate` [plugins-outputs-logstash-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_key`](#plugins-outputs-logstash-ssl_key) is also required.
+* Cannot be combined with configurations that disable SSL.
+
+Path to a PEM-encoded certificate or certificate chain with which to identify this plugin to connecting downstream input.
+
+
+### `ssl_certificate_authorities` [plugins-outputs-logstash-ssl_certificate_authorities]
+
+* Value type is a [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* Cannot be combined with configurations that disable SSL.
+* Cannot be combined with [`ssl_verification_mode => none`](#plugins-outputs-logstash-ssl_verification_mode).
+
+One or more PEM-encoded files defining certificate authorities for use in downstream input authentication. This setting can be used to *override* the system trust store for verifying the SSL certificate presented by downstream input.
+
+
+### `ssl_key` [plugins-outputs-logstash-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_certificate`](#plugins-outputs-logstash-ssl_certificate)
+* Cannot be combined with configurations that disable SSL.
+
+A path to a PEM-encoded *unencrypted* PKCS8 SSL certificate key.
+
+
+### `ssl_keystore_path` [plugins-outputs-logstash-ssl_keystore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_keystore_password`](#plugins-outputs-logstash-ssl_keystore_password) is also required.
+* Cannot be combined with configurations that disable SSL.
+
+A path to a JKS- or PKCS12-formatted keystore with which to identify this plugin to the downstream input. The provided identity will be used if the downstream input enables [SSL client authentication](#plugins-outputs-logstash-config-ssl-trust).
+
+
+### `ssl_keystore_password` [plugins-outputs-logstash-ssl_keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_keystore_path`](#plugins-outputs-logstash-ssl_keystore_path)
+* Cannot be combined with configurations that disable SSL.
+
+Password for the [`ssl_keystore_path`](#plugins-outputs-logstash-ssl_keystore_path)
+
+
+### `ssl_truststore_path` [plugins-outputs-logstash-ssl_truststore_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+* When present, [`ssl_truststore_password`](#plugins-outputs-logstash-ssl_truststore_password) is also required.
+* Cannot be combined with configurations that disable SSL.
+* Cannot be combined with [`ssl_verification_mode => none`](#plugins-outputs-logstash-ssl_verification_mode).
+
+A path to a JKS- or PKCS12-formatted truststore with which to validate the identity claims of the downstream input.
+
+
+### `ssl_truststore_password` [plugins-outputs-logstash-ssl_truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+* Required when connection identity is configured with [`ssl_truststore_path`](#plugins-outputs-logstash-ssl_truststore_path)
+* Cannot be combined with configurations that disable SSL.
+
+Password for the [`ssl_truststore_path`](#plugins-outputs-logstash-ssl_truststore_path)
+
+
+### `ssl_verification_mode` [plugins-outputs-logstash-ssl_verification_mode]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* The supported modes are:
+
+  * `full`: verifies that the certificate presented by the downstream input has an identity claim matching [`hosts`](#plugins-outputs-logstash-hosts), is signed by a trusted authority (CA), is within its valid date range, and that its presenter has possession of the associated key.
+ * `none`: performs no validation of the presented certificate
+
+* The default value is `full`.
+* Cannot be combined with configurations that disable SSL.
+
+When communicating over SSL, this setting controls how the downstream input’s certificate is verified.
+
+
+### `username` [plugins-outputs-logstash-username]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+* When present, [`password`](#plugins-outputs-logstash-password) is also required.
+
+Username for password-based authentication.
+
+When the downstream input plugin is configured with a `username` and `password`, you must also configure upstream outputs with a matching `username`/`password` pair.
+
+::::{note}
+When SSL is disabled, credentials are transmitted in clear text.
+::::
+
+
+
+
+## Common options [plugins-outputs-logstash-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`enable_metric`](#plugins-outputs-logstash-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-logstash-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `enable_metric` [plugins-outputs-logstash-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-logstash-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 logstash outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ logstash {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-lumberjack.md b/docs/reference/plugins-outputs-lumberjack.md
new file mode 100644
index 000000000..3b767b510
--- /dev/null
+++ b/docs/reference/plugins-outputs-lumberjack.md
@@ -0,0 +1,139 @@
+---
+navigation_title: "lumberjack"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-lumberjack.html
+---
+
+# Lumberjack output plugin [plugins-outputs-lumberjack]
+
+
+* Plugin version: v3.1.9
+* Released on: 2021-08-30
+* [Changelog](https://github.com/logstash-plugins/logstash-output-lumberjack/blob/v3.1.9/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-lumberjack-index.md).
+
+## Getting help [_getting_help_94]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-lumberjack). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_94]
+
+This output sends events using the lumberjack protocol.
+
+
+## Lumberjack Output Configuration Options [plugins-outputs-lumberjack-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-lumberjack-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`flush_size`](#plugins-outputs-lumberjack-flush_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`hosts`](#plugins-outputs-lumberjack-hosts) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`idle_flush_time`](#plugins-outputs-lumberjack-idle_flush_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`port`](#plugins-outputs-lumberjack-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl_certificate`](#plugins-outputs-lumberjack-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | Yes |
+
+Also see [Common options](#plugins-outputs-lumberjack-common-options) for a list of options supported by all output plugins.
+
+
+
+### `flush_size` [plugins-outputs-lumberjack-flush_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1024`
+
+To make efficient calls to the Lumberjack output, we buffer events locally. If the number of buffered events exceeds the declared `flush_size`, we send them to the Logstash server.
+
+
+### `hosts` [plugins-outputs-lumberjack-hosts]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+List of addresses lumberjack can send to. When the plugin needs to connect to the remote peer, it randomly selects one of the hosts.
+
+When the plugin is registered, it opens a connection to one of the hosts. If the plugin detects a connection error, it selects a different host from the list and opens a new connection.
+
+
+### `idle_flush_time` [plugins-outputs-lumberjack-idle_flush_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The amount of time since last flush before a flush is forced.
+
+This setting helps ensure slow event rates don’t get stuck in Logstash. For example, if your `flush_size` is 100, and you have received 10 events, and it has been more than `idle_flush_time` seconds since the last flush, Logstash will flush those 10 events automatically.
+
+This helps keep both fast and slow log streams moving along in near-real-time.
+
+
+### `port` [plugins-outputs-lumberjack-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The port to connect to.
+
+
+### `ssl_certificate` [plugins-outputs-lumberjack-ssl_certificate]
+
+* This is a required setting.
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The SSL certificate to use.
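+
+Pulling the required settings together, a minimal configuration could be sketched as follows; the host, port, and certificate path are placeholders:
+
+```ruby
+ output {
+   lumberjack {
+     hosts           => ["downstream.example.com"]      # placeholder host list
+     port            => 5000                            # placeholder port
+     ssl_certificate => "/etc/logstash/lumberjack.crt"  # placeholder certificate path
+     flush_size      => 1024
+     idle_flush_time => 1
+   }
+ }
+```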
+
+
+
+## Common options [plugins-outputs-lumberjack-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-lumberjack-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-lumberjack-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-lumberjack-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-lumberjack-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-lumberjack-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-lumberjack-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 lumberjack outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ lumberjack {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-metriccatcher.md b/docs/reference/plugins-outputs-metriccatcher.md
new file mode 100644
index 000000000..5c385b759
--- /dev/null
+++ b/docs/reference/plugins-outputs-metriccatcher.md
@@ -0,0 +1,198 @@
+---
+navigation_title: "metriccatcher"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-metriccatcher.html
+---
+
+# Metriccatcher output plugin [plugins-outputs-metriccatcher]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-metriccatcher-index.md).
+
+## Installation [_installation_38]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-metriccatcher`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_95]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-metriccatcher). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_95]
+
+This output ships metrics to MetricCatcher, allowing you to utilize Coda Hale’s Metrics.
+
+More info on MetricCatcher: [https://github.com/clearspring/MetricCatcher](https://github.com/clearspring/MetricCatcher)
+
+At Clearspring, we use it to count the response codes from Apache logs:
+
+```ruby
+ metriccatcher {
+ host => "localhost"
+ port => "1420"
+ type => "apache-access"
+ fields => [ "response" ]
+ meter => {
+ "%{host}.apache.response.%{response}" => "1"
+ }
+ }
+```
+
+
+## Metriccatcher Output Configuration Options [plugins-outputs-metriccatcher-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-metriccatcher-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`biased`](#plugins-outputs-metriccatcher-biased) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`counter`](#plugins-outputs-metriccatcher-counter) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`gauge`](#plugins-outputs-metriccatcher-gauge) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`host`](#plugins-outputs-metriccatcher-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`meter`](#plugins-outputs-metriccatcher-meter) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`port`](#plugins-outputs-metriccatcher-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`timer`](#plugins-outputs-metriccatcher-timer) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`uniform`](#plugins-outputs-metriccatcher-uniform) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-outputs-metriccatcher-common-options) for a list of options supported by all output plugins.
+
+
+
+### `biased` [plugins-outputs-metriccatcher-biased]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value.
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `counter` [plugins-outputs-metriccatcher-counter]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value. Example:
+
+```ruby
+    counter => { "%{host}.apache.hits.%{response}" => "1" }
+```
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `gauge` [plugins-outputs-metriccatcher-gauge]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value.
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
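+
+Example, following the same pattern as the `counter` setting (the metric name and source field here are placeholders):
+
+```ruby
+    gauge => { "%{host}.system.load_average" => "%{load_average}" }
+```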
+
+
+### `host` [plugins-outputs-metriccatcher-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address of the MetricCatcher
+
+
+### `meter` [plugins-outputs-metriccatcher-meter]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value.
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `port` [plugins-outputs-metriccatcher-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1420`
+
+The port to connect to on your MetricCatcher.
+
+
+### `timer` [plugins-outputs-metriccatcher-timer]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value. Example:
+
+```ruby
+    timer => { "%{host}.apache.response_time" => "%{response_time}" }
+```
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `uniform` [plugins-outputs-metriccatcher-uniform]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+The metrics to send. This supports dynamic strings like `%{{host}}` for metric names and also for values. This is a hash field with key of the metric name, value of the metric value.
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+
+## Common options [plugins-outputs-metriccatcher-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-metriccatcher-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-metriccatcher-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-metriccatcher-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-metriccatcher-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-metriccatcher-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-metriccatcher-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 metriccatcher outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ metriccatcher {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-mongodb.md b/docs/reference/plugins-outputs-mongodb.md
new file mode 100644
index 000000000..872bb8255
--- /dev/null
+++ b/docs/reference/plugins-outputs-mongodb.md
@@ -0,0 +1,174 @@
+---
+navigation_title: "mongodb"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-mongodb.html
+---
+
+# Mongodb output plugin [plugins-outputs-mongodb]
+
+
+* Plugin version: v3.1.8
+* Released on: 2025-01-02
+* [Changelog](https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-mongodb-index.md).
+
+## Installation [_installation_39]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-mongodb`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_96]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-mongodb). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_96]
+
+This output writes events to MongoDB.
+
+
+## Mongodb Output Configuration Options [plugins-outputs-mongodb-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-mongodb-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`bulk`](#plugins-outputs-mongodb-bulk) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`bulk_interval`](#plugins-outputs-mongodb-bulk_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`bulk_size`](#plugins-outputs-mongodb-bulk_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`collection`](#plugins-outputs-mongodb-collection) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`database`](#plugins-outputs-mongodb-database) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`generateId`](#plugins-outputs-mongodb-generateId) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`isodate`](#plugins-outputs-mongodb-isodate) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`retry_delay`](#plugins-outputs-mongodb-retry_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`uri`](#plugins-outputs-mongodb-uri) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-mongodb-common-options) for a list of options supported by all output plugins.
+
+
+
+### `bulk` [plugins-outputs-mongodb-bulk]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Bulk insert flag. Set to true to allow bulk insertion; otherwise events are inserted one by one.
+
+
+### `bulk_interval` [plugins-outputs-mongodb-bulk_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `2`
+
+Bulk interval. Used to insert buffered events periodically when the `bulk` flag is activated.
+
+
+### `bulk_size` [plugins-outputs-mongodb-bulk_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `900`
+
+The maximum number of events to buffer for a bulk insert. If the number of events to insert into a collection reaches this limit, they are bulk inserted regardless of the bulk interval (the MongoDB hard limit is 1000).
+
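+For example, a hedged sketch that buffers events and flushes them either every few seconds or once enough events have accumulated (the connection details and values are illustrative):
+
+```ruby
+output {
+  mongodb {
+    uri           => "mongodb://localhost:27017"  # illustrative connection string
+    database      => "logstash"
+    collection    => "logs"
+    bulk          => true    # insert buffered events in batches
+    bulk_interval => 5       # flush roughly every 5 seconds
+    bulk_size     => 500     # or as soon as 500 events are buffered (MongoDB hard limit is 1000)
+  }
+}
+```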
+
+### `collection` [plugins-outputs-mongodb-collection]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The collection to use. This value can use `%{{foo}}` values to dynamically select a collection based on data in the event.
+
+
+### `database` [plugins-outputs-mongodb-database]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The database to use.
+
+
+### `generateId` [plugins-outputs-mongodb-generateId]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If true, an "_id" field will be added to the document before insertion. The "_id" field will use the timestamp of the event and overwrite an existing "_id" field in the event.
+
+
+### `isodate` [plugins-outputs-mongodb-isodate]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If true, store the @timestamp field in MongoDB as an ISODate type instead of an ISO8601 string. For more information about this, see [http://www.mongodb.org/display/DOCS/Dates](http://www.mongodb.org/display/DOCS/Dates).
+
+
+### `retry_delay` [plugins-outputs-mongodb-retry_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3`
+
+The number of seconds to wait after failure before retrying.
+
+
+### `uri` [plugins-outputs-mongodb-uri]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A MongoDB URI to connect to. See [http://docs.mongodb.org/manual/reference/connection-string/](http://docs.mongodb.org/manual/reference/connection-string/).
+
+
+
+## Common options [plugins-outputs-mongodb-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-mongodb-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-mongodb-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-mongodb-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-mongodb-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-mongodb-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-mongodb-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 mongodb outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ mongodb {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-nagios.md b/docs/reference/plugins-outputs-nagios.md
new file mode 100644
index 000000000..3ee917f32
--- /dev/null
+++ b/docs/reference/plugins-outputs-nagios.md
@@ -0,0 +1,129 @@
+---
+navigation_title: "nagios"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-nagios.html
+---
+
+# Nagios output plugin [plugins-outputs-nagios]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-nagios-index.md).
+
+## Getting help [_getting_help_97]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-nagios). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_97]
+
+The Nagios output is used for sending passive check results to Nagios via the Nagios command file. This output currently supports Nagios 3.
+
+For this output to work, your event *must* have the following Logstash event fields:
+
+* `nagios_host`
+* `nagios_service`
+
+These Logstash event fields are supported, but optional:
+
+* `nagios_annotation`
+* `nagios_level` (overrides `nagios_level` configuration option)
+
+There are two configuration options:
+
+* `commandfile` - The location of the Nagios external command file. Defaults to */var/lib/nagios3/rw/nagios.cmd*
+* `nagios_level` - Specifies the level of the check to be sent. Defaults to CRITICAL and can be overridden by setting the "nagios_level" field to one of "OK", "WARNING", "CRITICAL", or "UNKNOWN"
+
+ ```ruby
+ output{
+ if [message] =~ /(error|ERROR|CRITICAL)/ {
+ nagios {
+ # your config here
+ }
+ }
+ }
+ ```
+
+
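+Because the output reads `nagios_host` and `nagios_service` from the event itself, those fields are typically added in the filter stage. A hedged sketch, assuming events carry a `host` field and using an illustrative service name:
+
+```ruby
+filter {
+  if [message] =~ /(error|ERROR|CRITICAL)/ {
+    mutate {
+      add_field => {
+        "nagios_host"    => "%{host}"          # assumes events carry a `host` field
+        "nagios_service" => "logstash-alerts"  # illustrative Nagios service name
+      }
+    }
+  }
+}
+
+output {
+  if [nagios_host] {
+    nagios {
+      commandfile => "/var/lib/nagios3/rw/nagios.cmd"
+    }
+  }
+}
+```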
+
+## Nagios Output Configuration Options [plugins-outputs-nagios-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-nagios-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`commandfile`](#plugins-outputs-nagios-commandfile) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nagios_level`](#plugins-outputs-nagios-nagios_level) | [string](/reference/configuration-file-structure.md#string), one of `["0", "1", "2", "3"]` | No |
+
+Also see [Common options](#plugins-outputs-nagios-common-options) for a list of options supported by all output plugins.
+
+
+
+### `commandfile` [plugins-outputs-nagios-commandfile]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/var/lib/nagios3/rw/nagios.cmd"`
+
+The full path to your Nagios command file.
+
+
+### `nagios_level` [plugins-outputs-nagios-nagios_level]
+
+* Value can be any of: `0`, `1`, `2`, `3`
+* Default value is `"2"`
+
+The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN. Defaults to 2 - CRITICAL.
+
+
+
+## Common options [plugins-outputs-nagios-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-nagios-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-nagios-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-nagios-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-nagios-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-nagios-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-nagios-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 nagios outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ nagios {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-nagios_nsca.md b/docs/reference/plugins-outputs-nagios_nsca.md
new file mode 100644
index 000000000..90aebffc1
--- /dev/null
+++ b/docs/reference/plugins-outputs-nagios_nsca.md
@@ -0,0 +1,180 @@
+---
+navigation_title: "nagios_nsca"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-nagios_nsca.html
+---
+
+# Nagios_nsca output plugin [plugins-outputs-nagios_nsca]
+
+
+* Plugin version: v3.0.7
+* Released on: 2021-09-20
+* [Changelog](https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-nagios_nsca-index.md).
+
+## Installation [_installation_40]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-nagios_nsca`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_98]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-nagios_nsca). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_98]
+
+The nagios_nsca output is used for sending passive check results to Nagios through the NSCA protocol.
+
+This is useful if your Nagios server is not the same as the source host from where you want to send logs or alerts. If you only have one server, this output is probably overkill for you; take a look at the *nagios* output instead.
+
+Here is a sample config using the nagios_nsca output:
+
+```ruby
+output {
+  nagios_nsca {
+    # specify the hostname or ip of your nagios server
+    host => "nagios.example.com"
+
+    # specify the port to connect to
+    port => 5667
+  }
+}
+```
+
+## Nagios_nsca Output Configuration Options [plugins-outputs-nagios_nsca-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-nagios_nsca-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-nagios_nsca-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`message_format`](#plugins-outputs-nagios_nsca-message_format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nagios_host`](#plugins-outputs-nagios_nsca-nagios_host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nagios_service`](#plugins-outputs-nagios_nsca-nagios_service) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nagios_status`](#plugins-outputs-nagios_nsca-nagios_status) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`port`](#plugins-outputs-nagios_nsca-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`send_nsca_bin`](#plugins-outputs-nagios_nsca-send_nsca_bin) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`send_nsca_config`](#plugins-outputs-nagios_nsca-send_nsca_config) | a valid filesystem path | No |
+
+Also see [Common options](#plugins-outputs-nagios_nsca-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-nagios_nsca-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The Nagios host or IP to send logs to. It should have an NSCA daemon running.
+
+
+### `message_format` [plugins-outputs-nagios_nsca-message_format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{@timestamp} %{{host}}: %{{message}}"`
+
+The format to use when writing events to nagios. This value supports any string and can include `%{{name}}` and other dynamic strings.
+
+
+### `nagios_host` [plugins-outputs-nagios_nsca-nagios_host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+The nagios *host* you want to submit a passive check result to. This parameter accepts interpolation, e.g. you can use `@source_host` or other logstash internal variables.
+
+
+### `nagios_service` [plugins-outputs-nagios_nsca-nagios_service]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"LOGSTASH"`
+
+The nagios *service* you want to submit a passive check result to. This parameter accepts interpolation, e.g. you can use `@source_host` or other logstash internal variables.
+
+
+### `nagios_status` [plugins-outputs-nagios_nsca-nagios_status]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The status to send to Nagios. Should be one of 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN.
+
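+Since `nagios_status` has no default, it is often derived from the event with a `sprintf` reference. A hedged sketch, assuming each event carries a numeric `alert_level` field in the 0-3 range and that this setting is sprintf-interpolated like other string settings:
+
+```ruby
+output {
+  nagios_nsca {
+    host          => "nagios.example.com"   # illustrative NSCA host
+    nagios_status => "%{alert_level}"       # assumes events carry an `alert_level` field (0-3)
+  }
+}
+```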
+
+### `port` [plugins-outputs-nagios_nsca-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5667`
+
+The port where the NSCA daemon on the nagios host listens.
+
+
+### `send_nsca_bin` [plugins-outputs-nagios_nsca-send_nsca_bin]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/usr/sbin/send_nsca"`
+
+The path to the *send_nsca* binary on the local host.
+
+
+### `send_nsca_config` [plugins-outputs-nagios_nsca-send_nsca_config]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The path to the send_nsca config file on the local host. Leave blank if you don’t want to provide a config file.
+
+
+
+## Common options [plugins-outputs-nagios_nsca-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-nagios_nsca-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-nagios_nsca-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-nagios_nsca-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-nagios_nsca-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-nagios_nsca-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-nagios_nsca-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 nagios_nsca outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ nagios_nsca {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-opentsdb.md b/docs/reference/plugins-outputs-opentsdb.md
new file mode 100644
index 000000000..73edc1837
--- /dev/null
+++ b/docs/reference/plugins-outputs-opentsdb.md
@@ -0,0 +1,131 @@
+---
+navigation_title: "opentsdb"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-opentsdb.html
+---
+
+# Opentsdb output plugin [plugins-outputs-opentsdb]
+
+
+* Plugin version: v3.1.5
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-opentsdb-index.md).
+
+## Installation [_installation_41]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-opentsdb`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_99]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-opentsdb). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_99]
+
+This output allows you to pull metrics from your logs and ship them to OpenTSDB. OpenTSDB is an open source tool for storing and graphing metrics.
+
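+A minimal sketch of the plugin in use; the server address is illustrative, and the metric name/value pair assumes events carry `host` and `uptime_1m` fields:
+
+```ruby
+output {
+  opentsdb {
+    host    => "opentsdb.example.com"              # illustrative OpenTSDB server
+    port    => 4242
+    metrics => ["%{host}/uptime", "%{uptime_1m}"]  # metric name followed by its value
+  }
+}
+```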
+
+## Opentsdb Output Configuration Options [plugins-outputs-opentsdb-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-opentsdb-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-opentsdb-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`metrics`](#plugins-outputs-opentsdb-metrics) | [array](/reference/configuration-file-structure.md#array) | Yes |
+| [`port`](#plugins-outputs-opentsdb-port) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-opentsdb-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-opentsdb-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address of the opentsdb server.
+
+
+### `metrics` [plugins-outputs-opentsdb-metrics]
+
+* This is a required setting.
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The metric(s) to use. This supports dynamic strings like `%{{source_host}}` for metric names and also for values. This is an array field whose entries are the metric name, the metric value, and then optional tag name/value pairs. Example:
+
+```ruby
+  [
+    "%{host}/uptime",
+    "%{uptime_1m}",
+    "hostname",
+    "%{host}",
+    "anotherhostname",
+    "%{host}"
+  ]
+```
+
+The value will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0).
+
+
+### `port` [plugins-outputs-opentsdb-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4242`
+
+The port to connect to on your OpenTSDB server.
+
+
+
+## Common options [plugins-outputs-opentsdb-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-opentsdb-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-opentsdb-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-opentsdb-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-opentsdb-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-opentsdb-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-opentsdb-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 opentsdb outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ opentsdb {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-pagerduty.md b/docs/reference/plugins-outputs-pagerduty.md
new file mode 100644
index 000000000..9e1717d57
--- /dev/null
+++ b/docs/reference/plugins-outputs-pagerduty.md
@@ -0,0 +1,145 @@
+---
+navigation_title: "pagerduty"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-pagerduty.html
+---
+
+# Pagerduty output plugin [plugins-outputs-pagerduty]
+
+
+* Plugin version: v3.0.9
+* Released on: 2020-01-27
+* [Changelog](https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.9/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-pagerduty-index.md).
+
+## Installation [_installation_42]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-pagerduty`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_100]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-pagerduty). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_100]
+
+The PagerDuty output will send notifications based on pre-configured services and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve" event types. In addition, you may configure custom descriptions and event details. The only required field is the PagerDuty "Service API Key", which can be found on the service’s web page on pagerduty.com. In the default case, the description and event details will be populated by Logstash, using `message`, `timestamp` and `host` data.
+
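+A minimal trigger configuration might look like the following sketch; the service key is a placeholder for the Service API Key from your PagerDuty service page, and the `error` tag is an illustrative trigger condition:
+
+```ruby
+output {
+  if "error" in [tags] {
+    pagerduty {
+      service_key => "YOUR_PAGERDUTY_SERVICE_API_KEY"   # placeholder
+      event_type  => "trigger"
+      description => "Logstash alert for %{host}"       # assumes events carry a `host` field
+    }
+  }
+}
+```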
+
+## Pagerduty Output Configuration Options [plugins-outputs-pagerduty-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-pagerduty-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`description`](#plugins-outputs-pagerduty-description) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`details`](#plugins-outputs-pagerduty-details) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`event_type`](#plugins-outputs-pagerduty-event_type) | [string](/reference/configuration-file-structure.md#string), one of `["trigger", "acknowledge", "resolve"]` | No |
+| [`incident_key`](#plugins-outputs-pagerduty-incident_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pdurl`](#plugins-outputs-pagerduty-pdurl) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`service_key`](#plugins-outputs-pagerduty-service_key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-pagerduty-common-options) for a list of options supported by all output plugins.
+
+
+
+### `description` [plugins-outputs-pagerduty-description]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"Logstash event for %{{host}}"`
+
+Custom description
+
+
+### `details` [plugins-outputs-pagerduty-details]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{{message}}"}`
+
+The event details. These might be data from the Logstash event fields you wish to include. Tags are automatically included if detected so there is no need to explicitly add them here.
+
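+For example, a hedged sketch of a `details` hash that forwards a few event fields (the `log_level` field is an assumption about your events):
+
+```ruby
+  details => {
+    "timestamp" => "%{@timestamp}"
+    "message"   => "%{message}"
+    "severity"  => "%{log_level}"   # assumes events carry a `log_level` field
+  }
+```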
+
+### `event_type` [plugins-outputs-pagerduty-event_type]
+
+* Value can be any of: `trigger`, `acknowledge`, `resolve`
+* Default value is `"trigger"`
+
+Event type
+
+
+### `incident_key` [plugins-outputs-pagerduty-incident_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash/%{{host}}/%{{type}}"`
+
+The incident key to use. PagerDuty groups events that share the same incident key into a single incident; the default groups events by host and type.
+
+
+### `pdurl` [plugins-outputs-pagerduty-pdurl]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"`
+
+PagerDuty API URL. You shouldn’t need to change this, but it is included to allow for flexibility in case PagerDuty iterates the API and Logstash hasn’t been updated yet.
+
+
+### `service_key` [plugins-outputs-pagerduty-service_key]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The PagerDuty Service API Key
+
+
+
+## Common options [plugins-outputs-pagerduty-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-pagerduty-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-pagerduty-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-pagerduty-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-pagerduty-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-pagerduty-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-pagerduty-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 pagerduty outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ pagerduty {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-pipe.md b/docs/reference/plugins-outputs-pipe.md
new file mode 100644
index 000000000..636108d6a
--- /dev/null
+++ b/docs/reference/plugins-outputs-pipe.md
@@ -0,0 +1,117 @@
+---
+navigation_title: "pipe"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-pipe.html
+---
+
+# Pipe output plugin [plugins-outputs-pipe]
+
+
+* Plugin version: v3.0.6
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.6/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-pipe-index.md).
+
+## Getting help [_getting_help_101]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-pipe). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_101]
+
+Pipe output.
+
+Pipe events to the stdin of another program. You can use fields from the event as parts of the command. WARNING: This feature can cause Logstash to fork off multiple children if you are not careful with the per-event command line.
+
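+A short sketch of piping events to an external command; the command path, its flag, and the `program` field are illustrative assumptions:
+
+```ruby
+output {
+  pipe {
+    command        => "/usr/local/bin/alert-handler --source %{program}"  # assumes events carry a `program` field
+    message_format => "%{@timestamp} %{host} %{message}"
+    ttl            => 30   # close a pipe after 30 idle seconds
+  }
+}
+```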
+
+## Pipe Output Configuration Options [plugins-outputs-pipe-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-pipe-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`command`](#plugins-outputs-pipe-command) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`message_format`](#plugins-outputs-pipe-message_format) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ttl`](#plugins-outputs-pipe-ttl) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-pipe-common-options) for a list of options supported by all output plugins.
+
+
+
+### `command` [plugins-outputs-pipe-command]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Command line to launch and pipe to
+
+
+### `message_format` [plugins-outputs-pipe-message_format]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The format to use when writing events to the pipe. This value supports any string and can include `%{{name}}` and other dynamic strings.
+
+If this setting is omitted, the full JSON representation of the event will be written as a single line.
+
+
+### `ttl` [plugins-outputs-pipe-ttl]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Close pipe that hasn’t been used for TTL seconds. -1 or 0 means never close.
+
+
+
+## Common options [plugins-outputs-pipe-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-pipe-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-pipe-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-pipe-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-pipe-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-pipe-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-pipe-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 pipe outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ pipe {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-rabbitmq.md b/docs/reference/plugins-outputs-rabbitmq.md
new file mode 100644
index 000000000..8df1efa19
--- /dev/null
+++ b/docs/reference/plugins-outputs-rabbitmq.md
@@ -0,0 +1,296 @@
+---
+navigation_title: "rabbitmq"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-rabbitmq.html
+---
+
+# Rabbitmq output plugin [plugins-outputs-rabbitmq]
+
+
+* A component of the [rabbitmq integration plugin](/reference/plugins-integrations-rabbitmq.md)
+* Integration version: v7.4.0
+* Released on: 2024-09-16
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-rabbitmq/blob/v7.4.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-rabbitmq-index.md).
+
+## Getting help [_getting_help_102]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-rabbitmq). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_102]
+
+Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x or later version (3.x is recommended).
+
+Relevant links:
+
+* [RabbitMQ](http://www.rabbitmq.com/)
+* [March Hare](http://rubymarchhare.info)
+
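+A minimal output configuration might look like the following sketch; the broker address, exchange name, and credentials are illustrative, and the dynamic routing key assumes events carry a `type` field:
+
+```ruby
+output {
+  rabbitmq {
+    host          => "rabbitmq.example.com"   # illustrative broker address
+    exchange      => "logstash"               # illustrative exchange name
+    exchange_type => "topic"
+    key           => "logs.%{type}"           # assumes events carry a `type` field
+    user          => "guest"
+    password      => "guest"
+  }
+}
+```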
+
+## Rabbitmq Output Configuration Options [plugins-outputs-rabbitmq-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-rabbitmq-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`arguments`](#plugins-outputs-rabbitmq-arguments) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`automatic_recovery`](#plugins-outputs-rabbitmq-automatic_recovery) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`connect_retry_interval`](#plugins-outputs-rabbitmq-connect_retry_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`connection_timeout`](#plugins-outputs-rabbitmq-connection_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`durable`](#plugins-outputs-rabbitmq-durable) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`exchange`](#plugins-outputs-rabbitmq-exchange) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`exchange_type`](#plugins-outputs-rabbitmq-exchange_type) | [string](/reference/configuration-file-structure.md#string), one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]` | Yes |
+| [`heartbeat`](#plugins-outputs-rabbitmq-heartbeat) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-outputs-rabbitmq-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`key`](#plugins-outputs-rabbitmq-key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`message_properties`](#plugins-outputs-rabbitmq-message_properties) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`passive`](#plugins-outputs-rabbitmq-passive) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`password`](#plugins-outputs-rabbitmq-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`persistent`](#plugins-outputs-rabbitmq-persistent) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`port`](#plugins-outputs-rabbitmq-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl`](#plugins-outputs-rabbitmq-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_certificate_password`](#plugins-outputs-rabbitmq-ssl_certificate_password) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_certificate_path`](#plugins-outputs-rabbitmq-ssl_certificate_path) | a valid filesystem path | No |
+| [`ssl_version`](#plugins-outputs-rabbitmq-ssl_version) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`user`](#plugins-outputs-rabbitmq-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`vhost`](#plugins-outputs-rabbitmq-vhost) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-rabbitmq-common-options) for a list of options supported by all output plugins.
+
+
+
+### `arguments` [plugins-outputs-rabbitmq-arguments]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `{}`
+
+Extra queue arguments as an array. To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+
+### `automatic_recovery` [plugins-outputs-rabbitmq-automatic_recovery]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Set this to automatically recover from a broken connection. You almost certainly don’t want to override this.
+
+
+### `connect_retry_interval` [plugins-outputs-rabbitmq-connect_retry_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Time in seconds to wait before retrying a connection
+
+
+### `connection_timeout` [plugins-outputs-rabbitmq-connection_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+The default connection timeout in milliseconds. If not specified, the timeout is infinite.
+
+
+### `durable` [plugins-outputs-rabbitmq-durable]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Is this exchange durable? (i.e., should it survive a broker restart?)
+
+
+### `exchange` [plugins-outputs-rabbitmq-exchange]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the exchange
+
+
+### `exchange_type` [plugins-outputs-rabbitmq-exchange_type]
+
+* This is a required setting.
+* Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+* There is no default value for this setting.
+
+The exchange type (fanout, topic, direct)
+
+
+### `heartbeat` [plugins-outputs-rabbitmq-heartbeat]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
+
+
+### `host` [plugins-outputs-rabbitmq-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+RabbitMQ server address(es). The host can be a single host or a list of hosts, for example `host => "localhost"` or `host => ["host01", "host02"]`.
+
+If multiple hosts are provided, one of the hosts is chosen at random for the initial connection and for any subsequent recovery attempts. Note that only one host connection is active at a time.
+
+
+### `key` [plugins-outputs-rabbitmq-key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Key to route to by default. Defaults to *logstash*. Note that the default codec for this plugin is JSON; you can override this to suit your particular needs.
+
+* Routing keys are ignored on fanout exchanges.
+
+
+### `message_properties` [plugins-outputs-rabbitmq-message_properties]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Add properties to be set per-message here, such as *content_type*, *priority*. Values can be [`sprintf` templates](/reference/event-dependent-configuration.md#sprintf), whose value for each message will be populated from the event.
+
+Example:
+
+```ruby
+ message_properties => {
+ "content_type" => "application/json"
+ "priority" => 1
+ }
+```
+
+
+### `passive` [plugins-outputs-rabbitmq-passive]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state.
+
+
+### `password` [plugins-outputs-rabbitmq-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `"guest"`
+
+RabbitMQ password
+
+
+### `persistent` [plugins-outputs-rabbitmq-persistent]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+
+### `port` [plugins-outputs-rabbitmq-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5672`
+
+RabbitMQ port to connect on
+
+
+### `ssl` [plugins-outputs-rabbitmq-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* There is no default value for this setting.
+
+Enable or disable SSL. Note that by default remote certificate verification is off. Specify `ssl_certificate_path` and `ssl_certificate_password` if you need certificate verification.
+
+
+### `ssl_certificate_password` [plugins-outputs-rabbitmq-ssl_certificate_password]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
+
+
+### `ssl_certificate_path` [plugins-outputs-rabbitmq-ssl_certificate_path]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
+
+
+### `ssl_version` [plugins-outputs-rabbitmq-ssl_version]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+
+### `user` [plugins-outputs-rabbitmq-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"guest"`
+
+RabbitMQ username
+
+
+### `vhost` [plugins-outputs-rabbitmq-vhost]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/"`
+
+The vhost (virtual host) to use. If you don’t know what this is, leave the default. With the exception of the default vhost ("/"), names of vhosts should not begin with a forward slash.
+
+
+
+## Common options [plugins-outputs-rabbitmq-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-rabbitmq-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-rabbitmq-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-rabbitmq-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-rabbitmq-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-rabbitmq-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-rabbitmq-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 rabbitmq outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ rabbitmq {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-redis.md b/docs/reference/plugins-outputs-redis.md
new file mode 100644
index 000000000..908da3e7a
--- /dev/null
+++ b/docs/reference/plugins-outputs-redis.md
@@ -0,0 +1,312 @@
+---
+navigation_title: "redis"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-redis.html
+---
+
+# Redis output plugin [plugins-outputs-redis]
+
+
+* Plugin version: v5.2.0
+* Released on: 2024-06-04
+* [Changelog](https://github.com/logstash-plugins/logstash-output-redis/blob/v5.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-redis-index.md).
+
+## Getting help [_getting_help_103]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-redis). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_103]
+
+This output will send events to a Redis queue using RPUSH. The RPUSH command is supported in Redis v0.0.7+. Using PUBLISH to a channel requires at least v1.3.8+. While you may be able to make these Redis versions work, the best performance and stability will be found in more recent stable versions. Versions 2.6.0+ are recommended.
+
+For more information, see [the Redis homepage](http://redis.io/)
+
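+A minimal sketch that pushes events onto a Redis list with batching enabled; the host is illustrative and the dynamic key assumes events carry a `type` field:
+
+```ruby
+output {
+  redis {
+    host      => ["127.0.0.1"]
+    data_type => "list"
+    key       => "logstash-%{type}"   # assumes events carry a `type` field
+    batch     => true                 # send one RPUSH per batch instead of per event
+  }
+}
+```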
+
+## Redis Output Configuration Options [plugins-outputs-redis-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-redis-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`batch`](#plugins-outputs-redis-batch) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`batch_events`](#plugins-outputs-redis-batch_events) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`batch_timeout`](#plugins-outputs-redis-batch_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`congestion_interval`](#plugins-outputs-redis-congestion_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`congestion_threshold`](#plugins-outputs-redis-congestion_threshold) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`data_type`](#plugins-outputs-redis-data_type) | [string](/reference/configuration-file-structure.md#string), one of `["list", "channel"]` | Yes |
+| [`db`](#plugins-outputs-redis-db) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-outputs-redis-host) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`key`](#plugins-outputs-redis-key) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-outputs-redis-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-redis-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`reconnect_interval`](#plugins-outputs-redis-reconnect_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`shuffle_hosts`](#plugins-outputs-redis-shuffle_hosts) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_certificate`](#plugins-outputs-redis-ssl_certificate) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_certificate_authorities`](#plugins-outputs-redis-ssl_certificate_authorities) | list of [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-redis-ssl_cipher_suites) | list of [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_enabled`](#plugins-outputs-redis-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-outputs-redis-ssl_key) | [path](/reference/configuration-file-structure.md#path) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-redis-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-redis-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+| [`timeout`](#plugins-outputs-redis-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-redis-common-options) for a list of options supported by all output plugins.
+
+
+
+### `batch` [plugins-outputs-redis-batch]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set to true if you want Redis to batch up values and send one RPUSH command instead of one command per value to push on the list. Note that this only works with `data_type="list"` mode right now.
+
+If true, an RPUSH is sent every `batch_events` events or every `batch_timeout` seconds (whichever comes first). Only supported when `data_type` is `list`.
+
+
+### `batch_events` [plugins-outputs-redis-batch_events]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+If batch is set to true, the number of events we queue up for an RPUSH.
+
+
+### `batch_timeout` [plugins-outputs-redis-batch_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+If batch is set to true, the maximum amount of time between RPUSH commands when there are pending events to flush.
+
+
+### `congestion_interval` [plugins-outputs-redis-congestion_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+How often to check for congestion. Default is one second. Zero means to check on every event.
+
+
+### `congestion_threshold` [plugins-outputs-redis-congestion_threshold]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+If the Redis `data_type` is `list` and the list has more than `@congestion_threshold` items, block until someone consumes them and reduces the congestion; otherwise, if there are no consumers, Redis will run out of memory unless it was configured with OOM protection. Even with OOM protection, a single Redis list can block all other users of Redis until Redis CPU consumption reaches the maximum allowed RAM size. The default value of 0 disables this limit. Only supported for the `list` Redis `data_type`.
+
+
+### `data_type` [plugins-outputs-redis-data_type]
+
+* Value can be any of: `list`, `channel`
+* There is no default value for this setting.
+
+Either list or channel. If `data_type` is `list`, then we will RPUSH to `key`. If `data_type` is `channel`, then we will PUBLISH to `key`.
+
+
+### `db` [plugins-outputs-redis-db]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The Redis database number.
+
+
+### `host` [plugins-outputs-redis-host]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["127.0.0.1"]`
+
+The hostname(s) of your Redis server(s). Ports may be specified on any hostname, which will override the global port config. If the hosts list is an array, Logstash will pick one random host to connect to; if that host is disconnected, it will then pick another.
+
+For example:
+
+```ruby
+ "127.0.0.1"
+ ["127.0.0.1", "127.0.0.2"]
+ ["127.0.0.1:6380", "127.0.0.1"]
+```
+
+
+### `key` [plugins-outputs-redis-key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of a Redis list or channel. Dynamic names are valid here, for example `logstash-%{{type}}`.
+
+
+### `password` [plugins-outputs-redis-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Password to authenticate with. There is no authentication by default.
+
+
+### `port` [plugins-outputs-redis-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `6379`
+
+The default port to connect on. Can be overridden on any hostname.
+
+
+### `ssl` [plugins-outputs-redis-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL support.
+
+
+### `reconnect_interval` [plugins-outputs-redis-reconnect_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Interval for reconnecting to failed Redis connections.
+
+
+### `shuffle_hosts` [plugins-outputs-redis-shuffle_hosts]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Shuffle the host list during Logstash startup.
+
+
+### `ssl_certificate` [plugins-outputs-redis-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to certificate in PEM format. This certificate will be presented to the other party in the TLS connection.
+
+
+### `ssl_certificate_authorities` [plugins-outputs-redis-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate the certificate chain against these authorities. You can define multiple files. All the certificates will be read and added to the trust store. The system CA path is automatically included.
+
+
+### `ssl_cipher_suites` [plugins-outputs-redis-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_enabled` [plugins-outputs-redis-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+
+### `ssl_key` [plugins-outputs-redis-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to the SSL key file.
+
+
+### `ssl_key_passphrase` [plugins-outputs-redis-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+Passphrase for the SSL key.
+
+
+### `ssl_supported_protocols` [plugins-outputs-redis-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a secure connection.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_verification_mode` [plugins-outputs-redis-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by the other party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+
+### `timeout` [plugins-outputs-redis-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+Redis initial connection timeout in seconds.
+
+
+
+## Common options [plugins-outputs-redis-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-redis-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-redis-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-redis-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-redis-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-redis-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-redis-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 redis outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ redis {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-redmine.md b/docs/reference/plugins-outputs-redmine.md
new file mode 100644
index 000000000..c4918d7f5
--- /dev/null
+++ b/docs/reference/plugins-outputs-redmine.md
@@ -0,0 +1,229 @@
+---
+navigation_title: "redmine"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-redmine.html
+---
+
+# Redmine output plugin [plugins-outputs-redmine]
+
+
+* Plugin version: v3.0.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-redmine-index.md).
+
+## Installation [_installation_43]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-redmine`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_104]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-redmine). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_104]
+
+The Redmine output is used to create a ticket via the Redmine API.
+
+It sends a POST request in JSON format and uses token authentication.
+
+Example of use:
+
+```ruby
+ output {
+ redmine {
+ url => "http://redmineserver.tld"
+ token => 'token'
+ project_id => 200
+ tracker_id => 1
+ status_id => 3
+ priority_id => 2
+ subject => "Error ... detected"
+ }
+ }
+```
+
+
+## Redmine Output Configuration Options [plugins-outputs-redmine-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-redmine-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`assigned_to_id`](#plugins-outputs-redmine-assigned_to_id) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`categorie_id`](#plugins-outputs-redmine-categorie_id) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`description`](#plugins-outputs-redmine-description) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`fixed_version_id`](#plugins-outputs-redmine-fixed_version_id) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`parent_issue_id`](#plugins-outputs-redmine-parent_issue_id) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`priority_id`](#plugins-outputs-redmine-priority_id) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`project_id`](#plugins-outputs-redmine-project_id) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`ssl`](#plugins-outputs-redmine-ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`status_id`](#plugins-outputs-redmine-status_id) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`subject`](#plugins-outputs-redmine-subject) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`token`](#plugins-outputs-redmine-token) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`tracker_id`](#plugins-outputs-redmine-tracker_id) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`url`](#plugins-outputs-redmine-url) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-redmine-common-options) for a list of options supported by all output plugins.
+
+
+
+### `assigned_to_id` [plugins-outputs-redmine-assigned_to_id]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `nil`
+
+Redmine issue `assigned_to` id. Not required for posting an issue.
+
+
+### `categorie_id` [plugins-outputs-redmine-categorie_id]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `nil`
+
+Redmine issue category id. Not required for posting an issue.
+
+
+### `description` [plugins-outputs-redmine-description]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{message}}"`
+
+Redmine issue description (required).
+
+
+### `fixed_version_id` [plugins-outputs-redmine-fixed_version_id]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `nil`
+
+Redmine issue `fixed_version_id`.
+
+
+### `parent_issue_id` [plugins-outputs-redmine-parent_issue_id]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `nil`
+
+Redmine issue `parent_issue_id`. Not required for posting an issue.
+
+
+### `priority_id` [plugins-outputs-redmine-priority_id]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Redmine issue `priority_id` (required).
+
+
+### `project_id` [plugins-outputs-redmine-project_id]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Redmine issue `project_id` (required).
+
+
+### `ssl` [plugins-outputs-redmine-ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+
+### `status_id` [plugins-outputs-redmine-status_id]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Redmine issue `status_id` (required).
+
+
+### `subject` [plugins-outputs-redmine-subject]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+Redmine issue subject (required).
+
+
+### `token` [plugins-outputs-redmine-token]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Redmine user token used for authentication.
+
+
+### `tracker_id` [plugins-outputs-redmine-tracker_id]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Redmine issue `tracker_id` (required).
+
+
+### `url` [plugins-outputs-redmine-url]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Host of the Redmine app. Value format: *http://urlofredmine.tld*. Do not add */issues* at the end.
+
+
+
+## Common options [plugins-outputs-redmine-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-redmine-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-redmine-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-redmine-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-redmine-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-redmine-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-redmine-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 redmine outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ redmine {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-riak.md b/docs/reference/plugins-outputs-riak.md
new file mode 100644
index 000000000..21b0e00d3
--- /dev/null
+++ b/docs/reference/plugins-outputs-riak.md
@@ -0,0 +1,213 @@
+---
+navigation_title: "riak"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-riak.html
+---
+
+# Riak output plugin [plugins-outputs-riak]
+
+
+* Plugin version: v3.0.5
+* Released on: 2019-10-09
+* [Changelog](https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-riak-index.md).
+
+## Installation [_installation_44]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-riak`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_105]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-riak). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_105]
+
+Riak is a distributed k/v store from Basho. It’s based on the Dynamo model.
+
+
+## Riak Output Configuration Options [plugins-outputs-riak-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-riak-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`bucket`](#plugins-outputs-riak-bucket) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`bucket_props`](#plugins-outputs-riak-bucket_props) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`enable_search`](#plugins-outputs-riak-enable_search) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`enable_ssl`](#plugins-outputs-riak-enable_ssl) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`indices`](#plugins-outputs-riak-indices) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`key_name`](#plugins-outputs-riak-key_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`nodes`](#plugins-outputs-riak-nodes) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`proto`](#plugins-outputs-riak-proto) | [string](/reference/configuration-file-structure.md#string), one of `["http", "pb"]` | No |
+| [`ssl_opts`](#plugins-outputs-riak-ssl_opts) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-outputs-riak-common-options) for a list of options supported by all output plugins.
+
+
+
+### `bucket` [plugins-outputs-riak-bucket]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `["logstash-%{+YYYY.MM.dd}"]`
+
+The bucket name to write events to. Expansion is supported here, as values are passed through `event.sprintf`. Multiple buckets can be specified, but any bucket-specific settings defined apply to ALL the buckets.
+
+
+### `bucket_props` [plugins-outputs-riak-bucket_props]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Bucket properties (NYI). A Logstash hash of properties for the bucket, for example:
+
+```ruby
+    bucket_props => {
+        "r"  => "one"
+        "w"  => "one"
+        "dw" => "one"
+     }
+```
+
+or
+
+```ruby
+ bucket_props => { "n_val" => "3" }
+```
+
+Properties will be passed as-is.
+
+
+### `enable_search` [plugins-outputs-riak-enable_search]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable search on the bucket defined above.
+
+
+### `enable_ssl` [plugins-outputs-riak-enable_ssl]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL.
+
+
+### `indices` [plugins-outputs-riak-indices]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Array of fields to add secondary indexes (2i) on, for example:
+
+```ruby
+    indices => ["source_host", "type"]
+```
+
+Off by default, as not everyone runs eleveldb.
+
+
+### `key_name` [plugins-outputs-riak-key_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The event key name. Variables are valid here.
+
+Choose this carefully. It is best to let Riak decide.
+
+
+### `nodes` [plugins-outputs-riak-nodes]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{"localhost"=>"8098"}`
+
+The nodes of your Riak cluster. This can be a single host or a Logstash hash of node/port pairs, for example:
+
+```ruby
+ {
+ "node1" => "8098"
+ "node2" => "8098"
+ }
+```
+
+
+### `proto` [plugins-outputs-riak-proto]
+
+* Value can be any of: `http`, `pb`
+* Default value is `"http"`
+
+The protocol to use, either HTTP (`http`) or Protocol Buffers (`pb`). Applies to ALL backends listed above; no mix and match.
+
+
+### `ssl_opts` [plugins-outputs-riak-ssl_opts]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Options for SSL connections. Only applied if SSL is enabled. Logstash hash that maps to the riak-client options here: [https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak](https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak).
+
+You’ll likely want something like this:
+
+```ruby
+ ssl_opts => {
+ "pem" => "/etc/riak.pem"
+ "ca_path" => "/usr/share/certificates"
+ }
+```
+
+Per the riak client docs, the above sample options will turn on SSL `VERIFY_PEER`.
+
+
+
+## Common options [plugins-outputs-riak-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-riak-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-riak-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-riak-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-riak-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-riak-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-riak-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 riak outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ riak {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-riemann.md b/docs/reference/plugins-outputs-riemann.md
new file mode 100644
index 000000000..e49def857
--- /dev/null
+++ b/docs/reference/plugins-outputs-riemann.md
@@ -0,0 +1,215 @@
+---
+navigation_title: "riemann"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-riemann.html
+---
+
+# Riemann output plugin [plugins-outputs-riemann]
+
+
+* Plugin version: v3.0.7
+* Released on: 2020-07-15
+* [Changelog](https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.7/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-riemann-index.md).
+
+## Installation [_installation_45]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-riemann`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_106]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-riemann). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_106]
+
+Riemann is a network event stream processing system.
+
+While Riemann is conceptually very similar to Logstash, it offers much more as a monitoring system replacement.
+
+Riemann is used in Logstash much like statsd or other metric-related outputs.
+
+You can learn about Riemann here:
+
+* [http://riemann.io/](http://riemann.io/)
+
+You can see the author talk about it here:
+
+* [http://vimeo.com/38377415](http://vimeo.com/38377415)
+
+
+## Riemann Output Configuration Options [plugins-outputs-riemann-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-riemann-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`debug`](#plugins-outputs-riemann-debug) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`host`](#plugins-outputs-riemann-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`map_fields`](#plugins-outputs-riemann-map_fields) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`port`](#plugins-outputs-riemann-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`protocol`](#plugins-outputs-riemann-protocol) | [string](/reference/configuration-file-structure.md#string), one of `["tcp", "udp"]` | No |
+| [`riemann_event`](#plugins-outputs-riemann-riemann_event) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`sender`](#plugins-outputs-riemann-sender) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-riemann-common-options) for a list of options supported by all output plugins.
+
+
+
+### `debug` [plugins-outputs-riemann-debug]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable debugging output?
+
+
+### `host` [plugins-outputs-riemann-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The address of the Riemann server.
+
+
+### `map_fields` [plugins-outputs-riemann-map_fields]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+If set to true, automatically map all Logstash-defined fields to Riemann event fields. Nested Logstash fields are mapped to Riemann fields whose names join all parent keys with dots (for example `nested_field.key`) and whose value is the deepest value.
+
+As an example, the logstash event:
+
+```ruby
+ {
+ "@timestamp":"2013-12-10T14:36:26.151+0000",
+ "@version": 1,
+ "message":"log message",
+ "host": "host.domain.com",
+ "nested_field": {
+ "key": "value"
+ }
+ }
+```
+
+Is mapped to this riemann event:
+
+```ruby
+ {
+ :time 1386686186,
+ :host host.domain.com,
+ :message log message,
+ :nested_field.key value
+ }
+```
+
+It can be used in conjunction with, or independent of, the `riemann_event` option. When used with `riemann_event`, any duplicate keys receive their value from `riemann_event` instead of the Logstash event itself.
+
+
+### `port` [plugins-outputs-riemann-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5555`
+
+The port to connect to on your Riemann server.
+
+
+### `protocol` [plugins-outputs-riemann-protocol]
+
+* Value can be any of: `tcp`, `udp`
+* Default value is `"tcp"`
+
+The protocol to use. UDP is non-blocking; TCP is blocking.
+
+Logstash’s default output behaviour is to never lose events. As such, we use TCP as the default here.
+
+
+### `riemann_event` [plugins-outputs-riemann-riemann_event]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+A Hash to set Riemann event fields ([http://riemann.io/concepts.html](http://riemann.io/concepts.html)).
+
+The following event fields are supported: `description`, `state`, `metric`, `ttl`, `service`
+
+Tags found on the Logstash event will automatically be added to the Riemann event.
+
+Any other field set here will be passed to Riemann as an event attribute.
+
+Example:
+
+```ruby
+ riemann {
+ riemann_event => {
+ "metric" => "%{metric}"
+ "service" => "%{service}"
+ }
+ }
+```
+
+`metric` and `ttl` values will be coerced to a floating point value. Values which cannot be coerced will be set to zero (0.0).
+
+`description`, by default, will be set to the event message but can be overridden here.
+
+
+### `sender` [plugins-outputs-riemann-sender]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+The name of the sender. This sets the `host` value in the Riemann event.
+
+
+
+## Common options [plugins-outputs-riemann-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-riemann-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-riemann-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-riemann-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-riemann-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-riemann-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-riemann-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 riemann outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ riemann {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-s3.md b/docs/reference/plugins-outputs-s3.md
new file mode 100644
index 000000000..5d754a300
--- /dev/null
+++ b/docs/reference/plugins-outputs-s3.md
@@ -0,0 +1,458 @@
+---
+navigation_title: "s3"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-s3.html
+---
+
+# S3 output plugin [plugins-outputs-s3]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-s3-index.md).
+
+## Getting help [_getting_help_107]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_107]
+
+This plugin batches and uploads logstash events into Amazon Simple Storage Service (Amazon S3).
+
+::::{important}
+The S3 output plugin only supports AWS S3. Other S3 compatible storage solutions are not supported.
+::::
+
+
+S3 outputs create temporary files in the OS temporary directory. You can specify where to save them using the `temporary_directory` option.
+
+::::{important}
+For configurations containing multiple s3 outputs with the restore option enabled, each output should define its own *temporary_directory*.
+::::
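+
+For instance, a configuration along these lines (bucket names and paths are hypothetical) keeps the restore state of two S3 outputs separate:
+
+```ruby
+output {
+  s3 {
+    bucket              => "bucket-a"
+    restore             => true
+    temporary_directory => "/tmp/logstash-s3-bucket-a"
+  }
+  s3 {
+    bucket              => "bucket-b"
+    restore             => true
+    temporary_directory => "/tmp/logstash-s3-bucket-b"
+  }
+}
+```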
+
+
+### Requirements [_requirements]
+
+* Amazon S3 Bucket and S3 Access Permissions (Typically access_key_id and secret_access_key)
+* S3 PutObject permission
+
+
+### S3 output file [_s3_output_file]
+
+```txt
+ls.s3.312bc026-2f5d-49bc-ae9f-5940cf4ad9a6.2013-04-18T10.00.tag_hello.part0.txt
+```
+
+| File name segment | Description |
+| --- | --- |
+| ls.s3 | Indicates the Logstash S3 plugin. |
+| 312bc026-2f5d-49bc-ae9f-5940cf4ad9a6 | A new, random UUID per file. |
+| 2013-04-18T10.00 | Represents the time whenever you specify `time_file`. |
+| tag_hello | Indicates the event’s tag. |
+| part0 | If you indicate `size_file`, more parts are generated when the file size exceeds `size_file`. When a file is full, it gets pushed to the bucket and then deleted from the temporary directory. If a file is empty, it is simply deleted; empty files will not be pushed. |
+
+
+### Crash Recovery [_crash_recovery]
+
+This plugin will recover and upload temporary log files after a crash or abnormal termination when `restore` is set to `true`.
+
+
+### Usage [_usage_4]
+
+This is an example of logstash config:
+
+```ruby
+output {
+  s3 {
+    access_key_id => "crazy_key"              # (optional)
+    secret_access_key => "monkey_access_key"  # (optional)
+    region => "eu-west-1"                     # (optional, default = "us-east-1")
+    bucket => "your_bucket"                   # (required)
+    size_file => 2048                         # (optional) - bytes
+    time_file => 5                            # (optional) - minutes
+    codec => "plain"                          # (optional)
+    canned_acl => "private"                   # (optional) one of "private", "public-read", "public-read-write",
+                                              # "authenticated-read", "aws-exec-read", "bucket-owner-read",
+                                              # "bucket-owner-full-control", "log-delivery-write"; defaults to "private"
+  }
+}
+```
+
+
+
+## S3 Output Configuration Options [plugins-outputs-s3-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-s3-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-outputs-s3-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`additional_settings`](#plugins-outputs-s3-additional_settings) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`aws_credentials_file`](#plugins-outputs-s3-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`bucket`](#plugins-outputs-s3-bucket) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`canned_acl`](#plugins-outputs-s3-canned_acl) | [string](/reference/configuration-file-structure.md#string), one of `["private", "public-read", "public-read-write", "authenticated-read", "aws-exec-read", "bucket-owner-read", "bucket-owner-full-control", "log-delivery-write"]` | No |
+| [`encoding`](#plugins-outputs-s3-encoding) | [string](/reference/configuration-file-structure.md#string), one of `["none", "gzip"]` | No |
+| [`endpoint`](#plugins-outputs-s3-endpoint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`prefix`](#plugins-outputs-s3-prefix) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_uri`](#plugins-outputs-s3-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-outputs-s3-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`restore`](#plugins-outputs-s3-restore) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`retry_count`](#plugins-outputs-s3-retry_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_delay`](#plugins-outputs-s3-retry_delay) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`role_arn`](#plugins-outputs-s3-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-outputs-s3-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`rotation_strategy`](#plugins-outputs-s3-rotation_strategy) | [string](/reference/configuration-file-structure.md#string), one of `["size_and_time", "size", "time"]` | No |
+| [`secret_access_key`](#plugins-outputs-s3-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`server_side_encryption`](#plugins-outputs-s3-server_side_encryption) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`server_side_encryption_algorithm`](#plugins-outputs-s3-server_side_encryption_algorithm) | [string](/reference/configuration-file-structure.md#string), one of `["AES256", "aws:kms"]` | No |
+| [`session_token`](#plugins-outputs-s3-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`signature_version`](#plugins-outputs-s3-signature_version) | [string](/reference/configuration-file-structure.md#string), one of `["v2", "v4"]` | No |
+| [`size_file`](#plugins-outputs-s3-size_file) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssekms_key_id`](#plugins-outputs-s3-ssekms_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`storage_class`](#plugins-outputs-s3-storage_class) | [string](/reference/configuration-file-structure.md#string), one of `["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA", "INTELLIGENT_TIERING", "GLACIER", "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE"]` | No |
+| [`temporary_directory`](#plugins-outputs-s3-temporary_directory) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`time_file`](#plugins-outputs-s3-time_file) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`upload_multipart_threshold`](#plugins-outputs-s3-upload_multipart_threshold) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`upload_queue_size`](#plugins-outputs-s3-upload_queue_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`upload_workers_count`](#plugins-outputs-s3-upload_workers_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_aws_bundled_ca`](#plugins-outputs-s3-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`validate_credentials_on_root_bucket`](#plugins-outputs-s3-validate_credentials_on_root_bucket) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-s3-common-options) for a list of options supported by all output plugins.
+
+
+
+### `access_key_id` [plugins-outputs-s3-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `additional_settings` [plugins-outputs-s3-additional_settings]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+Key-value pairs of settings and corresponding values used to parametrize the connection to S3. See full list in [the AWS SDK documentation](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html). Example:
+
+```ruby
+ output {
+ s3 {
+ access_key_id => "1234",
+ secret_access_key => "secret",
+ region => "eu-west-1",
+ bucket => "logstash-test",
+ additional_settings => {
+ "force_path_style" => true,
+ "follow_redirects" => false
+ }
+ }
+ }
+```
+
+
+### `aws_credentials_file` [plugins-outputs-s3-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `bucket` [plugins-outputs-s3-bucket]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+S3 bucket
+
+
+### `canned_acl` [plugins-outputs-s3-canned_acl]
+
+* Value can be any of: `private`, `public-read`, `public-read-write`, `authenticated-read`, `aws-exec-read`, `bucket-owner-read`, `bucket-owner-full-control`, `log-delivery-write`
+* Default value is `"private"`
+
+The S3 canned ACL to use when putting the file. Defaults to "private".
+
+
+### `encoding` [plugins-outputs-s3-encoding]
+
+* Value can be any of: `none`, `gzip`
+* Default value is `"none"`
+
+Specify the content encoding. Supports `gzip`. Defaults to `none`.
+
+
+### `endpoint` [plugins-outputs-s3-endpoint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The endpoint to connect to. By default it is constructed using the value of `region`. This is useful when connecting to S3 compatible services, but beware that these aren’t guaranteed to work correctly with the AWS SDK. The endpoint should be an HTTP or HTTPS URL, e.g. [https://example.com](https://example.com)
+
+
+### `prefix` [plugins-outputs-s3-prefix]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+Specify a prefix for the uploaded filenames to simulate directories on S3. The prefix does not require a leading slash. This option supports [Logstash interpolation](/reference/event-dependent-configuration.md#sprintf). For example, files can be prefixed with the event date using `prefix => "%{+YYYY}/%{+MM}/%{+dd}"`.
+
+::::{important}
+Take care when you are using interpolated strings in prefixes. This has the potential to create large numbers of unique prefixes, causing large numbers of in-progress uploads. This scenario may result in performance and stability issues, which can be further exacerbated when you use a rotation_strategy that delays uploads.
+::::
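+
+As a sketch (the bucket name is hypothetical), the following writes objects under date-based "directories" such as `2025/01/31/`:
+
+```ruby
+output {
+  s3 {
+    bucket => "my-logs-bucket"            # hypothetical bucket name
+    prefix => "%{+YYYY}/%{+MM}/%{+dd}/"   # interpolated from the event timestamp
+  }
+}
+```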
+
+
+
+### `proxy_uri` [plugins-outputs-s3-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI to proxy server if required
+
+
+### `region` [plugins-outputs-s3-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `restore` [plugins-outputs-s3-restore]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Used to enable recovery after crash/abnormal termination. Temporary log files will be recovered and uploaded.
+
+
+### `retry_count` [plugins-outputs-s3-retry_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `Infinity`
+
+Allows limiting the number of retries when the S3 upload fails.
+
+
+### `retry_delay` [plugins-outputs-s3-retry_delay]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Delay (in seconds) to wait between consecutive retries on upload failures.
+
+
+### `role_arn` [plugins-outputs-s3-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS IAM Role to assume, if any. This is used to generate temporary credentials, typically for cross-account access. See the [AssumeRole API documentation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) for more information.
+
+
+### `role_session_name` [plugins-outputs-s3-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Session name to use when assuming an IAM role.
+
+
+### `rotation_strategy` [plugins-outputs-s3-rotation_strategy]
+
+* Value can be any of: `size_and_time`, `size`, `time`
+* Default value is `"size_and_time"`
+
+Controls when to close the file and push it to S3.
+
+If you set this value to `size`, it uses the value set in [`size_file`](#plugins-outputs-s3-size_file). If you set this value to `time`, it uses the value set in [`time_file`](#plugins-outputs-s3-time_file). If you set this value to `size_and_time`, it uses the values from [`size_file`](#plugins-outputs-s3-size_file) and [`time_file`](#plugins-outputs-s3-time_file), and splits the file when either one matches.
+
+The default strategy checks both size and time. The first value to match triggers file rotation.
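+
+For example, a minimal sketch (bucket name and thresholds are illustrative) that rotates a file either when it reaches 10 MB or after 10 minutes, whichever comes first:
+
+```ruby
+output {
+  s3 {
+    bucket            => "my-logs-bucket"   # hypothetical bucket name
+    rotation_strategy => "size_and_time"    # the default strategy, shown explicitly
+    size_file         => 10485760           # 10 MB in bytes
+    time_file         => 10                 # minutes
+  }
+}
+```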
+
+
+### `secret_access_key` [plugins-outputs-s3-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `server_side_encryption` [plugins-outputs-s3-server_side_encryption]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Specifies whether or not to use S3’s server side encryption. Defaults to no encryption.
+
+
+### `server_side_encryption_algorithm` [plugins-outputs-s3-server_side_encryption_algorithm]
+
+* Value can be any of: `AES256`, `aws:kms`
+* Default value is `"AES256"`
+
+Specifies what type of encryption to use when SSE is enabled.
+
+
+### `session_token` [plugins-outputs-s3-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `signature_version` [plugins-outputs-s3-signature_version]
+
+* Value can be any of: `v2`, `v4`
+* There is no default value for this setting.
+
+The version of the S3 signature hash to use. Normally uses the internal client default, but it can be explicitly specified here.
+
+
+### `size_file` [plugins-outputs-s3-size_file]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5242880`
+
+Set the file size in bytes. When the number of bytes exceeds the `size_file` value, a new file is created. If you use tags, Logstash generates a specific size file for every tag.
+
+
+### `ssekms_key_id` [plugins-outputs-s3-ssekms_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The KMS key to use when `server_side_encryption => "aws:kms"` is specified. If `server_side_encryption => "aws:kms"` is set but this option is not, the default KMS key is used. See [http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html](http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
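+
+A minimal sketch (bucket name and key ARN are placeholders) enabling SSE-KMS with a customer-managed key:
+
+```ruby
+output {
+  s3 {
+    bucket                           => "my-logs-bucket"
+    server_side_encryption           => true
+    server_side_encryption_algorithm => "aws:kms"
+    ssekms_key_id                    => "arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000"
+  }
+}
+```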
+
+
+### `storage_class` [plugins-outputs-s3-storage_class]
+
+* Value can be any of: `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `GLACIER`, `DEEP_ARCHIVE`, `OUTPOSTS`, `GLACIER_IR`, `SNOW`, `EXPRESS_ONEZONE`
+* Default value is `"STANDARD"`
+
+Specifies what S3 storage class to use when uploading the file. More information about the different storage classes can be found: [http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) Defaults to STANDARD.
+
+
+### `temporary_directory` [plugins-outputs-s3-temporary_directory]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash will store the temporary files before sending them to S3. Defaults to the current OS temporary directory, which on Linux is `/tmp/logstash`.
+
+
+### `time_file` [plugins-outputs-s3-time_file]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `15`
+
+Set the time, in MINUTES, to close the current sub_time_section of bucket. If [`rotation_strategy`](#plugins-outputs-s3-rotation_strategy) is set to `time` or `size_and_time`, then `time_file` cannot be set to 0. Otherwise, the plugin raises a configuration error.
+
+
+### `upload_multipart_threshold` [plugins-outputs-s3-upload_multipart_threshold]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `15728640`
+
+Files larger than this number are uploaded using the S3 multipart APIs
+
+
+### `upload_queue_size` [plugins-outputs-s3-upload_queue_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4`
+
+Number of items we can keep in the local queue before uploading them
+
+
+### `upload_workers_count` [plugins-outputs-s3-upload_workers_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `4`
+
+Specify how many workers to use to upload the files to S3
+
+
+### `use_aws_bundled_ca` [plugins-outputs-s3-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use bundled CA certificates that ship with AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. Windows, you can set this to `true`.
+
+
+### `validate_credentials_on_root_bucket` [plugins-outputs-s3-validate_credentials_on_root_bucket]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+The common use case is to define permissions on the root bucket and give Logstash full access to write logs. In some circumstances, you need more granular permissions on the subfolder. This option allows you to disable the credentials check against the root bucket at startup.
+
+
+
+## Common options [plugins-outputs-s3-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-s3-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-s3-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-s3-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-s3-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-s3-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-s3-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 s3 outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ s3 {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-sink.md b/docs/reference/plugins-outputs-sink.md
new file mode 100644
index 000000000..9a904a569
--- /dev/null
+++ b/docs/reference/plugins-outputs-sink.md
@@ -0,0 +1,74 @@
+---
+navigation_title: "sink"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-sink.html
+---
+
+# Sink output plugin [plugins-outputs-sink]
+
+
+**{{ls}} Core Plugin.** The sink output plugin cannot be installed or uninstalled independently of {{ls}}.
+
+## Getting help [_getting_help_108]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash).
+
+
+## Description [_description_108]
+
+An event sink that discards any events received. Generally useful for testing the performance of inputs and filters.
+
+
+## Sink Output Configuration Options [plugins-outputs-sink-options]
+
+There are no special configuration options for this plugin, but it does support the [Common options](#plugins-outputs-sink-common-options).
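+
+For example, a minimal pipeline sketch (the generator count is illustrative) that exercises an input and a filter while discarding every event, which is useful when benchmarking everything upstream of the outputs:
+
+```ruby
+input {
+  generator { count => 1000000 }          # emit one million synthetic events
+}
+filter {
+  mutate { add_tag => ["benchmark"] }     # stand-in for the filters being measured
+}
+output {
+  sink {}                                 # discard all events
+}
+```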
+
+
+## Common options [plugins-outputs-sink-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-sink-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-sink-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-sink-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-sink-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-sink-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-sink-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 sink outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ sink {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-sns.md b/docs/reference/plugins-outputs-sns.md
new file mode 100644
index 000000000..3f16aa362
--- /dev/null
+++ b/docs/reference/plugins-outputs-sns.md
@@ -0,0 +1,198 @@
+---
+navigation_title: "sns"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-sns.html
+---
+
+# Sns output plugin [plugins-outputs-sns]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-sns-index.md).
+
+## Getting help [_getting_help_109]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_109]
+
+SNS output.
+
+Send events to Amazon’s Simple Notification Service, a hosted pub/sub framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS.
+
+For further documentation about the service, see [http://docs.amazonwebservices.com/sns/latest/api/](http://docs.amazonwebservices.com/sns/latest/api/).
+
+This plugin looks for the following fields on events it receives:
+
+* `sns` - If no ARN is found in the configuration file, this will be used as the ARN to publish.
+* `sns_subject` - The subject line to use. Optional. The value of `%{{host}}` will be used if `sns_subject` is not present. The subject will be truncated to 100 characters. If `sns_subject` is set to a non-string value, a JSON version of that value will be saved.
+* `sns_message` - Optional string to send as the message. If this is set to a non-string value, it will be encoded with the specified `codec`. If this is not set, the entire event will be encoded with the codec, with the `@message` field truncated so that the length of the JSON fits in `32768` bytes.
+
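+A minimal sketch of an `sns` output might look like the following. The topic ARN and region here are placeholders, not defaults provided by the plugin; events that carry an `sns_subject` or `sns_message` field will have those values applied per message as described above:
+
+```ruby
+output {
+  sns {
+    arn    => "arn:aws:sns:us-east-1:123456789012:logstash-alerts"  # hypothetical topic ARN
+    region => "us-east-1"
+  }
+}
+```
+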
+
+## Upgrading to 2.0.0 [_upgrading_to_2_0_0]
+
+This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS. This plugin now uses the logstash standard [codec](/reference/configuration-file-structure.md#codec) option for encoding instead. If you want the same *plain* format as the v0/1 codec (`format => "plain"`) use `codec => "s3_plain"`.
+
+
+## Sns Output Configuration Options [plugins-outputs-sns-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-sns-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-outputs-sns-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`arn`](#plugins-outputs-sns-arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`aws_credentials_file`](#plugins-outputs-sns-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`proxy_uri`](#plugins-outputs-sns-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`publish_boot_message_arn`](#plugins-outputs-sns-publish_boot_message_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-outputs-sns-region) | [string](/reference/configuration-file-structure.md#string), one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]` | No |
+| [`secret_access_key`](#plugins-outputs-sns-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-outputs-sns-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_aws_bundled_ca`](#plugins-outputs-sns-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-sns-common-options) for a list of options supported by all output plugins.
+
+
+
+### `access_key_id` [plugins-outputs-sns-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `arn` [plugins-outputs-sns-arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Optional ARN to send messages to. If you do not set this, you must include the `sns` field in your events to set the ARN on a per-message basis.
+
+
+### `aws_credentials_file` [plugins-outputs-sns-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `proxy_uri` [plugins-outputs-sns-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+
+### `publish_boot_message_arn` [plugins-outputs-sns-publish_boot_message_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+When an ARN for an SNS topic is specified here, the message "Logstash successfully booted" will be sent to it when this plugin is registered.
+
+Example: `arn:aws:sns:us-east-1:770975001275:logstash-testing`
+
+
+### `region` [plugins-outputs-sns-region]
+
+* Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `secret_access_key` [plugins-outputs-sns-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `session_token` [plugins-outputs-sns-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `use_aws_bundled_ca` [plugins-outputs-sns-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use the bundled CA certificates that ship with the AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. on Windows, you can set this to `true`.
+
+
+
+## Common options [plugins-outputs-sns-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-sns-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-sns-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-sns-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-sns-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-sns-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-sns-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 sns outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ sns {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-solr_http.md b/docs/reference/plugins-outputs-solr_http.md
new file mode 100644
index 000000000..d62a25bcd
--- /dev/null
+++ b/docs/reference/plugins-outputs-solr_http.md
@@ -0,0 +1,136 @@
+---
+navigation_title: "solr_http"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-solr_http.html
+---
+
+# Solr_http output plugin [plugins-outputs-solr_http]
+
+
+* Plugin version: v3.0.5
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-solr_http-index.md).
+
+## Installation [_installation_46]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-solr_http`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_110]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-solr_http). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_110]
+
+This output lets you index and store your logs in Solr. If you want to get started quickly, you should use version 4.4 or above in schemaless mode, which will try to guess your fields automatically. To turn that on, you can use the example included in the Solr archive:
+
+```shell
+ tar zxf solr-4.4.0.tgz
+ cd example
+ mv solr solr_ #back up the existing sample conf
+ cp -r example-schemaless/solr/ . #put the schemaless conf in place
+ java -jar start.jar #start Solr
+```
+
+You can learn more at [the Solr home page](https://lucene.apache.org/solr/).
+
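+As a quick illustration, a configuration along these lines would buffer events and index them into a local Solr instance. The `fingerprint` field used for `document_id` is a hypothetical example of a field you would populate yourself:
+
+```ruby
+output {
+  solr_http {
+    solr_url    => "http://localhost:8983/solr"
+    document_id => "%{fingerprint}"   # assumed field carrying a precomputed ID
+    flush_size  => 100
+  }
+}
+```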
+
+## Solr_http Output Configuration Options [plugins-outputs-solr_http-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-solr_http-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`document_id`](#plugins-outputs-solr_http-document_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`flush_size`](#plugins-outputs-solr_http-flush_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`idle_flush_time`](#plugins-outputs-solr_http-idle_flush_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`solr_url`](#plugins-outputs-solr_http-solr_url) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-solr_http-common-options) for a list of options supported by all output plugins.
+
+
+
+### `document_id` [plugins-outputs-solr_http-document_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+Solr document ID for events. You’d typically use a field reference here, like `%{{foo}}`, so you can assign your own IDs.
+
+
+### `flush_size` [plugins-outputs-solr_http-flush_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `100`
+
+Number of events to queue up before writing to Solr
+
+
+### `idle_flush_time` [plugins-outputs-solr_http-idle_flush_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Amount of time since the last flush before a flush is performed, even if the number of buffered events is smaller than `flush_size`.
+
+
+### `solr_url` [plugins-outputs-solr_http-solr_url]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"http://localhost:8983/solr"`
+
+URL used to connect to Solr
+
+
+
+## Common options [plugins-outputs-solr_http-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-solr_http-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-solr_http-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-solr_http-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-solr_http-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-solr_http-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-solr_http-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 solr_http outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ solr_http {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-sqs.md b/docs/reference/plugins-outputs-sqs.md
new file mode 100644
index 000000000..40a4ba742
--- /dev/null
+++ b/docs/reference/plugins-outputs-sqs.md
@@ -0,0 +1,266 @@
+---
+navigation_title: "sqs"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-sqs.html
+---
+
+# Sqs output plugin [plugins-outputs-sqs]
+
+
+* A component of the [aws integration plugin](/reference/plugins-integrations-aws.md)
+* Integration version: v7.1.8
+* Released on: 2024-07-26
+* [Changelog](https://github.com/logstash-plugins/logstash-integration-aws/blob/v7.1.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-sqs-index.md).
+
+## Getting help [_getting_help_111]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-integration-aws). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_111]
+
+Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the Amazon Web Services suite of tools. Although SQS is similar to other queuing systems such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and requires that you have an AWS account. See [http://aws.amazon.com/sqs/](http://aws.amazon.com/sqs/) for more details on how SQS works, what the pricing schedule looks like, and how to set up a queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+* `sqs:GetQueueUrl`
+* `sqs:SendMessage`
+* `sqs:SendMessageBatch`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user. See [http://aws.amazon.com/iam/](http://aws.amazon.com/iam/) for more details on setting up AWS identities. A sample policy is as follows:
+
+```json
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "sqs:GetQueueUrl",
+ "sqs:SendMessage",
+ "sqs:SendMessageBatch"
+ ],
+ "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue"
+ }
+ ]
+}
+```
+
+
+## Batch Publishing [_batch_publishing]
+
+This output publishes messages to SQS in batches in order to optimize event throughput and increase performance. This is done using the [`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) API. When publishing messages to SQS in batches, the following service limits must be respected (see [Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)):
+
+* The maximum allowed individual message size is 256KiB.
+* The maximum total payload size (i.e. the sum of the sizes of all individual messages within a batch) is also 256KiB.
+
+This plugin will dynamically adjust the size of the batch published to SQS in order to ensure that the total payload size does not exceed 256KiB.
+
+::::{warning}
+This output cannot currently handle messages larger than 256KiB. Any single message exceeding this size will be dropped.
+::::
+
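+As a rough sketch, a working configuration usually only needs the queue name and a region; the queue name below is a placeholder:
+
+```ruby
+output {
+  sqs {
+    queue        => "my-sqs-queue"   # hypothetical queue name (not a URL or ARN)
+    region       => "us-east-1"
+    batch_events => 10               # send messages in batches of up to 10
+  }
+}
+```
+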
+
+
+## Sqs Output Configuration Options [plugins-outputs-sqs-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-sqs-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`access_key_id`](#plugins-outputs-sqs-access_key_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`aws_credentials_file`](#plugins-outputs-sqs-aws_credentials_file) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`batch_events`](#plugins-outputs-sqs-batch_events) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`endpoint`](#plugins-outputs-sqs-endpoint) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`message_max_size`](#plugins-outputs-sqs-message_max_size) | [bytes](/reference/configuration-file-structure.md#bytes) | No |
+| [`proxy_uri`](#plugins-outputs-sqs-proxy_uri) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`queue`](#plugins-outputs-sqs-queue) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`queue_owner_aws_account_id`](#plugins-outputs-sqs-queue_owner_aws_account_id) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`region`](#plugins-outputs-sqs-region) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_arn`](#plugins-outputs-sqs-role_arn) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`role_session_name`](#plugins-outputs-sqs-role_session_name) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`secret_access_key`](#plugins-outputs-sqs-secret_access_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`session_token`](#plugins-outputs-sqs-session_token) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`use_aws_bundled_ca`](#plugins-outputs-sqs-use_aws_bundled_ca) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-sqs-common-options) for a list of options supported by all output plugins.
+
+
+
+### `access_key_id` [plugins-outputs-sqs-access_key_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+
+### `aws_credentials_file` [plugins-outputs-sqs-aws_credentials_file]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Path to YAML file containing a hash of AWS credentials. This file will only be loaded if `access_key_id` and `secret_access_key` aren’t set. The contents of the file should look like this:
+
+```ruby
+ :access_key_id: "12345"
+ :secret_access_key: "54321"
+```
+
+
+### `batch_events` [plugins-outputs-sqs-batch_events]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+The number of events to be sent in each batch. Set this to `1` to disable the batch sending of messages.
+
+
+### `endpoint` [plugins-outputs-sqs-endpoint]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The endpoint to connect to. By default it is constructed using the value of `region`. This is useful when connecting to S3 compatible services, but beware that these aren’t guaranteed to work correctly with the AWS SDK.
+
+
+### `message_max_size` [plugins-outputs-sqs-message_max_size]
+
+* Value type is [bytes](/reference/configuration-file-structure.md#bytes)
+* Default value is `"256KiB"`
+
+The maximum number of bytes for any message sent to SQS. Messages exceeding this size will be dropped. See [http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html).
+
+
+### `proxy_uri` [plugins-outputs-sqs-proxy_uri]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+
+### `queue` [plugins-outputs-sqs-queue]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The name of the target SQS queue. Note that this is just the name of the queue, not the URL or ARN.
+
+
+### `queue_owner_aws_account_id` [plugins-outputs-sqs-queue_owner_aws_account_id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS account ID that owns the target SQS queue. IAM permissions need to be configured on both accounts for cross-account delivery to work.
+
+
+### `region` [plugins-outputs-sqs-region]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"us-east-1"`
+
+The AWS Region
+
+
+### `role_arn` [plugins-outputs-sqs-role_arn]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS IAM Role to assume, if any. This is used to generate temporary credentials, typically for cross-account access. See the [AssumeRole API documentation](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) for more information.
+
+
+### `role_session_name` [plugins-outputs-sqs-role_session_name]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+Session name to use when assuming an IAM role.
+
+
+### `secret_access_key` [plugins-outputs-sqs-secret_access_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS Secret Access Key
+
+
+### `session_token` [plugins-outputs-sqs-session_token]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+
+### `use_aws_bundled_ca` [plugins-outputs-sqs-use_aws_bundled_ca]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use the bundled CA certificates that ship with the AWS SDK to verify SSL peer certificates. For cases where the default certificates are unavailable, e.g. on Windows, you can set this to `true`.
+
+
+
+## Common options [plugins-outputs-sqs-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-sqs-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-sqs-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-sqs-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-sqs-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-sqs-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-sqs-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 sqs outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ sqs {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-statsd.md b/docs/reference/plugins-outputs-statsd.md
new file mode 100644
index 000000000..e9e8732a0
--- /dev/null
+++ b/docs/reference/plugins-outputs-statsd.md
@@ -0,0 +1,229 @@
+---
+navigation_title: "statsd"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-statsd.html
+---
+
+# Statsd output plugin [plugins-outputs-statsd]
+
+
+* Plugin version: v3.2.0
+* Released on: 2018-06-05
+* [Changelog](https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-statsd-index.md).
+
+## Installation [_installation_47]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-statsd`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_112]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-statsd). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_112]
+
+statsd is a network daemon that aggregates statistics, such as counters and timers, and ships them over UDP to backend services such as Graphite or Datadog. The general idea is that you send metrics to statsd and every few seconds it emits the aggregated values to the backend. Example aggregates are sums, averages, maximum values, their standard deviation, and so on. This plugin makes it easy to send such metrics based on data in Logstash events.
+
+You can learn about statsd here:
+
+* [Etsy blog post announcing statsd](https://codeascraft.com/2011/02/15/measure-anything-measure-everything/)
+* [statsd on github](https://github.com/etsy/statsd)
+
+Typical examples of how this can be used with Logstash include counting HTTP hits by response code, summing the total number of bytes of traffic served, and tracking the 50th and 95th percentile of the processing time of requests.
+
+Each metric emitted to statsd has a dot-separated path, a type, and a value. The metric path is built from the `namespace` and `sender` options together with the metric name that’s picked up depending on the type of metric. All in all, the metric path will follow this pattern:
+
+```
+namespace.sender.metric
+```
+
+With regard to this plugin, the default namespace is "logstash", the default sender is the `host` field, and the metric name depends on what is set as the metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` options. In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved and will be replaced by underscores ("_").
+
+Example:
+
+```ruby
+output {
+ statsd {
+ host => "statsd.example.org"
+ count => {
+ "http.bytes" => "%{bytes}"
+ }
+ }
+}
+```
+
+If run on a host named hal9000, the configuration above will send the following metric to statsd when the current event has 123 in its `bytes` field:
+
+```
+logstash.hal9000.http.bytes:123|c
+```
+
+## Statsd Output Configuration Options [plugins-outputs-statsd-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-statsd-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`count`](#plugins-outputs-statsd-count) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`decrement`](#plugins-outputs-statsd-decrement) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`gauge`](#plugins-outputs-statsd-gauge) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`host`](#plugins-outputs-statsd-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`increment`](#plugins-outputs-statsd-increment) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`namespace`](#plugins-outputs-statsd-namespace) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-outputs-statsd-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`protocol`](#plugins-outputs-statsd-protocol) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sample_rate`](#plugins-outputs-statsd-sample_rate) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`sender`](#plugins-outputs-statsd-sender) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`set`](#plugins-outputs-statsd-set) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`timing`](#plugins-outputs-statsd-timing) | [hash](/reference/configuration-file-structure.md#hash) | No |
+
+Also see [Common options](#plugins-outputs-statsd-common-options) for a list of options supported by all output plugins.
+
+
+
+### `count` [plugins-outputs-statsd-count]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A count metric. `metric_name => count` as hash. `%{{fieldname}}` substitutions are allowed in the metric names.
+
+
+### `decrement` [plugins-outputs-statsd-decrement]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+A decrement metric. Metric names as array. `%{{fieldname}}` substitutions are allowed in the metric names.
+
+
+### `gauge` [plugins-outputs-statsd-gauge]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A gauge metric. `metric_name => gauge` as hash. `%{{fieldname}}` substitutions are allowed in the metric names.
+
+
+### `host` [plugins-outputs-statsd-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The hostname or IP address of the statsd server.
+
+
+### `increment` [plugins-outputs-statsd-increment]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+An increment metric. Metric names as array. `%{{fieldname}}` substitutions are allowed in the metric names.
+
+
+### `namespace` [plugins-outputs-statsd-namespace]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"logstash"`
+
+The statsd namespace to use for this metric. `%{{fieldname}}` substitutions are allowed.
+
+
+### `port` [plugins-outputs-statsd-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `8125`
+
+The port to connect to on your statsd server.
+
+
+### `protocol` [plugins-outputs-statsd-protocol]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"udp"`
+
+The protocol to connect to on your statsd server.
+
+
+### `sample_rate` [plugins-outputs-statsd-sample_rate]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The sample rate for the metric.
+
+
+### `sender` [plugins-outputs-statsd-sender]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+The name of the sender. Dots will be replaced with underscores. `%{{fieldname}}` substitutions are allowed.
+
+
+### `set` [plugins-outputs-statsd-set]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A set metric. `metric_name => "string"` to append as hash. `%{{fieldname}}` substitutions are allowed in the metric names.
+
+
+### `timing` [plugins-outputs-statsd-timing]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* Default value is `{}`
+
+A timing metric. `metric_name => duration` as hash. `%{{fieldname}}` substitutions are allowed in the metric names.
+
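+Tying several of these options together, a sketch like the following would emit one counter per HTTP status code and a timer for request latency. The `response_code` and `request_time` fields are assumptions about your events, not fields the plugin provides:
+
+```ruby
+output {
+  statsd {
+    host      => "statsd.example.org"
+    increment => ["http.response.%{response_code}"]            # e.g. logstash.<sender>.http.response.200
+    timing    => { "http.request_time" => "%{request_time}" }  # request latency as a timer
+  }
+}
+```
+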
+
+
+## Common options [plugins-outputs-statsd-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-statsd-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-statsd-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-statsd-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-statsd-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-statsd-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-statsd-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 statsd outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ statsd {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-stdout.md b/docs/reference/plugins-outputs-stdout.md
new file mode 100644
index 000000000..2d76eb6d1
--- /dev/null
+++ b/docs/reference/plugins-outputs-stdout.md
@@ -0,0 +1,104 @@
+---
+navigation_title: "stdout"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-stdout.html
+---
+
+# Stdout output plugin [plugins-outputs-stdout]
+
+
+* Plugin version: v3.1.4
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.4/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-stdout-index.md).
+
+## Getting help [_getting_help_113]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-stdout). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_113]
+
+A simple output which prints to the STDOUT of the shell running Logstash. This output can be quite convenient when debugging plugin configurations, by allowing instant access to the event data after it has passed through the inputs and filters.
+
+For example, the following output configuration, in conjunction with the Logstash `-e` command-line flag, will allow you to see the results of your event pipeline for quick iteration.
+
+```ruby
+ output {
+ stdout {}
+ }
+```
+
+Useful codecs include:
+
+`rubydebug`: outputs event data using the Ruby "awesome_print" [library](http://rubygems.org/gems/awesome_print). This is the default codec for stdout.
+
+```ruby
+ output {
+ stdout { }
+ }
+```
+
+`json`: outputs event data in structured JSON format
+
+```ruby
+ output {
+ stdout { codec => json }
+ }
+```
+
+
+## Stdout Output Configuration Options [plugins-outputs-stdout-options]
+
+There are no special configuration options for this plugin, but it does support the [Common options](#plugins-outputs-stdout-common-options).
+
+
+## Common options [plugins-outputs-stdout-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-stdout-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-stdout-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-stdout-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-stdout-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"rubydebug"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-stdout-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-stdout-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 stdout outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ stdout {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-stomp.md b/docs/reference/plugins-outputs-stomp.md
new file mode 100644
index 000000000..bac776dec
--- /dev/null
+++ b/docs/reference/plugins-outputs-stomp.md
@@ -0,0 +1,168 @@
+---
+navigation_title: "stomp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-stomp.html
+---
+
+# Stomp output plugin [plugins-outputs-stomp]
+
+
+* Plugin version: v3.0.9
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.9/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-stomp-index.md).
+
+## Installation [_installation_48]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-stomp`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_114]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-stomp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_114]
+
+This output writes events using the STOMP protocol.
+
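+A minimal sketch, assuming a STOMP broker reachable at `activemq.example.org` and a `/topic/logstash` destination (both placeholders):
+
+```ruby
+output {
+  stomp {
+    host        => "activemq.example.org"   # hypothetical broker address
+    port        => 61613
+    destination => "/topic/logstash"
+    user        => "logstash"
+    password    => "secret"
+  }
+}
+```
+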
+
+## Stomp Output Configuration Options [plugins-outputs-stomp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-stomp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`debug`](#plugins-outputs-stomp-debug) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`destination`](#plugins-outputs-stomp-destination) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`headers`](#plugins-outputs-stomp-headers) | [hash](/reference/configuration-file-structure.md#hash) | No |
+| [`host`](#plugins-outputs-stomp-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-outputs-stomp-password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`port`](#plugins-outputs-stomp-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`user`](#plugins-outputs-stomp-user) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`vhost`](#plugins-outputs-stomp-vhost) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-stomp-common-options) for a list of options supported by all output plugins.
+
+
+
+### `debug` [plugins-outputs-stomp-debug]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable debugging output.
+
+
+### `destination` [plugins-outputs-stomp-destination]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The destination to write events to. Supports string expansion, meaning `%{{foo}}` values will expand to the field value.
+
+Example: "/topic/logstash"
+
+
+### `headers` [plugins-outputs-stomp-headers]
+
+* Value type is [hash](/reference/configuration-file-structure.md#hash)
+* There is no default value for this setting.
+
+Custom headers to send with each message. Supports string expansion, meaning `%{{foo}}` values will expand to the field value.
+
+Example: `headers => ["amq-msg-type", "text", "host", "%{{host}}"]`
+
+
+### `host` [plugins-outputs-stomp-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The address of the STOMP server.
+
+
+### `password` [plugins-outputs-stomp-password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `""`
+
+The password to authenticate with.
+
+
+### `port` [plugins-outputs-stomp-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `61613`
+
+The port to connect to on your STOMP server.
+
+
+### `user` [plugins-outputs-stomp-user]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `""`
+
+The username to authenticate with.
+
+
+### `vhost` [plugins-outputs-stomp-vhost]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `nil`
+
+The vhost to use
+
+
+
+## Common options [plugins-outputs-stomp-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-stomp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-stomp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-stomp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-stomp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-stomp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-stomp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type, for example, if you have 2 stomp outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ stomp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-syslog.md b/docs/reference/plugins-outputs-syslog.md
new file mode 100644
index 000000000..866acb45b
--- /dev/null
+++ b/docs/reference/plugins-outputs-syslog.md
@@ -0,0 +1,267 @@
+---
+navigation_title: "syslog"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-syslog.html
+---
+
+# Syslog output plugin [plugins-outputs-syslog]
+
+
+* Plugin version: v3.0.5
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-syslog-index.md).
+
+## Installation [_installation_49]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-syslog`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_115]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-syslog). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_115]
+
+Send events to a syslog server.
+
+You can send messages compliant with RFC3164 or RFC5424 using either UDP or TCP as the transport protocol.
+
+By default, the contents of the `message` field will be shipped as the free-form message text part of the emitted syslog message. If your messages don’t have a `message` field, or if for some other reason you want to change the emitted message, modify the `message` configuration option.
+
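+For example, a sketch like the following would ship RFC 5424 messages over TCP to a collector at `syslog.example.org` (a placeholder hostname):
+
+```ruby
+output {
+  syslog {
+    host     => "syslog.example.org"   # hypothetical collector
+    port     => 514
+    protocol => "tcp"
+    rfc      => "rfc5424"
+    appname  => "logstash"
+  }
+}
+```
+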
+
+## Syslog Output Configuration Options [plugins-outputs-syslog-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-syslog-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`appname`](#plugins-outputs-syslog-appname) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`facility`](#plugins-outputs-syslog-facility) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`host`](#plugins-outputs-syslog-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`message`](#plugins-outputs-syslog-message) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`msgid`](#plugins-outputs-syslog-msgid) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-outputs-syslog-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`priority`](#plugins-outputs-syslog-priority) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`procid`](#plugins-outputs-syslog-procid) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`protocol`](#plugins-outputs-syslog-protocol) | [string](/reference/configuration-file-structure.md#string), one of `["tcp", "udp", "ssl-tcp"]` | No |
+| [`reconnect_interval`](#plugins-outputs-syslog-reconnect_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`rfc`](#plugins-outputs-syslog-rfc) | [string](/reference/configuration-file-structure.md#string), one of `["rfc3164", "rfc5424"]` | No |
+| [`severity`](#plugins-outputs-syslog-severity) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`sourcehost`](#plugins-outputs-syslog-sourcehost) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_cacert`](#plugins-outputs-syslog-ssl_cacert) | a valid filesystem path | No |
+| [`ssl_cert`](#plugins-outputs-syslog-ssl_cert) | a valid filesystem path | No |
+| [`ssl_key`](#plugins-outputs-syslog-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-outputs-syslog-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_verify`](#plugins-outputs-syslog-ssl_verify) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_labels`](#plugins-outputs-syslog-use_labels) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+
+Also see [Common options](#plugins-outputs-syslog-common-options) for a list of options supported by all output plugins.
+
+
+
+### `appname` [plugins-outputs-syslog-appname]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"LOGSTASH"`
+
+Application name for the syslog message. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `facility` [plugins-outputs-syslog-facility]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"user-level"`
+
+Facility label for the syslog message. Falls back to `user-level` by default, as in RFC 3164. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `host` [plugins-outputs-syslog-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Syslog server address to connect to.
+
+
+### `message` [plugins-outputs-syslog-message]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{message}}"`
+
+Message text to log. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `msgid` [plugins-outputs-syslog-msgid]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"-"`
+
+Message ID for the syslog message. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `port` [plugins-outputs-syslog-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+Syslog server port to connect to.
+
+
+### `priority` [plugins-outputs-syslog-priority]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"%{{syslog_pri}}"`
+
+Syslog priority. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `procid` [plugins-outputs-syslog-procid]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"-"`
+
+Process ID for the syslog message. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `protocol` [plugins-outputs-syslog-protocol]
+
+* Value can be any of: `tcp`, `udp`, `ssl-tcp`
+* Default value is `"udp"`
+
+Syslog server protocol. You can choose between `udp`, `tcp`, and SSL/TLS over TCP (`ssl-tcp`).
+
+
+### `reconnect_interval` [plugins-outputs-syslog-reconnect_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Retry interval, in seconds, when the connection fails.
+
+
+### `rfc` [plugins-outputs-syslog-rfc]
+
+* Value can be any of: `rfc3164`, `rfc5424`
+* Default value is `"rfc3164"`
+
+Syslog message format. You can choose between `rfc3164` and `rfc5424`.
+
+
+### `severity` [plugins-outputs-syslog-severity]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"notice"`
+
+Severity label for the syslog message. Falls back to `notice` by default, as in RFC 3164. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `sourcehost` [plugins-outputs-syslog-sourcehost]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `%{{host}}`
+
+Source host for the syslog message. The new value can include `%{{foo}}` strings to help you build a new value from other parts of the event.
+
+
+### `ssl_cacert` [plugins-outputs-syslog-ssl_cacert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
+
+
+### `ssl_cert` [plugins-outputs-syslog-ssl_cert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL certificate path
+
+
+### `ssl_key` [plugins-outputs-syslog-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key path
+
+
+### `ssl_key_passphrase` [plugins-outputs-syslog-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+SSL key passphrase
+
+
+### `ssl_verify` [plugins-outputs-syslog-ssl_verify]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Verify the identity of the other end of the SSL connection against the CA.
+
+
+### `use_labels` [plugins-outputs-syslog-use_labels]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Use label parsing for the severity and facility levels. If set to `false`, the `priority` field is used instead.
+
+
+
+## Common options [plugins-outputs-syslog-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-syslog-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-syslog-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-syslog-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-syslog-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-syslog-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-syslog-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 syslog outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ syslog {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-tcp.md b/docs/reference/plugins-outputs-tcp.md
new file mode 100644
index 000000000..ca5abd4ec
--- /dev/null
+++ b/docs/reference/plugins-outputs-tcp.md
@@ -0,0 +1,250 @@
+---
+navigation_title: "tcp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-tcp.html
+---
+
+# Tcp output plugin [plugins-outputs-tcp]
+
+
+* Plugin version: v7.0.0
+* Released on: 2025-01-10
+* [Changelog](https://github.com/logstash-plugins/logstash-output-tcp/blob/v7.0.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-tcp-index.md).
+
+## Getting help [_getting_help_116]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-tcp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_116]
+
+Write events over a TCP socket.
+
+By default this plugin uses the `json` codec. To have each event's JSON separated by a newline, use the `json_lines` codec.
+
+Can either accept connections from clients or connect to a server, depending on `mode`.
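+
+As a hedged sketch, a client-mode configuration that ships newline-delimited JSON to a remote collector might look like this (the host and port are illustrative):
+
+```json
+output {
+  tcp {
+    host => "collector.example.com"   # illustrative remote address
+    port => 5044
+    mode => "client"                  # the default: connect out to the server
+    codec => json_lines               # one JSON document per line
+  }
+}
+```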
+
+
+## Tcp Output Configuration Options [plugins-outputs-tcp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-tcp-common-options) described later.
+
+::::{note}
+As of version `7.0.0` of this plugin, a number of previously deprecated settings related to SSL have been removed. Please see the [TCP Output Obsolete Configuration Options](#plugins-outputs-tcp-obsolete-options) for more details.
+::::
+
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-tcp-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`mode`](#plugins-outputs-tcp-mode) | [string](/reference/configuration-file-structure.md#string), one of `["server", "client"]` | No |
+| [`port`](#plugins-outputs-tcp-port) | [number](/reference/configuration-file-structure.md#number) | Yes |
+| [`reconnect_interval`](#plugins-outputs-tcp-reconnect_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`ssl_certificate`](#plugins-outputs-tcp-ssl_certificate) | a valid filesystem path | No |
+| [`ssl_certificate_authorities`](#plugins-outputs-tcp-ssl_certificate_authorities) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`ssl_cipher_suites`](#plugins-outputs-tcp-ssl_cipher_suites) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_client_authentication`](#plugins-outputs-tcp-ssl_client_authentication) | [string](/reference/configuration-file-structure.md#string), one of `["none", "optional", "required"]` | No |
+| [`ssl_enabled`](#plugins-outputs-tcp-ssl_enabled) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`ssl_key`](#plugins-outputs-tcp-ssl_key) | a valid filesystem path | No |
+| [`ssl_key_passphrase`](#plugins-outputs-tcp-ssl_key_passphrase) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`ssl_supported_protocols`](#plugins-outputs-tcp-ssl_supported_protocols) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_verification_mode`](#plugins-outputs-tcp-ssl_verification_mode) | [string](/reference/configuration-file-structure.md#string), one of `["full", "none"]` | No |
+
+Also see [Common options](#plugins-outputs-tcp-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-tcp-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+When mode is `server`, the address to listen on. When mode is `client`, the address to connect to.
+
+
+### `mode` [plugins-outputs-tcp-mode]
+
+* Value can be any of: `server`, `client`
+* Default value is `"client"`
+
+Mode to operate in. `server` listens for client connections, `client` connects to a server.
+
+
+### `port` [plugins-outputs-tcp-port]
+
+* This is a required setting.
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* There is no default value for this setting.
+
+When mode is `server`, the port to listen on. When mode is `client`, the port to connect to.
+
+
+### `reconnect_interval` [plugins-outputs-tcp-reconnect_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+The retry interval, in seconds, to use when a connection attempt fails.
+
+
+### `ssl_certificate` [plugins-outputs-tcp-ssl_certificate]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+Path to certificate in PEM format. This certificate will be presented to the other party of the TLS connection.
+
+
+### `ssl_certificate_authorities` [plugins-outputs-tcp-ssl_certificate_authorities]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* Default value is `[]`
+
+Validate client certificate or certificate chain against these authorities. You can define multiple files. All the certificates will be read and added to the trust store. The system CA path is automatically included.
+
+
+### `ssl_cipher_suites` [plugins-outputs-tcp-ssl_cipher_suites]
+
+* Value type is a list of [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting
+
+The list of cipher suites to use, listed by priorities. Supported cipher suites vary depending on the Java and protocol versions.
+
+
+### `ssl_client_authentication` [plugins-outputs-tcp-ssl_client_authentication]
+
+* Value can be any of: `none`, `optional`, `required`
+* Default value is `none`
+
+Controls the server’s behavior in regard to requesting a certificate from client connections: `none` disables the client authentication. `required` forces a client to present a certificate, while `optional` requests a client certificate but the client is not required to present one.
+
+When mutual TLS is enabled (`optional` or `required`), the certificate presented by the client must be signed by trusted [`ssl_certificate_authorities`](#plugins-outputs-tcp-ssl_certificate_authorities) (CAs). Please note that the server does not validate the client certificate CN (Common Name) or SAN (Subject Alternative Name).
+
+::::{note}
+This setting can be used only if [`mode`](#plugins-outputs-tcp-mode) is `server` and [`ssl_certificate_authorities`](#plugins-outputs-tcp-ssl_certificate_authorities) is set.
+::::
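+
+The following is a minimal sketch of a server-mode configuration that enforces mutual TLS; the certificate and key paths are placeholders:
+
+```json
+output {
+  tcp {
+    host => "0.0.0.0"
+    port => 9443
+    mode => "server"
+    ssl_enabled => true
+    ssl_certificate => "/path/to/server.crt"
+    ssl_key => "/path/to/server.key"
+    ssl_certificate_authorities => ["/path/to/ca.crt"]
+    ssl_client_authentication => "required"   # clients must present a trusted certificate
+  }
+}
+```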
+
+
+
+### `ssl_enabled` [plugins-outputs-tcp-ssl_enabled]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+
+### `ssl_key` [plugins-outputs-tcp-ssl_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+SSL key path
+
+
+### `ssl_key_passphrase` [plugins-outputs-tcp-ssl_key_passphrase]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* Default value is `nil`
+
+SSL key passphrase
+
+
+### `ssl_supported_protocols` [plugins-outputs-tcp-ssl_supported_protocols]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Allowed values are: `'TLSv1.1'`, `'TLSv1.2'`, `'TLSv1.3'`
+* Default depends on the JDK being used. With up-to-date Logstash, the default is `['TLSv1.2', 'TLSv1.3']`. `'TLSv1.1'` is not considered secure and is only provided for legacy applications.
+
+List of allowed SSL/TLS versions to use when establishing a secure connection.
+
+::::{note}
+If you configure the plugin to use `'TLSv1.1'` on any recent JVM, such as the one packaged with Logstash, the protocol is disabled by default and needs to be enabled manually by changing `jdk.tls.disabledAlgorithms` in the **$JDK_HOME/conf/security/java.security** configuration file. That is, `TLSv1.1` needs to be removed from the list.
+::::
+
+
+
+### `ssl_verification_mode` [plugins-outputs-tcp-ssl_verification_mode]
+
+* Value can be any of: `full`, `none`
+* Default value is `full`
+
+Defines how to verify the certificates presented by the other party in the TLS connection:
+
+`full` validates that the server certificate has an issue date that’s within the not_before and not_after dates, chains to a trusted Certificate Authority (CA), and has a hostname or IP address that matches the names within the certificate.
+
+`none` performs no certificate validation.
+
+::::{note}
+This setting can be used only if [`mode`](#plugins-outputs-tcp-mode) is `client`.
+::::
+
+
+
+
+## TCP Output Obsolete Configuration Options [plugins-outputs-tcp-obsolete-options]
+
+::::{warning}
+As of version `6.0.0` of this plugin, some configuration options have been replaced. The plugin will fail to start if it contains any of these obsolete options.
+::::
+
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl_cacert | [`ssl_certificate_authorities`](#plugins-outputs-tcp-ssl_certificate_authorities) |
+| ssl_cert | [`ssl_certificate`](#plugins-outputs-tcp-ssl_certificate) |
+| ssl_enable | [`ssl_enabled`](#plugins-outputs-tcp-ssl_enabled) |
+| ssl_verify | [`ssl_client_authentication`](#plugins-outputs-tcp-ssl_client_authentication) in `server` mode and [`ssl_verification_mode`](#plugins-outputs-tcp-ssl_verification_mode) in `client` mode |
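+
+As a hedged migration sketch, a client-mode configuration that used the obsolete flags could be rewritten with the new option names like this (the host, port, and paths are illustrative):
+
+```json
+output {
+  tcp {
+    host => "collector.example.com"
+    port => 5044
+    ssl_enabled => true                                  # was: ssl_enable
+    ssl_verification_mode => "full"                      # was: ssl_verify (client mode)
+    ssl_certificate_authorities => ["/path/to/ca.crt"]   # was: ssl_cacert
+  }
+}
+```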
+
+
+## Common options [plugins-outputs-tcp-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-tcp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-tcp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-tcp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-tcp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-tcp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-tcp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 tcp outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ tcp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-timber.md b/docs/reference/plugins-outputs-timber.md
new file mode 100644
index 000000000..d37eb28cf
--- /dev/null
+++ b/docs/reference/plugins-outputs-timber.md
@@ -0,0 +1,243 @@
+---
+navigation_title: "timber"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-timber.html
+---
+
+# Timber output plugin [plugins-outputs-timber]
+
+
+* Plugin version: v1.0.3
+* Released on: 2017-09-02
+* [Changelog](https://github.com/logstash-plugins/logstash-output-timber/blob/v1.0.3/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-timber-index.md).
+
+## Installation [_installation_50]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-timber`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_117]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-timber). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_117]
+
+This output sends structured events to the [Timber.io logging service](https://timber.io). Timber is a cloud-based logging service designed for developers, providing easy features out of the box that make you more productive. [Tail users](https://timber.io/docs/app/console/tail-a-user), [trace requests](https://timber.io/docs/app/console/trace-http-requests), [inspect HTTP parameters](https://timber.io/docs/app/console/inspect-http-requests), and [search](https://timber.io/docs/app/console/searching) on rich structured data without sacrificing readability.
+
+Internally, it’s a highly efficient HTTP transport that uses batching and retries for fast and reliable delivery.
+
+This output will execute up to *pool_max* requests in parallel. Consider this when tuning this plugin for performance. The default of 50 should be sufficient for most setups.
+
+Additionally, note that when parallel execution is used, strict ordering of events is not guaranteed!
+
+
+## Timber Output Configuration Options [plugins-outputs-timber-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-timber-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`api_key`](#plugins-outputs-timber-api_key) | Your Timber.io API key | No |
+| [`cacert`](#plugins-outputs-timber-cacert) | a valid filesystem path | No |
+| [`client_cert`](#plugins-outputs-timber-client_cert) | a valid filesystem path | No |
+| [`client_key`](#plugins-outputs-timber-client_key) | a valid filesystem path | No |
+| [`connect_timeout`](#plugins-outputs-timber-connect_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`keystore`](#plugins-outputs-timber-keystore) | a valid filesystem path | No |
+| [`keystore_password`](#plugins-outputs-timber-keystore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`keystore_type`](#plugins-outputs-timber-keystore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`pool_max`](#plugins-outputs-timber-pool_max) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`proxy`](#plugins-outputs-timber-proxy) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`request_timeout`](#plugins-outputs-timber-request_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`socket_timeout`](#plugins-outputs-timber-socket_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`truststore`](#plugins-outputs-timber-truststore) | a valid filesystem path | No |
+| [`truststore_password`](#plugins-outputs-timber-truststore_password) | [password](/reference/configuration-file-structure.md#password) | No |
+| [`truststore_type`](#plugins-outputs-timber-truststore_type) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-timber-common-options) for a list of options supported by all output plugins.
+
+
+
+### `api_key` [plugins-outputs-timber-api_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Your Timber.io API key. You can obtain your API key by creating an app in the [Timber console](https://app.timber.io).
+
+
+### `cacert` [plugins-outputs-timber-cacert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to that here.
+
+
+### `client_cert` [plugins-outputs-timber-client_cert]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you’d like to use a client certificate (note: most people don’t want this), set the path to the x509 cert here.
+
+
+### `client_key` [plugins-outputs-timber-client_key]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you’re using a client certificate, specify the path to the encryption key here.
+
+
+### `connect_timeout` [plugins-outputs-timber-connect_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10s`
+
+
+### `keystore` [plugins-outputs-timber-keystore]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
+
+
+### `keystore_password` [plugins-outputs-timber-keystore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Specify the keystore password here. Note, most .jks files created with keytool require a password!
+
+
+### `keystore_type` [plugins-outputs-timber-keystore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+
+### `pool_max` [plugins-outputs-timber-pool_max]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`
+
+
+### `proxy` [plugins-outputs-timber-proxy]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+If you’d like to use an HTTP proxy, this setting supports multiple configuration syntaxes (see the sketch after the list):
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
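+
+For example, here is a hedged sketch using the third syntax. The proxy URL and credentials are illustrative, and the API key is read from an environment variable:
+
+```json
+output {
+  timber {
+    api_key => "${TIMBER_API_KEY}"
+    proxy => {
+      url => "http://proxy.example.org:3128"
+      user => "proxyuser"
+      password => "changeme"
+    }
+  }
+}
+```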
+
+
+### `request_timeout` [plugins-outputs-timber-request_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `60`
+
+Timeout (in seconds) for the entire request. This plugin’s HTTP client is based on [Manticore](https://github.com/cheald/manticore); for an example of its usage see [https://github.com/logstash-plugins/logstash-input-http_poller](https://github.com/logstash-plugins/logstash-input-http_poller).
+
+
+### `socket_timeout` [plugins-outputs-timber-socket_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10s`
+
+
+### `ssl_certificate_validation` [plugins-outputs-timber-ssl_certificate_validation]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Set this to false to disable SSL/TLS certificate validation. Note: setting this to false is generally considered insecure!
+
+
+### `truststore` [plugins-outputs-timber-truststore]
+
+* Value type is [path](/reference/configuration-file-structure.md#path)
+* There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
+
+
+### `truststore_password` [plugins-outputs-timber-truststore_password]
+
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+Specify the truststore password here. Note, most .jks files created with keytool require a password!
+
+
+### `truststore_type` [plugins-outputs-timber-truststore_type]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+
+
+## Common options [plugins-outputs-timber-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-timber-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-timber-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-timber-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-timber-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-timber-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-timber-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 timber outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ timber {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-udp.md b/docs/reference/plugins-outputs-udp.md
new file mode 100644
index 000000000..7f46ca4db
--- /dev/null
+++ b/docs/reference/plugins-outputs-udp.md
@@ -0,0 +1,127 @@
+---
+navigation_title: "udp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-udp.html
+---
+
+# Udp output plugin [plugins-outputs-udp]
+
+
+* Plugin version: v3.2.0
+* Released on: 2021-07-14
+* [Changelog](https://github.com/logstash-plugins/logstash-output-udp/blob/v3.2.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-udp-index.md).
+
+## Getting help [_getting_help_118]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-udp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_118]
+
+Send events over UDP
+
+Keep in mind that UDP does not provide delivery or duplicate protection guarantees. Even when this plugin succeeds at writing to the UDP socket, there is no guarantee that the recipient will receive exactly one copy of the event.
+
+When this plugin fails to write to the UDP socket, by default the event will be dropped and the error message will be logged. The [`retry_count`](#plugins-outputs-udp-retry_count) option in conjunction with the [`retry_backoff_ms`](#plugins-outputs-udp-retry_backoff_ms) option can be used to retry a failed write for a number of times before dropping the event.
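+
+For example, a minimal sketch that retries a failed write a few times before dropping the event (the destination is illustrative):
+
+```json
+output {
+  udp {
+    host => "metrics.example.com"   # illustrative destination
+    port => "8125"
+    retry_count => 3                # retry a failed write up to 3 times
+    retry_backoff_ms => 50          # wait 50 ms between attempts
+  }
+}
+```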
+
+
+## Udp Output Configuration Options [plugins-outputs-udp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-udp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-udp-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`port`](#plugins-outputs-udp-port) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`retry_count`](#plugins-outputs-udp-retry_count) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_backoff_ms`](#plugins-outputs-udp-retry_backoff_ms) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-udp-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-udp-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The address to send messages to
+
+
+### `port` [plugins-outputs-udp-port]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The port to send messages on. This can be dynamic using the `%{[target][port]}` syntax.
+
+
+### `retry_count` [plugins-outputs-udp-retry_count]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0`
+
+The number of times to retry a failed UDP socket write.
+
+
+### `retry_backoff_ms` [plugins-outputs-udp-retry_backoff_ms]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10`
+
+The amount of time to wait, in milliseconds, before attempting to retry a failed UDP socket write.
+
+
+
+## Common options [plugins-outputs-udp-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-udp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-udp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-udp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-udp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"json"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-udp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-udp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 udp outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ udp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-webhdfs.md b/docs/reference/plugins-outputs-webhdfs.md
new file mode 100644
index 000000000..cedf138dc
--- /dev/null
+++ b/docs/reference/plugins-outputs-webhdfs.md
@@ -0,0 +1,332 @@
+---
+navigation_title: "webhdfs"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-webhdfs.html
+---
+
+# Webhdfs output plugin [plugins-outputs-webhdfs]
+
+
+* Plugin version: v3.1.0
+* Released on: 2023-10-03
+* [Changelog](https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-webhdfs-index.md).
+
+## Getting help [_getting_help_119]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-webhdfs). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_119]
+
+This plugin sends Logstash events into files in HDFS via the [webhdfs](https://hadoop.apache.org/docs/r1.0.4/webhdfs.html) REST API.
+
+
+## Dependencies [_dependencies]
+
+This plugin has no dependency on jars from hadoop, thus reducing configuration and compatibility problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (@see: [https://github.com/kzk/webhdfs](https://github.com/kzk/webhdfs)). Optional dependencies are zlib and snappy gem if you use the compression functionality.
+
+
+## Operational Notes [_operational_notes]
+
+If you get an error like:
+
+```
+Max write retries reached. Exception: initialize: name or service not known {:level=>:error}
+```
+make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it’s running on.
+
+
+## Usage [_usage_5]
+
+This is an example of Logstash config:
+
+```ruby
+input {
+ ...
+}
+filter {
+ ...
+}
+output {
+ webhdfs {
+ host => "127.0.0.1" # (required)
+ port => 50070 # (optional, default: 50070)
+ path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log" # (required)
+ user => "hue" # (required)
+ }
+}
+```
+
+
+## Webhdfs Output Configuration Options [plugins-outputs-webhdfs-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-webhdfs-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`compression`](#plugins-outputs-webhdfs-compression) | [string](/reference/configuration-file-structure.md#string), one of `["none", "snappy", "gzip"]` | No |
+| [`flush_size`](#plugins-outputs-webhdfs-flush_size) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`host`](#plugins-outputs-webhdfs-host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`idle_flush_time`](#plugins-outputs-webhdfs-idle_flush_time) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`kerberos_keytab`](#plugins-outputs-webhdfs-kerberos_keytab) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`open_timeout`](#plugins-outputs-webhdfs-open_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`path`](#plugins-outputs-webhdfs-path) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`port`](#plugins-outputs-webhdfs-port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`read_timeout`](#plugins-outputs-webhdfs-read_timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_interval`](#plugins-outputs-webhdfs-retry_interval) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`retry_known_errors`](#plugins-outputs-webhdfs-retry_known_errors) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`retry_times`](#plugins-outputs-webhdfs-retry_times) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`single_file_per_thread`](#plugins-outputs-webhdfs-single_file_per_thread) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`snappy_bufsize`](#plugins-outputs-webhdfs-snappy_bufsize) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`snappy_format`](#plugins-outputs-webhdfs-snappy_format) | [string](/reference/configuration-file-structure.md#string), one of `["stream", "file"]` | No |
+| [`ssl_cert`](#plugins-outputs-webhdfs-ssl_cert) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`ssl_key`](#plugins-outputs-webhdfs-ssl_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`standby_host`](#plugins-outputs-webhdfs-standby_host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`standby_port`](#plugins-outputs-webhdfs-standby_port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`use_httpfs`](#plugins-outputs-webhdfs-use_httpfs) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_kerberos_auth`](#plugins-outputs-webhdfs-use_kerberos_auth) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`use_ssl_auth`](#plugins-outputs-webhdfs-use_ssl_auth) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`user`](#plugins-outputs-webhdfs-user) | [string](/reference/configuration-file-structure.md#string) | Yes |
+
+Also see [Common options](#plugins-outputs-webhdfs-common-options) for a list of options supported by all output plugins.
+
+
+
+### `compression` [plugins-outputs-webhdfs-compression]
+
+* Value can be any of: `none`, `snappy`, `gzip`
+* Default value is `"none"`
+
+Compress output. One of [*none*, *snappy*, *gzip*]
+
+
+### `flush_size` [plugins-outputs-webhdfs-flush_size]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `500`
+
+Send data to webhdfs if the event count exceeds this value, even if `store_interval_in_secs` is not reached.
+
+
+### `host` [plugins-outputs-webhdfs-host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The server name for webhdfs/httpfs connections.
+
+
+### `idle_flush_time` [plugins-outputs-webhdfs-idle_flush_time]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+Send data to webhdfs at intervals of this many seconds.
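+
+As a hedged sketch, `flush_size`, `idle_flush_time`, and `compression` can be combined to trade a little latency for fewer, larger writes. The host, user, and numbers below are illustrative:
+
+```ruby
+output {
+  webhdfs {
+    host => "namenode.example.com"   # illustrative namenode
+    user => "logstash"
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"
+    compression => "gzip"            # gzip-compress data on write
+    flush_size => 1000               # flush once 1000 events are buffered...
+    idle_flush_time => 5             # ...or every 5 seconds, whichever comes first
+  }
+}
+```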
+
+
+### `kerberos_keytab` [plugins-outputs-webhdfs-kerberos_keytab]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set kerberos keytab file. Note that the gssapi library needs to be available to use this.
+
+
+### `open_timeout` [plugins-outputs-webhdfs-open_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `30`
+
+WebHdfs open timeout, default 30s.
+
+
+### `path` [plugins-outputs-webhdfs-path]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The path to the file to write to. Event fields can be used here, as well as date fields in the joda time format, e.g.: `/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log`
+
+
+### `port` [plugins-outputs-webhdfs-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50070`
+
+The server port for webhdfs/httpfs connections.
+
+
+### `read_timeout` [plugins-outputs-webhdfs-read_timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `30`
+
+The WebHdfs read timeout, default 30s.
+
+
+### `retry_interval` [plugins-outputs-webhdfs-retry_interval]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `0.5`
+
+How long to wait between retries.
+
+
+### `retry_known_errors` [plugins-outputs-webhdfs-retry_known_errors]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Retry some known webhdfs errors. These may be caused by race conditions when appending to same file, etc.
+
+
+### `retry_times` [plugins-outputs-webhdfs-retry_times]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `5`
+
+How many times to retry. If `retry_times` is exceeded, an error will be logged and the event will be discarded.
+
+
+### `single_file_per_thread` [plugins-outputs-webhdfs-single_file_per_thread]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Avoid appending to the same file from multiple threads. This solves some problems with multiple Logstash output threads and locked file leases in webhdfs. If this option is set to true, `%{[@metadata][thread_id]}` needs to be used in the `path` config setting.
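+
+A minimal sketch of this option, with an illustrative host and user, might look like the following; the thread id in the path keeps each output worker writing to its own file:
+
+```ruby
+output {
+  webhdfs {
+    host => "namenode.example.com"   # illustrative namenode
+    user => "logstash"
+    single_file_per_thread => true
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{[@metadata][thread_id]}.log"
+  }
+}
+```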
+
+
+### `snappy_bufsize` [plugins-outputs-webhdfs-snappy_bufsize]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `32768`
+
+Set the snappy chunksize. Only necessary for the stream format. Defaults to 32k. Max is 65536. See [http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt](http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt).
+
+
+### `snappy_format` [plugins-outputs-webhdfs-snappy_format]
+
+* Value can be any of: `stream`, `file`
+* Default value is `"stream"`
+
+Set the snappy format, one of `stream` or `file`. Set to `stream` to be Hive compatible.
+
+
+### `ssl_cert` [plugins-outputs-webhdfs-ssl_cert]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set ssl cert file.
+
+
+### `ssl_key` [plugins-outputs-webhdfs-ssl_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Set ssl key file.
+
+
+### `standby_host` [plugins-outputs-webhdfs-standby_host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `false`
+
+Standby namenode for HA HDFS.
+
+
+### `standby_port` [plugins-outputs-webhdfs-standby_port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `50070`
+
+Standby namenode port for HA HDFS.
+
+
+### `use_httpfs` [plugins-outputs-webhdfs-use_httpfs]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Use httpfs mode if set to true, else webhdfs.
+
+
+### `use_kerberos_auth` [plugins-outputs-webhdfs-use_kerberos_auth]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set kerberos authentication.
+
+
+### `use_ssl_auth` [plugins-outputs-webhdfs-use_ssl_auth]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `false`
+
+Set ssl authentication. Note that the openssl library needs to be available to use this.
+
+
+### `user` [plugins-outputs-webhdfs-user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The Username for webhdfs.
+
+
+
+## Common options [plugins-outputs-webhdfs-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-webhdfs-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-webhdfs-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-webhdfs-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-webhdfs-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"line"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-webhdfs-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-webhdfs-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 webhdfs outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ webhdfs {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-websocket.md b/docs/reference/plugins-outputs-websocket.md
new file mode 100644
index 000000000..42d9545a3
--- /dev/null
+++ b/docs/reference/plugins-outputs-websocket.md
@@ -0,0 +1,112 @@
+---
+navigation_title: "websocket"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-websocket.html
+---
+
+# Websocket output plugin [plugins-outputs-websocket]
+
+
+* Plugin version: v3.1.0
+* Released on: 2024-01-11
+* [Changelog](https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.1.0/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-websocket-index.md).
+
+## Installation [_installation_51]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-websocket`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_120]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-websocket). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_120]
+
+This output runs a websocket server and publishes any messages to all connected websocket clients.
+
+You can connect to it with `ws://<host>:<port>/`.
+
+If no clients are connected, any messages received are ignored.
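+
+A minimal sketch, using the default port, could look like this:
+
+```json
+output {
+  websocket {
+    host => "127.0.0.1"   # listen on localhost only
+    port => 3232          # the default port
+  }
+}
+```
+
+A client would then connect to `ws://127.0.0.1:3232/` and receive each event as it is published.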
+
+
+## Websocket Output Configuration Options [plugins-outputs-websocket-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-websocket-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-websocket-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`port`](#plugins-outputs-websocket-port) | [number](/reference/configuration-file-structure.md#number) | No |
+
+Also see [Common options](#plugins-outputs-websocket-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-websocket-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"0.0.0.0"`
+
+The address to serve websocket data from
+
+
+### `port` [plugins-outputs-websocket-port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `3232`
+
+The port to serve websocket data from
+
+
+
+## Common options [plugins-outputs-websocket-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-websocket-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-websocket-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-websocket-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-websocket-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-websocket-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-websocket-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 websocket outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ websocket {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-xmpp.md b/docs/reference/plugins-outputs-xmpp.md
new file mode 100644
index 000000000..5c509658a
--- /dev/null
+++ b/docs/reference/plugins-outputs-xmpp.md
@@ -0,0 +1,149 @@
+---
+navigation_title: "xmpp"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-xmpp.html
+---
+
+# Xmpp output plugin [plugins-outputs-xmpp]
+
+
+* Plugin version: v3.0.8
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.8/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-xmpp-index.md).
+
+## Installation [_installation_52]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-xmpp`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_121]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-xmpp). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_121]
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can use it for PubSub or general message passing from one Logstash instance to another.
+
+
+## Xmpp Output Configuration Options [plugins-outputs-xmpp-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-xmpp-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`host`](#plugins-outputs-xmpp-host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`message`](#plugins-outputs-xmpp-message) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`password`](#plugins-outputs-xmpp-password) | [password](/reference/configuration-file-structure.md#password) | Yes |
+| [`rooms`](#plugins-outputs-xmpp-rooms) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`user`](#plugins-outputs-xmpp-user) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`users`](#plugins-outputs-xmpp-users) | [array](/reference/configuration-file-structure.md#array) | No |
+
+Also see [Common options](#plugins-outputs-xmpp-common-options) for a list of options supported by all output plugins.
+
+
+
+### `host` [plugins-outputs-xmpp-host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The xmpp server to connect to. This is optional. If you omit this setting, the host on the user/identity is used. (foo.com for `user@foo.com`)
+
+
+### `message` [plugins-outputs-xmpp-message]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{{host}}`
+
+
+### `password` [plugins-outputs-xmpp-password]
+
+* This is a required setting.
+* Value type is [password](/reference/configuration-file-structure.md#password)
+* There is no default value for this setting.
+
+The xmpp password for the user/identity.
+
+
+### `rooms` [plugins-outputs-xmpp-rooms]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that you want to join, for example `room@conference.domain/nick`.
+
+
+### `user` [plugins-outputs-xmpp-user]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+
+### `users` [plugins-outputs-xmpp-users]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+The users to send messages to.
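+
+As a hedged sketch, a configuration that posts events to a multi-user chat room might look like this. The identity, room, and password source are illustrative:
+
+```json
+output {
+  xmpp {
+    user => "logstash@example.com"
+    password => "${XMPP_PASSWORD}"                        # illustrative: read from an environment variable
+    rooms => ["alerts@conference.example.com/logstash"]
+    message => "Event on %{host}: %{message}"             # dynamic fields from the event
+  }
+}
+```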
+
+
+
+## Common options [plugins-outputs-xmpp-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-xmpp-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-xmpp-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-xmpp-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-xmpp-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-xmpp-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-xmpp-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 xmpp outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ xmpp {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/plugins-outputs-zabbix.md b/docs/reference/plugins-outputs-zabbix.md
new file mode 100644
index 000000000..f81c17a2a
--- /dev/null
+++ b/docs/reference/plugins-outputs-zabbix.md
@@ -0,0 +1,190 @@
+---
+navigation_title: "zabbix"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/plugins-outputs-zabbix.html
+---
+
+# Zabbix output plugin [plugins-outputs-zabbix]
+
+
+* Plugin version: v3.0.5
+* Released on: 2018-04-06
+* [Changelog](https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.5/CHANGELOG.md)
+
+For other versions, see the [Versioned plugin docs](logstash-docs://reference/output-zabbix-index.md).
+
+## Installation [_installation_53]
+
+For plugins not bundled by default, it is easy to install by running `bin/logstash-plugin install logstash-output-zabbix`. See [Working with plugins](/reference/working-with-plugins.md) for more details.
+
+
+## Getting help [_getting_help_122]
+
+For questions about the plugin, open a topic in the [Discuss](http://discuss.elastic.co) forums. For bugs or feature requests, open an issue in [Github](https://github.com/logstash-plugins/logstash-output-zabbix). For the list of Elastic supported plugins, please consult the [Elastic Support Matrix](https://www.elastic.co/support/matrix#logstash_plugins).
+
+
+## Description [_description_122]
+
+The Zabbix output is used to send item data (key/value pairs) to a Zabbix server. The event `@timestamp` will automatically be associated with the Zabbix item data.
+
+The Zabbix Sender protocol is described at [https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0](https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0). Zabbix uses a kind of nested key/value store.
+
+```txt
+ host
+ ├── item1
+ │ └── value1
+ ├── item2
+ │ └── value2
+ ├── ...
+ │ └── ...
+ ├── item_n
+ │ └── value_n
+```
+
+Each "host" is an identifier, and each item is associated with that host. Items are typed on the Zabbix side. You can send numbers as strings and Zabbix will Do The Right Thing.
+
+In the Zabbix UI, ensure that your hostname matches the value referenced by `zabbix_host`. Create the item with the key as it appears in the field referenced by `zabbix_key`. In the item configuration window, ensure that the type dropdown is set to Zabbix Trapper. Also be sure to set the type of information that Zabbix should expect for this item.
+
+This plugin does not currently send in batches. While it is possible to do so, this is not supported. Be careful not to flood your Zabbix server with too many events per second.
+
+::::{note}
+This plugin will log a warning if a necessary field is missing. It will not attempt to resend if Zabbix is down, but will log an error message.
+::::
+
+
+
+## Zabbix Output Configuration Options [plugins-outputs-zabbix-options]
+
+This plugin supports the following configuration options plus the [Common options](#plugins-outputs-zabbix-common-options) described later.
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`multi_value`](#plugins-outputs-zabbix-multi_value) | [array](/reference/configuration-file-structure.md#array) | No |
+| [`timeout`](#plugins-outputs-zabbix-timeout) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`zabbix_host`](#plugins-outputs-zabbix-zabbix_host) | [string](/reference/configuration-file-structure.md#string) | Yes |
+| [`zabbix_key`](#plugins-outputs-zabbix-zabbix_key) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`zabbix_server_host`](#plugins-outputs-zabbix-zabbix_server_host) | [string](/reference/configuration-file-structure.md#string) | No |
+| [`zabbix_server_port`](#plugins-outputs-zabbix-zabbix_server_port) | [number](/reference/configuration-file-structure.md#number) | No |
+| [`zabbix_value`](#plugins-outputs-zabbix-zabbix_value) | [string](/reference/configuration-file-structure.md#string) | No |
+
+Also see [Common options](#plugins-outputs-zabbix-common-options) for a list of options supported by all output plugins.
+
+
+
+### `multi_value` [plugins-outputs-zabbix-multi_value]
+
+* Value type is [array](/reference/configuration-file-structure.md#array)
+* There is no default value for this setting.
+
+Use the `multi_value` directive to send multiple key/value pairs. This can be thought of as an array, like:
+
+`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]`
+
+…where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` is an instance of `zabbix_value`. If the field referenced by any `zabbix_key` or `zabbix_value` does not exist, that entry will be ignored.
+
+This directive cannot be used in conjunction with the single-value directives `zabbix_key` and `zabbix_value`.
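+
+For example, here is a minimal sketch (the field names are illustrative) that sends two key/value pairs per event. Each entry in `multi_value` names an event field: odd entries hold item keys and even entries hold item values:
+
+```ruby
+output {
+  zabbix {
+    zabbix_host => "[@metadata][zabbix_host]"
+    # key field, value field, key field, value field
+    multi_value => [ "[@metadata][key_cpu]", "cpu_load", "[@metadata][key_mem]", "mem_used" ]
+  }
+}
+```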
+
+
+### `timeout` [plugins-outputs-zabbix-timeout]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `1`
+
+The number of seconds to wait before giving up on a connection to the Zabbix server. This number should be very small, otherwise delays in delivery of other outputs could result.
+
+
+### `zabbix_host` [plugins-outputs-zabbix-zabbix_host]
+
+* This is a required setting.
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+The field name which holds the Zabbix host name. This can be a sub-field of the @metadata field.
+
+
+### `zabbix_key` [plugins-outputs-zabbix-zabbix_key]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+A single field name which holds the value you intend to use as the Zabbix item key. This can be a sub-field of the @metadata field. This directive will be ignored if using `multi_value`.
+
+::::{important}
+`zabbix_key` is required if not using `multi_value`.
+::::
+
+
+
+### `zabbix_server_host` [plugins-outputs-zabbix-zabbix_server_host]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"localhost"`
+
+The IP or resolvable hostname where the Zabbix server is running.
+
+
+### `zabbix_server_port` [plugins-outputs-zabbix-zabbix_server_port]
+
+* Value type is [number](/reference/configuration-file-structure.md#number)
+* Default value is `10051`
+
+The port on which the Zabbix server is running.
+
+
+### `zabbix_value` [plugins-outputs-zabbix-zabbix_value]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* Default value is `"message"`
+
+The field name which holds the value you want to send. This directive will be ignored if using `multi_value`.
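+
+For reference, here is a minimal single-value sketch (the server address and field names are assumptions) that ties these options together:
+
+```ruby
+output {
+  zabbix {
+    zabbix_server_host => "zabbix.example.com"
+    zabbix_host       => "[@metadata][zabbix_host]"
+    zabbix_key        => "[@metadata][zabbix_key]"
+    zabbix_value      => "message"
+  }
+}
+```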
+
+
+
+## Common options [plugins-outputs-zabbix-common-options]
+
+These configuration options are supported by all output plugins:
+
+| Setting | Input type | Required |
+| --- | --- | --- |
+| [`codec`](#plugins-outputs-zabbix-codec) | [codec](/reference/configuration-file-structure.md#codec) | No |
+| [`enable_metric`](#plugins-outputs-zabbix-enable_metric) | [boolean](/reference/configuration-file-structure.md#boolean) | No |
+| [`id`](#plugins-outputs-zabbix-id) | [string](/reference/configuration-file-structure.md#string) | No |
+
+### `codec` [plugins-outputs-zabbix-codec]
+
+* Value type is [codec](/reference/configuration-file-structure.md#codec)
+* Default value is `"plain"`
+
+The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output without needing a separate filter in your Logstash pipeline.
+
+
+### `enable_metric` [plugins-outputs-zabbix-enable_metric]
+
+* Value type is [boolean](/reference/configuration-file-structure.md#boolean)
+* Default value is `true`
+
+Disable or enable metric logging for this specific plugin instance. By default we record all the metrics we can, but you can disable metrics collection for a specific plugin.
+
+
+### `id` [plugins-outputs-zabbix-id]
+
+* Value type is [string](/reference/configuration-file-structure.md#string)
+* There is no default value for this setting.
+
+Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. It is strongly recommended to set this ID in your configuration. This is particularly useful when you have two or more plugins of the same type. For example, if you have 2 zabbix outputs. Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+
+```json
+output {
+ zabbix {
+ id => "my_plugin_id"
+ }
+}
+```
+
+::::{note}
+Variable substitution in the `id` field only supports environment variables and does not support the use of values from the secret store.
+::::
+
+
+
+
diff --git a/docs/reference/private-rubygem.md b/docs/reference/private-rubygem.md
new file mode 100644
index 000000000..ea3be84dc
--- /dev/null
+++ b/docs/reference/private-rubygem.md
@@ -0,0 +1,55 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/private-rubygem.html
+---
+
+# Private Gem Repositories [private-rubygem]
+
+The Logstash plugin manager connects to a Ruby gems repository to install and update Logstash plugins. By default, this repository is [http://rubygems.org](http://rubygems.org).
+
+In some cases you cannot use the default repository, as in the following examples:
+
+* A firewall blocks access to the default repository.
+* You are developing your own plugins locally.
+* The local system has air-gap requirements.
+
+When you use a custom gem repository, be sure to make plugin dependencies available.
+
+Several open source projects enable you to run your own plugin server, among them:
+
+* [Geminabox](https://github.com/geminabox/geminabox)
+* [Gemirro](https://github.com/PierreRambaud/gemirro)
+* [Gemfury](https://gemfury.com/)
+* [Artifactory](http://www.jfrog.com/open-source/)
+
+## Editing the Gemfile [_editing_the_gemfile]
+
+The gemfile is a configuration file that specifies information required for plugin management. Each gemfile has a `source` line that specifies a location for plugin content.
+
+By default, the gemfile’s `source` line reads:
+
+```ruby
+# This is a Logstash generated Gemfile.
+# If you modify this file manually all comments and formatting will be lost.
+
+source "https://rubygems.org"
+```
+
+To change the source, edit the `source` line to contain your preferred source, as in the following example:
+
+```ruby
+# This is a Logstash generated Gemfile.
+# If you modify this file manually all comments and formatting will be lost.
+
+source "https://my.private.repository"
+```
+
+After saving the new version of the gemfile, use [plugin management commands](/reference/working-with-plugins.md) normally.
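+
+For example, a plugin installation run after the change resolves gems from the new source (the plugin name here is only illustrative):
+
+```shell
+bin/logstash-plugin install logstash-output-zabbix
+```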
+
+The following links contain further material on setting up some commonly used repositories:
+
+* [Geminabox](https://github.com/geminabox/geminabox/blob/master/README.md)
+* [Artifactory](https://www.jfrog.com/confluence/display/RTF/RubyGems+Repositories)
+* Running a [rubygems mirror](http://guides.rubygems.org/run-your-own-gem-server/)
+
+
diff --git a/docs/reference/processing.md b/docs/reference/processing.md
new file mode 100644
index 000000000..4837ab00b
--- /dev/null
+++ b/docs/reference/processing.md
@@ -0,0 +1,48 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/processing.html
+---
+
+# Processing Details [processing]
+
+Understanding how {{ls}} works and how its components interrelate can help you make better decisions when you are setting up or adjusting your {{ls}} environment. This section introduces the concepts you need to reach that level of understanding.
+
+::::{note}
+This is a new section. We’re still working on it.
+::::
+
+
+
+## Event ordering [event-ordering]
+
+By design and by default, {{ls}} does not guarantee event order. Reordering can occur in two places:
+
+* Events within a batch can be reordered during filter processing.
+* In-flight batches can be reordered when one or more batches are processed faster than others.
+
+When maintaining event order is important, use a single worker and set `pipeline.ordered: true`. This approach ensures that batches are computed one after the other, and that events maintain their order within the batch.
+
+
+### `pipeline.ordered` setting [order-setting]
+
+The `pipeline.ordered` setting in [logstash.yml](/reference/logstash-settings-file.md) gives you more control over event ordering for single worker pipelines.
+
+* `auto` automatically enables ordering if the `pipeline.workers` setting is also set to `1`.
+* `true` enforces ordering on the pipeline and prevents Logstash from starting if there are multiple workers.
+* `false` disables the processing required to preserve order. Ordering is not guaranteed, but you save the processing cost of preserving it.
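+
+For example, a minimal `logstash.yml` sketch that enforces ordering:
+
+```yaml
+pipeline.workers: 1
+pipeline.ordered: true
+```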
+
+
+## Java pipeline initialization time [pipeline-init-time]
+
+The Java pipeline initialization time appears in the startup logs at INFO level. Initialization time is the time it takes to compile the pipeline config and instantiate the compiled execution for all workers.
+
+
+## Reserved fields in {{ls}} events [reserved-fields]
+
+Some fields in {{ls}} events are reserved, or are required to adhere to a certain shape. Using these fields can cause runtime exceptions when the event API or plugins encounter incompatible values.
+
+| Field | Description |
+| --- | --- |
+| [`@metadata`](/reference/event-dependent-configuration.md#metadata) | A key/value map. Ruby-based Plugin API: value is an [org.jruby.RubyHash](https://javadoc.io/static/org.jruby/jruby-core/9.2.5.0/org/jruby/RubyHash.html). Java-based Plugin API: value is an [org.logstash.ConvertedMap](https://github.com/elastic/logstash/blob/main/logstash-core/src/main/java/org/logstash/ConvertedMap.java). In serialized form (such as JSON): a key/value map where the keys must be strings and the values are not constrained to a particular type. |
+| `@timestamp` | An object holding a representation of a specific moment in time. Ruby-based Plugin API: value is an [org.jruby.RubyTime](https://javadoc.io/static/org.jruby/jruby-core/9.2.5.0/org/jruby/RubyTime.html). Java-based Plugin API: value is a [java.time.Instant](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/time/Instant.html). In serialized form (such as JSON) or when setting with Event#set: an ISO8601-compliant String value is acceptable. |
+| `@version` | A string, holding an integer value. |
+| `tags` | An array of distinct strings. |
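+
+For illustration, here is a small pipeline sketch (the field names are assumptions) that stays within these constraints by writing a nested value under `@metadata` and letting the `date` filter set `@timestamp` from an ISO8601 string:
+
+```ruby
+filter {
+  mutate {
+    # @metadata is a key/value map, so nested fields use bracket notation
+    add_field => { "[@metadata][source]" => "syslog" }
+  }
+  date {
+    # parses an ISO8601 string from the log_time field and stores the result in @timestamp
+    match => [ "log_time", "ISO8601" ]
+  }
+}
+```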
+
diff --git a/docs/reference/queues-data-resiliency.md b/docs/reference/queues-data-resiliency.md
new file mode 100644
index 000000000..4ccb7a1fa
--- /dev/null
+++ b/docs/reference/queues-data-resiliency.md
@@ -0,0 +1,21 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/resiliency.html
+---
+
+# Queues and data resiliency [resiliency]
+
+By default, Logstash uses [in-memory bounded queues](/reference/memory-queue.md) between pipeline stages (inputs → pipeline workers) to buffer events.
+
+As data flows through the event processing pipeline, Logstash may encounter situations that prevent it from delivering events to the configured output. For example, the data might contain unexpected data types, or Logstash might terminate abnormally.
+
+To guard against data loss and ensure that events flow through the pipeline without interruption, Logstash provides data resiliency features.
+
+* [Persistent queues (PQ)](/reference/persistent-queues.md) protect against data loss by storing events in an internal queue on disk.
+* [Dead letter queues (DLQ)](/reference/dead-letter-queues.md) provide on-disk storage for events that Logstash is unable to process so that you can evaluate them. You can easily reprocess events in the dead letter queue by using the `dead_letter_queue` input plugin.
+
+These resiliency features are disabled by default. To turn on these features, you must explicitly enable them in the Logstash [settings file](/reference/logstash-settings-file.md).
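+
+For example, a minimal `logstash.yml` sketch that turns on both features:
+
+```yaml
+queue.type: persisted
+dead_letter_queue.enable: true
+```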
+
+
+
+
diff --git a/docs/reference/reloading-config.md b/docs/reference/reloading-config.md
new file mode 100644
index 000000000..052791cec
--- /dev/null
+++ b/docs/reference/reloading-config.md
@@ -0,0 +1,58 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/reloading-config.html
+---
+
+# Reloading the Config File [reloading-config]
+
+You can set Logstash to detect and reload configuration changes automatically.
+
+To enable automatic config reloading, start Logstash with the `--config.reload.automatic` (or `-r`) command-line option specified. For example:
+
+```shell
+bin/logstash -f apache.config --config.reload.automatic
+```
+
+::::{note}
+The `--config.reload.automatic` option is not available when you specify the `-e` flag to pass in configuration settings from the command-line.
+::::
+
+
+By default, Logstash checks for configuration changes every 3 seconds. To change this interval, use the `--config.reload.interval <interval>` option, where `interval` specifies how often Logstash checks the config files for changes.
+
+Note that the unit qualifier (`s`) is required.
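+
+For example, the following sketch enables automatic reloading and checks for changes every 30 seconds:
+
+```shell
+bin/logstash -f apache.config --config.reload.automatic --config.reload.interval 30s
+```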
+
+## Force reloading the config file [force-reload]
+
+If Logstash is already running without auto-reload enabled, you can force Logstash to reload the config file and restart the pipeline. Do this by sending a SIGHUP (signal hangup) to the process running Logstash. For example:
+
+```shell
+kill -SIGHUP 14175
+```
+
+Where `14175` is the ID of the process running Logstash.
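+
+If you do not know the process ID, a command such as the following can help you find it (assuming a typical Linux or macOS system with `pgrep` available):
+
+```shell
+pgrep -f logstash
+```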
+
+::::{note}
+This functionality is not supported on Windows OS.
+::::
+
+
+
+## How automatic config reloading works [_how_automatic_config_reloading_works]
+
+When Logstash detects a change in a config file, it stops the current pipeline by stopping all inputs, and it attempts to create a new pipeline that uses the updated configuration. After validating the syntax of the new configuration, Logstash verifies that all inputs and outputs can be initialized (for example, that all required ports are open). If the checks are successful, Logstash swaps the existing pipeline with the new pipeline. If the checks fail, the old pipeline continues to function, and the errors are propagated to the console.
+
+During automatic config reloading, the JVM is not restarted. Creating and swapping pipelines happens within the same process.
+
+Changes to [grok](/reference/plugins-filters-grok.md) pattern files are also reloaded, but only when a change in the config file triggers a reload (or the pipeline is restarted).
+
+In general, Logstash does not watch or monitor configuration files that are used or referenced by inputs, filters, or outputs.
+
+
+## Plugins that prevent automatic reloading [plugins-block-reload]
+
+Input and output plugins usually interact with OS resources. In some circumstances, those resources cannot be released without a restart. For this reason, some plugins cannot simply be updated, and this prevents the pipeline from reloading.
+
+The [stdin input](/reference/plugins-inputs-stdin.md) plugin, for example, prevents reloading for these reasons.
+
+
diff --git a/docs/reference/running-logstash-command-line.md b/docs/reference/running-logstash-command-line.md
new file mode 100644
index 000000000..591d52e21
--- /dev/null
+++ b/docs/reference/running-logstash-command-line.md
@@ -0,0 +1,165 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/running-logstash-command-line.html
+---
+
+# Running Logstash from the Command Line [running-logstash-command-line]
+
+::::{admonition} macOS Gatekeeper warnings
+:class: important
+
+Apple’s rollout of stricter notarization requirements affected the notarization of the 9.0.0-beta1 {{ls}} artifacts. If macOS Catalina displays a dialog when you first run {{ls}} that interrupts it, you will need to take an action to allow it to run. To prevent Gatekeeper checks on the {{ls}} files, run the following command on the downloaded `.tar.gz` archive or the directory to which it was extracted:
+
+```sh
+xattr -d -r com.apple.quarantine <archive-or-directory>
+```
+
+For example, if the `.tar.gz` file was extracted to the default logstash-9.0.0-beta1 directory, the command is:
+
+```sh
+xattr -d -r com.apple.quarantine logstash-9.0.0-beta1
+```
+
+Alternatively, you can add a security override if a Gatekeeper popup appears by following the instructions in the *How to open an app that hasn’t been notarized or is from an unidentified developer* section of [Safely open apps on your Mac](https://support.apple.com/en-us/HT202491).
+
+::::
+
+
+To run Logstash from the command line, use the following command:
+
+```shell
+bin/logstash [options]
+```
+
+To run Logstash from the Windows command line, use the following command:
+
+```shell
+bin/logstash.bat [options]
+```
+
+Where `options` are [command-line](#command-line-flags) flags that you can specify to control Logstash execution. The location of the `bin` directory varies by platform. See [Logstash Directory Layout](/reference/dir-layout.md) to find the location of `bin\logstash` on your system.
+
+The following example runs Logstash and loads the Logstash config defined in the `mypipeline.conf` file:
+
+```shell
+bin/logstash -f mypipeline.conf
+```
+
+Any flags that you set at the command line override the corresponding settings in [logstash.yml](/reference/logstash-settings-file.md), but the file itself is not changed. It remains as-is for subsequent Logstash runs.
+
+Specifying command line options is useful when you are testing Logstash. However, in a production environment, we recommend that you use [logstash.yml](/reference/logstash-settings-file.md) to control Logstash execution. Using the settings file makes it easier for you to specify multiple options, and it provides you with a single, versionable file that you can use to start up Logstash consistently for each run.
+
+## Command-Line Flags [command-line-flags]
+
+Logstash has the following flags. You can use the `--help` flag to display this information.
+
+**`--node.name NAME`**
+: Specify the name of this Logstash instance. If no value is given it will default to the current hostname.
+
+**`-f, --path.config CONFIG_PATH`**
+: Load the Logstash config from a specific file or directory. If a directory is given, all files in that directory will be concatenated in lexicographical order and then parsed as a single config file. Specifying this flag multiple times is not supported. If you specify this flag multiple times, Logstash uses the last occurrence (for example, `-f foo -f bar` is the same as `-f bar`).
+
+ You can specify wildcards ([globs](/reference/glob-support.md)) and any matched files will be loaded in the order described above. For example, you can use the wildcard feature to load specific files by name:
+
+ ```shell
+ bin/logstash --debug -f '/tmp/{one,two,three}'
+ ```
+
+ With this command, Logstash concatenates three config files, `/tmp/one`, `/tmp/two`, and `/tmp/three`, and parses them into a single config.
+
+
+**`-e, --config.string CONFIG_STRING`**
+: Use the given string as the configuration data. Same syntax as the config file. If no input is specified, then the following is used as the default input: `input { stdin { type => stdin } }` and if no output is specified, then the following is used as the default output: `output { stdout { codec => rubydebug } }`. If you wish to use both defaults, please use the empty string for the `-e` flag. The default is nil.
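+
+    For example, this sketch runs a complete pipeline from the command line without a config file:
+
+    ```shell
+    bin/logstash -e 'input { stdin { } } output { stdout { codec => rubydebug } }'
+    ```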
+
+**`--plugin-classloaders`**
+: (Beta) Load Java plugins in independent classloaders to isolate their dependencies.
+
+**`--pipeline.id ID`**
+: Sets the ID of the pipeline. The default is `main`.
+
+**`-w, --pipeline.workers COUNT`**
+: Sets the number of pipeline workers to run. This option sets the number of workers that will, in parallel, execute the filter and output stages of the pipeline. If you find that events are backing up, or that the CPU is not saturated, consider increasing this number to better utilize machine processing power. The default is the number of the host’s CPU cores.
+
+**`--pipeline.ordered ORDERED`**
+: Preserves event order. Possible values are `auto` (default), `true` and `false`. This setting will work only when also using a single worker for the pipeline. Note that when enabled, it may impact the performance of the filters and output processing. The `auto` option will automatically enable ordering if the `pipeline.workers` setting is set to `1`. Use `true` to enable ordering on the pipeline and prevent Logstash from starting if there are multiple workers. Use `false` to disable any extra processing necessary for preserving ordering.
+
+**`-b, --pipeline.batch.size SIZE`**
+: Size of batches the pipeline is to work in. This option defines the maximum number of events an individual worker thread will collect from inputs before attempting to execute its filters and outputs. The default is 125 events. Larger batch sizes are generally more efficient, but come at the cost of increased memory overhead. You may need to increase JVM heap space in the `jvm.options` config file. See [Logstash Configuration Files](/reference/config-setting-files.md) for more info.
+
+**`-u, --pipeline.batch.delay DELAY_IN_MS`**
+: When creating pipeline batches, how long to wait while polling for the next event. This option defines how long in milliseconds to wait while polling for the next event before dispatching an undersized batch to filters and outputs. The default is 50ms.
+
+**`--pipeline.ecs_compatibility MODE`**
+: Sets the process default value for ECS compatibility mode. Can be an ECS version like `v1` or `v8`, or `disabled`. The default is `v8`. Pipelines defined before Logstash 8 operated without ECS in mind. To ensure a migrated pipeline continues to operate as it did in older releases of Logstash, opt-OUT of ECS for the individual pipeline by setting `pipeline.ecs_compatibility: disabled` in its `pipelines.yml` definition. Using the command-line flag will set the default for *all* pipelines, including new ones. See [ECS compatibility](/reference/ecs-ls.md#ecs-compatibility) for more info.
+
+**`--pipeline.unsafe_shutdown`**
+: Force Logstash to exit during shutdown even if there are still inflight events in memory. By default, Logstash will refuse to quit until all received events have been pushed to the outputs. Enabling this option can lead to data loss during shutdown.
+
+**`--path.data PATH`**
+: This should point to a writable directory. Logstash will use this directory whenever it needs to store data. Plugins will also have access to this path. The default is the `data` directory under Logstash home.
+
+**`-p, --path.plugins PATH`**
+: A path of where to find custom plugins. This flag can be given multiple times to include multiple paths. Plugins are expected to be in a specific directory hierarchy: `PATH/logstash/TYPE/NAME.rb` where `TYPE` is `inputs`, `filters`, `outputs`, or `codecs`, and `NAME` is the name of the plugin.
+
+**`-l, --path.logs PATH`**
+: Directory to write Logstash internal logs to.
+
+**`--log.level LEVEL`**
+: Set the log level for Logstash. Possible values are:
+
+ * `fatal`: log very severe error messages that will usually be followed by the application aborting
+ * `error`: log errors
+ * `warn`: log warnings
+ * `info`: log verbose info (this is the default)
+ * `debug`: log debugging info (for developers)
+ * `trace`: log finer-grained messages beyond debugging info
+
+
+**`--config.debug`**
+: Show the fully compiled configuration as a debug log message (you must also have `--log.level=debug` enabled). WARNING: The log message will include any *password* options passed to plugin configs as plaintext, and may result in plaintext passwords appearing in your logs!
+
+**`-i, --interactive SHELL`**
+: Drop to shell instead of running as normal. Valid shells are "irb" and "pry".
+
+**`-V, --version`**
+: Emit the version of Logstash and its friends, then exit.
+
+**`-t, --config.test_and_exit`**
+: Check configuration for valid syntax and then exit. Note that grok patterns are not checked for correctness with this flag. Logstash can read multiple config files from a directory. If you combine this flag with `--log.level=debug`, Logstash will log the combined config file, annotating each config block with the source file it came from.
+
+**`-r, --config.reload.automatic`**
+: Monitor configuration changes and reload whenever the configuration is changed. NOTE: Use SIGHUP to manually reload the config. The default is false.
+
+**`--config.reload.interval RELOAD_INTERVAL`**
+: How frequently to poll the configuration location for changes. The default value is "3s". Note that the unit qualifier (`s`) is required.
+
+**`--api.enabled ENABLED`**
+: The HTTP API is enabled by default, but can be disabled by passing `false` to this option.
+
+**`--api.http.host HTTP_HOST`**
+: Web API binding host. This option specifies the bind address for the metrics REST endpoint. The default is "127.0.0.1".
+
+**`--api.http.port HTTP_PORT`**
+: Web API HTTP port. This option specifies the bind port for the metrics REST endpoint. The default is `9600-9700`. This setting accepts a range in the format `9600-9700`; Logstash picks up the first available port in that range.
+
+**`--log.format FORMAT`**
+: Specify if Logstash should write its own logs in JSON form (one event per line) or in plain text (using Ruby’s Object#inspect). The default is "plain".
+
+**`--log.format.json.fix_duplicate_message_fields ENABLED`**
+: Avoid `message` field collision using JSON log format. Possible values are `true` (default) and `false`.
+
+**`--path.settings SETTINGS_DIR`**
+: Set the directory containing the `logstash.yml` [settings file](/reference/logstash-settings-file.md) as well as the log4j logging configuration. This can also be set through the LS_SETTINGS_DIR environment variable. The default is the `config` directory under Logstash home.
+
+**`--enable-local-plugin-development`**
+: This flag enables developers to update their local Gemfile without running into issues caused by a frozen lockfile. This flag can be helpful when you are developing/testing plugins locally.
+
+::::{note}
+This flag is for Logstash developers only. End users should not need it.
+::::
+
+
+**`-h, --help`**
+: Print help.
+
+
diff --git a/docs/reference/running-logstash-kubernetes.md b/docs/reference/running-logstash-kubernetes.md
new file mode 100644
index 000000000..a35dec203
--- /dev/null
+++ b/docs/reference/running-logstash-kubernetes.md
@@ -0,0 +1,9 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/running-logstash-kubernetes.html
+---
+
+# Running Logstash on Kubernetes [running-logstash-kubernetes]
+
+Check out the [QuickStart](docs-content://deploy-manage/deploy/cloud-on-k8s/install-using-yaml-manifest-quickstart.md) to install ECK and [Run {{ls}} on ECK](docs-content://deploy-manage/deploy/cloud-on-k8s/logstash.md) to deploy {{ls}} with ECK.
+
diff --git a/docs/reference/running-logstash-windows.md b/docs/reference/running-logstash-windows.md
new file mode 100644
index 000000000..66b0ec4ce
--- /dev/null
+++ b/docs/reference/running-logstash-windows.md
@@ -0,0 +1,198 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/running-logstash-windows.html
+---
+
+# Running Logstash on Windows [running-logstash-windows]
+
+Before reading this section, see [Installing Logstash](/reference/installing-logstash.md) to get started. You also need to be familiar with [Running Logstash from the Command Line](/reference/running-logstash-command-line.md) as command line options are used to test running Logstash on Windows.
+
+::::{important}
+Specifying command line options is useful when you are testing Logstash. However, in a production environment, we recommend that you use [logstash.yml](/reference/logstash-settings-file.md) to control Logstash execution. Using the settings file makes it easier for you to specify multiple options, and it provides you with a single, versionable file that you can use to start up Logstash consistently for each run.
+::::
+
+
+Logstash is not started automatically after installation. How you start and stop Logstash on Windows depends on whether you want to run it manually, as a service (with [NSSM](https://nssm.cc/)), or as a scheduled task. This guide provides examples of some of the ways you can run Logstash on Windows.
+
+::::{note}
+It is recommended to validate that your configuration works by running Logstash manually before running Logstash as a service or a scheduled task.
+::::
+
+
+## Validating JVM prerequisites on Windows [running-logstash-windows-validation]
+
+After installing a [supported JVM](https://www.elastic.co/support/matrix#matrix_jvm), open a [PowerShell](https://docs.microsoft.com/en-us/powershell/) session and run the following commands to verify that `LS_JAVA_HOME` is set and to check the Java version:
+
+### `Write-Host $env:LS_JAVA_HOME` [_write_host_envls_java_home]
+
+* The output should point to where the JVM software is located, for example:
+
+ ```sh
+ PS C:\> Write-Host $env:LS_JAVA_HOME
+ C:\Program Files\Java\jdk-11.0.3
+ ```
+
+* If `LS_JAVA_HOME` is not set, perform one of the following:
+
+ * Set using the GUI:
+
+ * Navigate to the Windows [Environmental Variables](https://docs.microsoft.com/en-us/windows/win32/procthread/environment-variables) window
+ * In the Environmental Variables window, edit LS_JAVA_HOME to point to where the JDK software is located, for example: `C:\Program Files\Java\jdk-11.0.3`
+
+ * Set using PowerShell:
+
+ * In an Administrative PowerShell session, execute the following [SETX](https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/setx) commands:
+
+ ```sh
+ PS C:\Windows\system32> SETX /m LS_JAVA_HOME "C:\Program Files\Java\jdk-11.0.3"
+ PS C:\Windows\system32> SETX /m PATH "$env:PATH;C:\Program Files\Java\jdk-11.0.3\bin;"
+ ```
+
+ * Exit PowerShell, then open a new PowerShell session and run `Write-Host $env:LS_JAVA_HOME` to verify
+
+
+
+### `Java -version` [_java_version]
+
+* This command produces output similar to the following:
+
+ ```sh
+ PS C:\> Java -version
+ java version "11.0.3" 2019-04-16 LTS
+ Java(TM) SE Runtime Environment 18.9 (build 11.0.3+12-LTS)
+ Java HotSpot(TM) 64-Bit Server VM 18.9 (build 11.0.3+12-LTS, mixed mode)
+ ```
+
+
+Once you have completed [*Setting Up and Running Logstash*](/reference/setting-up-running-logstash.md) and validated the JVM prerequisites, you may proceed.
+
+::::{note}
+For the examples listed below, we are running Windows Server 2016 and Java 11.0.3, have extracted the [Logstash ZIP package](https://www.elastic.co/downloads/logstash) to `C:\logstash-9.0.0\`, and are using the example `syslog.conf` file shown below (stored in `C:\logstash-9.0.0\config\`).
+::::
+
+
+
+
+## Running Logstash manually [running-logstash-windows-manual]
+
+Logstash can be run manually using [PowerShell](https://docs.microsoft.com/en-us/powershell/). Open an Administrative [PowerShell](https://docs.microsoft.com/en-us/powershell/) session, then run the following commands:
+
+```sh
+PS C:\Windows\system32> cd C:\logstash-9.0.0\
+PS C:\logstash-9.0.0> .\bin\logstash.bat -f .\config\syslog.conf
+```
+
+::::{note}
+In a production environment, we recommend that you use [logstash.yml](/reference/logstash-settings-file.md) to control Logstash execution.
+::::
+
+
+Wait for the following messages to appear, to confirm Logstash has started successfully:
+
+```sh
+[logstash.runner ] Starting Logstash {"logstash.version"=>"9.0.0"}
+[logstash.inputs.udp ] Starting UDP listener {:address=>"0.0.0.0:514"}
+[logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
+```
+
+
+## Running Logstash as a service with NSSM [running-logstash-windows-nssm]
+
+::::{note}
+It is recommended to validate that your configuration works by running Logstash manually before you proceed.
+::::
+
+
+Download [NSSM](https://nssm.cc/), then extract `nssm.exe` from `nssm-<version>\win64\nssm.exe` to `C:\logstash-9.0.0\bin\`. Open an Administrative [PowerShell](https://docs.microsoft.com/en-us/powershell/) session and run the following commands:
+
+```sh
+PS C:\Windows\system32> cd C:\logstash-9.0.0\
+PS C:\logstash-9.0.0> .\bin\nssm.exe install logstash
+```
+
+Once the `NSSM service installer` window appears, specify the following parameters:
+
+* In the `Application` tab:
+
+ * Path: Path to `logstash.bat`: `C:\logstash-9.0.0\bin\logstash.bat`
+ * Startup Directory: Path to the `bin` directory: `C:\logstash-9.0.0\bin`
+ * Arguments: For this example to start Logstash: `-f C:\logstash-9.0.0\config\syslog.conf`
+
+ ::::{note}
+ In a production environment, we recommend that you use [logstash.yml](/reference/logstash-settings-file.md) to control Logstash execution.
+ ::::
+
+* Review and make any changes necessary in the `Details` tab:
+
+ * Ensure `Startup Type` is set appropriately
+ * Set the `Display name` and `Description` fields to something relevant
+
+* Review any other required settings (for the example we aren’t making any other changes)
+
+ * Be sure to determine if you need to set the `Log on` user
+
+* Validate the `Service name` is set appropriately
+
+ * For this example, we will set ours to `logstash-syslog`
+
+* Click `Install Service`
+
+ * Click *OK* when the `Service "logstash-syslog" installed successfully!` window appears
+
+
+Once the service has been installed with NSSM, validate and start the service following the [PowerShell Managing Services](https://docs.microsoft.com/en-us/powershell/scripting/samples/managing-services) documentation.
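+
+For example, a quick sketch using the service name from this example:
+
+```sh
+PS C:\Windows\system32> Start-Service logstash-syslog
+PS C:\Windows\system32> Get-Service logstash-syslog
+```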
+
+
+## Running Logstash with Task Scheduler [running-logstash-windows-scheduledtask]
+
+::::{note}
+It is recommended to validate that your configuration works by running Logstash manually before you proceed.
+::::
+
+
+Open the Windows [Task Scheduler](https://docs.microsoft.com/en-us/windows/desktop/taskschd/task-scheduler-start-page), then click `Create Task` in the Actions window. Specify the following parameters:
+
+* In the `Actions` tab:
+
+ * Click `New`, then specify the following:
+ * Action: `Start a program`
+ * Program/script: `C:\logstash-9.0.0\bin\logstash.bat`
+ * Add arguments: `-f C:\logstash-9.0.0\config\syslog.conf`
+ * Start in: `C:\logstash-9.0.0\bin\`
+
+ ::::{note}
+ In a production environment, we recommend that you use [logstash.yml](/reference/logstash-settings-file.md) to control Logstash execution.
+ ::::
+
+* Review and make any changes necessary in the `General`, `Triggers`, `Conditions`, and `Settings` tabs.
+* Click `OK` to finish creating the scheduled task.
+* Once the new task has been created, either wait for it to run on the schedule or select the task, then click `Run` to start it.
+
+::::{note}
+Logstash can be stopped by selecting the task, then clicking `End` in the Task Scheduler window.
+::::
+
+
+
+## Example Logstash Configuration [running-logstash-windows-example]
+
+We will configure Logstash to listen for syslog messages on UDP port 514 with this configuration (file name is `syslog.conf`):
+
+```ruby
+# Sample Logstash configuration for receiving
+# UDP syslog messages over port 514
+
+input {
+ udp {
+ port => 514
+ type => "syslog"
+ }
+}
+
+output {
+ elasticsearch { hosts => ["localhost:9200"] }
+ stdout { codec => rubydebug }
+}
+```
+
+
diff --git a/docs/reference/running-logstash.md b/docs/reference/running-logstash.md
new file mode 100644
index 000000000..4283b2d20
--- /dev/null
+++ b/docs/reference/running-logstash.md
@@ -0,0 +1,27 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/running-logstash.html
+---
+
+# Running Logstash as a Service on Debian or RPM [running-logstash]
+
+Logstash is not started automatically after installation. Starting and stopping Logstash depends on the init system of the underlying operating system, which is now systemd.
+
+As systemd is now the de facto init system, here are some common operating systems and versions that use it. This list is intended to be informative, not exhaustive.
+
+| Distribution | Service System |
+| --- | --- |
+| Ubuntu 16.04 and newer | [systemd](#running-logstash-systemd) |
+| Debian 8 "jessie" and newer | [systemd](#running-logstash-systemd) |
+| CentOS (and RHEL) 7 and newer | [systemd](#running-logstash-systemd) |
+
+## Running Logstash by Using Systemd [running-logstash-systemd]
+
+Distributions like Debian Jessie, Ubuntu 15.10+, and many of the SUSE derivatives use systemd and the `systemctl` command to start and stop services. Logstash places the systemd unit files in `/etc/systemd/system` for both deb and rpm. After installing the package, you can start up Logstash with:
+
+```sh
+sudo systemctl start logstash.service
+```
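+
+Similarly, you can check on or stop the service, for example:
+
+```sh
+sudo systemctl status logstash.service
+sudo systemctl stop logstash.service
+```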
+
+
diff --git a/docs/reference/secure-connection.md b/docs/reference/secure-connection.md
new file mode 100644
index 000000000..dcb85687d
--- /dev/null
+++ b/docs/reference/secure-connection.md
@@ -0,0 +1,498 @@
+---
+navigation_title: "Secure your connection"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ls-security.html
+---
+
+# Secure your connection to {{es}} [ls-security]
+
+
+The Logstash {{es}} [output](/reference/plugins-outputs-elasticsearch.md), [input](/reference/plugins-inputs-elasticsearch.md), and [filter](/reference/plugins-filters-elasticsearch.md) plugins, as well as [monitoring](monitoring-logstash.md) and central management, support authentication and encryption over HTTPS.
+
+{{es}} clusters are secured by default (starting in 8.0). You need to configure authentication credentials for Logstash in order to establish communication. Logstash throws an exception and the processing pipeline is halted if authentication fails.
+
+In addition to configuring authentication credentials for Logstash, you need to grant authorized users permission to access the Logstash indices.
+
+Security is enabled by default on the {{es}} cluster (starting in 8.0). You must enable TLS/SSL in the {{es}} output section of the Logstash configuration in order to allow Logstash to communicate with the {{es}} cluster.
+
+
+## {{es}} security on by default [es-security-on]
+
+{{es}} generates its own default self-signed Secure Sockets Layer (SSL) certificates at startup.
+
+{{ls}} must establish a Secure Sockets Layer (SSL) connection before it can transfer data to a secured {{es}} cluster. {{ls}} must have a copy of the certificate authority (CA) that signed the {{es}} cluster’s certificates. When a new {{es}} cluster is started up *without* dedicated certificates, it generates its own default self-signed Certificate Authority at startup. See [Starting the Elastic Stack with security enabled](docs-content://deploy-manage/deploy/self-managed/installing-elasticsearch.md) for more info.
+
+{{ess}} uses certificates signed by standard publicly trusted certificate authorities, and therefore setting a cacert is not necessary.
+
+::::{admonition} Security to {{serverless-full}} $$$serverless$$$
+:class: note
+
+{{es-serverless}} simplifies safe, secure communication between {{ls}} and {{es}}.
+
+Configure the [{{ls}} {{es}} output plugin](/reference/plugins-outputs-elasticsearch.md) to use [`cloud_id`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_id) and an [`api_key`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-api_key) to establish safe, secure communication between {{ls}} and {{es-serverless}}. No additional SSL configuration steps are needed.
+
+Configuration example:
+
+* `output {elasticsearch { cloud_id => "" api_key => "" } }`
+
+For more details, check out [Grant access using API keys](#ls-api-keys).
+
+::::
+
+
+::::{admonition} Security to hosted {{ess}} $$$hosted-ess$$$
+:class: note
+
+Our hosted {{ess}} on Elastic Cloud simplifies safe, secure communication between {{ls}} and {{es}}. When you configure the [{{ls}} {{es}} output plugin](/reference/plugins-outputs-elasticsearch.md) to use [`cloud_id`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_id) with either the [`cloud_auth` option](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-cloud_auth) or the [`api_key` option](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-api_key), no additional SSL configuration steps are needed.
+
+Configuration example:
+
+* `output {elasticsearch { cloud_id => "" cloud_auth => "" } }`
+* `output {elasticsearch { cloud_id => "" api_key => "" } }`
+
+For more details, check out [Grant access using API keys](#ls-api-keys) or [Sending data to Elastic Cloud (hosted Elasticsearch Service)](/reference/connecting-to-cloud.md).
+
+::::
+
+
+
+### Secure communication with an on-premise {{es}} cluster [es-security-onprem]
+
+If you are running {{es}} on your own hardware and using the Elasticsearch cluster’s default self-signed certificates, you need to complete a few more steps to establish secure communication between {{ls}} and {{es}}.
+
+You need to:
+
+* Copy the self-signed CA certificate from {{es}} and save it to {{ls}}.
+* Configure the elasticsearch-output plugin to use the certificate.
+
+These steps are not necessary if your cluster is using publicly trusted certificates.
+
+
+#### Copy and save the certificate [es-sec-copy-cert]
+
+By default an on-premise {{es}} cluster generates a self-signed CA and creates its own SSL certificates when it starts. Therefore {{ls}} needs its own copy of the self-signed CA from the {{es}} cluster in order for {{ls}} to validate the certificate presented by {{es}}.
+
+Copy the [self-signed CA certificate](docs-content://deploy-manage/deploy/self-managed/installing-elasticsearch.md#stack-security-certificates) from the {{es}} `config/certs` directory.
+
+Save it to a location that Logstash can access, such as `config/certs` on the {{ls}} instance.
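+
+For example, on a self-managed {{es}} node the default self-signed CA is typically named `http_ca.crt`. A sketch of the copy step (paths are assumptions; adjust them to your layout):
+
+```sh
+cp /path/to/elasticsearch/config/certs/http_ca.crt /etc/logstash/config/certs/ca.crt
+```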
+
+
+#### Configure the elasticsearch output [es-sec-plugin]
+
+Use the [`elasticsearch output`'s](/reference/plugins-outputs-elasticsearch.md) [`ssl_certificate_authorities` option](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_certificate_authorities) to point to the certificate’s location.
+
+**Example**
+
+```ruby
+output {
+ elasticsearch {
+    hosts => ["https://..."] <1>
+ ssl_certificate_authorities => ['/etc/logstash/config/certs/ca.crt'] <2>
+ }
+}
+```
+
+1. Note that the `hosts` url must begin with `https`
+2. Path to the {{ls}} copy of the {{es}} certificate
+
+
+For more information about establishing secure communication with {{es}}, see [security is on by default](docs-content://deploy-manage/deploy/self-managed/installing-elasticsearch.md).
+
+
+### Configuring Logstash to use basic authentication [ls-http-auth-basic]
+
+Logstash needs to be able to manage index templates, create indices, and write and delete documents in the indices it creates.
+
+To set up authentication credentials for Logstash:
+
+1. Use the **Management > Roles** UI in {{kib}} or the `role` API to create a `logstash_writer` role. For **cluster** privileges, add `manage_index_templates` and `monitor`. For **indices** privileges, add `write`, `create`, and `create_index`.
+
+ Add `manage_ilm` for cluster and `manage` and `manage_ilm` for indices if you plan to use [index lifecycle management](docs-content://manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md).
+
+ ```sh
+ POST _security/role/logstash_writer
+ {
+ "cluster": ["manage_index_templates", "monitor", "manage_ilm"], <1>
+ "indices": [
+ {
+ "names": [ "logstash-*" ], <2>
+ "privileges": ["write","create","create_index","manage","manage_ilm"] <3>
+ }
+ ]
+ }
+ ```
+
+ 1. The cluster needs the `manage_ilm` privilege if [index lifecycle management](docs-content://manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md) is enabled.
+ 2. If you use a custom Logstash index pattern, specify your custom pattern instead of the default `logstash-*` pattern.
+ 3. If [index lifecycle management](docs-content://manage-data/lifecycle/index-lifecycle-management/tutorial-automate-rollover.md) is enabled, the role requires the `manage` and `manage_ilm` privileges to load index lifecycle policies, create rollover aliases, and create and manage rollover indices.
+
+2. Create a `logstash_internal` user and assign it the `logstash_writer` role. You can create users from the **Management > Users** UI in {{kib}} or through the `user` API:
+
+ ```sh
+ POST _security/user/logstash_internal
+ {
+ "password" : "x-pack-test-password",
+ "roles" : [ "logstash_writer"],
+ "full_name" : "Internal Logstash User"
+ }
+ ```
+
+3. Configure Logstash to authenticate as the `logstash_internal` user you just created. You configure credentials separately for each of the {{es}} plugins in your Logstash `.conf` file. For example:
+
+ ```js
+ input {
+ elasticsearch {
+ ...
+ user => logstash_internal
+ password => x-pack-test-password
+ }
+ }
+ filter {
+ elasticsearch {
+ ...
+ user => logstash_internal
+ password => x-pack-test-password
+ }
+ }
+ output {
+ elasticsearch {
+ ...
+ user => logstash_internal
+ password => x-pack-test-password
+ }
+ }
+ ```
+
+
+
+### Granting access to the indices Logstash creates [ls-user-access]
+
+To access the indices Logstash creates, users need the `read` and `view_index_metadata` privileges:
+
+1. Create a `logstash_reader` role that has the `read` and `view_index_metadata` privileges for the Logstash indices. You can create roles from the **Management > Roles** UI in {{kib}} or through the `role` API:
+
+ ```sh
+ POST _security/role/logstash_reader
+ {
+ "cluster": ["manage_logstash_pipelines"],
+ "indices": [
+ {
+ "names": [ "logstash-*" ],
+ "privileges": ["read","view_index_metadata"]
+ }
+ ]
+ }
+ ```
+
+2. Assign your Logstash users the `logstash_reader` role. If the Logstash user will be using [centralized pipeline management](/reference/logstash-centralized-pipeline-management.md), also assign the `logstash_system` role. You can create and manage users from the **Management > Users** UI in {{kib}} or through the `user` API:
+
+ ```sh
+ POST _security/user/logstash_user
+ {
+ "password" : "x-pack-test-password",
+ "roles" : [ "logstash_reader", "logstash_system"], <1>
+ "full_name" : "Kibana User for Logstash"
+ }
+ ```
+
+ 1. `logstash_system` is a built-in role that provides the necessary permissions to check the availability of the supported features of {{es}} cluster.
+
+
+
+### Configuring Logstash to use TLS/SSL encryption [ls-http-ssl]
+
+If TLS encryption is enabled on an on-premise {{es}} cluster, you need to configure the `ssl` and `cacert` options in your Logstash `.conf` file:
+
+```js
+output {
+ elasticsearch {
+ ...
+ ssl => true
+ cacert => '/path/to/cert.pem' <1>
+ }
+}
+```
+
+1. The path to the local `.pem` file that contains the Certificate Authority’s certificate.
+
+
+::::{note}
+Hosted {{ess}} simplifies security. This configuration step is not necessary for hosted Elasticsearch Service on Elastic Cloud.
+::::
+
+
+
+### Configuring the {{es}} output to use PKI authentication [ls-http-auth-pki]
+
+The `elasticsearch` output supports PKI authentication. To use an X.509 client-certificate for authentication, you configure the `keystore` and `keystore_password` options in your Logstash `.conf` file:
+
+```js
+output {
+ elasticsearch {
+ ...
+    keystore => "/path/to/keystore.jks"
+    keystore_password => "realpassword"
+    truststore => "/path/to/truststore.jks" <1>
+    truststore_password => "realpassword"
+ }
+}
+```
+
+1. If you use a separate truststore, the truststore path and password are also required.
+
+
+
+### Configuring credentials for {{ls}} monitoring [ls-monitoring-user]
+
+If you want to monitor your Logstash instance with {{stack-monitor-features}}, and store the monitoring data in a secured {{es}} cluster, you must configure Logstash with a username and password for a user with the appropriate permissions.
+
+The {{security-features}} come preconfigured with a [`logstash_system` built-in user](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md) for this purpose. This user has the minimum permissions necessary for the monitoring function, and *should not* be used for any other purpose - it is specifically *not intended* for use within a Logstash pipeline.
+
+By default, the `logstash_system` user does not have a password. The user will not be enabled until you set a password. See [Setting built-in user passwords](docs-content://deploy-manage/users-roles/cluster-or-deployment-auth/built-in-users.md#set-built-in-user-passwords).
+
+Then configure the user and password in the `logstash.yml` configuration file:
+
+```yaml
+xpack.monitoring.elasticsearch.username: logstash_system
+xpack.monitoring.elasticsearch.password: t0p.s3cr3t
+```
+
+If you initially installed an older version of {{xpack}} and then upgraded, the `logstash_system` user may have defaulted to `disabled` for security reasons. You can enable the user through the `user` API:
+
+```console
+PUT _security/user/logstash_system/_enable
+```
+
+
+### Configuring credentials for Centralized Pipeline Management [ls-pipeline-management-user]
+
+If you plan to use Logstash [centralized pipeline management](/reference/logstash-centralized-pipeline-management.md), you need to configure the username and password that Logstash uses for managing configurations.
+
+You configure the user and password in the `logstash.yml` configuration file:
+
+```yaml
+xpack.management.elasticsearch.username: logstash_admin_user <1>
+xpack.management.elasticsearch.password: t0p.s3cr3t
+```
+
+1. The user you specify here must have the built-in `logstash_admin` role as well as the `logstash_writer` role that you created earlier.
+
+
+
+### Grant access using API keys [ls-api-keys]
+
+Instead of using usernames and passwords, you can use API keys to grant access to {{es}} resources. You can set API keys to expire at a certain time, and you can explicitly invalidate them. Any user with the `manage_api_key` or `manage_own_api_key` cluster privilege can create API keys.
+
+Tips for creating API keys:
+
+* API keys are tied to the cluster they are created in. If you are sending output to different clusters, be sure to create the correct kind of API key.
+* {{ls}} can send both collected data and monitoring information to {{es}}. If you are sending both to the same cluster, you can use the same API key. For different clusters, you need an API key per cluster.
+* A single cluster can share a key for ingestion and monitoring purposes.
+* A production cluster and a monitoring cluster require separate keys.
+
+::::{note}
+For security reasons, we recommend using a unique API key per {{ls}} instance. You can create as many API keys per user as necessary.
+::::
+
+
+
+#### Create an API key [ls-create-api-key]
+
+You can create API keys using either the [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) or the [Kibana UI](docs-content://deploy-manage/api-keys/elasticsearch-api-keys.md). This section walks you through creating an API key using the [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). The privileges needed are the same for either approach.
+
+Here is an example that shows how to create an API key for publishing to {{es}} using the [Elasticsearch output plugin](/reference/plugins-outputs-elasticsearch.md).
+
+```console
+POST /_security/api_key
+{
+ "name": "logstash_host001", <1>
+ "role_descriptors": {
+ "logstash_writer": { <2>
+ "cluster": ["monitor", "manage_ilm", "read_ilm"],
+ "index": [
+ {
+ "names": ["logstash-*"],
+ "privileges": ["view_index_metadata", "create_doc"]
+ }
+ ]
+ }
+ }
+}
+```
+
+1. Name of the API key
+2. Granted privileges
+
+
+The return value should look similar to this:
+
+```console-result
+{
+ "id":"TiNAGG4BaaMdaH1tRfuU", <1>
+ "name":"logstash_host001",
+ "api_key":"KnR6yE41RrSowb0kQ0HWoA" <2>
+}
+```
+
+1. Unique id for this API key
+2. Generated API key
+
+
+
+##### Create an API key for publishing [ls-api-key-publish]
+
+You’re in luck! The example we used in the [Create an API key](#ls-create-api-key) section creates an API key for publishing to {{es}} using the [Elasticsearch output plugin](/reference/plugins-outputs-elasticsearch.md).
+
+Here’s an example using the API key in your [Elasticsearch output plugin](/reference/plugins-outputs-elasticsearch.md) configuration.
+
+```ruby
+output {
+ elasticsearch {
+ api_key => "TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA" <1>
+ }
+}
+```
+
+1. Format is `id:api_key` (as returned by [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key))
+
+
+
+##### Create an API key for reading [ls-api-key-input]
+
+Creating an API key to use for reading data from {{es}} is similar to creating an API key for publishing described earlier. You can use the example in the [Create an API key](#ls-create-api-key) section, granting the appropriate privileges.
+
+Here’s an example using the API key in your [Elasticsearch inputs plugin](/reference/plugins-inputs-elasticsearch.md) configuration.
+
+```ruby
+input {
+ elasticsearch {
+ "api_key" => "TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA" <1>
+ }
+}
+```
+
+1. Format is `id:api_key` (as returned by [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key))
+
+
+
+##### Create an API key for filtering [ls-api-key-filter]
+
+Creating an API key to use for processing data from {{es}} is similar to creating an API key for publishing described earlier. You can use the example in the [Create an API key](#ls-create-api-key) section, granting the appropriate privileges.
+
+Here’s an example using the API key in your [Elasticsearch filter plugin](/reference/plugins-filters-elasticsearch.md) configuration.
+
+```ruby
+filter {
+ elasticsearch {
+ api_key => "TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA" <1>
+ }
+}
+```
+
+1. Format is `id:api_key` (as returned by [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key))
+
+
+
+##### Create an API key for monitoring [ls-api-key-monitor]
+
+To create an API key to use for sending monitoring data to {{es}}, use the [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). For example:
+
+```console
+POST /_security/api_key
+{
+ "name": "logstash_host001", <1>
+ "role_descriptors": {
+ "logstash_monitoring": { <2>
+ "cluster": ["monitor"],
+ "index": [
+ {
+ "names": [".monitoring-ls-*"],
+ "privileges": ["create_index", "create"]
+ }
+ ]
+ }
+ }
+}
+```
+
+1. Name of the API key
+2. Granted privileges
+
+
+The return value should look similar to this:
+
+```console-result
+{
+ "id":"TiNAGG4BaaMdaH1tRfuU", <1>
+ "name":"logstash_host001",
+ "api_key":"KnR6yE41RrSowb0kQ0HWoA" <2>
+}
+```
+
+1. Unique id for this API key
+2. Generated API key
+
+
+Now you can use this API key in your logstash.yml configuration file:
+
+```yaml
+xpack.monitoring.elasticsearch.api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1>
+```
+
+1. Format is `id:api_key` (as returned by [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key))
+
+
+
+##### Create an API key for central management [ls-api-key-man]
+
+To create an API key to use for central management, use the [Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key). For example:
+
+```console
+POST /_security/api_key
+{
+ "name": "logstash_host001", <1>
+ "role_descriptors": {
+ "logstash_monitoring": { <2>
+ "cluster": ["monitor", "manage_logstash_pipelines"]
+ }
+ }
+}
+```
+
+1. Name of the API key
+2. Granted privileges
+
+
+The return value should look similar to this:
+
+```console-result
+{
+ "id":"TiNAGG4BaaMdaH1tRfuU", <1>
+ "name":"logstash_host001",
+ "api_key":"KnR6yE41RrSowb0kQ0HWoA" <2>
+}
+```
+
+1. Unique id for this API key
+2. Generated API key
+
+
+Now you can use this API key in your logstash.yml configuration file:
+
+```yaml
+xpack.management.elasticsearch.api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA <1>
+```
+
+1. Format is `id:api_key` (as returned by [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key))
+
+
+
+#### Learn more about API keys [learn-more-api-keys]
+
+See the {{es}} API key documentation for more information:
+
+* [Create API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key)
+* [Get API key information](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-api-key)
+* [Invalidate API key](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-invalidate-api-key)
+
+See [API Keys](docs-content://deploy-manage/api-keys/elasticsearch-api-keys.md) for info on managing API keys through {{kib}}.
diff --git a/docs/reference/serverless-monitoring-with-elastic-agent.md b/docs/reference/serverless-monitoring-with-elastic-agent.md
new file mode 100644
index 000000000..2a4f613e3
--- /dev/null
+++ b/docs/reference/serverless-monitoring-with-elastic-agent.md
@@ -0,0 +1,68 @@
+---
+navigation_title: "Collect monitoring data for dashboards ({{serverless-short}} )"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/serverless-monitoring-with-elastic-agent.html
+---
+
+# Collect {{ls}} monitoring data for dashboards ({{serverless-short}}) [serverless-monitoring-with-elastic-agent]
+
+
+{{ls}} monitoring is available on {{serverless-full}} through the [{{ls}} Integration](https://github.com/elastic/integrations/blob/main/packages/logstash/_dev/build/docs/README.md) in [Elastic Observability](docs-content://solutions/observability.md). {{agent}} collects monitoring data from your {{ls}} instance, sends it directly to {{serverless-full}}, and shows the data in {{ls}} dashboards.
+
+You’ll need to have an [Elastic Observability](docs-content://solutions/observability.md) project. We’ll provide steps to help you create one.
+
+**Prerequisite**
+
+::::{dropdown} Disable default collection of {{ls}} monitoring metrics
+:name: disable-default-svrless
+
+Set `monitoring.enabled` to `false` in logstash.yml to disable default collection:
+
+```yaml
+monitoring.enabled: false
+```
+
+::::
+
+
+
+## Add and configure the {{ls}} integration [setup-project]
+
+**Add the {{ls}} integration**
+
+1. Log in to your [cloud.elastic.co](https://cloud.elastic.co/) account and create an Observability serverless project.
+2. Select **Get Started** from the main menu.
+3. Select **Start exploring** (near the bottom of the page).
+4. On the **Integrations** page, search for **{{ls}}** and select it to see details.
+5. Click **Add {{ls}}**.
+6. Follow the instructions to install {{agent}} and add the {{ls}} integration.
+
+For more info, check out the [Elastic Observability](docs-content://solutions/observability.md) docs.
+
+**Configure the integration to collect logs**
+
+* Make sure that **Logs** is ON if you want to collect logs from your {{ls}} instance. Check the settings to be sure that they are configured correctly.
+* Modify the log paths to match your {{ls}} environment.
+
+**Configure the integration to collect metrics**
+
+* Make sure that **Metrics (Stack Monitoring)** is OFF, and that **Metrics (Technical Preview)** is ON.
+* Set the {{ls}} URL to point to your {{ls}} instance. By default, the integration collects {{ls}} monitoring metrics from `https://localhost:9600`. If that host and port number are not correct, update the `Logstash URL` setting. If you configured {{ls}} to use encrypted communications and/or a username and password, you must access it using HTTPS. Expand the **Advanced Settings** options, and fill in the appropriate values for your {{ls}} instance.
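+
+If the {{agent}} that collects the metrics runs on a different host, the {{ls}} API must listen on an interface that host can reach. As a minimal sketch (assuming the default port, and that binding to all interfaces is acceptable in your environment), you could set this in `logstash.yml`:
+
+```yaml
+# Make the Logstash monitoring API reachable from other hosts
+api.http.host: 0.0.0.0
+api.http.port: 9600
+```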
+
+
+## View assets [view-assets-esvrless]
+
+1. Go to **Project settings → Integrations** to see your **Installed integrations**.
+2. Select the {{ls}} integration, and then select **Assets** to access dashboards for the {{ls}} integration.
+
+
+## Monitor {{ls}} logs and metrics [view-data-svrless]
+
+From the list of assets, open the **[Metrics {{ls}}] {{ls}} overview** dashboard to view overall performance. Then follow the navigation panel to further drill down into {{ls}} performance.
+
+:::{image} ../images/integration-dashboard-overview.png
+:alt: The {{ls}} Overview dashboard in {{kib}} with various metrics from your monitored {{ls}}
+:class: screenshot
+:::
+
+You can hover over any visualization to adjust its settings, or click the **Edit** button to make changes to the dashboard. To learn more, refer to [Dashboard and visualizations](docs-content://explore-analyze/dashboards.md).
diff --git a/docs/reference/setting-up-running-logstash.md b/docs/reference/setting-up-running-logstash.md
new file mode 100644
index 000000000..1fc32482b
--- /dev/null
+++ b/docs/reference/setting-up-running-logstash.md
@@ -0,0 +1,36 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/setup-logstash.html
+---
+
+# Setting up and running Logstash [setup-logstash]
+
+Before reading this section, see [Installing Logstash](/reference/installing-logstash.md) for basic installation instructions to get you started.
+
+This section includes additional information on how to set up and run Logstash, including:
+
+* [Logstash Directory Layout](/reference/dir-layout.md)
+* [Logstash Configuration Files](/reference/config-setting-files.md)
+* [logstash.yml](/reference/logstash-settings-file.md)
+* [Secrets keystore for secure settings](/reference/keystore.md)
+* [Running Logstash from the Command Line](/reference/running-logstash-command-line.md)
+* [Running Logstash as a Service on Debian or RPM](/reference/running-logstash.md)
+* [Running Logstash on Docker](/reference/docker.md)
+* [Configuring Logstash for Docker](/reference/docker-config.md)
+* [Running Logstash on Kubernetes](/reference/running-logstash-kubernetes.md)
+* [Running Logstash on Windows](/reference/running-logstash-windows.md)
+* [Logging](/reference/logging.md)
+* [Shutting Down Logstash](/reference/shutdown.md)
+
diff --git a/docs/static/shutdown.asciidoc b/docs/reference/shutdown.md
similarity index 56%
rename from docs/static/shutdown.asciidoc
rename to docs/reference/shutdown.md
index 18835c066..53a191640 100644
--- a/docs/static/shutdown.asciidoc
+++ b/docs/reference/shutdown.md
@@ -1,28 +1,28 @@
-[[shutdown]]
-=== Shutting Down Logstash
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/shutdown.html
+---
-If you're running {ls} as a service, use one of the following commands to stop it:
+# Shutting Down Logstash [shutdown]
+
+If you’re running {{ls}} as a service, use one of the following commands to stop it:
* On systemd, use:
-+
-[source,shell]
-----
-systemctl stop logstash
-----
-If you're running {ls} directly in the console on a POSIX system, you can stop
-it by sending SIGTERM to the {ls} process. For example:
+ ```shell
+ systemctl stop logstash
+ ```
-[source,shell]
-----
+
+If you’re running {{ls}} directly in the console on a POSIX system, you can stop it by sending SIGTERM to the {{ls}} process. For example:
+
+```shell
kill -TERM {logstash_pid}
-----
+```
-Alternatively, enter *Ctrl-C* in the console.
+Alternatively, enter **Ctrl-C** in the console.
-
-
-==== What Happens During a Controlled Shutdown?
+## What Happens During a Controlled Shutdown? [_what_happens_during_a_controlled_shutdown]
When you attempt to shut down a running Logstash instance, Logstash performs several steps before it can safely shut down. It must:
@@ -33,32 +33,28 @@ When you attempt to shut down a running Logstash instance, Logstash performs sev
The following conditions affect the shutdown process:
* An input plugin receiving data at a slow pace.
-* A slow filter, like a Ruby filter executing `sleep(10000)` or an Elasticsearch filter that is executing a very heavy
-query.
+* A slow filter, like a Ruby filter executing `sleep(10000)` or an Elasticsearch filter that is executing a very heavy query.
* A disconnected output plugin that is waiting to reconnect to flush in-flight events.
These situations make the duration and success of the shutdown process unpredictable.
-Logstash has a stall detection mechanism that analyzes the behavior of the pipeline and plugins during shutdown.
-This mechanism produces periodic information about the count of inflight events in internal queues and a list of busy
-worker threads.
+Logstash has a stall detection mechanism that analyzes the behavior of the pipeline and plugins during shutdown. This mechanism produces periodic information about the count of inflight events in internal queues and a list of busy worker threads.
-To enable Logstash to forcibly terminate in the case of a stalled shutdown, use the `--pipeline.unsafe_shutdown` flag when
-you start Logstash.
+To enable Logstash to forcibly terminate in the case of a stalled shutdown, use the `--pipeline.unsafe_shutdown` flag when you start Logstash.
-WARNING: Unsafe shutdowns, force-kills of the Logstash process, or crashes of the Logstash process for any other reason may result in data loss (unless you've
-enabled Logstash to use <>). Shut down
-Logstash safely whenever possible.
+::::{warning}
+Unsafe shutdowns, force-kills of the Logstash process, or crashes of the Logstash process for any other reason may result in data loss (unless you’ve enabled Logstash to use [persistent queues](/reference/persistent-queues.md)). Shut down Logstash safely whenever possible.
+::::
-[[shutdown-stall-example]]
-==== Stall Detection Example
-In this example, slow filter execution prevents the pipeline from performing a clean shutdown. Because Logstash is
-started with the `--pipeline.unsafe_shutdown` flag, the shutdown results in the loss of 20 events.
-========
-[source,shell]
-bin/logstash -e 'input { generator { } } filter { ruby { code => "sleep 10000" } }
+## Stall Detection Example [shutdown-stall-example]
+
+In this example, slow filter execution prevents the pipeline from performing a clean shutdown. Because Logstash is started with the `--pipeline.unsafe_shutdown` flag, the shutdown results in the loss of 20 events.
+
+::::{admonition}
+```shell
+bin/logstash -e 'input { generator { } } filter { ruby { code => "sleep 10000" } }
output { stdout { codec => dots } }' -w 1 --pipeline.unsafe_shutdown
Pipeline main started
^CSIGINT received. Shutting down the agent. {:level=>:warn}
@@ -66,18 +62,23 @@ stopping pipeline {:id=>"main", :level=>:warn}
Received shutdown signal, but pipeline is still waiting for in-flight events
to be processed. Sending another ^C will force quit Logstash, but this may cause
data loss. {:level=>:warn}
-{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
-{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
+{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
+{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
"current_call"=>"(ruby filter code):1:in `sleep'"}]}} {:level=>:warn}
-The shutdown process appears to be stalled due to busy or blocked plugins.
+The shutdown process appears to be stalled due to busy or blocked plugins.
Check the logs for more information. {:level=>:error}
-{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
-{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
+{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
+{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
"current_call"=>"(ruby filter code):1:in `sleep'"}]}} {:level=>:warn}
-{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
-{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
+{"inflight_count"=>125, "stalling_thread_info"=>{["LogStash::Filters::Ruby",
+{"code"=>"sleep 10000"}]=>[{"thread_id"=>19, "name"=>"[main]>worker0",
"current_call"=>"(ruby filter code):1:in `sleep'"}]}} {:level=>:warn}
Forcefully quitting logstash.. {:level=>:fatal}
-========
+```
+
+::::
+
+
+When `--pipeline.unsafe_shutdown` isn’t enabled, Logstash continues to run and produce these reports periodically.
+
-When `--pipeline.unsafe_shutdown` isn't enabled, Logstash continues to run and produce these reports periodically.
diff --git a/docs/reference/tips-best-practices.md b/docs/reference/tips-best-practices.md
new file mode 100644
index 000000000..f6003263f
--- /dev/null
+++ b/docs/reference/tips-best-practices.md
@@ -0,0 +1,118 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/tips.html
+---
+
+# Tips and best practices [tips]
+
+We are adding more tips and best practices, so please check back soon. If you have something to add, please:
+
+* create an issue at [https://github.com/elastic/logstash/issues](https://github.com/elastic/logstash/issues), or
+* create a pull request with your proposed changes at [https://github.com/elastic/logstash](https://github.com/elastic/logstash).
+
+Also check out the [Logstash discussion forum](https://discuss.elastic.co/c/logstash).
+
+
+## Command line [tip-cli]
+
+
+### Shell commands on Windows OS [tip-windows-cli]
+
+Command line examples often show single quotes. On Windows systems, replace a single quote `'` with a double quote `"`.
+
+**Example**
+
+Instead of:
+
+```shell
+bin/logstash -e 'input { stdin { } } output { stdout {} }'
+```
+
+Use this format on Windows systems:
+
+```shell
+bin\logstash -e "input { stdin { } } output { stdout {} }"
+```
+
+
+## Pipelines [tip-pipelines]
+
+
+### Pipeline management [tip-pipeline-mgmt]
+
+You can manage pipelines in a {{ls}} instance using either local pipeline configurations or [centralized pipeline management](/reference/configuring-centralized-pipelines.md) in {{kib}}.
+
+After you configure Logstash to use centralized pipeline management, you can no longer specify local pipeline configurations. The `pipelines.yml` file and settings such as `path.config` and `config.string` are inactive when centralized pipeline management is enabled.
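+
+As a rough sketch of what enabling centralized pipeline management looks like in `logstash.yml` (the pipeline IDs and {{es}} endpoint below are placeholders):
+
+```yaml
+xpack.management.enabled: true
+xpack.management.pipeline.id: ["my-pipeline-1", "my-pipeline-2"]
+xpack.management.elasticsearch.hosts: ["https://es-host:9200"]
+```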
+
+
+## Tips using filters [tip-filters]
+
+
+### Check to see if a boolean field exists [tip-check-field]
+
+You can use the mutate filter to check whether a boolean field exists.
+
+{{ls}} supports `[@metadata]` fields, which are not visible to output plugins and exist only during the filter stage. You can combine `[@metadata]` fields with the mutate filter to check whether a field exists.
+
+```ruby
+filter {
+ mutate {
+ # we use a "temporal" field with a predefined arbitrary known value that
+ # lives only in filtering stage.
+ add_field => { "[@metadata][test_field_check]" => "a null value" }
+
+ # we copy the field of interest into that temporary field.
+ # If the field doesn't exist, the copy is not executed.
+ copy => { "test_field" => "[@metadata][test_field_check]" }
+ }
+
+
+ # now we know that if test_field didn't exist, our temporary field still has
+ # the initial arbitrary value
+ if [@metadata][test_field_check] == "a null value" {
+ # logic to execute when [test_field] did not exist
+ mutate { add_field => { "field_did_not_exist" => true }}
+ } else {
+ # logic to execute when [test_field] existed
+ mutate { add_field => { "field_did_exist" => true }}
+ }
+}
+```
+
+
+## Kafka [tip-kafka]
+
+
+### Kafka settings [tip-kafka-settings]
+
+
+#### Partitions per topic [tip-kafka-partitions]
+
+"How many partitions should I use per topic?"
+
+Use at least as many partitions as the number of {{ls}} nodes multiplied by the number of consumer threads per node.
+
+Better yet, use a multiple of the above number. Increasing the number of partitions for an existing topic is extremely complicated. Partitions have a very low overhead. Using 5 to 10 times the number of partitions suggested by the first point is generally fine, so long as the overall partition count does not exceed 2000.
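+
+As an illustration (numbers chosen only for the example): with 3 {{ls}} nodes each running 4 consumer threads, the minimum is 12 partitions, and using 5 to 10 times that (60 to 120 partitions) still stays well within the limits above.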
+
+Err on the side of over-partitioning, but try not to exceed 1000 partitions in total.
+
+
+#### Consumer threads [tip-kafka-threads]
+
+"How many consumer threads should I configure?"
+
+Lower values tend to be more efficient and have less memory overhead. Try a value of `1` then iterate your way up. The value should in general be lower than the number of pipeline workers. Values larger than 4 rarely result in performance improvement.
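+
+For example, a minimal Kafka input sketch that sets this explicitly (the broker address, topic, and group ID below are placeholders):
+
+```ruby
+input {
+  kafka {
+    bootstrap_servers => "kafka:9092"
+    topics => ["filebeat"]
+    group_id => "logstash"
+    consumer_threads => 1  # start low and iterate upward while measuring throughput
+  }
+}
+```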
+
+
+### Kafka input and persistent queue (PQ) [tip-kafka-pq-persist]
+
+
+#### Kafka offset commits [tip-kafka-offset-commit]
+
+"Does Kafka Input commit offsets only after the event has been safely persisted to the PQ?"
+
+"Does Kafa Input commit offsets only for events that have passed the pipeline fully?"
+
+No, we can’t make that guarantee. Offsets are committed to Kafka periodically. If writes to the PQ are slow or blocked, offsets for events that haven’t safely reached the PQ can be committed.
+
+
diff --git a/docs/reference/toc.yml b/docs/reference/toc.yml
new file mode 100644
index 000000000..ac26bb767
--- /dev/null
+++ b/docs/reference/toc.yml
@@ -0,0 +1,330 @@
+project: 'Logstash reference'
+toc:
+ - file: index.md
+ - file: getting-started-with-logstash.md
+ children:
+ - file: installing-logstash.md
+ - file: first-event.md
+ - file: advanced-pipeline.md
+ - file: multiple-input-output-plugins.md
+ - file: how-logstash-works.md
+ children:
+ - file: execution-model.md
+ - file: ecs-ls.md
+ - file: processing.md
+ - file: setting-up-running-logstash.md
+ children:
+ - file: dir-layout.md
+ - file: config-setting-files.md
+ - file: logstash-settings-file.md
+ - file: keystore.md
+ - file: running-logstash-command-line.md
+ - file: running-logstash.md
+ - file: docker.md
+ - file: docker-config.md
+ - file: running-logstash-kubernetes.md
+ - file: running-logstash-windows.md
+ - file: logging.md
+ - file: shutdown.md
+ - file: upgrading-logstash.md
+ - file: upgrading-using-package-managers.md
+ - file: upgrading-using-direct-download.md
+ - file: upgrading-minor-versions.md
+ - file: upgrading-logstash-9-0.md
+ - file: creating-logstash-pipeline.md
+ children:
+ - file: configuration-file-structure.md
+ - file: event-dependent-configuration.md
+ - file: environment-variables.md
+ - file: connecting-to-cloud.md
+ - file: config-examples.md
+ - file: secure-connection.md
+ - file: advanced-logstash-configurations.md
+ children:
+ - file: multiple-pipelines.md
+ - file: pipeline-to-pipeline.md
+ - file: reloading-config.md
+ - file: multiline.md
+ - file: glob-support.md
+ # TO DO: Doesn't exist on master
+ # - file: ingest-converter.md
+ - file: logstash-to-logstash-communications.md
+ children:
+ - file: ls-to-ls-lumberjack.md
+ - file: ls-to-ls-http.md
+ - file: ls-to-ls-native.md
+ - file: managing-logstash.md
+ children:
+ - file: logstash-centralized-pipeline-management.md
+ - file: configuring-centralized-pipelines.md
+ - file: using-logstash-with-elastic-integrations.md
+ # TO DO: Not migrated
+ # - file: working-with-logstash-modules.md
+ # children:
+ # TO DO: Doesn't exist on master
+ # - file: arcsight-module.md
+ # TO DO: Doesn't exist on master
+ # - file: netflow-module.md
+ # TO DO: Doesn't exist on master
+ # - file: azure-module.md
+ - file: working-with-filebeat-modules.md
+ children:
+ - file: use-ingest-pipelines.md
+ - file: use-filebeat-modules-kafka.md
+ - file: working-with-winlogbeat-modules.md
+ - file: queues-data-resiliency.md
+ children:
+ - file: memory-queue.md
+ - file: persistent-queues.md
+ - file: dead-letter-queues.md
+ - file: transforming-data.md
+ children:
+ - file: core-operations.md
+ - file: data-deserialization.md
+ - file: field-extraction.md
+ - file: lookup-enrichment.md
+ - file: deploying-scaling-logstash.md
+ - file: managing-geoip-databases.md
+ children:
+ - file: logstash-geoip-database-management.md
+ - file: configuring-geoip-database-management.md
+ - file: performance-tuning.md
+ children:
+ - file: performance-troubleshooting.md
+ - file: tuning-logstash.md
+ - file: monitoring-logstash-with-elastic-agent.md
+ children:
+ - file: dashboard-monitoring-with-elastic-agent.md
+ - file: serverless-monitoring-with-elastic-agent.md
+ - file: monitoring-with-elastic-agent.md
+ - file: monitoring-logstash-legacy.md
+ children:
+ - file: monitoring-with-metricbeat.md
+ - file: monitoring-internal-collection-legacy.md
+ - file: logstash-monitoring-ui.md
+ - file: logstash-pipeline-viewer.md
+ - file: monitoring-troubleshooting.md
+ - file: monitoring-logstash.md
+ - file: working-with-plugins.md
+ children:
+ - file: plugin-concepts.md
+ - file: plugin-generator.md
+ - file: offline-plugins.md
+ - file: private-rubygem.md
+ - file: event-api.md
+ - file: integration-plugins.md
+ children:
+ - file: plugins-integrations-aws.md
+ - file: plugins-integrations-elastic_enterprise_search.md
+ - file: plugins-integrations-jdbc.md
+ - file: plugins-integrations-kafka.md
+ - file: plugins-integrations-logstash.md
+ - file: plugins-integrations-rabbitmq.md
+ - file: plugins-integrations-snmp.md
+ - file: input-plugins.md
+ children:
+ - file: plugins-inputs-azure_event_hubs.md
+ - file: plugins-inputs-beats.md
+ - file: plugins-inputs-cloudwatch.md
+ - file: plugins-inputs-couchdb_changes.md
+ - file: plugins-inputs-dead_letter_queue.md
+ - file: plugins-inputs-elastic_agent.md
+ - file: plugins-inputs-elastic_serverless_forwarder.md
+ - file: plugins-inputs-elasticsearch.md
+ - file: plugins-inputs-exec.md
+ - file: plugins-inputs-file.md
+ - file: plugins-inputs-ganglia.md
+ - file: plugins-inputs-gelf.md
+ - file: plugins-inputs-generator.md
+ - file: plugins-inputs-github.md
+ - file: plugins-inputs-google_cloud_storage.md
+ - file: plugins-inputs-google_pubsub.md
+ - file: plugins-inputs-graphite.md
+ - file: plugins-inputs-heartbeat.md
+ - file: plugins-inputs-http.md
+ - file: plugins-inputs-http_poller.md
+ - file: plugins-inputs-imap.md
+ - file: plugins-inputs-irc.md
+ - file: plugins-inputs-java_generator.md
+ - file: plugins-inputs-java_stdin.md
+ - file: plugins-inputs-jdbc.md
+ - file: plugins-inputs-jms.md
+ - file: plugins-inputs-jmx.md
+ - file: plugins-inputs-kafka.md
+ - file: plugins-inputs-kinesis.md
+ - file: plugins-inputs-logstash.md
+ - file: plugins-inputs-log4j.md
+ - file: plugins-inputs-lumberjack.md
+ - file: plugins-inputs-meetup.md
+ - file: plugins-inputs-pipe.md
+ - file: plugins-inputs-puppet_facter.md
+ - file: plugins-inputs-rabbitmq.md
+ - file: plugins-inputs-redis.md
+ - file: plugins-inputs-relp.md
+ - file: plugins-inputs-rss.md
+ - file: plugins-inputs-s3.md
+ - file: plugins-inputs-s3-sns-sqs.md
+ - file: plugins-inputs-salesforce.md
+ - file: plugins-inputs-snmp.md
+ - file: plugins-inputs-snmptrap.md
+ - file: plugins-inputs-sqlite.md
+ - file: plugins-inputs-sqs.md
+ - file: plugins-inputs-stdin.md
+ - file: plugins-inputs-stomp.md
+ - file: plugins-inputs-syslog.md
+ - file: plugins-inputs-tcp.md
+ - file: plugins-inputs-twitter.md
+ - file: plugins-inputs-udp.md
+ - file: plugins-inputs-unix.md
+ - file: plugins-inputs-varnishlog.md
+ - file: plugins-inputs-websocket.md
+ - file: plugins-inputs-wmi.md
+ - file: plugins-inputs-xmpp.md
+ - file: output-plugins.md
+ children:
+ - file: plugins-outputs-boundary.md
+ - file: plugins-outputs-circonus.md
+ - file: plugins-outputs-cloudwatch.md
+ - file: plugins-outputs-csv.md
+ - file: plugins-outputs-datadog.md
+ - file: plugins-outputs-datadog_metrics.md
+ - file: plugins-outputs-dynatrace.md
+ - file: plugins-outputs-elastic_app_search.md
+ - file: plugins-outputs-elastic_workplace_search.md
+ - file: plugins-outputs-elasticsearch.md
+ - file: plugins-outputs-email.md
+ - file: plugins-outputs-exec.md
+ - file: plugins-outputs-file.md
+ - file: plugins-outputs-ganglia.md
+ - file: plugins-outputs-gelf.md
+ - file: plugins-outputs-google_bigquery.md
+ - file: plugins-outputs-google_cloud_storage.md
+ - file: plugins-outputs-google_pubsub.md
+ - file: plugins-outputs-graphite.md
+ - file: plugins-outputs-graphtastic.md
+ - file: plugins-outputs-http.md
+ - file: plugins-outputs-influxdb.md
+ - file: plugins-outputs-irc.md
+ - file: plugins-outputs-java_stdout.md
+ - file: plugins-outputs-juggernaut.md
+ - file: plugins-outputs-kafka.md
+ - file: plugins-outputs-librato.md
+ - file: plugins-outputs-logstash.md
+ - file: plugins-outputs-loggly.md
+ - file: plugins-outputs-lumberjack.md
+ - file: plugins-outputs-metriccatcher.md
+ - file: plugins-outputs-mongodb.md
+ - file: plugins-outputs-nagios.md
+ - file: plugins-outputs-nagios_nsca.md
+ - file: plugins-outputs-opentsdb.md
+ - file: plugins-outputs-pagerduty.md
+ - file: plugins-outputs-pipe.md
+ - file: plugins-outputs-rabbitmq.md
+ - file: plugins-outputs-redis.md
+ - file: plugins-outputs-redmine.md
+ - file: plugins-outputs-riak.md
+ - file: plugins-outputs-riemann.md
+ - file: plugins-outputs-s3.md
+ - file: plugins-outputs-sink.md
+ - file: plugins-outputs-sns.md
+ - file: plugins-outputs-solr_http.md
+ - file: plugins-outputs-sqs.md
+ - file: plugins-outputs-statsd.md
+ - file: plugins-outputs-stdout.md
+ - file: plugins-outputs-stomp.md
+ - file: plugins-outputs-syslog.md
+ - file: plugins-outputs-tcp.md
+ - file: plugins-outputs-timber.md
+ - file: plugins-outputs-udp.md
+ - file: plugins-outputs-webhdfs.md
+ - file: plugins-outputs-websocket.md
+ - file: plugins-outputs-xmpp.md
+ - file: plugins-outputs-zabbix.md
+ - file: filter-plugins.md
+ children:
+ - file: plugins-filters-age.md
+ - file: plugins-filters-aggregate.md
+ - file: plugins-filters-alter.md
+ - file: plugins-filters-bytes.md
+ - file: plugins-filters-cidr.md
+ - file: plugins-filters-cipher.md
+ - file: plugins-filters-clone.md
+ - file: plugins-filters-csv.md
+ - file: plugins-filters-date.md
+ - file: plugins-filters-de_dot.md
+ - file: plugins-filters-dissect.md
+ - file: plugins-filters-dns.md
+ - file: plugins-filters-drop.md
+ - file: plugins-filters-elapsed.md
+ - file: plugins-filters-elastic_integration.md
+ - file: plugins-filters-elasticsearch.md
+ - file: plugins-filters-environment.md
+ - file: plugins-filters-extractnumbers.md
+ - file: plugins-filters-fingerprint.md
+ - file: plugins-filters-geoip.md
+ - file: plugins-filters-grok.md
+ - file: plugins-filters-http.md
+ - file: plugins-filters-i18n.md
+ - file: plugins-filters-java_uuid.md
+ - file: plugins-filters-jdbc_static.md
+ - file: plugins-filters-jdbc_streaming.md
+ - file: plugins-filters-json.md
+ - file: plugins-filters-json_encode.md
+ - file: plugins-filters-kv.md
+ - file: plugins-filters-memcached.md
+ - file: plugins-filters-metricize.md
+ - file: plugins-filters-metrics.md
+ - file: plugins-filters-mutate.md
+ - file: plugins-filters-prune.md
+ - file: plugins-filters-range.md
+ - file: plugins-filters-ruby.md
+ - file: plugins-filters-sleep.md
+ - file: plugins-filters-split.md
+ - file: plugins-filters-syslog_pri.md
+ - file: plugins-filters-threats_classifier.md
+ - file: plugins-filters-throttle.md
+ - file: plugins-filters-tld.md
+ - file: plugins-filters-translate.md
+ - file: plugins-filters-truncate.md
+ - file: plugins-filters-urldecode.md
+ - file: plugins-filters-useragent.md
+ - file: plugins-filters-uuid.md
+ - file: plugins-filters-wurfl_device_detection.md
+ - file: plugins-filters-xml.md
+ - file: codec-plugins.md
+ children:
+ - file: plugins-codecs-avro.md
+ - file: plugins-codecs-cef.md
+ - file: plugins-codecs-cloudfront.md
+ - file: plugins-codecs-cloudtrail.md
+ - file: plugins-codecs-collectd.md
+ - file: plugins-codecs-csv.md
+ - file: plugins-codecs-dots.md
+ - file: plugins-codecs-edn.md
+ - file: plugins-codecs-edn_lines.md
+ - file: plugins-codecs-es_bulk.md
+ - file: plugins-codecs-fluent.md
+ - file: plugins-codecs-graphite.md
+ - file: plugins-codecs-gzip_lines.md
+ - file: plugins-codecs-jdots.md
+ - file: plugins-codecs-java_line.md
+ - file: plugins-codecs-java_plain.md
+ - file: plugins-codecs-json.md
+ - file: plugins-codecs-json_lines.md
+ - file: plugins-codecs-line.md
+ - file: plugins-codecs-msgpack.md
+ - file: plugins-codecs-multiline.md
+ - file: plugins-codecs-netflow.md
+ - file: plugins-codecs-nmap.md
+ - file: plugins-codecs-plain.md
+ - file: plugins-codecs-protobuf.md
+ - file: plugins-codecs-rubydebug.md
+ - file: tips-best-practices.md
+ children:
+ - file: jvm-settings.md
+ # TO DO: Was not migrated
+ # - file: upgrading-with-persistent-queue-enabled.md
+ # TO DO: Was not migrated
+ # - file: multiline-filter-plugin.md
+ # TO DO: Was not migrated
+ # - file: field-references-deep-dive.md
\ No newline at end of file
diff --git a/docs/reference/transforming-data.md b/docs/reference/transforming-data.md
new file mode 100644
index 000000000..e470c4e0c
--- /dev/null
+++ b/docs/reference/transforming-data.md
@@ -0,0 +1,20 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/transformation.html
+---
+
+# Transforming data [transformation]
+
+With over 200 plugins in the Logstash plugin ecosystem, it’s sometimes challenging to choose the best plugin to meet your data processing needs. In this section, we’ve collected a list of popular plugins and organized them according to their processing capabilities:
+
+* [Performing Core Operations](/reference/core-operations.md)
+* [Deserializing Data](/reference/data-deserialization.md)
+* [Extracting Fields and Wrangling Data](/reference/field-extraction.md)
+* [Enriching Data with Lookups](/reference/lookup-enrichment.md)
+
+Also see [*Filter plugins*](/reference/filter-plugins.md) and [*Codec plugins*](/reference/codec-plugins.md) for the full list of available data processing plugins.
+
diff --git a/docs/reference/tuning-logstash.md b/docs/reference/tuning-logstash.md
new file mode 100644
index 000000000..8004397a8
--- /dev/null
+++ b/docs/reference/tuning-logstash.md
@@ -0,0 +1,80 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/tuning-logstash.html
+---
+
+# Tuning and profiling Logstash pipeline performance [tuning-logstash]
+
+The [Flow Metrics](https://www.elastic.co/docs/api/doc/logstash/operation/operation-nodestats) in Logstash’s Monitoring API can provide excellent insight into how events are flowing through your pipelines. They can reveal whether your pipeline is constrained for resources, which parts of your pipeline are consuming the most resources, and provide useful feedback when tuning.
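+
+You can retrieve these metrics directly from the API. Assuming the default API host and port, the pipeline-level and plugin-level `flow` sections appear under each pipeline in the node stats response:
+
+```shell
+curl -s 'http://localhost:9600/_node/stats/pipelines?pretty'
+```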
+
+
+## Worker utilization [tuning-logstash-worker-utilization]
+
+When a pipeline’s `worker_utilization` flow metric is consistently near 100, all of its workers are occupied processing the filters and outputs of the pipeline. We can see *which* plugins in the pipeline are consuming the available worker capacity by looking at the plugin-level `worker_utilization` and `worker_millis_per_event` flow metrics. Using this information, we can gain intuition about how to tune the pipeline’s settings to add resources, how to find and eliminate wasteful computation, or realize the need to scale up/out the capacity of downstream destinations.
+
+In general, plugins fit into one of two categories:
+
+* **CPU-bound**: plugins that perform computation on the contents of events *without* the use of the network or disk IO tend to benefit from incrementally increasing `pipeline.workers` as long as the process has available CPU; once CPU is exhausted additional concurrency can result in *lower* throughput as the pipeline workers contend for resources and the amount of time spent in context-switching increases.
+* **IO-bound**: plugins that use the network to either enrich events or transmit events tend to benefit from incrementally increasing `pipeline.workers` and/or tuning the `pipeline.batch.*` parameters described below. This allows them to make better use of network resources, as long as those external services are not exerting back-pressure (even if Logstash is using nearly all of its available CPU).
+
+The further a pipeline’s `worker_utilization` is from 100, the more time its workers are spending waiting for events to arrive in the queue. Because the volume of data in most pipelines is often inconsistent, the goal should be to tune the pipeline such that it has the resources to avoid propagating back-pressure to its inputs during peak periods.
+
+
+## Queue back-pressure [tuning-logstash-queue-backpressure]
+
+When a pipeline receives events faster than it can process them, the inputs eventually experience back-pressure that prevents them from receiving additional events. Depending on the input plugin being used, back-pressure can either propagate upstream or lead to data loss.
+
+A pipeline’s `queue_backpressure` flow metric reflects how much time the inputs are spending attempting to push events into the queue. The metric isn’t precisely comparable across pipelines, but instead allows you to compare a single pipeline’s current behaviour to *itself* over time. When this metric is growing, look *downstream* at the pipeline’s filters and outputs to see if they are using resources effectively, have sufficient resources allocated, or are experiencing back-pressure of their own.
+
+::::{note}
+A persisted queue offers durability guarantees and can absorb back-pressure for longer than the default in-memory queue, but once it is full it too propagates back-pressure. The `queue_persisted_growth_events` flow metric is a useful measure of how much back-pressure is being actively absorbed by the persisted queue, and should trend toward zero (or less) over the pipeline’s lifetime. Negative numbers indicate that the queue is *shrinking*, and that the workers are catching up on lag that had previously developed.
+::::
+
+
+
+## Tuning-related settings [tuning-logstash-settings]
+
+The Logstash defaults are chosen to provide fast, safe performance for most users. However if you notice performance issues, you may need to modify some of the defaults. Logstash provides the following configurable options for tuning pipeline performance: `pipeline.workers`, `pipeline.batch.size`, and `pipeline.batch.delay`.
+
+For more information about setting these options, see [logstash.yml](/reference/logstash-settings-file.md).
+
+Make sure you’ve read the [Performance troubleshooting](/reference/performance-troubleshooting.md) before modifying these options.
+
+* The `pipeline.workers` setting determines how many threads to run for filter and output processing. If you find that events are backing up, or that the CPU is not saturated, consider increasing the value of this parameter to make better use of available processing power. You can even get good results by increasing this value beyond the number of available processors, because these threads may spend significant time in an I/O wait state when writing to external systems.
+* The `pipeline.batch.size` setting defines the maximum number of events an individual worker thread collects from the queue before attempting to execute filters and outputs. Larger batch sizes are generally more efficient, but increase memory overhead. Output plugins can process each batch as a logical unit. The Elasticsearch output, for example, attempts to send a single [bulk request](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-bulk) for each batch received. Tuning the `pipeline.batch.size` setting adjusts the size of bulk requests sent to Elasticsearch.
+* The `pipeline.batch.delay` setting rarely needs to be tuned. This setting adjusts the latency of the Logstash pipeline. Pipeline batch delay is the maximum amount of time in milliseconds that a pipeline worker waits for each new event while its current batch is not yet full. After this time elapses without any more events becoming available, the worker begins to execute filters and outputs. The maximum time that the worker waits between receiving an event and processing that event in a filter is the product of the `pipeline.batch.delay` and `pipeline.batch.size` settings.
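+
+As a concrete sketch, these settings live in `logstash.yml`; the values below are illustrative starting points rather than recommendations:
+
+```yaml
+pipeline.workers: 8        # often set to the number of CPU cores, or higher for IO-bound pipelines
+pipeline.batch.size: 250   # larger batches are more efficient but use more memory
+pipeline.batch.delay: 50   # maximum wait (ms) for new events before flushing a partial batch
+```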
+
+
+## Notes on pipeline configuration and performance [_notes_on_pipeline_configuration_and_performance]
+
+If you plan to modify the default pipeline settings, take into account the following suggestions:
+
+* The total number of inflight events is determined by the product of the `pipeline.workers` and `pipeline.batch.size` settings. This product is referred to as the *inflight count*. Keep the value of the inflight count in mind as you adjust the `pipeline.workers` and `pipeline.batch.size` settings. Pipelines that intermittently receive large events at irregular intervals require sufficient memory to handle these spikes. Set the JVM heap space accordingly in the `jvm.options` config file (See [Logstash Configuration Files](/reference/config-setting-files.md) for more info).
+* Measure each change to make sure it increases, rather than decreases, performance.
+* Ensure that you leave enough memory available to cope with a sudden increase in event size, for example, when an application generates exceptions that are represented as large blobs of text.
+* The number of workers may be set higher than the number of CPU cores since outputs often spend idle time in I/O wait conditions.
+* Threads in Java have names and you can use the `jstack`, `top`, and the VisualVM graphical tools to figure out which resources a given thread uses.
+* On Linux platforms, Logstash labels its threads with descriptive names. For example, pipeline worker threads show up as `[base]>workerN`, where N is an integer. Where possible, other threads are also labeled to help you identify their purpose.
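+
+For example, a quick way to list the named worker threads (a sketch; substitute the process ID of your {{ls}} instance for the placeholder):
+
+```shell
+jstack {logstash_pid} | grep 'worker'
+```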
+
+
+## Profiling the heap [profiling-the-heap]
+
+When tuning Logstash you may have to adjust the heap size. You can use the [VisualVM](https://visualvm.github.io/) tool to profile the heap. The **Monitor** pane in particular is useful for checking whether your heap allocation is sufficient for the current workload. The screenshots below show sample **Monitor** panes. The first pane examines a Logstash instance configured with too many inflight events. The second pane examines a Logstash instance configured with an appropriate amount of inflight events. Note that the specific batch sizes used here are most likely not applicable to your specific workload, as the memory demands of Logstash vary in large part based on the type of messages you are sending.
+
+:::{image} ../images/pipeline_overload.png
+:alt: pipeline overload
+:::
+
+:::{image} ../images/pipeline_correct_load.png
+:alt: pipeline correct load
+:::
+
+In the first example we see that the CPU isn’t being used very efficiently. In fact, the JVM often has to stop the VM for “full GCs”. Full garbage collections are a common symptom of excessive memory pressure. This is visible in the spiky pattern on the CPU chart. In the more efficiently configured example, the GC graph pattern is smoother, and the CPU is used in a more uniform manner. You can also see that there is ample headroom between the allocated heap size and the maximum allowed, giving the JVM GC a lot of room to work with.
+
+Examining the in-depth GC statistics with a tool similar to the excellent [VisualGC](https://visualvm.github.io/plugins.html) plugin shows that the over-allocated VM spends very little time in the efficient Eden GC, compared to the time spent in the more resource-intensive Old Gen “Full” GCs.
+
+::::{note}
+As long as the GC pattern is acceptable, heap sizes that occasionally increase to the maximum are acceptable. Such heap size spikes happen in response to a burst of large events passing through the pipeline. In general practice, maintain a gap between the used amount of heap memory and the maximum. This document is not a comprehensive guide to JVM GC tuning. Read the official [Oracle guide](http://www.oracle.com/webfolder/technetwork/tutorials/obe/java/gc01/index.html) for more information on the topic. We also recommend reading [Debugging Java Performance](https://www.semicomplete.com/blog/geekery/debugging-java-performance/).
+::::
+
+
diff --git a/docs/reference/upgrading-logstash-9-0.md b/docs/reference/upgrading-logstash-9-0.md
new file mode 100644
index 000000000..48b4d137f
--- /dev/null
+++ b/docs/reference/upgrading-logstash-9-0.md
@@ -0,0 +1,34 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/upgrading-logstash-9.0.html
+---
+
+# Upgrading Logstash to 9.0 [upgrading-logstash-9.0]
+
+Before upgrading Logstash:
+
+* Read the [*Release Notes*](/release-notes/index.md).
+* Read the [breaking changes](/release-notes/breaking-changes.md) docs.
+
+ There you can find info on these topics and more:
+
+ * [Changes to SSL settings in {{ls}} plugins](/release-notes/breaking-changes.md#ssl-settings-9.0)
+
+
+If you are installing Logstash with other components in the Elastic Stack, also see the [Elastic Stack installation and upgrade documentation](docs-content://deploy-manage/index.md).
+
+::::{note}
+Upgrading between non-consecutive major versions (7.x to 9.x, for example) is not supported. We recommend that you upgrade to 8.17, and then upgrade to 9.0.
+::::
+
+
+
+## Upgrade to {{ls}} 8.17 before upgrading to 9.0 [upgrade-to-previous]
+
+If you haven’t already, upgrade to version 8.17 before you upgrade to 9.0. If you’re using other products in the {{stack}}, upgrade {{ls}} as part of the [{{stack}} upgrade process](docs-content://deploy-manage/upgrade/deployment-or-cluster.md).
+
+::::{tip}
+Upgrading to {{ls}} 8.17 gives you a head-start on new 9.0 features. This step helps reduce risk and makes roll backs easier if you hit a snag.
+::::
+
+
diff --git a/docs/reference/upgrading-logstash.md b/docs/reference/upgrading-logstash.md
new file mode 100644
index 000000000..20265a4b6
--- /dev/null
+++ b/docs/reference/upgrading-logstash.md
@@ -0,0 +1,73 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/upgrading-logstash.html
+---
+
+# Upgrading Logstash [upgrading-logstash]
+
+::::{important}
+Before upgrading Logstash:
+
+* Consult the [breaking changes](/release-notes/breaking-changes.md) docs.
+* Read the [*Release Notes*](/release-notes/index.md).
+* Test upgrades in a development environment before upgrading your production cluster.
+
+While upgrading Logstash:
+
+* If you use monitoring, you must re-use the data directory when you upgrade Logstash. Otherwise, the Logstash node is assigned a new persistent UUID and becomes a new node in the monitoring data.
+
+::::
+
+
+If you’re upgrading other products in the stack, also read the [Elastic Stack Installation and Upgrade Guide](docs-content://deploy-manage/index.md).
+
+See the following topics for information about upgrading Logstash:
+
+* [Upgrading using package managers](/reference/upgrading-using-package-managers.md)
+* [Upgrading using a direct download](/reference/upgrading-using-direct-download.md)
+* [Upgrading between minor versions](/reference/upgrading-minor-versions.md)
+* [Upgrading Logstash to 9.0](/reference/upgrading-logstash-9-0.md)
+
+
+## When to upgrade [_when_to_upgrade]
+
+Fresh installations can and should start with the same version across the Elastic Stack.
+
+Elasticsearch 9.0 does not require Logstash 9.0. An Elasticsearch 9.0 cluster will happily receive data from earlier versions of Logstash via the default HTTP communication layer. This provides some flexibility to decide when to upgrade Logstash relative to an Elasticsearch upgrade. It may or may not be convenient for you to upgrade them together, and it is not required to be done at the same time as long as Elasticsearch is upgraded first. However, there are special plugin cases, for example, if your pipeline includes the [elastic_integration filter](/reference/plugins-filters-elastic_integration.md) plugin. See the [when `elastic_integration` is in {{ls}} pipeline](#upgrading-when-elastic_integration-in-pipeline) section for details.
+
+You should upgrade in a timely manner to get the performance improvements that come with Logstash 9.0, but do so in the way that makes the most sense for your environment.
+
+
+## When not to upgrade [_when_not_to_upgrade]
+
+If any Logstash plugin that you require is not compatible with Logstash 9.0, then you should wait until it is ready before upgrading.
+
+Although we make great efforts to ensure compatibility, Logstash 9.0 is not completely backwards compatible. As noted in the Elastic Stack upgrade guide, you should not upgrade Logstash 9.0 before you upgrade Elasticsearch 9.0. This is both practical and because some Logstash 9.0 plugins may attempt to use features of Elasticsearch 9.0 that did not exist in earlier versions.
+
+For example, if you attempt to send the 8.x template to a cluster before Elasticsearch 9.0, then all indexing will likely fail. If you use your own custom template with Logstash, you can ignore this issue.
+
+Another example is when your pipeline utilizes the [`elastic_integration` filter](/reference/plugins-filters-elastic_integration.md) plugin. In such cases, the plugin may encounter issues loading and executing deprecated integrations or features that have been removed in newer versions. This can lead to disruptions in your pipeline’s functionality, especially if your workflow relies on these outdated components. For a comprehensive understanding of how to handle such scenarios and ensure compatibility, refer to the [when `elastic_integration` is in {{ls}} pipeline](#upgrading-when-elastic_integration-in-pipeline) section in this documentation.
+
+
+## When `elastic_integration` is in {{ls}} pipeline [upgrading-when-elastic_integration-in-pipeline]
+
+The [elastic_integration filter](/reference/plugins-filters-elastic_integration.md) plugin requires special attention because of its dependencies on various components of the stack, such as {{es}}, {{kib}}, and {{ls}}. Any updates, deprecations, or changes in the stack products can directly impact the functionality of the plugin.
+
+**When upgrading {{es}}**
+
+This plugin is compiled with a specific version of {{es}} and embeds {{es}} Ingest Node components that match the `major.minor` stack version. Therefore, we recommend using a plugin version that aligns with the `major.minor` version of your stack.
+
+If the versions do not match, the plugin may encounter issues such as failing to load or execute pipelines. For example, if your {{es}} version is newer than the plugin, the plugin may not support new features introduced in the updated {{es}} version. Conversely, if your {{es}} version is older, the plugin may rely on features that have been deprecated or removed in your {{es}} version.
+
+**When upgrading {{kib}}**
+
+When you upgrade {{kib}}, {{kib}} downloads the latest version of the integrations through [Elastic Package Registry](docs-content://reference/ingestion-tools/fleet/index.md#package-registry-intro). As part of the upgrade process, you will also have the opportunity to review and upgrade your currently installed integrations to their latest versions. However, we strongly recommend upgrading the [elastic_integration filter](/reference/plugins-filters-elastic_integration.md) plugin before upgrading {{kib}} and {{es}}. This is because the [elastic_integration filter](/reference/plugins-filters-elastic_integration.md) plugin pulls and processes the ingest pipelines associated with the installed integrations. These pipelines are then executed using the {{es}} Ingest Node components that the plugin was compiled with. If {{es}} or {{kib}} is upgraded first, there is a risk of incompatibility between the plugin’s ingest components and the newer versions of {{es}}'s Ingest Node features or {{kib}}'s integration definitions.
+
+**When upgrading {{ls}}**
+
+This plugin is embedded in {{ls}} core by default. When you upgrade {{ls}}, a new version of the plugin is installed. The plugin is backward compatible across {{ls}} 8.x versions. However, if you are considering upgrading {{ls}} only (not the plugin), there are exception cases, such as JDK compatibility, that require matching certain {{ls}} versions. We recommend reviewing the [elastic_integration plugin requirements](/reference/plugins-filters-elastic_integration.md#plugins-filters-elastic_integration-requirements) guide for the {{ls}} version you are upgrading to.
+
+
diff --git a/docs/reference/upgrading-minor-versions.md b/docs/reference/upgrading-minor-versions.md
new file mode 100644
index 000000000..283d28642
--- /dev/null
+++ b/docs/reference/upgrading-minor-versions.md
@@ -0,0 +1,11 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/upgrading-minor-versions.html
+---
+
+# Upgrading between minor versions [upgrading-minor-versions]
+
+As a general rule, you can upgrade between minor versions (for example, 9.x to 9.y, where x < y) by simply installing the new release and restarting {{ls}}. {{ls}} typically maintains backwards compatibility for configuration settings and exported fields. Please review the [release notes](/release-notes/index.md) for potential exceptions.
+
+Upgrading between non-consecutive major versions (7.x to 9.x, for example) is not supported.
+
diff --git a/docs/reference/upgrading-using-direct-download.md b/docs/reference/upgrading-using-direct-download.md
new file mode 100644
index 000000000..5b7922763
--- /dev/null
+++ b/docs/reference/upgrading-using-direct-download.md
@@ -0,0 +1,18 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/upgrading-using-direct-download.html
+---
+
+# Upgrading using a direct download [upgrading-using-direct-download]
+
+This procedure downloads the relevant Logstash binaries directly from Elastic.
+
+1. Shut down your Logstash pipeline, including any inputs that send events to Logstash.
+2. Download the [Logstash installation file](https://www.elastic.co/downloads/logstash) that matches your host environment.
+3. Back up your `config/` and `data/` folders in a temporary space.
+4. Delete your Logstash directory.
+5. Unpack the installation file into the folder that contained the Logstash directory that you just deleted.
+6. Restore the `config/` and `data/` folders that were previously saved, overwriting the folders created during the unpack operation.
+7. Test your configuration file with the `logstash --config.test_and_exit -f <path_to_config_file>` command. Configuration options for some Logstash plugins have changed.
+8. Restart your Logstash pipeline after updating your configuration file.
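+
+As a hedged sketch of steps 3 through 6 on a Linux host (the directory names and archive file below are placeholders for your actual install location and downloaded release):
+
+```shell
+mkdir -p /tmp/ls-backup
+cp -a logstash-old/config logstash-old/data /tmp/ls-backup/         # step 3: back up config and data
+rm -rf logstash-old                                                  # step 4: delete the old Logstash directory
+tar -xzf logstash-9.0.0-linux-x86_64.tar.gz                          # step 5: unpack the new release in the same folder
+cp -a /tmp/ls-backup/config /tmp/ls-backup/data logstash-9.0.0/      # step 6: restore config and data, overwriting the new folders
+```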
+
diff --git a/docs/reference/upgrading-using-package-managers.md b/docs/reference/upgrading-using-package-managers.md
new file mode 100644
index 000000000..21b13644c
--- /dev/null
+++ b/docs/reference/upgrading-using-package-managers.md
@@ -0,0 +1,15 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/upgrading-using-package-managers.html
+---
+
+# Upgrading using package managers [upgrading-using-package-managers]
+
+This procedure uses [package managers](/reference/installing-logstash.md#package-repositories) to upgrade Logstash.
+
+1. Shut down your Logstash pipeline, including any inputs that send events to Logstash.
+2. Using the directions in the [Installing from Package Repositories](/reference/installing-logstash.md#package-repositories) section, update your repository links to point to the 9.x repositories.
+3. Run the `apt-get upgrade logstash` or `yum update logstash` command as appropriate for your operating system.
+4. Test your configuration file with the `logstash --config.test_and_exit -f <path_to_config_file>` command. Configuration options for some Logstash plugins have changed in the 9.x release.
+5. Restart your Logstash pipeline after you have updated your configuration file.
+
diff --git a/docs/reference/use-filebeat-modules-kafka.md b/docs/reference/use-filebeat-modules-kafka.md
new file mode 100644
index 000000000..cf13065bc
--- /dev/null
+++ b/docs/reference/use-filebeat-modules-kafka.md
@@ -0,0 +1,119 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/use-filebeat-modules-kafka.html
+---
+
+# Example: Set up Filebeat modules to work with Kafka and Logstash [use-filebeat-modules-kafka]
+
+This section shows how to set up {{filebeat}} [modules](beats://reference/filebeat/filebeat-modules-overview.md) to work with {{ls}} when you are using Kafka in between {{filebeat}} and {{ls}} in your publishing pipeline. The main goal of this example is to show how to load ingest pipelines from {{filebeat}} and use them with {{ls}}.
+
+The examples in this section show simple configurations with topic names hard coded. For a full list of configuration options, see documentation about configuring the [Kafka input plugin](/reference/plugins-inputs-kafka.md). Also see [Configure the Kafka output](beats://reference/filebeat/kafka-output.md) in the *{{filebeat}} Reference*.
+
+## Set up and run {{filebeat}} [_set_up_and_run_filebeat]
+
+1. If you haven’t already set up the {{filebeat}} index template and sample {{kib}} dashboards, run the {{filebeat}} `setup` command to do that now:
+
+ ```shell
+ filebeat -e setup
+ ```
+
+ The `-e` flag is optional and sends output to standard error instead of syslog.
+
+ A connection to {{es}} and {{kib}} is required for this one-time setup step because {{filebeat}} needs to create the index template in {{es}} and load the sample dashboards into {{kib}}. For more information about configuring the connection to {{es}}, see the Filebeat [quick start](beats://reference/filebeat/filebeat-installation-configuration.md).
+
+ After the template and dashboards are loaded, you’ll see the message `INFO {{kib}} dashboards successfully loaded. Loaded dashboards`.
+
+2. Run the `modules enable` command to enable the modules that you want to run. For example:
+
+ ```shell
+ filebeat modules enable system
+ ```
+
+ You can further configure the module by editing the config file under the {{filebeat}} `modules.d` directory. For example, if the log files are not in the location expected by the module, you can set the `var.paths` option.
+
+ ::::{note}
+ You must enable at least one fileset in the module. **Filesets are disabled by default.**
+ ::::
+
+3. Run the `setup` command with the `--pipelines` and `--modules` options specified to load ingest pipelines for the modules you’ve enabled. This step also requires a connection to {{es}}. If you want to use a {{ls}} pipeline instead of ingest node to parse the data, skip this step.
+
+ ```shell
+ filebeat setup --pipelines --modules system
+ ```
+
+4. Configure {{filebeat}} to send log lines to Kafka. To do this, in the `filebeat.yml` config file, disable the {{es}} output by commenting it out, and enable the Kafka output. For example:
+
+ ```yaml
+ #output.elasticsearch:
+ #hosts: ["localhost:9200"]
+ output.kafka:
+ hosts: ["kafka:9092"]
+ topic: "filebeat"
+ codec.json:
+ pretty: false
+ ```
+
+5. Start {{filebeat}}. For example:
+
+ ```shell
+ filebeat -e
+ ```
+
+ {{filebeat}} will attempt to send messages to {{ls}} and continue until {{ls}} is available to receive them.
+
+ ::::{note}
+ Depending on how you’ve installed {{filebeat}}, you might see errors related to file ownership or permissions when you try to run {{filebeat}} modules. See [Config File Ownership and Permissions](beats://reference/libbeat/config-file-permissions.md) in the *Beats Platform Reference* if you encounter errors related to file ownership or permissions.
+ ::::
+
+
+
+## Create and start the {{ls}} pipeline [_create_and_start_the_ls_pipeline]
+
+1. On the system where {{ls}} is installed, create a {{ls}} pipeline configuration that reads from a Kafka input and sends events to an {{es}} output:
+
+ ```ruby
+ input {
+ kafka {
+ bootstrap_servers => "myhost:9092"
+ topics => ["filebeat"]
+ codec => json
+ }
+ }
+
+ output {
+ if [@metadata][pipeline] {
+ elasticsearch {
+ hosts => "https://myEShost:9200"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+ pipeline => "%{[@metadata][pipeline]}" <1>
+ user => "elastic"
+ password => "secret"
+ }
+ } else {
+ elasticsearch {
+ hosts => "https://myEShost:9200"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
+ user => "elastic"
+ password => "secret"
+ }
+ }
+ }
+ ```
+
+ 1. Set the `pipeline` option to `%{[@metadata][pipeline]}`. This setting configures {{ls}} to select the correct ingest pipeline based on metadata passed in the event.
+
+2. Start {{ls}}, passing in the pipeline configuration file you just defined. For example:
+
+ ```shell
+ bin/logstash -f mypipeline.conf
+ ```
+
+ {{ls}} should start a pipeline and begin receiving events from the Kafka input.
+
+
+
+## Visualize the data [_visualize_the_data]
+
+To visualize the data in {{kib}}, launch the {{kib}} web interface by pointing your browser to port 5601. For example, [http://127.0.0.1:5601](http://127.0.0.1:5601). Click **Dashboards**, then view the {{filebeat}} dashboards.
diff --git a/docs/reference/use-ingest-pipelines.md b/docs/reference/use-ingest-pipelines.md
new file mode 100644
index 000000000..05b381347
--- /dev/null
+++ b/docs/reference/use-ingest-pipelines.md
@@ -0,0 +1,65 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/use-ingest-pipelines.html
+---
+
+# Use ingest pipelines for parsing [use-ingest-pipelines]
+
+When you use {{filebeat}} modules with {{ls}}, you can use the ingest pipelines provided by {{filebeat}} to parse the data. You need to load the pipelines into {{es}} and configure {{ls}} to use them.
+
+**To load the ingest pipelines:**
+
+On the system where {{filebeat}} is installed, run the `setup` command with the `--pipelines` option specified to load ingest pipelines for specific modules. For example, the following command loads ingest pipelines for the system and nginx modules:
+
+```shell
+filebeat setup --pipelines --modules nginx,system
+```
+
+A connection to {{es}} is required for this setup step because {{filebeat}} needs to load the ingest pipelines into {{es}}. If necessary, you can temporarily disable your configured output and enable the {{es}} output before running the command.
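+
+For example, if your `filebeat.yml` normally sends data to {{ls}} or Kafka, a minimal sketch of this temporary change might look like the following (host names are placeholders):
+
+```yaml
+# Temporarily comment out your regular output...
+#output.logstash:
+#  hosts: ["localhost:5044"]
+
+# ...and enable the Elasticsearch output so the setup command can load the ingest pipelines.
+output.elasticsearch:
+  hosts: ["localhost:9200"]
+```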
+
+**To configure {{ls}} to use the pipelines:**
+
+On the system where {{ls}} is installed, create a {{ls}} pipeline configuration that reads from a {{ls}} input, such as {{beats}} or Kafka, and sends events to an {{es}} output. Set the `pipeline` option in the {{es}} output to `%{[@metadata][pipeline]}` to use the ingest pipelines that you loaded previously.
+
+Here’s an example configuration that reads data from the Beats input and uses {{filebeat}} ingest pipelines to parse data collected by modules:
+
+```yaml
+input {
+ beats {
+ port => 5044
+ }
+}
+
+output {
+ if [@metadata][pipeline] {
+ elasticsearch {
+ hosts => "https://061ab24010a2482e9d64729fdb0fd93a.us-east-1.aws.found.io:9243"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}" <1>
+ action => "create" <2>
+ pipeline => "%{[@metadata][pipeline]}" <3>
+ user => "elastic"
+ password => "secret"
+ }
+ } else {
+ elasticsearch {
+ hosts => "https://061ab24010a2482e9d64729fdb0fd93a.us-east-1.aws.found.io:9243"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}" <1>
+ action => "create"
+ user => "elastic"
+ password => "secret"
+ }
+ }
+}
+```
+
+1. If data streams are disabled in your configuration, set the `index` option to `%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}`. Data streams are enabled by default.
+2. If data streams are disabled in your configuration, you can remove this setting or set it to a different value as appropriate.
+3. Configures {{ls}} to select the correct ingest pipeline based on metadata passed in the event.
+
+
+See the {{filebeat}} [Modules](beats://reference/filebeat/filebeat-modules-overview.md) documentation for more information about setting up and running modules.
+
+For a full example, see [Example: Set up {{filebeat}} modules to work with Kafka and {{ls}}](/reference/use-filebeat-modules-kafka.md).
+
diff --git a/docs/reference/using-logstash-with-elastic-integrations.md b/docs/reference/using-logstash-with-elastic-integrations.md
new file mode 100644
index 000000000..d8213b80c
--- /dev/null
+++ b/docs/reference/using-logstash-with-elastic-integrations.md
@@ -0,0 +1,87 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/ea-integrations.html
+---
+
+# Using Logstash with Elastic integrations [ea-integrations]
+
+You can take advantage of the extensive, built-in capabilities of Elastic {{integrations}}, such as managing data collection, transformation, and visualization, and then use {{ls}} for additional data processing and output options. {{ls}} can expand these capabilities for use cases where you need additional processing or need to deliver your data to multiple destinations.
+
+
+## Elastic {{integrations}}: ingesting to visualizing [integrations-value]
+
+[Elastic {{integrations}}](integration-docs://reference/index.md) provide quick, end-to-end solutions for:
+
+* ingesting data from a variety of data sources,
+* ensuring compliance with the [Elastic Common Schema (ECS)](ecs://reference/index.md),
+* getting the data into the {{stack}}, and
+* visualizing it with purpose-built dashboards.
+
+{{integrations}} are available for [popular services and platforms](integration-docs://reference/all_integrations.md), such as Nginx, AWS, and MongoDB, as well as many generic input types like log files. Each integration includes pre-packaged assets to help reduce the time between ingest and insights.
+
+To see available integrations, go to the {{kib}} home page, and click **Add {{integrations}}**. You can use the query bar to search for integrations you may want to use. When you find an integration for your data source, the UI walks you through adding and configuring it.
+
+
+## Extend {{integrations}} with {{ls}} [integrations-and-ls]
+
+Logstash can run the ingest pipeline component of your Elastic integration when you use the Logstash `filter-elastic_integration` plugin in your {{ls}} pipeline.
+
+Adding the `filter-elastic_integration` plugin as the *first* filter plugin keeps the pipeline’s behavior as close as possible to the behavior you’d expect if the bytes were processed by the integration in {{es}}. The more you modify an event before calling the `elastic_integration` filter, the higher the risk that the modifications will have a meaningful effect on how the event is transformed.
+
+::::{admonition} How to
+Create a {{ls}} pipeline that uses the [elastic_agent input](/reference/plugins-inputs-elastic_agent.md) plugin, and the [elastic_integration filter](/reference/plugins-filters-elastic_integration.md) plugin as the *first* filter in your {{ls}} pipeline. You can add more filters for additional processing, but they must come after the `logstash-filter-elastic_integration` plugin in your configuration. Add one or more output plugins to complete your pipeline.
+
+::::
+
+
+**Sample pipeline configuration**
+
+```ruby
+input {
+ elastic_agent {
+ port => 5044
+ }
+}
+
+filter {
+  elastic_integration { <1>
+    cloud_id => ""
+    cloud_auth => ""
+  }
+
+  translate { <2>
+    source => "[http][host]"
+    target => "[@metadata][tenant]"
+    dictionary_path => "/etc/conf.d/logstash/tenants.yml"
+  }
+}
+
+output { <3>
+ if [@metadata][tenant] == "tenant01" {
+ elasticsearch {
+ cloud_id => ""
+ api_key => ""
+ }
+ } else if [@metadata][tenant] == "tenant02" {
+ elasticsearch {
+ cloud_id => ""
+ api_key => ""
+ }
+ }
+}
+```
+
+1. Use `filter-elastic_integration` as the first filter in your pipeline
+2. You can use additional filters as long as they follow `filter-elastic_integration`
+3. Sample config to output data to multiple destinations
+
+
+
+### Using `filter-elastic_integration` with `output-elasticsearch` [es-tips]
+
+Elastic {{integrations}} are designed to work with [data streams](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data-streams) and [ECS-compatible](/reference/plugins-outputs-elasticsearch.md#_compatibility_with_the_elastic_common_schema_ecs) output. Be sure that these features are enabled in the [`output-elasticsearch`](/reference/plugins-outputs-elasticsearch.md) plugin.
+
+* Set [`data_stream`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data_stream) to `true`. (Check out [Data streams](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-data-streams) for additional data streams settings.)
+* Set [`ecs_compatibility`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ecs_compatibility) to `v1` or `v8`.
+
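+For instance, a minimal `elasticsearch` output that enables both settings might look like this sketch (the host and API key are placeholders):
+
+```ruby
+output {
+  elasticsearch {
+    hosts => ["https://my-es-host:9243"]
+    data_stream => "true"
+    ecs_compatibility => "v8"
+    api_key => "<api key>"
+  }
+}
+```
+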
+Check out the [`output-elasticsearch` plugin](/reference/plugins-outputs-elasticsearch.md) docs for additional settings.
diff --git a/docs/reference/working-with-filebeat-modules.md b/docs/reference/working-with-filebeat-modules.md
new file mode 100644
index 000000000..ebf3c65c5
--- /dev/null
+++ b/docs/reference/working-with-filebeat-modules.md
@@ -0,0 +1,13 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/filebeat-modules.html
+---
+
+# Working with Filebeat modules [filebeat-modules]
+
+{{filebeat}} comes packaged with pre-built [modules](beats://reference/filebeat/filebeat-modules.md) that contain the configurations needed to collect, parse, enrich, and visualize data from various log file formats. Each {{filebeat}} module consists of one or more filesets that contain ingest node pipelines, {{es}} templates, {{filebeat}} input configurations, and {{kib}} dashboards.
+
+You can use {{filebeat}} modules with {{ls}}, but you need to do some extra setup. The simplest approach is to [set up and use the ingest pipelines](/reference/use-ingest-pipelines.md) provided by {{filebeat}}.
+
+
+
diff --git a/docs/reference/working-with-plugins.md b/docs/reference/working-with-plugins.md
new file mode 100644
index 000000000..863d72cd2
--- /dev/null
+++ b/docs/reference/working-with-plugins.md
@@ -0,0 +1,145 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/working-with-plugins.html
+---
+
+# Working with plugins [working-with-plugins]
+
+::::{admonition} macOS Gatekeeper warnings
+:class: important
+
+Apple’s rollout of stricter notarization requirements affected the notarization of the 9.0.0-beta1 {{ls}} artifacts. If macOS Catalina displays a dialog when you first run {{ls}}, you need to take an action to allow it to run. To prevent Gatekeeper checks on the {{ls}} files, run the following command on the downloaded `.tar.gz` archive or the directory to which it was extracted:
+
+```sh
+xattr -d -r com.apple.quarantine <archive-or-directory>
+```
+
+For example, if the `.tar.gz` file was extracted to the default logstash-9.0.0-beta1 directory, the command is:
+
+```sh
+xattr -d -r com.apple.quarantine logstash-9.0.0-beta1
+```
+
+Alternatively, you can add a security override if a Gatekeeper popup appears by following the instructions in the *How to open an app that hasn’t been notarized or is from an unidentified developer* section of [Safely open apps on your Mac](https://support.apple.com/en-us/HT202491).
+
+::::
+
+
+Logstash has a rich collection of input, filter, codec, and output plugins. Check out the [Elastic Support Matrix](https://www.elastic.co/support/matrix#matrix_logstash_plugins) to see which plugins are supported at various levels.
+
+Plugins are available in self-contained packages called gems and hosted on [RubyGems.org](https://rubygems.org/). Use the plugin manager script--`bin/logstash-plugin`--to manage plugins:
+
+* [Listing plugins](#listing-plugins)
+* [Adding plugins to your deployment](#installing-plugins)
+* [Updating plugins](#updating-plugins)
+* [Removing plugins](#removing-plugins)
+* [Advanced: Adding a locally built plugin](#installing-local-plugins)
+* [Advanced: Using `--path.plugins`](#installing-local-plugins-path)
+
+
+## No internet connection? [pointer-to-offline]
+
+If you don’t have an internet connection, check out [Offline Plugin Management](/reference/offline-plugins.md) for information on [building](/reference/offline-plugins.md#building-offline-packs), [installing](/reference/offline-plugins.md#installing-offline-packs), and [updating](/reference/offline-plugins.md#updating-offline-packs) offline plugin packs.
+
+
+### Proxy configuration [http-proxy]
+
+Most plugin manager commands require access to the internet to reach [RubyGems.org](https://rubygems.org). If your organization is behind a firewall, you can set these environment variables to configure Logstash to use your proxy.
+
+```shell
+export http_proxy=http://localhost:3128
+export https_proxy=http://localhost:3128
+```
+
+
+## Listing plugins [listing-plugins]
+
+Logstash release packages bundle common plugins. To list the plugins currently available in your deployment:
+
+```shell
+bin/logstash-plugin list <1>
+bin/logstash-plugin list --verbose <2>
+bin/logstash-plugin list '*namefragment*' <3>
+bin/logstash-plugin list --group output <4>
+```
+
+1. Lists all installed plugins
+2. Lists installed plugins with version information
+3. Lists all installed plugins containing a namefragment
+4. Lists all installed plugins for a particular group (input, filter, codec, output)
+
+
+
+## Adding plugins to your deployment [installing-plugins]
+
+When you have access to the internet, you can retrieve plugins hosted on the [RubyGems.org](https://rubygems.org/) public repository and install them on top of your Logstash installation.
+
+```shell
+bin/logstash-plugin install logstash-input-github
+```
+
+After a plugin is successfully installed, you can use it in your configuration file.
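+
+If you want to confirm that the plugin is available before referencing it in a pipeline, you can list it by name:
+
+```shell
+bin/logstash-plugin list 'logstash-input-github'
+```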
+
+
+## Updating plugins [updating-plugins]
+
+Plugins have their own release cycles and are often released independently of Logstash’s core release cycle. Use the `update` subcommand to get the latest version of a plugin.
+
+```shell
+bin/logstash-plugin update <1>
+bin/logstash-plugin update logstash-input-github <2>
+```
+
+1. updates all installed plugins
+2. updates only the plugin you specify
+
+
+
+### Major version plugin updates [updating-major]
+
+To avoid introducing breaking changes, the plugin manager by default updates only plugins for which newer *minor* or *patch* versions exist. If you want to include breaking changes as well, specify `--level=major`.
+
+```shell
+bin/logstash-plugin update --level=major <1>
+bin/logstash-plugin update --level=major logstash-input-github <2>
+```
+
+1. updates all installed plugins to latest, including major versions with breaking changes
+2. updates only the plugin you specify to latest, including major versions with breaking changes
+
+
+
+## Removing plugins [removing-plugins]
+
+If you need to remove plugins from your Logstash installation:
+
+```shell
+bin/logstash-plugin remove logstash-input-github
+```
+
+
+### Advanced: Adding a locally built plugin [installing-local-plugins]
+
+In some cases, you may want to install plugins that are not yet released and not hosted on RubyGems.org. Logstash lets you install a locally built plugin that is packaged as a Ruby gem. Using a file location:
+
+```shell
+bin/logstash-plugin install /path/to/logstash-output-kafka-1.0.0.gem
+```
+
+
+### Advanced: Using `--path.plugins` [installing-local-plugins-path]
+
+Using the Logstash `--path.plugins` flag, you can load plugin source code located on your file system. Typically this is used by developers who are iterating on a custom plugin and want to test it before creating a Ruby gem.
+
+The path needs to be in a specific directory hierarchy: `PATH/logstash/TYPE/NAME.rb`, where TYPE is *inputs*, *filters*, *outputs*, or *codecs*, and NAME is the name of the plugin.
+
+```shell
+# supposing the code is in /opt/shared/lib/logstash/inputs/my-custom-plugin-code.rb
+bin/logstash --path.plugins /opt/shared/lib
+```
+
+
+
+
+
+
diff --git a/docs/reference/working-with-winlogbeat-modules.md b/docs/reference/working-with-winlogbeat-modules.md
new file mode 100644
index 000000000..8061d2065
--- /dev/null
+++ b/docs/reference/working-with-winlogbeat-modules.md
@@ -0,0 +1,70 @@
+---
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/winlogbeat-modules.html
+---
+
+# Working with Winlogbeat modules [winlogbeat-modules]
+
+{{winlogbeat}} comes packaged with pre-built [modules](beats://reference/winlogbeat/winlogbeat-modules.md) that contain the configurations needed to collect, parse, enrich, and visualize data from various Windows logging providers. Each {{winlogbeat}} module consists of one or more filesets that contain ingest node pipelines, {{es}} templates, {{winlogbeat}} input configurations, and {{kib}} dashboards.
+
+You can use {{winlogbeat}} modules with {{ls}}, but you need to do some extra setup. The simplest approach is to [set up and use the ingest pipelines](#use-winlogbeat-ingest-pipelines) provided by {{winlogbeat}}.
+
+
+## Use ingest pipelines for parsing [use-winlogbeat-ingest-pipelines]
+
+When you use {{winlogbeat}} modules with {{ls}}, you can use the ingest pipelines provided by {{winlogbeat}} to parse the data. You need to load the pipelines into {{es}} and configure {{ls}} to use them.
+
+**To load the ingest pipelines:**
+
+On the system where {{winlogbeat}} is installed, run the `setup` command with the `--pipelines` option specified to load ingest pipelines for specific modules. For example, the following command loads ingest pipelines for the security and sysmon modules:
+
+```shell
+winlogbeat setup --pipelines --modules security,sysmon
+```
+
+A connection to {{es}} is required for this setup step because {{winlogbeat}} needs to load the ingest pipelines into {{es}}. If necessary, you can temporarily disable your configured output and enable the {{es}} output before running the command.
+
+**To configure {{ls}} to use the pipelines:**
+
+On the system where {{ls}} is installed, create a {{ls}} pipeline configuration that reads from a {{ls}} input, such as {{beats}} or Kafka, and sends events to an {{es}} output. Set the `pipeline` option in the {{es}} output to `%{[@metadata][pipeline]}` to use the ingest pipelines that you loaded previously.
+
+Here’s an example configuration that reads data from the Beats input and uses {{winlogbeat}} ingest pipelines to parse data collected by modules:
+
+```yaml
+input {
+ beats {
+ port => 5044
+ }
+}
+
+output {
+ if [@metadata][pipeline] {
+ elasticsearch {
+ hosts => "https://061ab24010a2482e9d64729fdb0fd93a.us-east-1.aws.found.io:9243"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}" <1>
+ action => "create" <2>
+ pipeline => "%{[@metadata][pipeline]}" <3>
+ user => "elastic"
+ password => "secret"
+ }
+ } else {
+ elasticsearch {
+ hosts => "https://061ab24010a2482e9d64729fdb0fd93a.us-east-1.aws.found.io:9243"
+ manage_template => false
+ index => "%{[@metadata][beat]}-%{[@metadata][version]}" <1>
+ action => "create"
+ user => "elastic"
+ password => "secret"
+ }
+ }
+}
+```
+
+1. If data streams are disabled in your configuration, set the `index` option to `%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}`. Data streams are enabled by default.
+2. If data streams are disabled in your configuration, you can remove this setting or set it to a different value as appropriate.
+3. Configures {{ls}} to select the correct ingest pipeline based on metadata passed in the event.
+
+
+See the {{winlogbeat}} [Modules](beats://reference/winlogbeat/winlogbeat-modules.md) documentation for more information about setting up and running modules.
+
diff --git a/docs/release-notes/breaking-changes.md b/docs/release-notes/breaking-changes.md
new file mode 100644
index 000000000..f09f4c8a0
--- /dev/null
+++ b/docs/release-notes/breaking-changes.md
@@ -0,0 +1,206 @@
+---
+navigation_title: "Logstash"
+---
+
+# Logstash breaking changes [logstash-breaking-changes]
+Before you upgrade, carefully review the Logstash breaking changes and take the necessary steps to mitigate any issues.
+
+To learn how to upgrade, check out the Logstash upgrade documentation.
+
+% ## Next version [logstash-nextversion-breaking-changes]
+% **Release date:** Month day, year
+
+% ::::{dropdown} Title of breaking change
+% Description of the breaking change.
+% For more information, check [PR #](PR link).
+% **Impact** Impact of the breaking change.
+% **Action** Steps for mitigating deprecation impact.
+% ::::
+
+## 9.0.0 [logstash-900-breaking-changes]
+**Release date:** March 25, 2025
+
+### Changes to SSL settings in {{ls}} plugins [ssl-settings-9.0]
+We’ve removed deprecated SSL settings in some {{ls}} plugins, and have replaced them with updated settings. If your plugin configuration contains any of these obsolete options, the plugin may fail to start.
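+
+For example, here’s a minimal before-and-after sketch for a `logstash-input-beats` configuration, assuming the old `ssl_verify_mode => "force_peer"` behavior maps to `ssl_client_authentication => "required"`; the certificate paths are placeholders:
+
+```ruby
+input {
+  beats {
+    port => 5044
+    # Obsolete options removed in 9.0 (the plugin fails to start if they are present):
+    # ssl => true
+    # ssl_verify_mode => "force_peer"
+
+    # Updated equivalents:
+    ssl_enabled => true
+    ssl_client_authentication => "required"
+    ssl_certificate => "/path/to/server.crt"   # placeholder path
+    ssl_key => "/path/to/server.key"           # placeholder path
+  }
+}
+```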
+
+::::{dropdown} `logstash-input-beats`
+:name: input-beats-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-ssl_cipher_suites) |
+| ssl | [`ssl_enabled`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-ssl_enabled) |
+| ssl_peer_metadata | `ssl_peer_metadata` option of [`enrich`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-enrich) |
+| ssl_verify_mode | [`ssl_client_authentication`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-ssl_client_authentication) |
+| tls_min_version | [`ssl_supported_protocols`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-ssl_supported_protocols) |
+| tls_max_version | [`ssl_supported_protocols`](/reference/plugins-inputs-beats.md#plugins-inputs-beats-ssl_supported_protocols) |
+
+::::
+
+
+::::{dropdown} `logstash-input-elastic_agent`
+:name: input-elastic_agent-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-ssl_cipher_suites) |
+| ssl | [`ssl_enabled`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-ssl_enabled) |
+| ssl_peer_metadata | `ssl_peer_metadata` option of [`enrich`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-enrich) |
+| ssl_verify_mode | [`ssl_client_authentication`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-ssl_client_authentication) |
+| tls_min_version | [`ssl_supported_protocols`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-ssl_supported_protocols) |
+| tls_max_version | [`ssl_supported_protocols`](/reference/plugins-inputs-elastic_agent.md#plugins-inputs-elastic_agent-ssl_supported_protocols) |
+
+::::
+
+
+::::{dropdown} `logstash-input-elasticsearch`
+:name: input-elasticsearch-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| ca_file | [`ssl_certificate_authorities`](/reference/plugins-inputs-elasticsearch.md#plugins-inputs-elasticsearch-ssl_certificate_authorities) |
+| ssl | [`ssl_enabled`](/reference/plugins-inputs-elasticsearch.md#plugins-inputs-elasticsearch-ssl_enabled) |
+| ssl_certificate_verification | [`ssl_verification_mode`](/reference/plugins-inputs-elasticsearch.md#plugins-inputs-elasticsearch-ssl_verification_mode) |
+
+::::
+
+
+::::{dropdown} `logstash-input-elastic_serverless_forwarder`
+:name: input-elastic_serverless_forwarder-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl | [`ssl_enabled`](/reference/plugins-inputs-elastic_serverless_forwarder.md#plugins-inputs-elastic_serverless_forwarder-ssl_enabled) |
+
+::::
+
+
+::::{dropdown} `logstash-input-http`
+:name: input-http-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cipher_suites | [`ssl_cipher_suites`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_cipher_suites) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_enabled) |
+| ssl_verify_mode | [`ssl_client_authentication`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_client_authentication) |
+| tls_max_version | [`ssl_supported_protocols`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_supported_protocols) |
+| tls_min_version | [`ssl_supported_protocols`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_supported_protocols) |
+| verify_mode | [`ssl_client_authentication`](/reference/plugins-inputs-http.md#plugins-inputs-http-ssl_client_authentication) |
+
+::::
+
+
+::::{dropdown} `logstash-input-http_poller`
+:name: input-http_poller-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_certificate) |
+| client_key | [`ssl_key`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_key) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](/reference/plugins-inputs-http_poller.md#plugins-inputs-http_poller-ssl_truststore_type) |
+
+::::
+
+
+::::{dropdown} `logstash-input-tcp`
+:name: input-tcp-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl_cert | [`ssl_certificate`](/reference/plugins-inputs-tcp.md#plugins-inputs-tcp-ssl_certificate) |
+| ssl_enable | [`ssl_enabled`](/reference/plugins-inputs-tcp.md#plugins-inputs-tcp-ssl_enabled) |
+| ssl_verify | [`ssl_client_authentication`](/reference/plugins-inputs-tcp.md#plugins-inputs-tcp-ssl_client_authentication) in `server` mode and [`ssl_verification_mode`](/reference/plugins-inputs-tcp.md#plugins-inputs-tcp-ssl_verification_mode) in `client` mode |
+
+::::
+
+
+::::{dropdown} `logstash-filter-elasticsearch`
+:name: filter-elasticsearch-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| ca_file | [`ssl_certificate_authorities`](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-ssl_certificate_authorities) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](/reference/plugins-filters-elasticsearch.md#plugins-filters-elasticsearch-ssl_enabled) |
+
+::::
+
+
+::::{dropdown} `logstash-filter-http`
+:name: filter-http-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_certificate) |
+| client_key | [`ssl_key`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_key) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](/reference/plugins-filters-http.md#plugins-filters-http-ssl_truststore_type) |
+
+::::
+
+
+::::{dropdown} `logstash-output-elasticsearch`
+:name: output-elasticsearch-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_certificate_authorities) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_keystore_password) |
+| ssl | [`ssl_enabled`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_enabled) |
+| ssl_certificate_verification | [`ssl_verification_mode`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_verification_mode) |
+| truststore | [`ssl_truststore_path`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](/reference/plugins-outputs-elasticsearch.md#plugins-outputs-elasticsearch-ssl_truststore_password) |
+
+::::
+
+
+::::{dropdown} `logstash-output-http`
+:name: output-http-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| cacert | [`ssl_certificate_authorities`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_certificate_authorities) |
+| client_cert | [`ssl_certificate`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_certificate) |
+| client_key | [`ssl_key`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_key) |
+| keystore | [`ssl_keystore_path`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_keystore_path) |
+| keystore_password | [`ssl_keystore_password`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_keystore_password) |
+| keystore_type | [`ssl_keystore_type`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_keystore_type) |
+| truststore | [`ssl_truststore_path`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_truststore_path) |
+| truststore_password | [`ssl_truststore_password`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_truststore_password) |
+| truststore_type | [`ssl_truststore_type`](/reference/plugins-outputs-http.md#plugins-outputs-http-ssl_truststore_type) |
+
+::::
+
+
+::::{dropdown} `logstash-output-tcp`
+:name: output-tcp-ssl-9.0
+
+| Setting | Replaced by |
+| --- | --- |
+| ssl_cacert | [`ssl_certificate_authorities`](/reference/plugins-outputs-tcp.md#plugins-outputs-tcp-ssl_certificate_authorities) |
+| ssl_cert | [`ssl_certificate`](/reference/plugins-outputs-tcp.md#plugins-outputs-tcp-ssl_certificate) |
+| ssl_enable | [`ssl_enabled`](/reference/plugins-outputs-tcp.md#plugins-outputs-tcp-ssl_enabled) |
+| ssl_verify | [`ssl_client_authentication`](/reference/plugins-outputs-tcp.md#plugins-outputs-tcp-ssl_client_authentication) in `server` mode and [`ssl_verification_mode`](/reference/plugins-outputs-tcp.md#plugins-outputs-tcp-ssl_verification_mode) in `client` mode |
+
+::::
+
+### Enterprise_search integration plugin is deprecated [enterprise_search-deprecated-9.0]
+
+We’ve deprecated the {{ls}} Enterprise_search integration plugin, and its component App Search and Workplace Search plugins. These plugins will receive only security updates and critical fixes moving forward.
+
+We recommend using our native {{es}} tools for your Search use cases. For more details, please visit the [Search solution and use case documentation](docs-content://solutions/search.md).
diff --git a/docs/release-notes/deprecations.md b/docs/release-notes/deprecations.md
new file mode 100644
index 000000000..12d346cf1
--- /dev/null
+++ b/docs/release-notes/deprecations.md
@@ -0,0 +1,28 @@
+---
+navigation_title: "Logstash"
+---
+
+# Logstash deprecations [logstash-deprecations]
+Review the deprecated functionality for your Logstash version. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade.
+
+To learn how to upgrade, check out the Logstash upgrade documentation.
+
+% ## Next version [logstash-versionnext-deprecations]
+% **Release date:** Month day, year
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact** Impact of deprecation.
+% **Action** Steps for mitigating deprecation impact.
+% ::::
+
+% ## 9.0.0 [logstash-900-deprecations]
+% **Release date:** March 25, 2025
+
+% ::::{dropdown} Deprecation title
+% Description of the deprecation.
+% For more information, check [PR #](PR link).
+% **Impact** Impact of deprecation.
+% **Action** Steps for mitigating deprecation impact.
+% ::::
\ No newline at end of file
diff --git a/docs/release-notes/index.md b/docs/release-notes/index.md
new file mode 100644
index 000000000..099d77a61
--- /dev/null
+++ b/docs/release-notes/index.md
@@ -0,0 +1,32 @@
+---
+navigation_title: "Logstash"
+mapped_pages:
+ - https://www.elastic.co/guide/en/logstash/current/releasenotes.html
+ - https://www.elastic.co/guide/en/logstash/master/upgrading-logstash-9.0.html
+---
+
+# Logstash release notes [logstash-release-notes]
+
+Review the changes, fixes, and more in each version of Logstash.
+
+To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31).
+
+% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections.
+
+% ## version.next [logstash-next-release-notes]
+% **Release date:** Month day, year
+
+% ### Features and enhancements [logstash-next-features-enhancements]
+% *
+
+% ### Fixes [logstash-next-fixes]
+% *
+
+## 9.0.0 [logstash-900-release-notes]
+**Release date:** March 25, 2025
+
+### Features and enhancements [logstash-900-features-enhancements]
+
+### Fixes [logstash-900-fixes]
+
+### Plugins [logstash-plugin-900-changes]
\ No newline at end of file
diff --git a/docs/release-notes/known-issues.md b/docs/release-notes/known-issues.md
new file mode 100644
index 000000000..2e3fbd4de
--- /dev/null
+++ b/docs/release-notes/known-issues.md
@@ -0,0 +1,7 @@
+---
+navigation_title: "Logstash"
+---
+
+# Logstash known issues
+
+% What needs to be done: Write from scratch
\ No newline at end of file
diff --git a/docs/release-notes/toc.yml b/docs/release-notes/toc.yml
new file mode 100644
index 000000000..a41006794
--- /dev/null
+++ b/docs/release-notes/toc.yml
@@ -0,0 +1,5 @@
+toc:
+ - file: index.md
+ - file: known-issues.md
+ - file: breaking-changes.md
+ - file: deprecations.md
\ No newline at end of file
diff --git a/docs/static/advanced-pipeline.asciidoc b/docs/static/advanced-pipeline.asciidoc
deleted file mode 100644
index 648e4d587..000000000
--- a/docs/static/advanced-pipeline.asciidoc
+++ /dev/null
@@ -1,868 +0,0 @@
-[[advanced-pipeline]]
-=== Parsing Logs with Logstash
-
-In <>, you created a basic Logstash pipeline to test your Logstash setup. In the real world, a Logstash
-pipeline is a bit more complex: it typically has one or more input, filter, and output plugins.
-
-In this section, you create a Logstash pipeline that uses Filebeat to take Apache web logs as input, parses those
-logs to create specific, named fields from the logs, and writes the parsed data to an Elasticsearch cluster. Rather than
-defining the pipeline configuration at the command line, you'll define the pipeline in a config file.
-
-To get started, go https://download.elastic.co/demos/logstash/gettingstarted/logstash-tutorial.log.gz[here] to
-download the sample data set used in this example. Unpack the file.
-
-
-[[configuring-filebeat]]
-==== Configuring Filebeat to Send Log Lines to Logstash
-
-Before you create the Logstash pipeline, you'll configure Filebeat to send log lines to Logstash.
-The https://github.com/elastic/beats/tree/main/filebeat[Filebeat] client is a lightweight, resource-friendly tool
-that collects logs from files on the server and forwards these logs to your Logstash instance for processing.
-Filebeat is designed for reliability and low latency. Filebeat has a light resource footprint on the host machine,
-and the {logstash-ref}/plugins-inputs-beats.html[`Beats input`] plugin minimizes the resource demands on the Logstash
-instance.
-
-NOTE: In a typical use case, Filebeat runs on a separate machine from the machine running your
-Logstash instance. For the purposes of this tutorial, Logstash and Filebeat are running on the
-same machine.
-
-The default Logstash installation includes the {logstash-ref}/plugins-inputs-beats.html[`Beats input`] plugin. The Beats
-input plugin enables Logstash to receive events from the Elastic Beats framework, which means that any Beat written
-to work with the Beats framework, such as Packetbeat and Metricbeat, can also send event data to Logstash.
-
-To install Filebeat on your data source machine, download the appropriate package from the Filebeat https://www.elastic.co/downloads/beats/filebeat[product page]. You can also refer to
-{filebeat-ref}/filebeat-installation-configuration.html[Filebeat quick start] for additional
-installation instructions.
-
-After installing Filebeat, you need to configure it. Open the `filebeat.yml` file located in your Filebeat installation
-directory, and replace the contents with the following lines. Make sure `paths` points to the example Apache log file,
-`logstash-tutorial.log`, that you downloaded earlier:
-
-[source,yaml]
---------------------------------------------------------------------------------
-filebeat.inputs:
-- type: log
- paths:
- - /path/to/file/logstash-tutorial.log <1>
-output.logstash:
- hosts: ["localhost:5044"]
---------------------------------------------------------------------------------
-<1> Absolute path to the file or files that Filebeat processes.
-
-Save your changes.
-
-To keep the configuration simple, you won't specify TLS/SSL settings as you would in a real world
-scenario.
-
-At the data source machine, run Filebeat with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo ./filebeat -e -c filebeat.yml -d "publish"
---------------------------------------------------------------------------------
-
-NOTE: If you run Filebeat as root, you need to change ownership of the configuration file (see
-{beats-ref}/config-file-permissions.html[Config File Ownership and Permissions]
-in the _Beats Platform Reference_).
-
-Filebeat will attempt to connect on port 5044. Until Logstash starts with an active Beats plugin, there
-won’t be any answer on that port, so any messages you see regarding failure to connect on that port are normal for now.
-
-==== Configuring Logstash for Filebeat Input
-
-Next, you create a Logstash configuration pipeline that uses the Beats input plugin to receive
-events from Beats.
-
-The following text represents the skeleton of a configuration pipeline:
-
-[source,json]
---------------------------------------------------------------------------------
-# The # character at the beginning of a line indicates a comment. Use
-# comments to describe your configuration.
-input {
-}
-# The filter part of this file is commented out to indicate that it is
-# optional.
-# filter {
-#
-# }
-output {
-}
---------------------------------------------------------------------------------
-
-This skeleton is non-functional, because the input and output sections don’t have any valid options defined.
-
-To get started, copy and paste the skeleton configuration pipeline into a file named `first-pipeline.conf` in your home
-Logstash directory.
-
-Next, configure your Logstash instance to use the Beats input plugin by adding the following lines to the `input` section
-of the `first-pipeline.conf` file:
-
-[source,json]
---------------------------------------------------------------------------------
- beats {
- port => "5044"
- }
---------------------------------------------------------------------------------
-
-You'll configure Logstash to write to Elasticsearch later. For now, you can add the following line
-to the `output` section so that the output is printed to stdout when you run Logstash:
-
-[source,json]
---------------------------------------------------------------------------------
- stdout { codec => rubydebug }
---------------------------------------------------------------------------------
-
-When you're done, the contents of `first-pipeline.conf` should look like this:
-
-[source,json]
---------------------------------------------------------------------------------
-input {
- beats {
- port => "5044"
- }
-}
-# The filter part of this file is commented out to indicate that it is
-# optional.
-# filter {
-#
-# }
-output {
- stdout { codec => rubydebug }
-}
---------------------------------------------------------------------------------
-
-To verify your configuration, run the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-bin/logstash -f first-pipeline.conf --config.test_and_exit
---------------------------------------------------------------------------------
-
-The `--config.test_and_exit` option parses your configuration file and reports any errors.
-
-If the configuration file passes the configuration test, start Logstash with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-bin/logstash -f first-pipeline.conf --config.reload.automatic
---------------------------------------------------------------------------------
-
-The `--config.reload.automatic` option enables automatic config reloading so that you don't have to stop and restart Logstash
-every time you modify the configuration file.
-
-As Logstash starts up, you might see one or more warning messages about Logstash ignoring the `pipelines.yml` file. You
-can safely ignore this warning. The `pipelines.yml` file is used for running <>
-in a single Logstash instance. For the examples shown here, you are running a single pipeline.
-
-If your pipeline is working correctly, you should see a series of events like the following written to the console:
-
-[source,json]
---------------------------------------------------------------------------------
-{
- "@timestamp" => 2017-11-09T01:44:20.071Z,
- "offset" => 325,
- "@version" => "1",
- "beat" => {
- "name" => "My-MacBook-Pro.local",
- "hostname" => "My-MacBook-Pro.local",
- "version" => "6.0.0"
- },
- "host" => "My-MacBook-Pro.local",
- "prospector" => {
- "type" => "log"
- },
- "input" => {
- "type" => "log"
- },
- "source" => "/path/to/file/logstash-tutorial.log",
- "message" => "83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] \"GET /presentations/logstash-monitorama-2013/images/kibana-search.png HTTP/1.1\" 200 203023 \"http://semicomplete.com/presentations/logstash-monitorama-2013/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
- "tags" => [
- [0] "beats_input_codec_plain_applied"
- ]
-}
-...
-
---------------------------------------------------------------------------------
-
-
-[float]
-[[configuring-grok-filter]]
-==== Parsing Web Logs with the Grok Filter Plugin
-
-Now you have a working pipeline that reads log lines from Filebeat. However you'll notice that the format of the log messages
-is not ideal. You want to parse the log messages to create specific, named fields from the logs.
-To do this, you'll use the `grok` filter plugin.
-
-The {logstash-ref}/plugins-filters-grok.html[`grok`] filter plugin is one of several plugins that are available by default in
-Logstash. For details on how to manage Logstash plugins, see the <> for
-the plugin manager.
-
-The `grok` filter plugin enables you to parse the unstructured log data into something structured and queryable.
-
-Because the `grok` filter plugin looks for patterns in the incoming log data, configuring the plugin requires you to
-make decisions about how to identify the patterns that are of interest to your use case. A representative line from the
-web server log sample looks like this:
-
-[source,shell]
---------------------------------------------------------------------------------
-83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] "GET /presentations/logstash-monitorama-2013/images/kibana-search.png
-HTTP/1.1" 200 203023 "http://semicomplete.com/presentations/logstash-monitorama-2013/" "Mozilla/5.0 (Macintosh; Intel
-Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
---------------------------------------------------------------------------------
-
-The IP address at the beginning of the line is easy to identify, as is the timestamp in brackets. To parse the data, you can use the `%{COMBINEDAPACHELOG}` grok pattern, which structures lines from the Apache log using the following schema:
-
-[horizontal]
-*Information*:: *Field Name*
-IP Address:: `clientip`
-User ID:: `ident`
-User Authentication:: `auth`
-timestamp:: `timestamp`
-HTTP Verb:: `verb`
-Request body:: `request`
-HTTP Version:: `httpversion`
-HTTP Status Code:: `response`
-Bytes served:: `bytes`
-Referrer URL:: `referrer`
-User agent:: `agent`
-
-TIP: If you need help building grok patterns, try out the
-{kibana-ref}/xpack-grokdebugger.html[Grok Debugger]. The Grok Debugger is an
-{xpack} feature under the Basic License and is therefore *free to use*.
-
-Edit the `first-pipeline.conf` file and replace the entire `filter` section with the following text:
-
-[source,json]
---------------------------------------------------------------------------------
-filter {
- grok {
- match => { "message" => "%{COMBINEDAPACHELOG}"}
- }
-}
---------------------------------------------------------------------------------
-
-When you're done, the contents of `first-pipeline.conf` should look like this:
-
-[source,json]
---------------------------------------------------------------------------------
-input {
- beats {
- port => "5044"
- }
-}
-filter {
- grok {
- match => { "message" => "%{COMBINEDAPACHELOG}"}
- }
-}
-output {
- stdout { codec => rubydebug }
-}
---------------------------------------------------------------------------------
-
-Save your changes. Because you've enabled automatic config reloading, you don't have to restart Logstash to
-pick up your changes. However, you do need to force Filebeat to read the log file from scratch. To do this,
-go to the terminal window where Filebeat is running and press Ctrl+C to shut down Filebeat. Then delete the
-Filebeat registry file. For example, run:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo rm data/registry
---------------------------------------------------------------------------------
-
-Since Filebeat stores the state of each file it harvests in the registry, deleting the registry file forces
-Filebeat to read all the files it's harvesting from scratch.
-
-Next, restart Filebeat with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo ./filebeat -e -c filebeat.yml -d "publish"
---------------------------------------------------------------------------------
-
-There might be a slight delay before Filebeat begins processing events if it needs to wait for Logstash to reload the
-config file.
-
-After Logstash applies the grok pattern, the events will have the following JSON representation:
-
-[source,json]
---------------------------------------------------------------------------------
-{
- "request" => "/presentations/logstash-monitorama-2013/images/kibana-search.png",
- "agent" => "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
- "offset" => 325,
- "auth" => "-",
- "ident" => "-",
- "verb" => "GET",
- "prospector" => {
- "type" => "log"
- },
- "input" => {
- "type" => "log"
- },
- "source" => "/path/to/file/logstash-tutorial.log",
- "message" => "83.149.9.216 - - [04/Jan/2015:05:13:42 +0000] \"GET /presentations/logstash-monitorama-2013/images/kibana-search.png HTTP/1.1\" 200 203023 \"http://semicomplete.com/presentations/logstash-monitorama-2013/\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
- "tags" => [
- [0] "beats_input_codec_plain_applied"
- ],
- "referrer" => "\"http://semicomplete.com/presentations/logstash-monitorama-2013/\"",
- "@timestamp" => 2017-11-09T02:51:12.416Z,
- "response" => "200",
- "bytes" => "203023",
- "clientip" => "83.149.9.216",
- "@version" => "1",
- "beat" => {
- "name" => "My-MacBook-Pro.local",
- "hostname" => "My-MacBook-Pro.local",
- "version" => "6.0.0"
- },
- "host" => "My-MacBook-Pro.local",
- "httpversion" => "1.1",
- "timestamp" => "04/Jan/2015:05:13:42 +0000"
-}
---------------------------------------------------------------------------------
-
-Notice that the event includes the original message, but the log message is also broken down into specific fields.
-
-[float]
-[[configuring-geoip-plugin]]
-==== Enhancing Your Data with the Geoip Filter Plugin
-
-In addition to parsing log data for better searches, filter plugins can derive supplementary information from existing
-data. As an example, the {logstash-ref}/plugins-filters-geoip.html[`geoip`] plugin looks up IP addresses, derives geographic
-location information from the addresses, and adds that location information to the logs.
-
-Configure your Logstash instance to use the `geoip` filter plugin by adding the following lines to the `filter` section
-of the `first-pipeline.conf` file:
-
-[source,json]
---------------------------------------------------------------------------------
- geoip {
- source => "clientip"
- }
---------------------------------------------------------------------------------
-
-The `geoip` plugin configuration requires you to specify the name of the source field that contains the IP address to look up. In this example, the `clientip` field contains the IP address.
-
-Since filters are evaluated in sequence, make sure that the `geoip` section is after the `grok` section of
-the configuration file and that both the `grok` and `geoip` sections are nested within the `filter` section.
-
-When you're done, the contents of `first-pipeline.conf` should look like this:
-
-[source,json]
---------------------------------------------------------------------------------
-input {
- beats {
- port => "5044"
- }
-}
- filter {
- grok {
- match => { "message" => "%{COMBINEDAPACHELOG}"}
- }
- geoip {
- source => "clientip"
- }
-}
-output {
- stdout { codec => rubydebug }
-}
---------------------------------------------------------------------------------
-
-Save your changes. To force Filebeat to read the log file from scratch, as you did earlier, shut down Filebeat (press Ctrl+C),
-delete the registry file, and then restart Filebeat with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo ./filebeat -e -c filebeat.yml -d "publish"
---------------------------------------------------------------------------------
-
-Notice that the event now contains geographic location information:
-
-[source,json]
---------------------------------------------------------------------------------
-{
- "request" => "/presentations/logstash-monitorama-2013/images/kibana-search.png",
- "agent" => "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36\"",
- "geoip" => {
- "timezone" => "Europe/Moscow",
- "ip" => "83.149.9.216",
- "latitude" => 55.7485,
- "continent_code" => "EU",
- "city_name" => "Moscow",
- "country_name" => "Russia",
- "country_code2" => "RU",
- "country_code3" => "RU",
- "region_name" => "Moscow",
- "location" => {
- "lon" => 37.6184,
- "lat" => 55.7485
- },
- "postal_code" => "101194",
- "region_code" => "MOW",
- "longitude" => 37.6184
- },
- ...
---------------------------------------------------------------------------------
-
-
-[float]
-[[indexing-parsed-data-into-elasticsearch]]
-==== Indexing Your Data into Elasticsearch
-
-Now that the web logs are broken down into specific fields, you're ready to get
-your data into Elasticsearch.
-
-TIP: {ess-leadin}
-
-The Logstash pipeline can index the data into an
-Elasticsearch cluster. Edit the `first-pipeline.conf` file and replace the entire `output` section with the following
-text:
-
-[source,json]
---------------------------------------------------------------------------------
-output {
- elasticsearch {
- hosts => [ "localhost:9200" ]
- }
-}
---------------------------------------------------------------------------------
-
-With this configuration, Logstash uses http protocol to connect to Elasticsearch. The above example assumes that
-Logstash and Elasticsearch are running on the same instance. You can specify a remote Elasticsearch instance by using
-the `hosts` configuration to specify something like `hosts => [ "es-machine:9092" ]`.
-
-At this point, your `first-pipeline.conf` file has input, filter, and output sections properly configured, and looks
-something like this:
-
-[source,json]
---------------------------------------------------------------------------------
-input {
- beats {
- port => "5044"
- }
-}
- filter {
- grok {
- match => { "message" => "%{COMBINEDAPACHELOG}"}
- }
- geoip {
- source => "clientip"
- }
-}
-output {
- elasticsearch {
- hosts => [ "localhost:9200" ]
- }
-}
---------------------------------------------------------------------------------
-
-Save your changes. To force Filebeat to read the log file from scratch, as you did earlier, shut down Filebeat (press Ctrl+C),
-delete the registry file, and then restart Filebeat with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo ./filebeat -e -c filebeat.yml -d "publish"
---------------------------------------------------------------------------------
-
-[float]
-[[testing-initial-pipeline]]
-===== Testing Your Pipeline
-
-Now that the Logstash pipeline is configured to index the data into an
-Elasticsearch cluster, you can query Elasticsearch.
-
-Try a test query to Elasticsearch based on the fields created by the `grok` filter plugin.
-Replace $DATE with the current date, in YYYY.MM.DD format:
-
-[source,shell]
---------------------------------------------------------------------------------
-curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=response=200'
---------------------------------------------------------------------------------
-
-NOTE: The date used in the index name is based on UTC, not the timezone where Logstash is running.
-If the query returns `index_not_found_exception`, make sure that `logstash-$DATE` reflects the actual
-name of the index. To see a list of available indexes, use this query: `curl 'localhost:9200/_cat/indices?v'`.
-
-You should get multiple hits back. For example:
-
-[source,json]
---------------------------------------------------------------------------------
-{
- "took": 50,
- "timed_out": false,
- "_shards": {
- "total": 5,
- "successful": 5,
- "skipped": 0,
- "failed": 0
- },
- "hits": {
- "total": 98,
- "max_score": 2.793642,
- "hits": [
- {
- "_index": "logstash-2017.11.09",
- "_type": "doc",
- "_id": "3IzDnl8BW52sR0fx5wdV",
- "_score": 2.793642,
- "_source": {
- "request": "/presentations/logstash-monitorama-2013/images/frontend-response-codes.png",
- "agent": """"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"""",
- "geoip": {
- "timezone": "Europe/Moscow",
- "ip": "83.149.9.216",
- "latitude": 55.7485,
- "continent_code": "EU",
- "city_name": "Moscow",
- "country_name": "Russia",
- "country_code2": "RU",
- "country_code3": "RU",
- "region_name": "Moscow",
- "location": {
- "lon": 37.6184,
- "lat": 55.7485
- },
- "postal_code": "101194",
- "region_code": "MOW",
- "longitude": 37.6184
- },
- "offset": 2932,
- "auth": "-",
- "ident": "-",
- "verb": "GET",
- "prospector": {
- "type": "log"
- },
- "input": {
- "type": "log"
- },
- "source": "/path/to/file/logstash-tutorial.log",
- "message": """83.149.9.216 - - [04/Jan/2015:05:13:45 +0000] "GET /presentations/logstash-monitorama-2013/images/frontend-response-codes.png HTTP/1.1" 200 52878 "http://semicomplete.com/presentations/logstash-monitorama-2013/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"""",
- "tags": [
- "beats_input_codec_plain_applied"
- ],
- "referrer": """"http://semicomplete.com/presentations/logstash-monitorama-2013/"""",
- "@timestamp": "2017-11-09T03:11:35.304Z",
- "response": "200",
- "bytes": "52878",
- "clientip": "83.149.9.216",
- "@version": "1",
- "beat": {
- "name": "My-MacBook-Pro.local",
- "hostname": "My-MacBook-Pro.local",
- "version": "6.0.0"
- },
- "host": "My-MacBook-Pro.local",
- "httpversion": "1.1",
- "timestamp": "04/Jan/2015:05:13:45 +0000"
- }
- },
- ...
-
---------------------------------------------------------------------------------
-
-Try another search for the geographic information derived from the IP address.
-Replace $DATE with the current date, in YYYY.MM.DD format:
-
-[source,shell]
---------------------------------------------------------------------------------
-curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=geoip.city_name=Buffalo'
---------------------------------------------------------------------------------
-
-A few log entries come from Buffalo, so the query produces the following response:
-
-[source,json]
---------------------------------------------------------------------------------
-{
- "took": 9,
- "timed_out": false,
- "_shards": {
- "total": 5,
- "successful": 5,
- "skipped": 0,
- "failed": 0
- },
- "hits": {
- "total": 2,
- "max_score": 2.6390574,
- "hits": [
- {
- "_index": "logstash-2017.11.09",
- "_type": "doc",
- "_id": "L4zDnl8BW52sR0fx5whY",
- "_score": 2.6390574,
- "_source": {
- "request": "/blog/geekery/disabling-battery-in-ubuntu-vms.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+semicomplete%2Fmain+%28semicomplete.com+-+Jordan+Sissel%29",
- "agent": """"Tiny Tiny RSS/1.11 (http://tt-rss.org/)"""",
- "geoip": {
- "timezone": "America/New_York",
- "ip": "198.46.149.143",
- "latitude": 42.8864,
- "continent_code": "NA",
- "city_name": "Buffalo",
- "country_name": "United States",
- "country_code2": "US",
- "dma_code": 514,
- "country_code3": "US",
- "region_name": "New York",
- "location": {
- "lon": -78.8781,
- "lat": 42.8864
- },
- "postal_code": "14202",
- "region_code": "NY",
- "longitude": -78.8781
- },
- "offset": 22795,
- "auth": "-",
- "ident": "-",
- "verb": "GET",
- "prospector": {
- "type": "log"
- },
- "input": {
- "type": "log"
- },
- "source": "/path/to/file/logstash-tutorial.log",
- "message": """198.46.149.143 - - [04/Jan/2015:05:29:13 +0000] "GET /blog/geekery/disabling-battery-in-ubuntu-vms.html?utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+semicomplete%2Fmain+%28semicomplete.com+-+Jordan+Sissel%29 HTTP/1.1" 200 9316 "-" "Tiny Tiny RSS/1.11 (http://tt-rss.org/)"""",
- "tags": [
- "beats_input_codec_plain_applied"
- ],
- "referrer": """"-"""",
- "@timestamp": "2017-11-09T03:11:35.321Z",
- "response": "200",
- "bytes": "9316",
- "clientip": "198.46.149.143",
- "@version": "1",
- "beat": {
- "name": "My-MacBook-Pro.local",
- "hostname": "My-MacBook-Pro.local",
- "version": "6.0.0"
- },
- "host": "My-MacBook-Pro.local",
- "httpversion": "1.1",
- "timestamp": "04/Jan/2015:05:29:13 +0000"
- }
- },
- ...
-
---------------------------------------------------------------------------------
-
-If you are using Kibana to visualize your data, you can also explore the Filebeat data in Kibana:
-
-image::static/images/kibana-filebeat-data.png[Discovering Filebeat data in Kibana]
-
-See the {filebeat-ref}/filebeat-installation-configuration.html[Filebeat quick start docs] for info about loading the Kibana
-index pattern for Filebeat.
-
-You've successfully created a pipeline that uses Filebeat to take Apache web logs as input, parses those logs to
-create specific, named fields, and writes the parsed data to an Elasticsearch cluster. Next, you'll
-learn how to create a pipeline that uses multiple input and output plugins.
-
-[[multiple-input-output-plugins]]
-=== Stitching Together Multiple Input and Output Plugins
-
-The information you need to manage often comes from several disparate sources, and use cases can require multiple
-destinations for your data. Your Logstash pipeline can use multiple input and output plugins to handle these
-requirements.
-
-In this section, you create a Logstash pipeline that takes input from a Twitter feed and the Filebeat client, then
-sends the information to an Elasticsearch cluster as well as writing the information directly to a file.
-
-[float]
-[[twitter-configuration]]
-==== Reading from a Twitter Feed
-
-To add a Twitter feed, you use the {logstash-ref}/plugins-inputs-twitter.html[`twitter`] input plugin. To
-configure the plugin, you need several pieces of information:
-
-* A _consumer key_, which uniquely identifies your Twitter app.
-* A _consumer secret_, which serves as the password for your Twitter app.
-* One or more _keywords_ to search in the incoming feed. The example shows using "cloud" as a keyword, but you can use whatever you want.
-* An _oauth token_, which identifies the Twitter account using this app.
-* An _oauth token secret_, which serves as the password of the Twitter account.
-
-Visit https://dev.twitter.com/apps[https://dev.twitter.com/apps] to set up a Twitter account and generate your consumer
-key and secret, as well as your access token and secret. See the docs for the {logstash-ref}/plugins-inputs-twitter.html[`twitter`] input plugin if you're not sure how to generate these keys.
-
-Like you did earlier when you worked on <>, create a config file (called `second-pipeline.conf`) that
-contains the skeleton of a configuration pipeline. If you want, you can reuse the file you created earlier, but make
-sure you pass in the correct config file name when you run Logstash.
-
-Add the following lines to the `input` section of the `second-pipeline.conf` file, substituting your values for the
-placeholder values shown here:
-
-[source,json]
---------------------------------------------------------------------------------
- twitter {
- consumer_key => "enter_your_consumer_key_here"
- consumer_secret => "enter_your_secret_here"
- keywords => ["cloud"]
- oauth_token => "enter_your_access_token_here"
- oauth_token_secret => "enter_your_access_token_secret_here"
- }
---------------------------------------------------------------------------------
-
-[float]
-[[configuring-lsf]]
-==== Configuring Filebeat to Send Log Lines to Logstash
-
-As you learned earlier in <>, the https://github.com/elastic/beats/tree/main/filebeat[Filebeat]
-client is a lightweight, resource-friendly tool that collects logs from files on the server and forwards these logs to your
-Logstash instance for processing.
-
-After installing Filebeat, you need to configure it. Open the `filebeat.yml` file located in your Filebeat installation
-directory, and replace the contents with the following lines. Make sure `paths` points to your syslog files:
-
-[source,shell]
---------------------------------------------------------------------------------
-filebeat.inputs:
-- type: log
- paths:
- - /var/log/*.log <1>
- fields:
- type: syslog <2>
-output.logstash:
- hosts: ["localhost:5044"]
---------------------------------------------------------------------------------
-<1> Absolute path to the file or files that Filebeat processes.
-<2> Adds a field called `type` with the value `syslog` to the event.
-
-Save your changes.
-
-To keep the configuration simple, you won't specify TLS/SSL settings as you would in a real-world
-scenario.
-
-Configure your Logstash instance to use the Filebeat input plugin by adding the following lines to the `input` section
-of the `second-pipeline.conf` file:
-
-[source,json]
---------------------------------------------------------------------------------
- beats {
- port => "5044"
- }
---------------------------------------------------------------------------------
-
-[float]
-[[logstash-file-output]]
-==== Writing Logstash Data to a File
-
-You can configure your Logstash pipeline to write data directly to a file with the
-{logstash-ref}/plugins-outputs-file.html[`file`] output plugin.
-
-Configure your Logstash instance to use the `file` output plugin by adding the following lines to the `output` section
-of the `second-pipeline.conf` file:
-
-[source,json]
---------------------------------------------------------------------------------
- file {
- path => "/path/to/target/file"
- }
---------------------------------------------------------------------------------
-
-[float]
-[[multiple-es-nodes]]
-==== Writing to Multiple Elasticsearch Nodes
-
-Writing to multiple Elasticsearch nodes lightens the resource demands on any single Elasticsearch node and
-provides redundant points of entry into the cluster when a particular node is unavailable.
-
-To configure your Logstash instance to write to multiple Elasticsearch nodes, edit the `output` section of the `second-pipeline.conf` file to read:
-
-[source,json]
---------------------------------------------------------------------------------
-output {
- elasticsearch {
- hosts => ["IP Address 1:port1", "IP Address 2:port2", "IP Address 3"]
- }
-}
---------------------------------------------------------------------------------
-
-Use the IP addresses of three non-master nodes in your Elasticsearch cluster in the `hosts` line. When the `hosts`
-parameter lists multiple IP addresses, Logstash load-balances requests across the list of addresses. Also note that
-the default port for Elasticsearch is `9200` and can be omitted in the configuration above.
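-
-For example, here's a minimal sketch of an `output` section that omits the port and relies on the default (the host names shown are placeholders):
-
-[source,json]
---------------------------------------------------------------------------------
-output {
-    elasticsearch {
-        hosts => ["es-node-1", "es-node-2", "es-node-3"]
-    }
-}
---------------------------------------------------------------------------------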
-
-[float]
-[[testing-second-pipeline]]
-===== Testing the Pipeline
-
-At this point, your `second-pipeline.conf` file looks like this:
-
-[source,json]
---------------------------------------------------------------------------------
-input {
- twitter {
- consumer_key => "enter_your_consumer_key_here"
- consumer_secret => "enter_your_secret_here"
- keywords => ["cloud"]
- oauth_token => "enter_your_access_token_here"
- oauth_token_secret => "enter_your_access_token_secret_here"
- }
- beats {
- port => "5044"
- }
-}
-output {
- elasticsearch {
- hosts => ["IP Address 1:port1", "IP Address 2:port2", "IP Address 3"]
- }
- file {
- path => "/path/to/target/file"
- }
-}
---------------------------------------------------------------------------------
-
-Logstash is consuming data from the Twitter feed you configured, receiving data from Filebeat, and
-indexing this information to three nodes in an Elasticsearch cluster as well as writing to a file.
-
-At the data source machine, run Filebeat with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-sudo ./filebeat -e -c filebeat.yml -d "publish"
---------------------------------------------------------------------------------
-
-Filebeat will attempt to connect on port 5044. Until Logstash starts with an active Beats plugin, there
-won’t be any answer on that port, so any messages you see regarding failure to connect on that port are normal for now.
-
-To verify your configuration, run the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-bin/logstash -f second-pipeline.conf --config.test_and_exit
---------------------------------------------------------------------------------
-
-The `--config.test_and_exit` option parses your configuration file and reports any errors. When the configuration file
-passes the configuration test, start Logstash with the following command:
-
-[source,shell]
---------------------------------------------------------------------------------
-bin/logstash -f second-pipeline.conf
---------------------------------------------------------------------------------
-
-Use the `grep` utility to search in the target file to verify that information is present:
-
-[source,shell]
---------------------------------------------------------------------------------
-grep syslog /path/to/target/file
---------------------------------------------------------------------------------
-
-Run an Elasticsearch query to find the same information in the Elasticsearch cluster:
-
-[source,shell]
---------------------------------------------------------------------------------
-curl -XGET 'localhost:9200/logstash-$DATE/_search?pretty&q=fields.type:syslog'
---------------------------------------------------------------------------------
-
-Replace $DATE with the current date, in YYYY.MM.DD format.
-
-To see data from the Twitter feed, try this query:
-
-[source,shell]
---------------------------------------------------------------------------------
-curl -XGET 'http://localhost:9200/logstash-$DATE/_search?pretty&q=client:iphone'
---------------------------------------------------------------------------------
-
-Again, remember to replace $DATE with the current date, in YYYY.MM.DD format.
diff --git a/docs/static/best-practice.asciidoc b/docs/static/best-practice.asciidoc
deleted file mode 100644
index a49b8f567..000000000
--- a/docs/static/best-practice.asciidoc
+++ /dev/null
@@ -1,149 +0,0 @@
-[[tips]]
-== Tips and best practices
-
-We are adding more tips and best practices, so please check back soon.
-If you have something to add, please:
-
-* create an issue at
-https://github.com/elastic/logstash/issues, or
-* create a pull request with your proposed changes at https://github.com/elastic/logstash.
-
-// After merge, update PR link to link directly to this topic in GH
-
-Also check out the https://discuss.elastic.co/c/logstash[Logstash discussion
-forum].
-
-[discrete]
-[[tip-cli]]
-=== Command line
-
-[discrete]
-[[tip-windows-cli]]
-==== Shell commands on Windows OS
-
-Command line examples often show single quotes.
-On Windows systems, replace a single quote `'` with a double quote `"`.
-
-*Example*
-
-Instead of:
-
------
-bin/logstash -e 'input { stdin { } } output { stdout {} }'
------
-
-Use this format on Windows systems:
-
------
-bin\logstash -e "input { stdin { } } output { stdout {} }"
------
-
-[discrete]
-[[tip-pipelines]]
-=== Pipelines
-
-[discrete]
-[[tip-pipeline-mgmt]]
-==== Pipeline management
-
-You can manage pipelines in a {ls} instance using either local pipeline configurations or
-{logstash-ref}/configuring-centralized-pipelines.html[centralized pipeline management]
-in {kib}.
-
-After you configure Logstash to use centralized pipeline management, you can
-no longer specify local pipeline configurations. The `pipelines.yml` file and
-settings such as `path.config` and `config.string` are inactive when centralized
-pipeline management is enabled.
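-
-For example, here's a minimal `logstash.yml` sketch for enabling centralized pipeline management (the host and pipeline ID are placeholders, and authentication settings are omitted):
-
-[source,yaml]
------
-xpack.management.enabled: true
-xpack.management.elasticsearch.hosts: ["https://es-node-1:9200"]
-xpack.management.pipeline.id: ["main"]
------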
-
-
-[discrete]
-[[tip-filters]]
-=== Tips using filters
-
-[discrete]
-[[tip-check-field]]
-==== Check to see if a boolean field exists
-
-You can use the mutate filter to see if a boolean field exists.
-
-{ls} supports `[@metadata]` fields, which are not visible to output plugins and exist only during the filter stage.
-You can use `[@metadata]` fields with the mutate filter to check whether a field exists.
-
-[source,ruby]
------
-filter {
- mutate {
-    # Use a temporary field with a predefined, arbitrary known value that
-    # exists only during the filter stage.
- add_field => { "[@metadata][test_field_check]" => "a null value" }
-
-    # Copy the field of interest into that temporary field.
-    # If the field of interest doesn't exist, the copy is not executed.
- copy => { "test_field" => "[@metadata][test_field_check]" }
- }
-
-
-  # If [test_field] did not exist, the temporary field still holds
-  # the initial arbitrary value.
- if [@metadata][test_field_check] == "a null value" {
- # logic to execute when [test_field] did not exist
- mutate { add_field => { "field_did_not_exist" => true }}
- } else {
- # logic to execute when [test_field] existed
- mutate { add_field => { "field_did_exist" => true }}
- }
-}
------
-
-[discrete]
-[[tip-kafka]]
-=== Kafka
-
-[discrete]
-[[tip-kafka-settings]]
-==== Kafka settings
-
-[discrete]
-[[tip-kafka-partitions]]
-===== Partitions per topic
-
-"How many partitions should I use per topic?"
-
-At least the number of {ls} nodes multiplied by consumer threads per node.
-
-Better yet, use a multiple of that number. Increasing the number of partitions for an existing topic is
-extremely complicated, and partitions have very low overhead. Using 5 to 10 times the number of partitions
-suggested by the first point is generally fine, so long as the overall partition count does not
-exceed 2000.
-
-Err on the side of over-partitioning, but try not to exceed 1000 partitions overall.
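-
-For example, a hypothetical deployment of 4 {ls} nodes, each running 4 consumer threads, needs at least 16 partitions per topic; applying the 5-to-10x guidance above would suggest roughly 80 to 160 partitions.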
-
-[discrete]
-[[tip-kafka-threads]]
-===== Consumer threads
-
-"How many consumer threads should I configure?"
-
-Lower values tend to be more efficient and have less memory overhead. Try a
-value of `1`, then iterate your way up. In general, the value should be lower than
-the number of pipeline workers. Values larger than 4 rarely result in
-performance improvement.
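-
-For example, here's a minimal sketch of a `kafka` input that starts with a single consumer thread (the broker address and topic are placeholders):
-
-[source,ruby]
------
-input {
-  kafka {
-    bootstrap_servers => "localhost:9092"
-    topics => ["example-topic"]
-    consumer_threads => 1
-  }
-}
------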
-
-[discrete]
-[[tip-kafka-pq-persist]]
-==== Kafka input and persistent queue (PQ)
-
-[discrete]
-[[tip-kafka-offset-commit]]
-===== Kafka offset commits
-
-"Does Kafka Input commit offsets only after the event has been safely persisted to the PQ?"
-
-"Does Kafa Input commit offsets only for events that have passed the pipeline fully?"
-
-No, we can’t make that guarantee. Offsets are committed to Kafka periodically. If
-writes to the PQ are slow or blocked, offsets for events that haven’t safely
-reached the PQ can be committed.
-
diff --git a/docs/static/breaking-changes-60.asciidoc b/docs/static/breaking-changes-60.asciidoc
deleted file mode 100644
index 327551bb9..000000000
--- a/docs/static/breaking-changes-60.asciidoc
+++ /dev/null
@@ -1,58 +0,0 @@
-[[breaking-6.0]]
-=== Breaking changes in 6.0 series
-
-Here are the breaking changes for 6.0.
-
-[discrete]
-[[breaking-pq]]
-=== Breaking change across PQ versions prior to Logstash 6.3.0
-
-If you are upgrading from Logstash 6.2.x or any earlier version (including 5.x)
-and have the persistent queue enabled, we strongly recommend that you drain or
-delete the persistent queue before you upgrade. See <>
-for information and instructions.
-
-[discrete]
-[[breaking-6.0-rel]]
-=== Breaking changes in 6.0
-
-[discrete]
-==== Changes in Logstash Core
-
-These changes can affect any instance of Logstash that uses impacted features.
-Changes to Logstash Core are plugin agnostic.
-
-[discrete]
-===== Application Settings
-
-* The setting `config.reload.interval` has been changed to use time value strings such as `5m`, `10s` etc.
- Previously, users had to convert this to a millisecond time value themselves.
- Note that the unit qualifier (`s`) is required.
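-
-For example, a minimal `logstash.yml` sketch using the new time-value format:
-
-[source,yaml]
------
-config.reload.automatic: true
-config.reload.interval: 5m
------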
-
-[discrete]
-===== RPM/Deb package changes
-
-* For `rpm` and `deb` release artifacts, config files that match the `*.conf` glob pattern must be in the conf.d folder,
- or the files will not be loaded.
-
-[discrete]
-===== Command Line Interface behavior
-
-* The `-e` and `-f` CLI options are now mutually exclusive. This also applies to the corresponding long form options `config.string` and
- `path.config`. This means any configurations provided via `-e` will no longer be appended to the configurations provided via `-f`.
-* Configurations provided with `-f` or `path.config` will not be appended with `stdin` input and `stdout` output automatically.
-
-[discrete]
-===== List of plugins bundled with Logstash
-
-The following plugins were removed from the default bundle based on usage data. You can still install these plugins manually, as shown in the example after this list:
-
-* logstash-codec-oldlogstashjson
-* logstash-input-couchdb_changes
-* logstash-input-irc
-* logstash-input-log4j
-* logstash-input-lumberjack
-* logstash-filter-uuid
-* logstash-output-xmpp
-* logstash-output-irc
-* logstash-output-statsd
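-
-For example, to install one of these plugins manually:
-
------
-bin/logstash-plugin install logstash-output-statsd
------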
diff --git a/docs/static/breaking-changes-70.asciidoc b/docs/static/breaking-changes-70.asciidoc
deleted file mode 100644
index bc6dd6806..000000000
--- a/docs/static/breaking-changes-70.asciidoc
+++ /dev/null
@@ -1,195 +0,0 @@
-[[breaking-7.0]]
-=== Breaking changes in 7.0 series
-
-Here are the breaking changes for {ls} 7.0.
-
-[discrete]
-==== Changes in Logstash Core
-
-These changes can affect any instance of Logstash that uses impacted features.
-Changes to Logstash Core are plugin agnostic.
-
-[discrete]
-[[java-exec-default]]
-===== Java execution engine enabled by default
-
-The new Java execution engine is enabled by default. It features faster
-performance, reduced memory usage, and lower config startup and reload times.
-
-For more information, see the blog post about the
-https://www.elastic.co/blog/meet-the-new-logstash-java-execution-engine[initial
-release of the Java execution engine].
-
-We went to considerable lengths to make this change seamless. Still, it's a big
-change. If you notice different behaviors that might be related, please
-https://github.com/elastic/logstash/issues[open a GitHub issue] to let us
-know.
-
-[discrete]
-[[beats-ecs]]
-===== Beats conform to the Elastic Common Schema (ECS)
-
-As of 7.0, Beats fields conform to the {ecs-ref}/index.html[Elastic Common
-Schema (ECS)].
-
-If you upgrade Logstash before you upgrade Beats, the payloads continue to use
-the pre-ECS schema. If you upgrade your Beats before you upgrade Logstash, then
-you'll get payloads with the ECS schema in advance of any Logstash upgrade.
-
-If you see mapping conflicts after upgrade, that is an indication that the
-Beats/ECS change is influencing the data reaching existing indices.
-
-
-[discrete]
-[[field-ref-strict]]
-===== Field Reference parser is more strict
-
-The Field Reference parser, which is used to interpret references to fields in
-your pipelines and plugins, was made to be more strict and will now reject
-inputs that are either ambiguous or illegal. Since 6.4, Logstash has emitted
-warnings when encountering input that is ambiguous, and allowed an early opt-in
-of strict-mode parsing either by providing the command-line flag
-`--field-reference-parser STRICT` or by adding `config.field_reference.parser:
-STRICT` to `logstash.yml`.
-
-Here's an example.
-
-*Before*
-
-[source,txt]
------
-logstash-6.7.0 % echo "hello"| bin/logstash -e 'filter { mutate { replace => { "message" => "%{[[]]message]} you" } } }'
-[2019-04-05T16:52:18,691][WARN ][org.logstash.FieldReference] Detected ambiguous Field Reference `[[]]message]`, which we expanded to the path `[message]`; in a future release of Logstash, ambiguous Field References will not be expanded.
-{
- "message" => "hello you",
- "@version" => "1",
- "@timestamp" => 2019-04-05T15:52:18.546Z,
- "type" => "stdin",
- "host" => "overcraft.lan"
-}
------
-
-*After*
-
-[source,txt]
------
-logstash-7.0.0 % echo "hello"| bin/logstash -e 'filter { mutate { replace => { "message" => "%{[[]]message]} you" } } }'
-[2019-04-05T16:48:09,135][FATAL][logstash.runner ] An unexpected error occurred! {:error=>java.lang.IllegalStateException: org.logstash.FieldReference$IllegalSyntaxException: Invalid FieldReference: `[[]]message]`
-[2019-04-05T16:48:09,167][ERROR][org.logstash.Logstash ] java.lang.IllegalStateException: Logstash stopped processing because of an error: (SystemExit) exit
------
-
-
-[discrete]
-==== Changes in Logstash Plugins
-
-With 7.0.0, we have taken the opportunity to upgrade a number of bundled plugins
-to their newest major version, absorbing their breaking changes into the
-Logstash distribution.
-
-While these upgrades included new features and important fixes, only the
-breaking changes are called out below.
-
-NOTE: The majority of the changes to plugins are the removal of previously-deprecated
-and now-obsolete options. Please ensure that your pipeline
-configurations do not use these removed options before upgrading.
-
-[discrete]
-===== Codec Plugins
-
-Here are the breaking changes for codec plugins.
-
-*CEF Codec*
-
-* Removed obsolete `sev` option
-* Removed obsolete `deprecated_v1_fields` option
-
-*Netflow Codec*
-
-* Changed decoding of application_id to implement RFC6759; the format changes from a pair of colon-separated ids (e.g. `0:40567`) to a variable number of double-dot-separated ids (e.g. `0..12356..40567`).
-
-[discrete]
-===== Filter Plugins
-
-Here are the breaking changes for filter plugins.
-
-*Clone Filter*
-
-* Make `clones` a required option
-
-*Geoip Filter*
-
-* Removed obsolete `lru_cache_size` option
-
-*HTTP Filter*
-
-* Removed obsolete `ssl_certificate_verify` option
-
-[discrete]
-===== Input Plugins
-
-Here are the breaking changes for input plugins.
-
-*Beats Input*
-
-* Removed obsolete `congestion_threshold` option
-* Removed obsolete `target_field_for_codec` option
-* Changed default value of `add_hostname` to false
-
-NOTE: In Beats 7.0.0, the fields exported by Beats _to_ the Logstash Beats Input
-conform to the {ecs-ref}/index.html[Elastic Common Schema (ECS)]. Many of the
-exported fields have been renamed, so you may need to modify your pipeline
-configurations to access them at their new locations prior to upgrading your
-Beats.
-
-*HTTP Input*
-
-* Removed obsolete `ssl_certificate_verify` option
-
-*HTTP Poller Input*
-
-* Removed obsolete `interval` option
-* Removed obsolete `ssl_certificate_verify` option
-
-*Tcp Input*
-
-* Removed obsolete `data_timeout` option
-* Removed obsolete `ssl_cacert` option
-
-[discrete]
-===== Output Plugins
-
-Here are the breaking changes for output plugins.
-
-*Elasticsearch Output*
-
-* {es} {ref}/index-lifecycle-management.html[Index lifecycle management (ILM)] is
-auto-detected and enabled by default if your {es} cluster supports it.
-* Removed support for parent/child relationships (the join data type is still supported) because multiple document
-types are no longer supported
-* Removed obsolete `flush_size` option
-* Removed obsolete `idle_flush_time` option
-
-*HTTP Output*
-
-* Removed obsolete `ssl_certificate_verify` option
-
-*Kafka Output*
-
-* Removed obsolete `block_on_buffer_full` option
-* Removed obsolete `ssl` option
-* Removed obsolete `timeout_ms` option
-
-*Redis Output*
-
-* Removed obsolete `queue` option
-* Removed obsolete `name` option
-
-*Sqs Output*
-
-* Removed obsolete `batch` option
-* Removed obsolete `batch_timeout` option
-
-*Tcp Output*
-
-* Removed obsolete `message_format` option
-
diff --git a/docs/static/breaking-changes-80.asciidoc b/docs/static/breaking-changes-80.asciidoc
deleted file mode 100644
index 8beed0fbc..000000000
--- a/docs/static/breaking-changes-80.asciidoc
+++ /dev/null
@@ -1,73 +0,0 @@
-[[breaking-8.0]]
-=== Breaking changes in 8.0 series
-
-[discrete]
-[[breaking-8.15]]
-=== Breaking changes in 8.15
-
-*New {ls} SNMP integration plugin*
-
-Prior to 8.15.0, {ls} bundled stand-alone versions of the `input-snmp` and `input-snmptrap` plugins by default.
-When you upgrade to 8.15.0, the stand-alone plugins are replaced by the 4.0.0+ version contained in the new SNMP integration plugin.
-
-IMPORTANT: Before you upgrade to {ls} 8.15.0, be aware of {logstash-ref}/plugins-integrations-snmp.html#plugins-integrations-snmp-migration[behavioral and mapping differences] between stand-alone plugins and the new versions included in the {logstash-ref}/plugins-integrations-snmp.html[SNMP integration plugin].
-If you need to maintain current mappings for the `input-snmptrap` plugin, you have some options to {logstash-ref}/plugins-integrations-snmp.html#plugins-integrations-snmp-input-snmptrap-compat[preserve existing behavior].
-
-[discrete]
-=== Breaking changes in 8.0
-
-[discrete]
-[[security-on-8.0]]
-===== Secure communication with {es}
-{ls} must establish a Secure Sockets Layer (SSL) connection before it can transfer data to an on-premises {es} cluster.
-{ls} must have a copy of the {es} CA that signed the cluster's certificates.
-When a new {es} cluster is started up _without_ dedicated certificates, it generates its own default self-signed Certificate Authority at startup.
-
-Our hosted {ess} simplifies safe, secure communication between Logstash and Elasticsearch.
-{ess} uses certificates signed by standard publicly trusted certificate authorities, and therefore setting a cacert is not necessary.
-
-For more information, see {logstash-ref}/ls-security.html#es-security-on[{es} security on by default].
-
-[discrete]
-[[bc-java-11-minimum]]
-===== Java 11 minimum
-Logstash requires Java 11 or later.
-By default, Logstash will run with the bundled JDK, which has been verified to
-work with each specific version of Logstash, and generally provides the best
-performance and reliability.
-
-[discrete]
-[[bc-java-home]]
-===== Support for JAVA_HOME removed
-We've removed support for using `JAVA_HOME` to override the path to the JDK.
-Users who need to use a version other than the bundled JDK should set the value
-of `LS_JAVA_HOME` to the path of their preferred JDK.
-The value of `JAVA_HOME` will be ignored.
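-
-For example, a minimal sketch (the JDK path shown is a placeholder):
-
------
-export LS_JAVA_HOME="/path/to/your/jdk"
-bin/logstash
------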
-
-[discrete]
-[[bc-ecs-compatibility]]
-===== ECS compatibility is now on by default
-Many plugins can now be run in a mode that avoids implicit conflict with the Elastic Common Schema.
-This mode is controlled individually with each plugin's `ecs_compatibility` option, which defaults to the value of the Logstash `pipeline.ecs_compatibility` setting.
-In Logstash 8, this compatibility mode will be on-by-default for all pipelines. https://github.com/elastic/logstash/issues/11623[#11623]
-
-If you wish to _lock in_ a pipeline's behavior from Logstash 7.x before upgrading to Logstash 8, you can add `pipeline.ecs_compatibility: disabled` to its definition in `pipelines.yml` (or set it globally in `logstash.yml`).
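-
-For example, a minimal `pipelines.yml` sketch (the pipeline ID and config path are placeholders):
-
-[source,yaml]
------
-- pipeline.id: legacy-pipeline
-  path.config: "/etc/logstash/conf.d/legacy.conf"
-  pipeline.ecs_compatibility: disabled
------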
-
-[discrete]
-[[bc-ruby-engine]]
-===== Ruby Execution Engine removed
-The Java Execution Engine has been the default engine since Logstash 7.0, and works with plugins written in either Ruby or Java.
-Removal of the Ruby Execution Engine will not affect the ability to run existing pipelines. https://github.com/elastic/logstash/pull/12517[#12517]
-
-[discrete]
-[[bc-utf-16]]
-===== Support for UTF-16
-We have added support for UTF-16 and other multi-byte character encodings when reading log files. https://github.com/elastic/logstash/pull/9702[#9702]
-
-[discrete]
-[[bc-field-ref-parser]]
-===== Field Reference parser configuration setting removed
-The Field Reference parser interprets references to fields in your pipelines and plugins.
-Its behavior was configurable in 6.x, and 7.x allowed only a single option: `strict`.
-8.0 no longer recognizes the setting, but maintains the same behavior as the `strict` setting.
-{ls} rejects ambiguous and illegal inputs as standard behavior.
diff --git a/docs/static/breaking-changes-90.asciidoc b/docs/static/breaking-changes-90.asciidoc
deleted file mode 100644
index 88a5fc806..000000000
--- a/docs/static/breaking-changes-90.asciidoc
+++ /dev/null
@@ -1,265 +0,0 @@
-[discrete]
-[[breaking-9.0]]
-=== Breaking changes in 9.0
-coming[9.0.0]
-
-[discrete]
-[[ssl-settings-9.0]]
-===== Changes to SSL settings in {ls} plugins
-
-We've removed deprecated SSL settings in some {ls} plugins, and have replaced them with updated settings.
-If your plugin configuration contains any of these obsolete options, the plugin may fail to start.
-
-Click the arrow beside a plugin name to see the list of settings that have been
-removed and their replacements.
-
-**Plugins with changes to SSL settings**
-
-[discrete]
-[[input-beats-ssl-9.0]]
-.`logstash-input-beats`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| cipher_suites |<>
-| ssl |<>
-| ssl_peer_metadata |`ssl_peer_metadata` option of <>
-| ssl_verify_mode |<>
-| tls_min_version |<>
-| tls_max_version |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[input-elastic_agent-ssl-9.0]]
-.`logstash-input-elastic_agent`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| cipher_suites |<>
-| ssl |<>
-| ssl_peer_metadata | `ssl_peer_metadata` option of <>
-| ssl_verify_mode |<>
-| tls_min_version |<>
-| tls_max_version |<>
-|=======================================================================
-
-====
-
-
-[discrete]
-[[input-elasticsearch-ssl-9.0]]
-.`logstash-input-elasticsearch`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| ca_file |<>
-| ssl |<>
-| ssl_certificate_verification |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[input-elastic_serverless_forwarder-ssl-9.0]]
-.`logstash-input-elastic_serverless_forwarder`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| ssl |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[input-http-ssl-9.0]]
-.`logstash-input-http`
-
-[%collapsible]
-====
-
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| cipher_suites |<>
-| keystore |<>
-| keystore_password |<>
-| ssl |<>
-| ssl_verify_mode |<>
-| tls_max_version |<>
-| tls_min_version |<>
-| verify_mode |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[input-http_poller-ssl-9.0]]
-.`logstash-input-http_poller`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| cacert |<>
-| client_cert |<>
-| client_key |<>
-| keystore |<>
-| keystore_password |<>
-| keystore_type |<>
-| truststore |<>
-| truststore_password |<>
-| truststore_type |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[input-tcp-ssl-9.0]]
-.`logstash-input-tcp`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| ssl_cert |<>
-| ssl_enable |<>
-| ssl_verify |<> in `server` mode and <> in `client` mode
-|=======================================================================
-
-====
-
-[discrete]
-[[filter-elasticsearch-ssl-9.0]]
-.`logstash-filter-elasticsearch`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| ca_file |<>
-| keystore |<>
-| keystore_password |<>
-| ssl |<>
-|=======================================================================
-
-====
-
-[discrete]
-[[filter-http-ssl-9.0]]
-.`logstash-filter-http`
-
-[%collapsible]
-====
-
-[cols="<,<",options="header",]
-|=======================================================================
-|Setting|Replaced by
-| cacert |<>
-| client_cert |<>
-| client_key |<>
-| keystore |<>
-| keystore_password |<>
-| keystore_type |<>
-| truststore |<>
-| truststore_password |<>
-| truststore_type |<