Merge branch 'master' into cloudwatch-docs

This commit is contained in:
Louis Zuckerman 2013-01-01 16:45:39 -05:00
commit 66405e736a
21 changed files with 124 additions and 108 deletions

View file

@ -31,7 +31,7 @@ else
TAR_OPTS=--wildcards
endif
TESTS=$(wildcard spec/support/*.rb spec/filters/*.rb spec/examples/*.rb spec/event.rb spec/jar.rb)
TESTS=$(wildcard spec/support/*.rb spec/filters/*.rb spec/examples/*.rb spec/event.rb)
default: jar
# Figure out if we're using wget or curl
@ -194,10 +194,13 @@ build/flatgems: | build vendor/bundle
done
flatjar-test:
cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-flatjar.jar rspec $(TESTS)
GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-flatjar.jar rspec $(TESTS)
cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-flatjar.jar rspec spec/jar.rb
jar-test:
cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-monolithic.jar rspec $(TESTS)
#cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-monolithic.jar rspec $(TESTS)
GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-monolithic.jar rspec $(TESTS)
cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-monolithic.jar rspec spec/jar.rb
flatjar-test-and-report:
cd build && GEM_HOME= GEM_PATH= java -jar logstash-$(VERSION)-monolithic.jar rspec $(TESTS) --format h > results.flatjar.html

View file

@ -89,7 +89,7 @@ plugin tree is. In our case, it's the current directory.
If you use the monolith jar release of logstash, you have an additional option
- you can include the plugin right in the jar file.
% jar -uf jar -uf logstash-%VERSION%-monolithic.jar logstash/filters/foo.rb
% jar -uf logstash-%VERSION%-monolithic.jar logstash/filters/foo.rb
# Verify it's in the right location in the jar!
% jar tf logstash-%VERSION%-monolithic.jar | grep foo.rb

View file

@ -15,7 +15,7 @@ class LogStash::Filters::Anonymize < LogStash::Filters::Base
config :key, :validate => :string, :required => true
# digest/hash type
config :algorithm, :validate => ['SHA', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'MD4', 'MD5', "MURMUR3", "IPV4_NETWORK"], :required => true, :default => 'SHA1'
config :algorithm, :validate => ['SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'MD4', 'MD5', "MURMUR3", "IPV4_NETWORK"], :required => true, :default => 'SHA1'
public
def register
@ -63,8 +63,8 @@ class LogStash::Filters::Anonymize < LogStash::Filters::Base
def algorithm
case @algorithm
when 'SHA'
return OpenSSL::Digest::SHA.new
#when 'SHA'
#return OpenSSL::Digest::SHA.new
when 'SHA1'
return OpenSSL::Digest::SHA1.new
when 'SHA224'

View file

@ -1,6 +1,5 @@
require "logstash/filters/base"
require "logstash/namespace"
require "geoip"
# Add GeoIP fields from Maxmind database
#
@ -28,6 +27,7 @@ class LogStash::Filters::GeoIP < LogStash::Filters::Base
public
def register
require "geoip"
if @database.nil?
if __FILE__ =~ /^file:\/.+!.+/
# Running from a jar, assume GeoLiteCity.dat is at the root.

View file

@ -9,11 +9,22 @@ class LogStash::Filters::Json < LogStash::Filters::Base
plugin_status "beta"
# Config for json is:
# source: dest
#
# * source => dest
#
# For example, if you have a field named 'foo' that contains your json,
# and you want to store the evaluated json object in 'bar', do this:
#
# filter {
# json {
# foo => bar
# }
# }
#
# JSON in the value of the source field will be expanded into a
# datastructure in the "dest" field. Note: if the "dest" field
# already exists, it will be overridden.
config /[A-Za-z0-9_-]+/, :validate => :string
config /[A-Za-z0-9_@-]+/, :validate => :string
public
def register

View file

@ -1,6 +1,5 @@
require "logstash/filters/base"
require "logstash/namespace"
require "nokogiri"
# XML filter. Takes a field that contains XML and expands it into
# an actual datastructure.
@ -42,6 +41,7 @@ class LogStash::Filters::Xml < LogStash::Filters::Base
public
def register
require "nokogiri"
require "xmlsimple"
@xml = {}

View file

@ -83,9 +83,9 @@ class LogStash::Inputs::Base < LogStash::Plugin
case @format
when "plain"
raw.force_encoding(@charset)
if @charset != "UTF-8"
# Convert to UTF-8 if not in that character set.
raw.force_encoding(@charset)
raw = raw.encode("UTF-8", :invalid => :replace, :undef => :replace)
end
event.message = raw

View file

@ -4,8 +4,6 @@ require "logstash/namespace"
require "pathname"
require "socket" # for Socket.gethostname
require "addressable/uri"
# Stream events from files.
#
# By default, each event is assumed to be one line. If you
@ -72,6 +70,7 @@ class LogStash::Inputs::File < LogStash::Inputs::Base
public
def register
require "addressable/uri"
require "filewatch/tail"
require "digest/md5"
LogStash::Util::set_thread_name("input|file|#{path.join(":")}")

View file

@ -23,11 +23,11 @@ class LogStash::Inputs::Tcp < LogStash::Inputs::Base
# When mode is `client`, the port to connect to.
config :port, :validate => :number, :required => true
# Read timeout in seconds. If a particular tcp connection is
# idle for more than this timeout period, we will assume
# it is dead and close it.
# The 'read' timeout in seconds. If a particular tcp connection is idle for
# more than this timeout period, we will assume it is dead and close it.
#
# If you never want to timeout, use -1.
config :data_timeout, :validate => :number, :default => 5
config :data_timeout, :validate => :number, :default => -1
# Mode to operate in. `server` listens for client connections,
# `client` connects to a server.
@ -73,6 +73,9 @@ class LogStash::Inputs::Tcp < LogStash::Inputs::Base
:client => socket.peer)
end # begin
rescue IOError
# nothing
ensure
begin
socket.close
rescue IOError
@ -93,10 +96,12 @@ class LogStash::Inputs::Tcp < LogStash::Inputs::Base
public
def run(output_queue)
if server?
@thread = Thread.current
@client_threads = []
loop do
# Start a new thread for each connection.
begin
Thread.start(@server_socket.accept) do |s|
@client_threads << Thread.start(@server_socket.accept) do |s|
# TODO(sissel): put this block in its own method.
# monkeypatch a 'peer' method onto the socket.
@ -108,7 +113,11 @@ class LogStash::Inputs::Tcp < LogStash::Inputs::Base
end # Thread.start
rescue IOError
if @interrupted
#Intended shutdown, get out of the loop
# Intended shutdown, get out of the loop
@server_socket.close
@client_threads.each do |thread|
thread.raise(IOError.new)
end
break
else
# Else it was a genuine IOError caused by something else, so propagate it up.
@ -130,7 +139,7 @@ class LogStash::Inputs::Tcp < LogStash::Inputs::Base
def teardown
if server?
@interrupted = true
@server_socket.close
@thread.raise(IOError.new)
end
end # def teardown
end # class LogStash::Inputs::Tcp

View file

@ -89,7 +89,7 @@ class LogStash::Outputs::ElasticSearchHTTP < LogStash::Outputs::Base
def receive_bulk(event, index, type)
header = { "index" => { "_index" => index, "_type" => type } }
if @document_id.nil?
if !@document_id.nil?
header["index"]["_id"] = event.sprintf(@document_id)
end
@queue << [

View file

@ -207,7 +207,7 @@ class LogStash::Outputs::ElasticSearchRiver < LogStash::Outputs::Base
# "action\ndata\n"
# where 'action' is index or delete, data is the data to index.
header = { "index" => { "_index" => index, "_type" => type } }
if @document_id.nil?
if !@document_id.nil?
header["index"]["_id"] = event.sprintf(@document_id)
end

View file

@ -7,11 +7,17 @@ class LogStash::Outputs::Email < LogStash::Outputs::Base
config_name "email"
plugin_status "experimental"
# the registered fields that we want to monitor
# The registered fields that we want to monitor
# A hash of matches of field => value
# Takes the form of:
#
# { "match name", "field.in.event,value.expected, , operand(and/or),field.in.event,value.expected, , or...",
# "match name", "..." }
#
# The match name can be referenced using the `%{matchName}` field.
config :match, :validate => :hash, :required => true
# the To address setting - fully qualified email address to send to
# The To address setting - fully qualified email address to send to
config :to, :validate => :string, :required => true
# The From setting for email - fully qualified email address for the From:

View file

@ -2,13 +2,14 @@ require "logstash/outputs/base"
require "logstash/namespace"
require "logstash/event"
# Push messages to the juggernaut websockets server
# https://github.com/maccman/juggernaut Wraps
# Websockets and supports other methods (including xhr longpolling)
# This is basiccaly, just an extension of the redis output
# (Juggernaut pulls messages from redis). But it pushes messages
# to a particular channel and formats the messages in the way
# juggernaut expects.
# Push messages to the juggernaut websockets server:
#
# * https://github.com/maccman/juggernaut
#
# Wraps Websockets and supports other methods (including xhr longpolling) This
# is basically just an extension of the redis output (Juggernaut pulls
# messages from redis). But it pushes messages to a particular channel and
# formats the messages in the way juggernaut expects.
class LogStash::Outputs::Juggernaut < LogStash::Outputs::Base
config_name "juggernaut"
@ -33,7 +34,7 @@ class LogStash::Outputs::Juggernaut < LogStash::Outputs::Base
# valid here, for example "logstash-%{@type}".
config :channels, :validate => :array, :required => true
# How should be message be formatted before pusing to the websocket.
# How should the message be formatted before pushing to the websocket.
config :message_format, :validate => :string
public

View file

@ -9,8 +9,13 @@ require "logstash/outputs/base"
# * "nagios_host"
# * "nagios_service"
#
# This field is supported, but optional:
# "nagios_annotation"
# These fields are supported, but optional:
#
# * "nagios_annotation"
# * "nagios_level"
#
# The plugin defaults to sending CRITICAL check results. You can send WARNING check
# results by setting the "nagios_level" field to "warn".
#
# The easiest way to use this output is with the grep filter.
# Presumably, you only want certain events matching a given pattern
@ -28,9 +33,9 @@ require "logstash/outputs/base"
# ]
# }
# }
#
#
# output{
# nagios {
# nagios {
# # only process events with this tag
# tags => "nagios-update"
# }

View file

@ -4,8 +4,7 @@ require "logstash/namespace"
# SNS output.
#
# Send events to Amazon's Simple Notification Service, a hosted pub/sub
# framework. It supports subscribers of type email, HTTP/S, SMS, and
# SQS.
# framework. It supports subscribers of type email, HTTP/S, SMS, and SQS.
#
# For further documentation about the service see:
#
@ -13,16 +12,16 @@ require "logstash/namespace"
#
# This plugin looks for the following fields on events it receives:
#
# "sns" => If no ARN is found in the configuration file,
# this will be used as the ARN to publish.
# "sns_subject" => The subject line that should be used. Optional.
# "%{@source}" will be used if not present
# (truncated at MAX_SUBJECT_SIZE_IN_CHARACTERS).
# "sns_message" => The message that should be sent. Optional. The
# event serialzed as JSON will be used if not
# present (with @message truncated so that the
# length of the JSON fits in
# MAX_MESSAGE_SIZE_IN_BYTES).
# * sns - If no ARN is found in the configuration file, this will be used as
# the ARN to publish.
# * sns_subject - The subject line that should be used.
# Optional. The "%{@source}" will be used if not present and truncated at
# MAX_SUBJECT_SIZE_IN_CHARACTERS.
# * sns_message - The message that should be
# sent. Optional. The event serialized as JSON will be used if not present and
# with the @message truncated so that the length of the JSON fits in
# MAX_MESSAGE_SIZE_IN_BYTES.
#
class LogStash::Outputs::Sns < LogStash::Outputs::Base
MAX_SUBJECT_SIZE_IN_CHARACTERS = 100
MAX_MESSAGE_SIZE_IN_BYTES = 32768
@ -36,16 +35,11 @@ class LogStash::Outputs::Sns < LogStash::Outputs::Base
# Path to YAML file containing a hash of AWS credentials. This file
# will be loaded if `access_key_id` and `secret_access_key` aren't
# set.
# set. The contents of the file should look like this:
#
# Example:
#
# The path to YAML file containing a hash of the AWS credentials for
# your account. The contents of the file should look like this:
#
# ---
# :access_key_id: "12345"
# :secret_access_key: "54321"
# ---
# :access_key_id: "12345"
# :secret_access_key: "54321"
#
config :credentials, :validate => :string

View file

@ -12,11 +12,13 @@ require "logstash/namespace"
# what the pricing schedule looks like and how to setup a queue.
#
# To use this plugin, you *must*:
#
# * Have an AWS account
# * Setup an SQS queue
# * Create an identity that has access to publish messages to the queue.
#
# The "consumer" identity must have the following permissions on the queue:
#
# * sqs:ChangeMessageVisibility
# * sqs:ChangeMessageVisibilityBatch
# * sqs:GetQueueAttributes
@ -92,4 +94,4 @@ class LogStash::Outputs::SQS < LogStash::Outputs::Base
@sqs_queue = nil
finished
end # def teardown
end
end

View file

@ -4,7 +4,7 @@ require "logstash/namespace"
# Write events to a 0MQ PUB socket.
#
# You need to have the 0mq 2.1.x library installed to be able to use
# this input plugin.
# this output plugin.
#
# The default settings will create a publisher connecting to a subscriber
# bound to tcp://127.0.0.1:2120
@ -14,15 +14,14 @@ class LogStash::Outputs::ZeroMQ < LogStash::Outputs::Base
config_name "zeromq"
plugin_status "beta"
# 0mq socket address to connect or bind
# Please note that `inproc://` will not work with logstash
# As each we use a context per thread
# By default, inputs bind/listen
# and outputs connect
# 0mq socket address to connect or bind.
# Please note that `inproc://` will not work with logstash.
# For each we use a context per thread.
# By default, inputs bind/listen and outputs connect.
config :address, :validate => :array, :default => ["tcp://127.0.0.1:2120"]
# 0mq topology
# The default logstash topologies work as follows:
#
# * pushpull - inputs are pull, outputs are push
# * pubsub - inputs are subscribers, outputs are publishers
# * pair - inputs are clients, inputs are servers
@ -33,31 +32,26 @@ class LogStash::Outputs::ZeroMQ < LogStash::Outputs::Base
# TODO (lusis) add router/dealer
config :topology, :validate => ["pushpull", "pubsub", "pair"], :required => true
# 0mq topic
# This is used for the 'pubsub' topology only
# On inputs, this allows you to filter messages by topic
# On outputs, this allows you to tag a message for routing
# This is used for the 'pubsub' topology only.
# On inputs, this allows you to filter messages by topic.
# On outputs, this allows you to tag a message for routing.
# NOTE: ZeroMQ does subscriber-side filtering
# NOTE: Topic is evaluated with `event.sprintf` so
# macros are valid here
# NOTE: Topic is evaluated with `event.sprintf` so macros are valid here.
config :topic, :validate => :string, :default => ""
# mode
# server mode binds/listens
# client mode connects
# Server mode binds/listens. Client mode connects.
config :mode, :validate => ["server", "client"], :default => "client"
# 0mq socket options
# This exposes zmq_setsockopt
# for advanced tuning
# see http://api.zeromq.org/2-1:zmq-setsockopt for details
# This exposes zmq_setsockopt for advanced tuning.
# See http://api.zeromq.org/2-1:zmq-setsockopt for details.
#
# This is where you would set values like:
# ZMQ::HWM - high water mark
# ZMQ::IDENTITY - named queues
# ZMQ::SWAP_SIZE - space for disk overflow
#
# example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"]
# * ZMQ::HWM - high water mark
# * ZMQ::IDENTITY - named queues
# * ZMQ::SWAP_SIZE - space for disk overflow
#
# Example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"]
config :sockopt, :validate => :hash
public

View file

@ -133,6 +133,9 @@ class LogStash::Runner
# Try inside the jar.
jar_root = __FILE__.gsub(/!.*/,"!")
newpath = File.join(jar_root, args.first)
# Strip leading 'jar:' path (JRUBY_6970)
newpath.gsub!(/^jar:/, "")
if File.exists?(newpath)
# Add the 'spec' dir to the load path so specs can run
specpath = File.join(jar_root, "spec")
@ -167,6 +170,7 @@ class LogStash::Runner
$: << File.expand_path("#{File.dirname(__FILE__)}/../../spec")
require "test_utils"
#p :args => fixedargs
rspec = runner.new(fixedargs)
rspec.run
@runners << rspec

View file

@ -4,24 +4,6 @@ require "logstash/filters/anonymize"
describe LogStash::Filters::Anonymize do
extend LogStash::RSpec
describe "anonymize string with SHA alogrithm" do
# The logstash config goes here.
# At this time, only filters are supported.
config <<-CONFIG
filter {
anonymize {
fields => ["clientip"]
key => "longencryptionkey"
algorithm => 'SHA'
}
}
CONFIG
sample "@fields" => {"clientip" => "123.123.123.123"} do
insist { subject["clientip"] } == "0d01b2191194d261fa1a2e7c18a38d44953ab4e2"
end
end
describe "anonymize ipaddress with IPV4_NETWORK algorithm" do
# The logstash config goes here.
# At this time, only filters are supported.

View file

@ -75,14 +75,16 @@ describe "inputs/tcp" do
tcp = plugins.first
output = Shiftback.new do |event|
sequence += 1
tcp.teardown if sequence == event_count
begin
insist { event.message } == "Hello ü Û"
insist { event.message } == "Hello ü Û\n"
insist { event.message.encoding } == Encoding.find("UTF-8")
rescue Exception => failure
# Get out of the threads nets
th.raise failure
end
if sequence == event_count
tcp.teardown
end
end
tcp.register
@ -91,14 +93,13 @@ describe "inputs/tcp" do
tcp.run(output)
end
#Send events from clients sockets
client_socket = TCPSocket.new("0.0.0.0", port)
event_count.times do |value|
client_socket = TCPSocket.new("0.0.0.0", port)
client_socket.write "Hello ü Û"
client_socket.close
# micro sleep to ensure sequencing
sleep(0.1)
client_socket.write "Hello ü Û\n"
end
client_socket.close
#wait for input termination
puts "Waiting for tcp input thread to finish"
thread.join
end # input
end

View file

@ -13,6 +13,11 @@ if RUBY_VERSION < "1.9.2"
raise LoadError
end
if ENV["TEST_DEBUG"]
Cabin::Channel.get.level = :debug
Cabin::Channel.get.subscribe(STDOUT)
end
module LogStash
module RSpec
def config(configstr)