Fixing the failing metrics tests (#5365)

This PR makes sure that all the tests using the agent or the pipeline correctly
shut down their pipelines, so that the metric state stays coherent with the
config defined in each test.

Fixes: #5291
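
The pattern applied across the specs is sketched below: any example that starts
a pipeline (or an agent) also shuts it down before the next example runs,
because the metric collector is process-wide and a leftover pipeline leaks
state into subsequent tests. The config string and plugins here are
hypothetical placeholders, not taken from the actual specs:

# Minimal sketch of the teardown pattern this PR applies (hypothetical config):
describe LogStash::Pipeline do
  let(:config) { "input { generator {} } output { null {} }" }
  subject      { LogStash::Pipeline.new(config) }

  it "records when it was started" do
    t = Thread.new { subject.run }
    sleep(0.1)
    expect(subject.started_at).to be < Time.now
    subject.shutdown   # instead of `t.kill rescue nil`, so metric state is cleaned up
    t.join
  end
end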
Pier-Hugues Pellerin 2016-05-26 17:06:04 -04:00 committed by Suyog Rao
parent 27e41eac14
commit 1033df7be4
5 changed files with 33 additions and 19 deletions


@@ -45,7 +45,7 @@ class LogStash::Agent
     @collect_metric = setting("metric.collect")
     @metric = create_metric_collector
-    @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(metric)
+    @periodic_pollers = LogStash::Instrument::PeriodicPollers.new(create_metric_collector)
   end

   def execute
@@ -83,7 +83,6 @@ class LogStash::Agent
     pipeline = create_pipeline(pipeline_settings)
     return unless pipeline.is_a?(LogStash::Pipeline)
-    pipeline.metric = @metric
     if @auto_reload && pipeline.non_reloadable_plugins.any?
       @logger.error(I18n.t("logstash.agent.non_reloadable_config_register"),
                     :pipeline_id => pipeline_id,


@@ -54,17 +54,9 @@ module LogStash; class Pipeline
     @worker_threads = []

-    # Metric object should be passed upstream, multiple pipeline share the same metric
-    # and collector only the namespace will changes.
-    # If no metric is given, we use a `NullMetric` for all internal calls.
-    # We also do this to make the changes backward compatible with previous testing of the
-    # pipeline.
-    #
     # This needs to be configured before we evaluate the code to make
-    # sure the metric instance is correctly send to the plugin.
-    # NOTE: It is the responsibility of the Agent to set this externally with a setter
-    # if there's an intent of this not being a NullMetric
-    @metric = Instrument::NullMetric.new
+    # sure the metric instance is correctly send to the plugins to make the namespace scoping work
+    @metric = settings.get_value("metric.collect") ? Instrument::Metric.new : Instrument::NullMetric.new

     grammar = LogStashConfigParser.new
     @config = grammar.parse(config_str)

@@ -80,7 +72,7 @@ module LogStash; class Pipeline
       # The config code is hard to represent as a log message...
       # So just print it.
-      if @settings.get("config.debug") && logger.debug?
+      if @settings.get_value("config.debug") && logger.debug?
         logger.debug("Compiled pipeline code", :code => code)
       end
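
With this change the pipeline no longer waits for the agent to inject a metric:
it picks its own implementation from the `metric.collect` setting, a real
`Instrument::Metric` when collection is enabled and an `Instrument::NullMetric`
otherwise. The sketch below illustrates the null-object idea behind that
ternary; it is not the actual LogStash implementation, only its shape: both
objects answer the same calls, so pipeline and plugin code never has to check
whether collection is turned on.

# Illustrative null-object sketch (not the real Instrument classes):
class SketchMetric
  def initialize
    @counters = Hash.new(0)
  end

  # Record a counter under a namespace; the real collector does much more.
  def increment(namespace, key, value = 1)
    @counters[[namespace, key]] += value
  end
end

class SketchNullMetric
  # Same interface, but every write is silently discarded.
  def increment(_namespace, _key, _value = 1); end
end

collect_enabled = false
metric = collect_enabled ? SketchMetric.new : SketchNullMetric.new
metric.increment([:stats, :pipelines, :main], :events_in)   # safe either way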


@@ -98,6 +98,7 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end

@@ -114,6 +115,7 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end

@@ -129,6 +131,8 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end
   end

@@ -157,6 +161,7 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end

@@ -172,6 +177,7 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end

@@ -186,6 +192,7 @@ describe LogStash::Agent do
         sleep 0.1
         Stud.stop!(t)
         t.join
+        subject.shutdown
       end
     end
   end

@@ -361,6 +368,7 @@ describe LogStash::Agent do
       end

       after :each do
+        subject.shutdown
         Stud.stop!(@t)
         @t.join
       end
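
Every one of the agent examples above now repeats the same teardown sequence
(stop the Stud loop, join the thread, shut the agent down). A hypothetical
helper, sketched below and not part of this PR, could factor that out and
guarantee the shutdown runs even when an expectation inside the block raises:

# Hypothetical helper (not in this PR): run an agent in a background thread
# and always stop, join, and shut it down, even if the block raises.
def with_running_agent(agent)
  t = Thread.new { agent.execute }
  sleep 0.1
  yield agent
ensure
  Stud.stop!(t)
  t.join
  agent.shutdown
end

An example would then wrap its expectations in with_running_agent(subject) { |agent| ... },
keeping the metric cleanup in one place.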


@@ -47,6 +47,10 @@ describe LogStash::PipelineReporter do
     @post_snapshot = reporter.snapshot
   end

+  after do
+    pipeline.shutdown
+  end
+
   describe "events filtered" do
     it "should start at zero" do
       expect(@pre_snapshot.events_filtered).to eql(0)


@@ -155,6 +155,7 @@ describe LogStash::Pipeline do
                             {:count_was=>worker_thread_count, :filters=>["dummyfilter"]})
         pipeline.run
         expect(pipeline.worker_threads.size).to eq(safe_thread_count)
+        pipeline.shutdown
       end
     end

@@ -168,6 +169,7 @@ describe LogStash::Pipeline do
                             {:worker_threads=> override_thread_count, :filters=>["dummyfilter"]})
         pipeline.run
         expect(pipeline.worker_threads.size).to eq(override_thread_count)
+        pipeline.shutdown
       end
     end
   end

@@ -193,6 +195,7 @@ describe LogStash::Pipeline do
         pipeline = TestPipeline.new(test_config_with_filters)
         pipeline.run
         expect(pipeline.worker_threads.size).to eq(worker_thread_count)
+        pipeline.shutdown
       end
     end
   end

@@ -239,6 +242,7 @@ describe LogStash::Pipeline do
       expect(pipeline.outputs.size ).to eq(1)
       expect(pipeline.outputs.first.workers.size ).to eq(::LogStash::SETTINGS.get("pipeline.output.workers"))
       expect(pipeline.outputs.first.workers.first.num_closes ).to eq(1)
+      pipeline.shutdown
     end

     it "should call output close correctly with output workers" do

@@ -255,6 +259,7 @@ describe LogStash::Pipeline do
       output_delegator.workers.each do |plugin|
         expect(plugin.num_closes ).to eq(1)
       end
+      pipeline.shutdown
     end
   end
 end

@@ -276,6 +281,7 @@ describe LogStash::Pipeline do
       expect(pipeline).to receive(:start_flusher).ordered.and_call_original
       pipeline.run
+      pipeline.shutdown
     end
   end

@@ -392,8 +398,14 @@ describe LogStash::Pipeline do
       output { }
     CONFIG

-    it "uses a `NullMetric` object if no metric is given" do
-      pipeline = LogStash::Pipeline.new(config)
+    it "uses a `NullMetric` object if `metric.collect` is set to false" do
+      settings = double("LogStash::SETTINGS")
+      allow(settings).to receive(:get_value).with("pipeline.id").and_return("main")
+      allow(settings).to receive(:get_value).with("metric.collect").and_return(false)
+      allow(settings).to receive(:get_value).with("config.debug").and_return(false)
+
+      pipeline = LogStash::Pipeline.new(config, settings)
       expect(pipeline.metric).to be_kind_of(LogStash::Instrument::NullMetric)
     end
   end
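
Note that the rewritten NullMetric example above stubs get_value on a bare
double for every setting the constructor currently reads, so the spec breaks as
soon as the constructor reads one more setting. One way to soften that, offered
here as a suggestion rather than something this PR does, is a catch-all stub
with specific overrides; RSpec picks the most specific `with` constraint, so
unknown keys fall back to nil instead of raising:

settings = double("LogStash::SETTINGS")
allow(settings).to receive(:get_value).and_return(nil)                           # default for any key
allow(settings).to receive(:get_value).with("pipeline.id").and_return("main")
allow(settings).to receive(:get_value).with("metric.collect").and_return(false)
allow(settings).to receive(:get_value).with("config.debug").and_return(false)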
@@ -511,7 +523,7 @@ describe LogStash::Pipeline do
       t = Thread.new { subject.run }
       sleep(0.1)
       expect(subject.started_at).to be < Time.now
-      t.kill rescue nil
+      subject.shutdown
     end
   end

@@ -536,7 +548,7 @@ describe LogStash::Pipeline do
       t = Thread.new { subject.run }
       sleep(0.1)
       expect(subject.uptime).to be > 0
-      t.kill rescue nil
+      subject.shutdown
     end
   end
 end

@@ -590,7 +602,6 @@ describe LogStash::Pipeline do
       # Reset the metric store
       LogStash::Instrument::Collector.instance.clear
-      subject.metric = metric
       Thread.new { subject.run }
       # make sure we have received all the generated events
       sleep 1 while dummyoutput.events.size < number_of_events