# frozen_string_literal: true

require 'spec_helper'
RSpec.describe Gitlab::SidekiqMiddleware::ServerMetrics do
  context "with worker attribution" do
    subject { described_class.new }

    let(:queue) { :test }
    let(:worker_class) { worker.class }
    let(:job) { {} }
    let(:job_status) { :done }
    let(:labels_with_job_status) { labels.merge(job_status: job_status.to_s) }
    # Baseline Prometheus label set for an un-attributed worker; individual
    # contexts below override single labels via `default_labels.merge(...)`.
    let(:default_labels) do
      { queue: queue.to_s,
        worker: worker_class.to_s,
        boundary: "",
        external_dependencies: "no",
        feature_category: "",
        urgency: "low" }
    end

    # Shared behaviour exercised once per worker-attribution variant. Callers
    # must define `worker` and `labels`.
    shared_examples "a metrics middleware" do
      context "with mocked prometheus" do
        let(:concurrency_metric) { double('concurrency metric') }

        let(:queue_duration_seconds) { double('queue duration seconds metric') }
        let(:completion_seconds_metric) { double('completion seconds metric') }
        let(:user_execution_seconds_metric) { double('user execution seconds metric') }
        let(:db_seconds_metric) { double('db seconds metric') }
        let(:gitaly_seconds_metric) { double('gitaly seconds metric') }
        let(:failed_total_metric) { double('failed total metric') }
        let(:retried_total_metric) { double('retried total metric') }
        let(:redis_requests_total) { double('redis calls total metric') }
        let(:running_jobs_metric) { double('running jobs metric') }
        let(:redis_seconds_metric) { double('redis seconds metric') }
        let(:elasticsearch_seconds_metric) { double('elasticsearch seconds metric') }
        let(:elasticsearch_requests_total) { double('elasticsearch calls total metric') }

        before do
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_jobs_queue_duration_seconds, anything, anything, anything).and_return(queue_duration_seconds)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_jobs_completion_seconds, anything, anything, anything).and_return(completion_seconds_metric)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_jobs_cpu_seconds, anything, anything, anything).and_return(user_execution_seconds_metric)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_jobs_db_seconds, anything, anything, anything).and_return(db_seconds_metric)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_jobs_gitaly_seconds, anything, anything, anything).and_return(gitaly_seconds_metric)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_redis_requests_duration_seconds, anything, anything, anything).and_return(redis_seconds_metric)
          allow(Gitlab::Metrics).to receive(:histogram).with(:sidekiq_elasticsearch_requests_duration_seconds, anything, anything, anything).and_return(elasticsearch_seconds_metric)
          allow(Gitlab::Metrics).to receive(:counter).with(:sidekiq_jobs_failed_total, anything).and_return(failed_total_metric)
          allow(Gitlab::Metrics).to receive(:counter).with(:sidekiq_jobs_retried_total, anything).and_return(retried_total_metric)
          allow(Gitlab::Metrics).to receive(:counter).with(:sidekiq_redis_requests_total, anything).and_return(redis_requests_total)
          allow(Gitlab::Metrics).to receive(:counter).with(:sidekiq_elasticsearch_requests_total, anything).and_return(elasticsearch_requests_total)
          allow(Gitlab::Metrics).to receive(:gauge).with(:sidekiq_running_jobs, anything, {}, :all).and_return(running_jobs_metric)
          allow(Gitlab::Metrics).to receive(:gauge).with(:sidekiq_concurrency, anything, {}, :all).and_return(concurrency_metric)

          # The concurrency gauge is set from the middleware's initializer.
          allow(concurrency_metric).to receive(:set)
        end

        describe '#initialize' do
          it 'sets concurrency metrics' do
            expect(concurrency_metric).to receive(:set).with({}, Sidekiq.options[:concurrency].to_i)

            subject
          end
        end

        describe '#call' do
          let(:thread_cputime_before) { 1 }
          let(:thread_cputime_after) { 2 }
          let(:thread_cputime_duration) { thread_cputime_after - thread_cputime_before }

          let(:monotonic_time_before) { 11 }
          let(:monotonic_time_after) { 20 }
          let(:monotonic_time_duration) { monotonic_time_after - monotonic_time_before }

          let(:queue_duration_for_job) { 0.01 }

          let(:db_duration) { 3 }
          let(:gitaly_duration) { 4 }

          let(:redis_calls) { 2 }
          let(:redis_duration) { 0.01 }

          let(:elasticsearch_calls) { 8 }
          let(:elasticsearch_duration) { 0.54 }

          before do
            allow(subject).to receive(:get_thread_cputime).and_return(thread_cputime_before, thread_cputime_after)
            allow(Gitlab::Metrics::System).to receive(:monotonic_time).and_return(monotonic_time_before, monotonic_time_after)
            allow(Gitlab::InstrumentationHelper).to receive(:queue_duration_for_job).with(job).and_return(queue_duration_for_job)
            # ActiveRecord reports runtime in milliseconds; the middleware
            # converts it to seconds, hence the * 1000 here.
            allow(ActiveRecord::LogSubscriber).to receive(:runtime).and_return(db_duration * 1000)

            job[:gitaly_duration_s] = gitaly_duration
            job[:redis_calls] = redis_calls
            job[:redis_duration_s] = redis_duration

            job[:elasticsearch_calls] = elasticsearch_calls
            job[:elasticsearch_duration_s] = elasticsearch_duration

            allow(running_jobs_metric).to receive(:increment)
            allow(redis_requests_total).to receive(:increment)
            allow(elasticsearch_requests_total).to receive(:increment)
            allow(queue_duration_seconds).to receive(:observe)
            allow(user_execution_seconds_metric).to receive(:observe)
            allow(db_seconds_metric).to receive(:observe)
            allow(gitaly_seconds_metric).to receive(:observe)
            allow(completion_seconds_metric).to receive(:observe)
            allow(redis_seconds_metric).to receive(:observe)
            allow(elasticsearch_seconds_metric).to receive(:observe)
          end

          it 'yields block' do
            expect { |b| subject.call(worker, job, :test, &b) }.to yield_control.once
          end

          it 'sets queue specific metrics' do
            expect(running_jobs_metric).to receive(:increment).with(labels, -1)
            expect(running_jobs_metric).to receive(:increment).with(labels, 1)
            expect(queue_duration_seconds).to receive(:observe).with(labels, queue_duration_for_job) if queue_duration_for_job
            expect(user_execution_seconds_metric).to receive(:observe).with(labels_with_job_status, thread_cputime_duration)
            expect(db_seconds_metric).to receive(:observe).with(labels_with_job_status, db_duration)
            expect(gitaly_seconds_metric).to receive(:observe).with(labels_with_job_status, gitaly_duration)
            expect(completion_seconds_metric).to receive(:observe).with(labels_with_job_status, monotonic_time_duration)
            expect(redis_seconds_metric).to receive(:observe).with(labels_with_job_status, redis_duration)
            expect(elasticsearch_seconds_metric).to receive(:observe).with(labels_with_job_status, elasticsearch_duration)
            expect(redis_requests_total).to receive(:increment).with(labels_with_job_status, redis_calls)
            expect(elasticsearch_requests_total).to receive(:increment).with(labels_with_job_status, elasticsearch_calls)

            subject.call(worker, job, :test) { nil }
          end

          it 'sets the thread name if it was nil' do
            allow(Thread.current).to receive(:name).and_return(nil)
            expect(Thread.current).to receive(:name=).with(Gitlab::Metrics::Samplers::ThreadsSampler::SIDEKIQ_WORKER_THREAD_NAME)

            subject.call(worker, job, :test) { nil }
          end

          context 'when job_duration is not available' do
            let(:queue_duration_for_job) { nil }

            it 'does not set the queue_duration_seconds histogram' do
              expect(queue_duration_seconds).not_to receive(:observe)

              subject.call(worker, job, :test) { nil }
            end
          end

          context 'when error is raised' do
            let(:job_status) { :fail }

            it 'sets sidekiq_jobs_failed_total and reraises' do
              expect(failed_total_metric).to receive(:increment).with(labels, 1)

              expect { subject.call(worker, job, :test) { raise StandardError, "Failed" } }.to raise_error(StandardError, "Failed")
            end
          end

          context 'when job is retried' do
            let(:job) { { 'retry_count' => 1 } }

            it 'sets sidekiq_jobs_retried_total metric' do
              expect(retried_total_metric).to receive(:increment)

              subject.call(worker, job, :test) { nil }
            end
          end
        end
      end

      # Smoke tests against the real Prometheus client (no metric doubles).
      context "with prometheus integrated" do
        describe '#call' do
          it 'yields block' do
            expect { |b| subject.call(worker, job, :test, &b) }.to yield_control.once
          end

          context 'when error is raised' do
            let(:job_status) { :fail }

            it 'sets sidekiq_jobs_failed_total and reraises' do
              expect { subject.call(worker, job, :test) { raise StandardError, "Failed" } }.to raise_error(StandardError, "Failed")
            end
          end
        end
      end
    end

    context "when workers are not attributed" do
      before do
        stub_const('TestNonAttributedWorker', Class.new)
        TestNonAttributedWorker.class_eval do
          include Sidekiq::Worker
        end
      end

      let(:worker) { TestNonAttributedWorker.new }
      let(:labels) { default_labels.merge(urgency: "") }

      it_behaves_like "a metrics middleware"
    end

    context "when workers are attributed" do
      # Builds an anonymous Sidekiq worker class with the given WorkerAttributes
      # settings; nil/:unknown/false arguments leave the attribute unset.
      def create_attributed_worker_class(urgency, external_dependencies, resource_boundary, category)
        Class.new do
          include Sidekiq::Worker
          include WorkerAttributes

          urgency urgency if urgency
          worker_has_external_dependencies! if external_dependencies
          worker_resource_boundary resource_boundary unless resource_boundary == :unknown
          feature_category category unless category.nil?
        end
      end

      let(:urgency) { nil }
      let(:external_dependencies) { false }
      let(:resource_boundary) { :unknown }
      let(:feature_category) { nil }
      let(:worker_class) { create_attributed_worker_class(urgency, external_dependencies, resource_boundary, feature_category) }
      let(:worker) { worker_class.new }

      context "high urgency" do
        let(:urgency) { :high }
        let(:labels) { default_labels.merge(urgency: "high") }

        it_behaves_like "a metrics middleware"
      end

      context "external dependencies" do
        let(:external_dependencies) { true }
        let(:labels) { default_labels.merge(external_dependencies: "yes") }

        it_behaves_like "a metrics middleware"
      end

      context "cpu boundary" do
        let(:resource_boundary) { :cpu }
        let(:labels) { default_labels.merge(boundary: "cpu") }

        it_behaves_like "a metrics middleware"
      end

      context "memory boundary" do
        let(:resource_boundary) { :memory }
        let(:labels) { default_labels.merge(boundary: "memory") }

        it_behaves_like "a metrics middleware"
      end

      context "feature category" do
        let(:feature_category) { :authentication }
        let(:labels) { default_labels.merge(feature_category: "authentication") }

        it_behaves_like "a metrics middleware"
      end

      context "combined" do
        let(:urgency) { :throttled }
        let(:external_dependencies) { true }
        let(:resource_boundary) { :cpu }
        let(:feature_category) { :authentication }
        let(:labels) { default_labels.merge(urgency: "throttled", external_dependencies: "yes", boundary: "cpu", feature_category: "authentication") }

        it_behaves_like "a metrics middleware"
      end
    end
  end
end