2016-11-22 23:39:00 -05:00
|
|
|
# frozen_string_literal: true
|
2012-10-20 17:03:43 -04:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
require "sidekiq"
|
2018-05-30 16:20:28 -04:00
|
|
|
|
2019-09-15 15:55:42 -04:00
|
|
|
require "zlib"
|
|
|
|
require "base64"
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
module Sidekiq
|
2012-12-04 08:11:25 -05:00
|
|
|
class Stats
|
2015-01-16 20:54:34 -05:00
|
|
|
def initialize
|
|
|
|
fetch_stats!
|
|
|
|
end
|
|
|
|
|
2012-12-04 13:14:38 -05:00
|
|
|
def processed
|
2015-01-16 20:54:34 -05:00
|
|
|
stat :processed
|
2012-12-04 13:14:38 -05:00
|
|
|
end
|
2012-12-04 08:11:25 -05:00
|
|
|
|
2012-12-04 13:14:38 -05:00
|
|
|
def failed
|
2015-01-16 20:54:34 -05:00
|
|
|
stat :failed
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
def scheduled_size
|
2015-01-16 20:54:34 -05:00
|
|
|
stat :scheduled_size
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
def retry_size
|
2015-01-16 20:54:34 -05:00
|
|
|
stat :retry_size
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
def dead_size
|
2015-01-16 20:54:34 -05:00
|
|
|
stat :dead_size
|
|
|
|
end
|
|
|
|
|
|
|
|
def enqueued
|
|
|
|
stat :enqueued
|
|
|
|
end
|
|
|
|
|
|
|
|
def processes_size
|
|
|
|
stat :processes_size
|
|
|
|
end
|
|
|
|
|
|
|
|
def workers_size
|
|
|
|
stat :workers_size
|
|
|
|
end
|
|
|
|
|
|
|
|
def default_queue_latency
|
|
|
|
stat :default_queue_latency
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
|
|
|
|
2015-01-28 11:47:09 -05:00
|
|
|
def queues
|
|
|
|
Sidekiq::Stats::Queues.new.lengths
|
|
|
|
end
|
|
|
|
|
2015-01-16 18:25:42 -05:00
|
|
|
def fetch_stats!
|
2019-04-01 12:20:41 -04:00
|
|
|
pipe1_res = Sidekiq.redis { |conn|
|
2015-01-16 20:54:34 -05:00
|
|
|
conn.pipelined do
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.get("stat:processed")
|
|
|
|
conn.get("stat:failed")
|
|
|
|
conn.zcard("schedule")
|
|
|
|
conn.zcard("retry")
|
|
|
|
conn.zcard("dead")
|
|
|
|
conn.scard("processes")
|
|
|
|
conn.lrange("queue:default", -1, -1)
|
2015-01-16 20:54:34 -05:00
|
|
|
end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2015-01-16 20:54:34 -05:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
processes = Sidekiq.redis { |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
conn.sscan_each("processes").to_a
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2018-05-24 17:11:31 -04:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
queues = Sidekiq.redis { |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
conn.sscan_each("queues").to_a
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2018-05-24 17:11:31 -04:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
pipe2_res = Sidekiq.redis { |conn|
|
2015-01-16 20:54:34 -05:00
|
|
|
conn.pipelined do
|
2019-05-30 13:41:47 -04:00
|
|
|
processes.each { |key| conn.hget(key, "busy") }
|
|
|
|
queues.each { |queue| conn.llen("queue:#{queue}") }
|
2015-01-16 20:54:34 -05:00
|
|
|
end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2015-01-16 20:54:34 -05:00
|
|
|
|
2018-05-24 17:11:31 -04:00
|
|
|
s = processes.size
|
2019-10-08 17:50:26 -04:00
|
|
|
workers_size = pipe2_res[0...s].sum(&:to_i)
|
|
|
|
enqueued = pipe2_res[s..-1].sum(&:to_i)
|
2015-01-16 20:54:34 -05:00
|
|
|
|
|
|
|
default_queue_latency = if (entry = pipe1_res[6].first)
|
2019-04-01 12:20:41 -04:00
|
|
|
job = begin
|
|
|
|
Sidekiq.load_json(entry)
|
|
|
|
rescue
|
|
|
|
{}
|
|
|
|
end
|
|
|
|
now = Time.now.to_f
|
|
|
|
thence = job["enqueued_at"] || now
|
|
|
|
now - thence
|
|
|
|
else
|
|
|
|
0
|
|
|
|
end
|
2015-01-16 20:54:34 -05:00
|
|
|
@stats = {
|
2019-04-01 12:20:41 -04:00
|
|
|
processed: pipe1_res[0].to_i,
|
|
|
|
failed: pipe1_res[1].to_i,
|
|
|
|
scheduled_size: pipe1_res[2],
|
|
|
|
retry_size: pipe1_res[3],
|
|
|
|
dead_size: pipe1_res[4],
|
|
|
|
processes_size: pipe1_res[5],
|
2015-01-16 20:54:34 -05:00
|
|
|
|
|
|
|
default_queue_latency: default_queue_latency,
|
2019-04-01 12:20:41 -04:00
|
|
|
workers_size: workers_size,
|
2020-03-17 16:38:48 -04:00
|
|
|
enqueued: enqueued
|
2015-01-16 20:54:34 -05:00
|
|
|
}
|
2015-01-16 18:25:42 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
def reset(*stats)
|
2019-08-28 13:13:41 -04:00
|
|
|
all = %w[failed processed]
|
2015-01-16 18:25:42 -05:00
|
|
|
stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)
|
|
|
|
|
|
|
|
mset_args = []
|
|
|
|
stats.each do |stat|
|
|
|
|
mset_args << "stat:#{stat}"
|
|
|
|
mset_args << 0
|
|
|
|
end
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.mset(*mset_args)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
private
|
|
|
|
|
2015-01-16 17:39:51 -05:00
|
|
|
def stat(s)
|
2015-01-16 20:54:34 -05:00
|
|
|
@stats[s]
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
|
|
|
|
2015-01-16 20:54:34 -05:00
|
|
|
class Queues
|
|
|
|
def lengths
|
|
|
|
Sidekiq.redis do |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
queues = conn.sscan_each("queues").to_a
|
2015-01-16 20:54:34 -05:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
lengths = conn.pipelined {
|
2015-01-16 20:54:34 -05:00
|
|
|
queues.each do |queue|
|
|
|
|
conn.llen("queue:#{queue}")
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2015-01-16 17:39:51 -05:00
|
|
|
|
2019-10-16 11:42:32 -04:00
|
|
|
array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
|
|
|
|
Hash[array_of_arrays]
|
2015-01-16 20:54:34 -05:00
|
|
|
end
|
2015-01-16 17:39:51 -05:00
|
|
|
end
|
2012-12-04 13:14:38 -05:00
|
|
|
end
|
2012-12-04 08:11:25 -05:00
|
|
|
|
2012-12-05 20:35:49 -05:00
|
|
|
class History
|
|
|
|
def initialize(days_previous, start_date = nil)
|
|
|
|
@days_previous = days_previous
|
|
|
|
@start_date = start_date || Time.now.utc.to_date
|
|
|
|
end
|
|
|
|
|
|
|
|
def processed
|
2017-05-31 15:08:57 -04:00
|
|
|
@processed ||= date_stat_hash("processed")
|
2012-12-05 20:35:49 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
def failed
|
2017-05-31 15:08:57 -04:00
|
|
|
@failed ||= date_stat_hash("failed")
|
2012-12-05 20:35:49 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
private
|
|
|
|
|
|
|
|
def date_stat_hash(stat)
|
|
|
|
stat_hash = {}
|
2019-10-16 11:42:32 -04:00
|
|
|
dates = @start_date.downto(@start_date - @days_previous + 1).map { |date|
|
|
|
|
date.strftime("%Y-%m-%d")
|
|
|
|
}
|
|
|
|
|
|
|
|
keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
|
2012-12-05 20:35:49 -05:00
|
|
|
|
2017-05-31 15:08:57 -04:00
|
|
|
begin
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.mget(keys).each_with_index do |value, idx|
|
|
|
|
stat_hash[dates[idx]] = value ? value.to_i : 0
|
|
|
|
end
|
2012-12-05 20:35:49 -05:00
|
|
|
end
|
2017-05-31 15:08:57 -04:00
|
|
|
rescue Redis::CommandError
|
|
|
|
# mget will trigger a CROSSSLOT error when run against a Cluster
|
|
|
|
# TODO Someone want to add Cluster support?
|
2012-12-05 20:35:49 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
stat_hash
|
|
|
|
end
|
|
|
|
end
|
2012-12-04 08:11:25 -05:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
##
|
|
|
|
# Encapsulates a queue within Sidekiq.
|
|
|
|
# Allows enumeration of all jobs within the queue
|
|
|
|
# and deletion of jobs.
|
|
|
|
#
|
|
|
|
# queue = Sidekiq::Queue.new("mailer")
|
|
|
|
# queue.each do |job|
|
|
|
|
# job.klass # => 'MyWorker'
|
|
|
|
# job.args # => [1, 2, 3]
|
|
|
|
# job.delete if job.jid == 'abcdef1234567890'
|
|
|
|
# end
|
|
|
|
#
|
|
|
|
class Queue
|
|
|
|
include Enumerable
|
|
|
|
|
2016-01-18 15:40:01 -05:00
|
|
|
##
|
|
|
|
# Return all known queues within Redis.
|
|
|
|
#
|
2013-06-01 17:54:29 -04:00
|
|
|
def self.all
|
2019-09-12 07:21:42 -04:00
|
|
|
Sidekiq.redis { |c| c.sscan_each("queues").to_a }.sort.map { |q| Sidekiq::Queue.new(q) }
|
2013-06-01 17:54:29 -04:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
attr_reader :name
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
def initialize(name = "default")
|
2018-08-01 12:36:37 -04:00
|
|
|
@name = name.to_s
|
2012-10-20 17:03:43 -04:00
|
|
|
@rname = "queue:#{name}"
|
|
|
|
end
|
|
|
|
|
|
|
|
def size
|
|
|
|
Sidekiq.redis { |con| con.llen(@rname) }
|
|
|
|
end
|
|
|
|
|
2014-04-25 01:16:34 -04:00
|
|
|
# Sidekiq Pro overrides this
|
|
|
|
def paused?
|
|
|
|
false
|
|
|
|
end
|
|
|
|
|
2016-01-18 15:40:01 -05:00
|
|
|
##
|
|
|
|
# Calculates this queue's latency, the difference in seconds since the oldest
|
|
|
|
# job in the queue was enqueued.
|
|
|
|
#
|
|
|
|
# @return Float
|
2013-05-24 01:58:06 -04:00
|
|
|
def latency
|
2019-04-01 12:20:41 -04:00
|
|
|
entry = Sidekiq.redis { |conn|
|
2013-05-24 01:58:06 -04:00
|
|
|
conn.lrange(@rname, -1, -1)
|
2019-04-01 12:20:41 -04:00
|
|
|
}.first
|
2013-05-24 01:58:06 -04:00
|
|
|
return 0 unless entry
|
2017-02-02 15:25:29 -05:00
|
|
|
job = Sidekiq.load_json(entry)
|
|
|
|
now = Time.now.to_f
|
2019-04-01 12:20:41 -04:00
|
|
|
thence = job["enqueued_at"] || now
|
2017-02-02 15:25:29 -05:00
|
|
|
now - thence
|
2013-05-24 01:58:06 -04:00
|
|
|
end
|
|
|
|
|
2015-02-12 14:54:41 -05:00
|
|
|
def each
|
2013-10-12 18:56:34 -04:00
|
|
|
initial_size = size
|
|
|
|
deleted_size = 0
|
2012-10-20 17:03:43 -04:00
|
|
|
page = 0
|
|
|
|
page_size = 50
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
loop do
|
2013-10-12 18:56:34 -04:00
|
|
|
range_start = page * page_size - deleted_size
|
2019-08-28 13:13:41 -04:00
|
|
|
range_end = range_start + page_size - 1
|
2019-04-01 12:20:41 -04:00
|
|
|
entries = Sidekiq.redis { |conn|
|
2013-10-12 18:56:34 -04:00
|
|
|
conn.lrange @rname, range_start, range_end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2012-10-20 17:03:43 -04:00
|
|
|
break if entries.empty?
|
|
|
|
page += 1
|
|
|
|
entries.each do |entry|
|
2015-02-12 14:54:41 -05:00
|
|
|
yield Job.new(entry, @name)
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
2013-10-12 18:56:34 -04:00
|
|
|
deleted_size = initial_size - size
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
end
|
2012-11-25 21:43:48 -05:00
|
|
|
|
2016-01-18 15:40:01 -05:00
|
|
|
##
|
|
|
|
# Find the job with the given JID within this queue.
|
|
|
|
#
|
|
|
|
# This is a slow, inefficient operation. Do not use under
|
|
|
|
# normal conditions. Sidekiq Pro contains a faster version.
|
2013-04-17 14:11:29 -04:00
|
|
|
def find_job(jid)
|
2015-02-12 14:57:40 -05:00
|
|
|
detect { |j| j.jid == jid }
|
2013-04-17 14:11:29 -04:00
|
|
|
end
|
|
|
|
|
2012-11-25 21:43:48 -05:00
|
|
|
def clear
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.multi do
|
2020-02-05 17:05:17 -05:00
|
|
|
conn.unlink(@rname)
|
2018-02-16 16:01:25 -05:00
|
|
|
conn.srem("queues", name)
|
2012-11-25 21:43:48 -05:00
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
2014-03-07 00:53:16 -05:00
|
|
|
alias_method :💣, :clear
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
##
|
2012-10-30 13:06:20 -04:00
|
|
|
# Encapsulates a pending job within a Sidekiq queue or
|
|
|
|
# sorted set.
|
|
|
|
#
|
2012-10-20 17:03:43 -04:00
|
|
|
# The job should be considered immutable but may be
|
|
|
|
# removed from the queue via Job#delete.
|
|
|
|
#
|
|
|
|
class Job
|
|
|
|
attr_reader :item
|
2016-03-09 17:10:15 -05:00
|
|
|
attr_reader :value
|
2012-10-20 17:03:43 -04:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
def initialize(item, queue_name = nil)
|
2017-03-24 14:56:10 -04:00
|
|
|
@args = nil
|
2012-10-20 17:03:43 -04:00
|
|
|
@value = item
|
2017-03-24 14:56:10 -04:00
|
|
|
@item = item.is_a?(Hash) ? item : parse(item)
|
2019-04-01 12:20:41 -04:00
|
|
|
@queue = queue_name || @item["queue"]
|
2017-03-24 14:56:10 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def parse(item)
|
|
|
|
Sidekiq.load_json(item)
|
|
|
|
rescue JSON::ParserError
|
|
|
|
# If the job payload in Redis is invalid JSON, we'll load
|
|
|
|
# the item as an empty hash and store the invalid JSON as
|
|
|
|
# the job 'args' for display in the Web UI.
|
|
|
|
@invalid = true
|
|
|
|
@args = [item]
|
|
|
|
{}
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def klass
|
2019-04-01 12:20:41 -04:00
|
|
|
self["class"]
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2014-05-19 23:54:28 -04:00
|
|
|
def display_class
|
|
|
|
# Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
|
|
|
|
@klass ||= case klass
|
|
|
|
when /\ASidekiq::Extensions::Delayed/
|
2014-06-04 23:52:22 -04:00
|
|
|
safe_load(args[0], klass) do |target, method, _|
|
2014-06-04 18:54:00 -04:00
|
|
|
"#{target}.#{method}"
|
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
|
2019-04-01 12:20:41 -04:00
|
|
|
job_class = @item["wrapped"] || args[0]
|
2019-09-12 13:20:37 -04:00
|
|
|
if job_class == "ActionMailer::DeliveryJob" || job_class == "ActionMailer::MailDeliveryJob"
|
2015-12-14 16:29:02 -05:00
|
|
|
# MailerClass#mailer_method
|
2019-04-01 12:20:41 -04:00
|
|
|
args[0]["arguments"][0..1].join("#")
|
2015-12-14 16:29:02 -05:00
|
|
|
else
|
2019-04-01 12:20:41 -04:00
|
|
|
job_class
|
2015-12-14 16:29:02 -05:00
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
else
|
|
|
|
klass
|
2019-04-01 12:20:41 -04:00
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def display_args
|
|
|
|
# Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
|
2017-10-02 15:00:25 -04:00
|
|
|
@display_args ||= case klass
|
2014-05-19 23:54:28 -04:00
|
|
|
when /\ASidekiq::Extensions::Delayed/
|
2014-06-04 23:52:22 -04:00
|
|
|
safe_load(args[0], args) do |_, _, arg|
|
2014-06-04 18:54:00 -04:00
|
|
|
arg
|
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
when "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper"
|
2019-04-01 12:20:41 -04:00
|
|
|
job_args = self["wrapped"] ? args[0]["arguments"] : []
|
|
|
|
if (self["wrapped"] || args[0]) == "ActionMailer::DeliveryJob"
|
2017-04-24 19:33:16 -04:00
|
|
|
# remove MailerClass, mailer_method and 'deliver_now'
|
|
|
|
job_args.drop(3)
|
2019-09-12 13:20:37 -04:00
|
|
|
elsif (self["wrapped"] || args[0]) == "ActionMailer::MailDeliveryJob"
|
|
|
|
# remove MailerClass, mailer_method and 'deliver_now'
|
|
|
|
job_args.drop(3).first["args"]
|
2015-12-14 16:29:02 -05:00
|
|
|
else
|
2017-04-24 19:33:16 -04:00
|
|
|
job_args
|
2015-12-14 16:29:02 -05:00
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
else
|
2019-04-01 12:20:41 -04:00
|
|
|
if self["encrypt"]
|
2017-04-24 19:40:28 -04:00
|
|
|
# no point in showing 150+ bytes of random garbage
|
2019-04-01 12:20:41 -04:00
|
|
|
args[-1] = "[encrypted data]"
|
2017-04-24 19:40:28 -04:00
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
args
|
2019-04-01 12:20:41 -04:00
|
|
|
end
|
2014-05-19 23:54:28 -04:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
def args
|
2019-04-01 12:20:41 -04:00
|
|
|
@args || @item["args"]
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def jid
|
2019-04-01 12:20:41 -04:00
|
|
|
self["jid"]
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2013-05-23 07:31:41 -04:00
|
|
|
def enqueued_at
|
2019-04-01 12:20:41 -04:00
|
|
|
self["enqueued_at"] ? Time.at(self["enqueued_at"]).utc : nil
|
2013-05-23 07:31:41 -04:00
|
|
|
end
|
|
|
|
|
2015-06-03 06:45:35 -04:00
|
|
|
def created_at
|
2019-04-01 12:20:41 -04:00
|
|
|
Time.at(self["created_at"] || self["enqueued_at"] || 0).utc
|
2015-06-03 06:45:35 -04:00
|
|
|
end
|
|
|
|
|
2019-09-21 17:03:59 -04:00
|
|
|
def tags
|
|
|
|
self["tags"] || []
|
|
|
|
end
|
|
|
|
|
2019-09-15 15:55:42 -04:00
|
|
|
def error_backtrace
|
|
|
|
# Cache nil values
|
|
|
|
if defined?(@error_backtrace)
|
|
|
|
@error_backtrace
|
|
|
|
else
|
|
|
|
value = self["error_backtrace"]
|
|
|
|
@error_backtrace = value && uncompress_backtrace(value)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
attr_reader :queue
|
2012-10-20 17:03:43 -04:00
|
|
|
|
2013-05-24 22:59:40 -04:00
|
|
|
def latency
|
2017-02-02 15:25:29 -05:00
|
|
|
now = Time.now.to_f
|
2019-04-01 12:20:41 -04:00
|
|
|
now - (@item["enqueued_at"] || @item["created_at"] || now)
|
2013-05-24 22:59:40 -04:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
##
|
|
|
|
# Remove this job from the queue.
|
|
|
|
def delete
|
2019-04-01 12:20:41 -04:00
|
|
|
count = Sidekiq.redis { |conn|
|
2013-11-20 17:51:58 -05:00
|
|
|
conn.lrem("queue:#{@queue}", 1, @value)
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2012-10-20 17:03:43 -04:00
|
|
|
count != 0
|
|
|
|
end
|
|
|
|
|
|
|
|
def [](name)
|
2017-01-17 17:58:08 -05:00
|
|
|
# nil will happen if the JSON fails to parse.
|
|
|
|
# We don't guarantee Sidekiq will work with bad job JSON but we should
|
|
|
|
# make a best effort to minimize the damage.
|
|
|
|
@item ? @item[name] : nil
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
2014-06-04 23:52:22 -04:00
|
|
|
|
|
|
|
private
|
|
|
|
|
|
|
|
def safe_load(content, default)
|
2019-04-01 12:20:41 -04:00
|
|
|
yield(*YAML.load(content))
|
|
|
|
rescue => ex
|
|
|
|
# #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
|
|
|
|
# memory yet so the YAML can't be loaded.
|
|
|
|
Sidekiq.logger.warn "Unable to load YAML: #{ex.message}" unless Sidekiq.options[:environment] == "development"
|
|
|
|
default
|
2014-06-04 23:52:22 -04:00
|
|
|
end
|
2019-09-15 15:55:42 -04:00
|
|
|
|
|
|
|
def uncompress_backtrace(backtrace)
|
|
|
|
if backtrace.is_a?(Array)
|
2019-10-08 16:45:17 -04:00
|
|
|
# Handle old jobs with raw Array backtrace format
|
2019-09-15 15:55:42 -04:00
|
|
|
backtrace
|
|
|
|
else
|
|
|
|
decoded = Base64.decode64(backtrace)
|
|
|
|
uncompressed = Zlib::Inflate.inflate(decoded)
|
2019-10-08 16:45:17 -04:00
|
|
|
begin
|
|
|
|
Sidekiq.load_json(uncompressed)
|
|
|
|
rescue
|
|
|
|
# Handle old jobs with marshalled backtrace format
|
|
|
|
# TODO Remove in 7.x
|
|
|
|
Marshal.load(uncompressed)
|
|
|
|
end
|
2019-09-15 15:55:42 -04:00
|
|
|
end
|
|
|
|
end
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2012-10-30 13:06:20 -04:00
|
|
|
class SortedEntry < Job
|
2012-10-20 17:03:43 -04:00
|
|
|
attr_reader :score
|
2014-02-10 00:17:05 -05:00
|
|
|
attr_reader :parent
|
2012-10-20 17:03:43 -04:00
|
|
|
|
2012-10-30 13:06:20 -04:00
|
|
|
def initialize(parent, score, item)
|
2012-10-20 17:03:43 -04:00
|
|
|
super(item)
|
|
|
|
@score = score
|
2012-10-30 13:06:20 -04:00
|
|
|
@parent = parent
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2012-10-30 13:06:20 -04:00
|
|
|
def at
|
2013-08-15 23:06:16 -04:00
|
|
|
Time.at(score).utc
|
2012-10-20 17:09:27 -04:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
def delete
|
2015-08-02 14:57:57 -04:00
|
|
|
if @value
|
|
|
|
@parent.delete_by_value(@parent.name, @value)
|
|
|
|
else
|
2015-08-08 21:23:56 -04:00
|
|
|
@parent.delete_by_jid(score, jid)
|
2015-08-02 14:57:57 -04:00
|
|
|
end
|
2012-11-26 11:22:48 -05:00
|
|
|
end
|
|
|
|
|
2013-01-29 16:17:59 -05:00
|
|
|
def reschedule(at)
|
2019-10-02 16:30:55 -04:00
|
|
|
Sidekiq.redis do |conn|
|
2019-10-17 09:09:00 -04:00
|
|
|
conn.zincrby(@parent.name, at.to_f - @score, Sidekiq.dump_json(@item))
|
2019-10-02 16:30:55 -04:00
|
|
|
end
|
2013-01-29 16:17:59 -05:00
|
|
|
end
|
|
|
|
|
2013-06-21 22:43:06 -04:00
|
|
|
def add_to_queue
|
2014-06-11 23:31:39 -04:00
|
|
|
remove_job do |message|
|
|
|
|
msg = Sidekiq.load_json(message)
|
|
|
|
Sidekiq::Client.push(msg)
|
2013-06-21 22:43:06 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2012-11-26 11:22:48 -05:00
|
|
|
def retry
|
2014-06-11 23:31:39 -04:00
|
|
|
remove_job do |message|
|
|
|
|
msg = Sidekiq.load_json(message)
|
2019-04-01 12:20:41 -04:00
|
|
|
msg["retry_count"] -= 1 if msg["retry_count"]
|
2014-06-11 23:31:39 -04:00
|
|
|
Sidekiq::Client.push(msg)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2014-07-28 16:42:06 -04:00
|
|
|
##
|
|
|
|
# Place job in the dead set
|
|
|
|
def kill
|
|
|
|
remove_job do |message|
|
2017-08-10 00:20:32 -04:00
|
|
|
DeadSet.new.kill(message)
|
2014-07-28 16:42:06 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2017-03-16 14:33:24 -04:00
|
|
|
def error?
|
2019-04-01 12:20:41 -04:00
|
|
|
!!item["error_class"]
|
2017-03-16 14:33:24 -04:00
|
|
|
end
|
|
|
|
|
2014-06-11 23:31:39 -04:00
|
|
|
private
|
|
|
|
|
|
|
|
def remove_job
|
2012-11-26 11:22:48 -05:00
|
|
|
Sidekiq.redis do |conn|
|
2019-04-01 12:20:41 -04:00
|
|
|
results = conn.multi {
|
2014-02-21 21:02:28 -05:00
|
|
|
conn.zrangebyscore(parent.name, score, score)
|
|
|
|
conn.zremrangebyscore(parent.name, score, score)
|
2019-04-01 12:20:41 -04:00
|
|
|
}.first
|
2014-06-11 23:31:39 -04:00
|
|
|
|
|
|
|
if results.size == 1
|
|
|
|
yield results.first
|
|
|
|
else
|
|
|
|
# multiple jobs with the same score
|
|
|
|
# find the one with the right JID and push it
|
2019-10-16 11:42:32 -04:00
|
|
|
matched, nonmatched = results.partition { |message|
|
2014-06-11 23:31:39 -04:00
|
|
|
if message.index(jid)
|
|
|
|
msg = Sidekiq.load_json(message)
|
2019-04-01 12:20:41 -04:00
|
|
|
msg["jid"] == jid
|
2014-06-11 23:31:39 -04:00
|
|
|
else
|
|
|
|
false
|
|
|
|
end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2014-07-19 17:16:55 -04:00
|
|
|
|
2019-10-16 11:42:32 -04:00
|
|
|
msg = matched.first
|
2014-07-19 17:16:55 -04:00
|
|
|
yield msg if msg
|
2014-06-11 23:31:39 -04:00
|
|
|
|
|
|
|
# push the rest back onto the sorted set
|
|
|
|
conn.multi do
|
2019-10-16 11:42:32 -04:00
|
|
|
nonmatched.each do |message|
|
2014-06-11 23:31:39 -04:00
|
|
|
conn.zadd(parent.name, score.to_f.to_s, message)
|
|
|
|
end
|
|
|
|
end
|
2012-11-26 11:22:48 -05:00
|
|
|
end
|
|
|
|
end
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2012-10-30 13:06:20 -04:00
|
|
|
class SortedSet
|
2012-10-20 17:03:43 -04:00
|
|
|
include Enumerable
|
|
|
|
|
2014-02-10 00:17:05 -05:00
|
|
|
attr_reader :name
|
|
|
|
|
2012-10-30 13:06:20 -04:00
|
|
|
def initialize(name)
|
2014-02-10 00:17:05 -05:00
|
|
|
@name = name
|
2013-10-23 22:30:53 -04:00
|
|
|
@_size = size
|
2012-10-30 13:06:20 -04:00
|
|
|
end
|
|
|
|
|
2012-10-20 17:03:43 -04:00
|
|
|
def size
|
2014-12-17 15:09:20 -05:00
|
|
|
Sidekiq.redis { |c| c.zcard(name) }
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2019-09-12 14:54:10 -04:00
|
|
|
def scan(match, count = 100)
|
2019-10-09 09:44:02 -04:00
|
|
|
return to_enum(:scan, match, count) unless block_given?
|
2019-09-12 10:43:27 -04:00
|
|
|
|
|
|
|
match = "*#{match}*" unless match.include?("*")
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.zscan_each(name, match: match, count: count) do |entry, score|
|
|
|
|
yield SortedEntry.new(self, score, entry)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2014-03-02 19:36:00 -05:00
|
|
|
def clear
|
|
|
|
Sidekiq.redis do |conn|
|
2020-02-05 17:05:17 -05:00
|
|
|
conn.unlink(name)
|
2014-03-02 19:36:00 -05:00
|
|
|
end
|
|
|
|
end
|
2014-03-07 00:56:59 -05:00
|
|
|
alias_method :💣, :clear
|
2014-03-02 19:36:00 -05:00
|
|
|
end
|
|
|
|
|
|
|
|
class JobSet < SortedSet
|
2013-01-29 16:17:59 -05:00
|
|
|
def schedule(timestamp, message)
|
|
|
|
Sidekiq.redis do |conn|
|
2014-02-10 00:17:05 -05:00
|
|
|
conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(message))
|
2013-01-29 16:17:59 -05:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2015-02-12 14:54:41 -05:00
|
|
|
def each
|
2013-10-23 22:30:53 -04:00
|
|
|
initial_size = @_size
|
2013-10-23 23:36:13 -04:00
|
|
|
offset_size = 0
|
2012-10-20 17:03:43 -04:00
|
|
|
page = -1
|
|
|
|
page_size = 50
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
loop do
|
2013-10-23 23:36:13 -04:00
|
|
|
range_start = page * page_size + offset_size
|
2019-08-28 13:13:41 -04:00
|
|
|
range_end = range_start + page_size - 1
|
2019-04-01 12:20:41 -04:00
|
|
|
elements = Sidekiq.redis { |conn|
|
2014-12-17 15:09:20 -05:00
|
|
|
conn.zrange name, range_start, range_end, with_scores: true
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2012-10-30 13:06:20 -04:00
|
|
|
break if elements.empty?
|
2012-10-20 17:03:43 -04:00
|
|
|
page -= 1
|
2019-04-01 12:20:41 -04:00
|
|
|
elements.reverse_each do |element, score|
|
2015-02-12 14:54:41 -05:00
|
|
|
yield SortedEntry.new(self, score, element)
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
2013-10-23 23:36:13 -04:00
|
|
|
offset_size = initial_size - @_size
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
end
|
2012-10-30 13:06:20 -04:00
|
|
|
|
2019-09-23 00:01:00 -04:00
|
|
|
##
|
|
|
|
# Fetch jobs that match a given time or Range. Job ID is an
|
|
|
|
# optional second argument.
|
2012-11-26 14:53:22 -05:00
|
|
|
def fetch(score, jid = nil)
|
2019-09-23 00:01:00 -04:00
|
|
|
begin_score, end_score =
|
|
|
|
if score.is_a?(Range)
|
|
|
|
[score.first, score.last]
|
|
|
|
else
|
|
|
|
[score, score]
|
|
|
|
end
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
elements = Sidekiq.redis { |conn|
|
2019-09-23 00:01:00 -04:00
|
|
|
conn.zrangebyscore(name, begin_score, end_score, with_scores: true)
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2012-11-26 14:53:22 -05:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
elements.each_with_object([]) do |element, result|
|
2019-09-23 00:01:00 -04:00
|
|
|
data, job_score = element
|
|
|
|
entry = SortedEntry.new(self, job_score, data)
|
|
|
|
result << entry if jid.nil? || entry.jid == jid
|
2012-11-26 14:53:22 -05:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2016-01-18 15:40:01 -05:00
|
|
|
##
|
|
|
|
# Find the job with the given JID within this sorted set.
|
2019-09-11 22:54:52 -04:00
|
|
|
# This is a slower O(n) operation. Do not use for app logic.
|
2013-04-16 15:43:24 -04:00
|
|
|
def find_job(jid)
|
2019-09-11 22:54:52 -04:00
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.zscan_each(name, match: "*#{jid}*", count: 100) do |entry, score|
|
|
|
|
job = JSON.parse(entry)
|
|
|
|
matched = job["jid"] == jid
|
|
|
|
return SortedEntry.new(self, score, entry) if matched
|
|
|
|
end
|
|
|
|
end
|
|
|
|
nil
|
2013-04-16 15:43:24 -04:00
|
|
|
end
|
|
|
|
|
2015-08-02 14:57:57 -04:00
|
|
|
def delete_by_value(name, value)
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
ret = conn.zrem(name, value)
|
|
|
|
@_size -= 1 if ret
|
|
|
|
ret
|
|
|
|
end
|
|
|
|
end
|
2012-11-26 14:53:22 -05:00
|
|
|
|
2015-08-02 14:57:57 -04:00
|
|
|
def delete_by_jid(score, jid)
|
2015-08-08 21:23:56 -04:00
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
elements = conn.zrangebyscore(name, score, score)
|
|
|
|
elements.each do |element|
|
2019-10-09 09:45:23 -04:00
|
|
|
if element.index(jid)
|
|
|
|
message = Sidekiq.load_json(element)
|
|
|
|
if message["jid"] == jid
|
|
|
|
ret = conn.zrem(name, element)
|
|
|
|
@_size -= 1 if ret
|
|
|
|
break ret
|
|
|
|
end
|
2013-10-23 23:36:13 -04:00
|
|
|
end
|
2012-11-26 14:53:22 -05:00
|
|
|
end
|
2012-10-30 13:06:20 -04:00
|
|
|
end
|
|
|
|
end
|
2012-11-25 21:43:48 -05:00
|
|
|
|
2015-08-08 21:23:56 -04:00
|
|
|
alias_method :delete, :delete_by_jid
|
2012-10-30 13:06:20 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
##
|
|
|
|
# Allows enumeration of scheduled jobs within Sidekiq.
|
|
|
|
# Based on this, you can search/filter for jobs. Here's an
|
|
|
|
# example where I'm selecting all jobs of a certain type
|
2017-02-10 10:46:44 -05:00
|
|
|
# and deleting them from the schedule queue.
|
2012-10-30 13:06:20 -04:00
|
|
|
#
|
|
|
|
# r = Sidekiq::ScheduledSet.new
|
2017-02-10 10:46:44 -05:00
|
|
|
# r.select do |scheduled|
|
|
|
|
# scheduled.klass == 'Sidekiq::Extensions::DelayedClass' &&
|
|
|
|
# scheduled.args[0] == 'User' &&
|
|
|
|
# scheduled.args[1] == 'setup_new_subscriber'
|
2012-10-30 13:06:20 -04:00
|
|
|
# end.map(&:delete)
|
2014-03-02 19:36:00 -05:00
|
|
|
class ScheduledSet < JobSet
|
2012-10-30 13:06:20 -04:00
|
|
|
def initialize
|
2019-04-01 12:20:41 -04:00
|
|
|
super "schedule"
|
2012-10-30 13:06:20 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
##
|
|
|
|
# Allows enumeration of retries within Sidekiq.
|
|
|
|
# Based on this, you can search/filter for jobs. Here's an
|
|
|
|
# example where I'm selecting all jobs of a certain type
|
|
|
|
# and deleting them from the retry queue.
|
|
|
|
#
|
|
|
|
# r = Sidekiq::RetrySet.new
|
|
|
|
# r.select do |retri|
|
|
|
|
# retri.klass == 'Sidekiq::Extensions::DelayedClass' &&
|
|
|
|
# retri.args[0] == 'User' &&
|
|
|
|
# retri.args[1] == 'setup_new_subscriber'
|
|
|
|
# end.map(&:delete)
|
2014-03-02 19:36:00 -05:00
|
|
|
class RetrySet < JobSet
|
2012-10-30 13:06:20 -04:00
|
|
|
def initialize
|
2019-04-01 12:20:41 -04:00
|
|
|
super "retry"
|
2012-10-30 13:06:20 -04:00
|
|
|
end
|
2013-02-19 23:36:59 -05:00
|
|
|
|
2014-02-09 17:56:01 -05:00
|
|
|
def retry_all
|
2019-04-01 12:20:41 -04:00
|
|
|
each(&:retry) while size > 0
|
2014-02-09 17:56:01 -05:00
|
|
|
end
|
2018-08-25 10:42:01 -04:00
|
|
|
|
|
|
|
def kill_all
|
2019-04-01 12:20:41 -04:00
|
|
|
each(&:kill) while size > 0
|
2018-08-25 10:42:01 -04:00
|
|
|
end
|
2014-02-09 17:56:01 -05:00
|
|
|
end
|
|
|
|
|
2014-07-28 18:42:55 -04:00
|
|
|
##
|
|
|
|
# Allows enumeration of dead jobs within Sidekiq.
|
|
|
|
#
|
2014-03-02 19:36:00 -05:00
|
|
|
class DeadSet < JobSet
|
2014-02-09 17:56:01 -05:00
|
|
|
def initialize
|
2019-04-01 12:20:41 -04:00
|
|
|
super "dead"
|
2014-02-09 17:56:01 -05:00
|
|
|
end
|
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
def kill(message, opts = {})
|
2017-08-10 00:20:32 -04:00
|
|
|
now = Time.now.to_f
|
|
|
|
Sidekiq.redis do |conn|
|
|
|
|
conn.multi do
|
|
|
|
conn.zadd(name, now.to_s, message)
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.zremrangebyscore(name, "-inf", now - self.class.timeout)
|
2017-08-10 00:20:32 -04:00
|
|
|
conn.zremrangebyrank(name, 0, - self.class.max_jobs)
|
|
|
|
end
|
|
|
|
end
|
2018-01-15 13:39:32 -05:00
|
|
|
|
|
|
|
if opts[:notify_failure] != false
|
|
|
|
job = Sidekiq.load_json(message)
|
|
|
|
r = RuntimeError.new("Job killed by API")
|
|
|
|
r.set_backtrace(caller)
|
2018-01-31 13:22:22 -05:00
|
|
|
Sidekiq.death_handlers.each do |handle|
|
2018-01-15 13:39:32 -05:00
|
|
|
handle.call(job, r)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
true
|
2017-08-10 00:20:32 -04:00
|
|
|
end
|
|
|
|
|
2013-02-19 23:36:59 -05:00
|
|
|
def retry_all
|
2019-04-01 12:20:41 -04:00
|
|
|
each(&:retry) while size > 0
|
2013-02-19 23:36:59 -05:00
|
|
|
end
|
2015-02-04 15:33:49 -05:00
|
|
|
|
|
|
|
def self.max_jobs
|
|
|
|
Sidekiq.options[:dead_max_jobs]
|
|
|
|
end
|
|
|
|
|
|
|
|
def self.timeout
|
|
|
|
Sidekiq.options[:dead_timeout_in_seconds]
|
|
|
|
end
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|
|
|
|
|
2014-03-03 00:18:26 -05:00
|
|
|
##
|
|
|
|
# Enumerates the set of Sidekiq processes which are actively working
|
2019-08-28 12:59:28 -04:00
|
|
|
# right now. Each process sends a heartbeat to Redis every 5 seconds
|
2014-03-03 00:18:26 -05:00
|
|
|
# so this set should be relatively accurate, barring network partitions.
|
2014-03-08 01:41:10 -05:00
|
|
|
#
|
2014-05-13 23:33:20 -04:00
|
|
|
# Yields a Sidekiq::Process.
|
2014-03-08 01:41:10 -05:00
|
|
|
#
|
2014-03-03 00:18:26 -05:00
|
|
|
class ProcessSet
|
|
|
|
include Enumerable
|
2014-03-02 19:36:00 -05:00
|
|
|
|
2019-04-01 12:20:41 -04:00
|
|
|
def initialize(clean_plz = true)
|
2018-05-30 16:20:28 -04:00
|
|
|
cleanup if clean_plz
|
2014-10-06 11:53:06 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
# Cleans up dead processes recorded in Redis.
|
|
|
|
# Returns the number of processes cleaned.
|
2018-05-30 16:20:28 -04:00
|
|
|
def cleanup
|
2014-10-06 11:53:06 -04:00
|
|
|
count = 0
|
|
|
|
Sidekiq.redis do |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
procs = conn.sscan_each("processes").to_a.sort
|
2019-04-01 12:20:41 -04:00
|
|
|
heartbeats = conn.pipelined {
|
2014-10-06 11:53:06 -04:00
|
|
|
procs.each do |key|
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.hget(key, "info")
|
2014-10-06 11:53:06 -04:00
|
|
|
end
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2014-10-06 11:53:06 -04:00
|
|
|
|
|
|
|
# the hash named key has an expiry of 60 seconds.
|
|
|
|
# if it's not found, that means the process has not reported
|
|
|
|
# in to Redis and probably died.
|
2019-10-16 11:42:32 -04:00
|
|
|
to_prune = procs.select.with_index { |proc, i|
|
|
|
|
heartbeats[i].nil?
|
|
|
|
}
|
2019-04-01 12:20:41 -04:00
|
|
|
count = conn.srem("processes", to_prune) unless to_prune.empty?
|
2014-10-06 11:53:06 -04:00
|
|
|
end
|
|
|
|
count
|
|
|
|
end
|
|
|
|
|
2015-02-12 14:54:41 -05:00
|
|
|
def each
|
2019-10-09 09:43:18 -04:00
|
|
|
result = Sidekiq.redis { |conn|
|
|
|
|
procs = conn.sscan_each("processes").to_a.sort
|
2014-03-03 00:18:26 -05:00
|
|
|
|
2014-03-22 00:24:19 -04:00
|
|
|
# We're making a tradeoff here between consuming more memory instead of
|
|
|
|
# making more roundtrips to Redis, but if you have hundreds or thousands of workers,
|
|
|
|
# you'll be happier this way
|
2019-10-09 09:43:18 -04:00
|
|
|
conn.pipelined do
|
2014-10-06 11:53:06 -04:00
|
|
|
procs.each do |key|
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.hmget(key, "info", "busy", "beat", "quiet")
|
2014-03-22 00:24:19 -04:00
|
|
|
end
|
2019-10-09 09:43:18 -04:00
|
|
|
end
|
|
|
|
}
|
2014-03-22 00:24:19 -04:00
|
|
|
|
2019-10-09 09:43:18 -04:00
|
|
|
result.each do |info, busy, at_s, quiet|
|
|
|
|
# If a process is stopped between when we query Redis for `procs` and
|
|
|
|
# when we query for `result`, we will have an item in `result` that is
|
|
|
|
# composed of `nil` values.
|
|
|
|
next if info.nil?
|
2017-05-02 13:08:26 -04:00
|
|
|
|
2019-10-09 09:43:18 -04:00
|
|
|
hash = Sidekiq.load_json(info)
|
|
|
|
yield Process.new(hash.merge("busy" => busy.to_i, "beat" => at_s.to_f, "quiet" => quiet))
|
2014-03-02 19:36:00 -05:00
|
|
|
end
|
|
|
|
end
|
2014-03-09 17:32:27 -04:00
|
|
|
|
|
|
|
# This method is not guaranteed accurate since it does not prune the set
|
|
|
|
# based on current heartbeat. #each does that and ensures the set only
|
|
|
|
# contains Sidekiq processes which have sent a heartbeat within the last
|
|
|
|
# 60 seconds.
|
|
|
|
def size
|
2019-04-01 12:20:41 -04:00
|
|
|
Sidekiq.redis { |conn| conn.scard("processes") }
|
2014-03-09 17:32:27 -04:00
|
|
|
end
|
2017-05-26 13:25:32 -04:00
|
|
|
|
|
|
|
# Returns the identity of the current cluster leader or "" if no leader.
|
|
|
|
# This is a Sidekiq Enterprise feature, will always return "" in Sidekiq
|
|
|
|
# or Sidekiq Pro.
|
|
|
|
def leader
|
|
|
|
@leader ||= begin
|
2019-05-30 13:41:47 -04:00
|
|
|
x = Sidekiq.redis { |c| c.get("dear-leader") }
|
2017-05-26 13:25:32 -04:00
|
|
|
# need a non-falsy value so we can memoize
|
2019-04-01 12:20:41 -04:00
|
|
|
x ||= ""
|
2017-05-26 13:25:32 -04:00
|
|
|
x
|
|
|
|
end
|
|
|
|
end
|
2014-03-02 19:36:00 -05:00
|
|
|
end
|
2013-01-24 12:50:30 -05:00
|
|
|
|
2014-05-13 23:33:20 -04:00
|
|
|
#
|
2016-01-18 15:40:01 -05:00
|
|
|
# Sidekiq::Process represents an active Sidekiq process talking with Redis.
|
|
|
|
# Each process has a set of attributes which look like this:
|
2014-05-13 23:33:20 -04:00
|
|
|
#
|
|
|
|
# {
|
|
|
|
# 'hostname' => 'app-1.example.com',
|
|
|
|
# 'started_at' => <process start time>,
|
|
|
|
# 'pid' => 12345,
|
|
|
|
# 'tag' => 'myapp'
|
|
|
|
# 'concurrency' => 25,
|
|
|
|
# 'queues' => ['default', 'low'],
|
|
|
|
# 'busy' => 10,
|
|
|
|
# 'beat' => <last heartbeat>,
|
2014-12-30 20:25:55 -05:00
|
|
|
# 'identity' => <unique string identifying the process>,
|
2014-05-13 23:33:20 -04:00
|
|
|
# }
|
|
|
|
class Process
|
|
|
|
def initialize(hash)
|
|
|
|
@attribs = hash
|
|
|
|
end
|
|
|
|
|
2014-08-06 12:41:46 -04:00
|
|
|
def tag
|
2019-04-01 12:20:41 -04:00
|
|
|
self["tag"]
|
2014-08-06 12:41:46 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def labels
|
2019-04-01 12:20:41 -04:00
|
|
|
Array(self["labels"])
|
2014-08-06 12:41:46 -04:00
|
|
|
end
|
|
|
|
|
2014-05-13 23:33:20 -04:00
|
|
|
def [](key)
|
|
|
|
@attribs[key]
|
|
|
|
end
|
2014-05-14 00:41:40 -04:00
|
|
|
|
2017-05-26 13:25:32 -04:00
|
|
|
def identity
|
2019-04-01 12:20:41 -04:00
|
|
|
self["identity"]
|
2017-05-26 13:25:32 -04:00
|
|
|
end
|
|
|
|
|
2014-05-14 00:41:40 -04:00
|
|
|
def quiet!
|
2019-04-01 12:20:41 -04:00
|
|
|
signal("TSTP")
|
2014-05-16 00:12:44 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
def stop!
|
2019-04-01 12:20:41 -04:00
|
|
|
signal("TERM")
|
2014-05-16 00:12:44 -04:00
|
|
|
end
|
|
|
|
|
2015-03-22 19:46:41 -04:00
|
|
|
def dump_threads
|
2019-04-01 12:20:41 -04:00
|
|
|
signal("TTIN")
|
2015-03-22 19:46:41 -04:00
|
|
|
end
|
|
|
|
|
2016-01-07 16:33:37 -05:00
|
|
|
def stopping?
|
2019-04-01 12:20:41 -04:00
|
|
|
self["quiet"] == "true"
|
2016-01-07 16:33:37 -05:00
|
|
|
end
|
2015-03-22 19:46:41 -04:00
|
|
|
|
2014-05-16 00:12:44 -04:00
|
|
|
private
|
|
|
|
|
|
|
|
def signal(sig)
|
2014-05-14 00:41:40 -04:00
|
|
|
key = "#{identity}-signals"
|
|
|
|
Sidekiq.redis do |c|
|
|
|
|
c.multi do
|
2014-05-16 00:12:44 -04:00
|
|
|
c.lpush(key, sig)
|
2014-05-14 00:41:40 -04:00
|
|
|
c.expire(key, 60)
|
|
|
|
end
|
|
|
|
end
|
|
|
|
end
|
2014-05-13 23:33:20 -04:00
|
|
|
end
|
|
|
|
|
2013-01-24 12:50:30 -05:00
|
|
|
##
|
2016-01-18 15:40:01 -05:00
|
|
|
# A worker is a thread that is currently processing a job.
|
2013-01-24 12:50:30 -05:00
|
|
|
# Programmatic access to the current active worker set.
|
|
|
|
#
|
|
|
|
# WARNING WARNING WARNING
|
|
|
|
#
|
|
|
|
# This is live data that can change every millisecond.
|
2013-05-23 00:50:22 -04:00
|
|
|
# If you call #size => 5 and then expect #each to be
|
2013-01-24 12:50:30 -05:00
|
|
|
# called 5 times, you're going to have a bad time.
|
|
|
|
#
|
|
|
|
# workers = Sidekiq::Workers.new
|
|
|
|
# workers.size => 2
|
2014-03-08 17:21:52 -05:00
|
|
|
# workers.each do |process_id, thread_id, work|
|
|
|
|
# # process_id is a unique identifier per Sidekiq process
|
|
|
|
# # thread_id is a unique identifier per thread
|
2013-01-24 12:50:30 -05:00
|
|
|
# # work is a Hash which looks like:
|
|
|
|
# # { 'queue' => name, 'run_at' => timestamp, 'payload' => msg }
|
2014-02-01 23:48:44 -05:00
|
|
|
# # run_at is an epoch Integer.
|
2013-01-24 12:50:30 -05:00
|
|
|
# end
|
2014-03-08 17:21:52 -05:00
|
|
|
#
|
2013-01-24 12:50:30 -05:00
|
|
|
class Workers
|
|
|
|
include Enumerable
|
|
|
|
|
2015-02-12 14:54:41 -05:00
|
|
|
def each
|
2014-03-08 01:41:10 -05:00
|
|
|
Sidekiq.redis do |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
procs = conn.sscan_each("processes").to_a
|
2014-03-08 01:41:10 -05:00
|
|
|
procs.sort.each do |key|
|
2019-04-01 12:20:41 -04:00
|
|
|
valid, workers = conn.pipelined {
|
2020-06-09 18:14:02 -04:00
|
|
|
conn.exists?(key)
|
2014-03-08 01:41:10 -05:00
|
|
|
conn.hgetall("#{key}:workers")
|
2019-04-01 12:20:41 -04:00
|
|
|
}
|
2014-03-08 01:41:10 -05:00
|
|
|
next unless valid
|
|
|
|
workers.each_pair do |tid, json|
|
2019-11-22 12:53:44 -05:00
|
|
|
hsh = Sidekiq.load_json(json)
|
|
|
|
p = hsh["payload"]
|
|
|
|
# avoid breaking API, this is a side effect of the JSON optimization in #4316
|
|
|
|
hsh["payload"] = Sidekiq.load_json(p) if p.is_a?(String)
|
|
|
|
yield key, tid, hsh
|
2014-03-08 01:41:10 -05:00
|
|
|
end
|
|
|
|
end
|
2013-01-24 12:50:30 -05:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2014-03-08 17:21:52 -05:00
|
|
|
# Note that #size is only as accurate as Sidekiq's heartbeat,
|
|
|
|
# which happens every 5 seconds. It is NOT real-time.
|
|
|
|
#
|
2014-03-08 17:10:27 -05:00
|
|
|
# Not very efficient if you have lots of Sidekiq
|
|
|
|
# processes but the alternative is a global counter
|
|
|
|
# which can easily get out of sync with crashy processes.
|
2013-01-24 12:50:30 -05:00
|
|
|
def size
|
2014-03-08 17:10:27 -05:00
|
|
|
Sidekiq.redis do |conn|
|
2019-09-12 07:21:42 -04:00
|
|
|
procs = conn.sscan_each("processes").to_a
|
2015-04-10 17:28:28 -04:00
|
|
|
if procs.empty?
|
|
|
|
0
|
|
|
|
else
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.pipelined {
|
2015-04-10 17:28:28 -04:00
|
|
|
procs.each do |key|
|
2019-04-01 12:20:41 -04:00
|
|
|
conn.hget(key, "busy")
|
2015-04-10 17:28:28 -04:00
|
|
|
end
|
2019-10-08 17:50:26 -04:00
|
|
|
}.sum(&:to_i)
|
2015-04-10 17:28:28 -04:00
|
|
|
end
|
2014-03-08 17:10:27 -05:00
|
|
|
end
|
2013-01-24 12:50:30 -05:00
|
|
|
end
|
|
|
|
end
|
2012-10-20 17:03:43 -04:00
|
|
|
end
|