require 'sidekiq'
require 'celluloid'

module Sidekiq
  ##
  # The Fetcher blocks on Redis, waiting for a message to process
  # from the queues. It gets the message and hands it to the Manager
  # to assign to a ready Processor.
  class Fetcher
    include Celluloid
    include Sidekiq::Util

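    # Seconds the blocking Redis pop waits for a message before giving up;
    # also used as the pause after a fetch error.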
    TIMEOUT = 1

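    # The fetch strategy is pluggable: set Sidekiq.options[:fetch] to any class
    # that responds to new(options) and whose instances respond to
    # #retrieve_work (that is all this file requires of it). A minimal sketch,
    # assuming a hypothetical MyFetch class:
    #
    #   Sidekiq.options[:fetch] = MyFetch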
    def initialize(mgr, options)
      klass = Sidekiq.options[:fetch] || BasicFetch
      @mgr = mgr
      @strategy = klass.new(options)
    end

    # Fetching is straightforward: the Manager makes a fetch
    # request for each idle processor when Sidekiq starts and
    # then issues a new fetch request every time a Processor
    # finishes a message.
    #
    # Because we have to shut down cleanly, we can't block
    # forever and we can't loop forever. Instead we reschedule
    # a new fetch if the current fetch turned up nothing.
    def fetch
      watchdog('Fetcher#fetch died') do
        return if Sidekiq::Fetcher.done?

        begin
          work = @strategy.retrieve_work

          if work
            @mgr.async.assign(work)
          else
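            # No work this time: reschedule via the actor's timer instead of
            # looping, so the Fetcher's mailbox (including any shutdown
            # message) gets a chance to be processed first.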
            after(0) { fetch }
          end
        rescue => ex
          logger.error("Error fetching message: #{ex}")
          logger.error(ex.backtrace.first)
          sleep(TIMEOUT)
          after(0) { fetch }
        end
      end
    end

    # Ugh. Say hello to a bloody hack.
    # Can't find a clean way to get the fetcher to just stop processing
    # its mailbox when shutdown starts.
    def self.done!
      @done = true
    end

    def self.done?
      @done
    end
  end

  class BasicFetch
    def initialize(options)
      @strictly_ordered_queues = !!options[:strict]
      @queues = options[:queues].map { |q| "queue:#{q}" }
      @unique_queues = @queues.uniq
    end
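
    # Block on Redis for up to Fetcher::TIMEOUT seconds waiting for a message.
    # Returns a UnitOfWork when one arrives, or nil when every queue stayed
    # empty so the Fetcher can reschedule itself.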
    def retrieve_work
      work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
      UnitOfWork.new(*work) if work
    end
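
    # A fetched message and the queue it came from. #acknowledge is a no-op
    # because BRPOP has already removed the message from Redis; #requeue pushes
    # it back onto its queue so it is not lost if the process has to stop
    # before the message is finished.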
    UnitOfWork = Struct.new(:queue, :message) do
      def acknowledge
        # nothing to do
      end
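
      # Strip the Redis key prefix, e.g. "queue:default" => "default".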
      def queue_name
        queue.gsub(/.*queue:/, '')
      end

      def requeue
        Sidekiq.redis do |conn|
          conn.rpush(queue, message)
        end
      end
    end

    # Creating the Redis#brpop command takes into account any
    # configured queue weights. By default Redis#brpop returns
    # data from the first queue that has pending elements. We
    # recreate the queue command each time we invoke Redis#brpop
    # to honor weights and avoid queue starvation.
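    #
    # A sketch of the effect, assuming queues configured as
    # ['critical', 'critical', 'default']: @queues becomes
    # ["queue:critical", "queue:critical", "queue:default"], so the sampled
    # list starts with "queue:critical" roughly two times out of three, while
    # "queue:default" is always appended and never starved.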
    def queues_cmd
      return @unique_queues.dup << Sidekiq::Fetcher::TIMEOUT if @strictly_ordered_queues
      queues = @queues.sample(@unique_queues.size).uniq
      queues.concat(@unique_queues - queues)
      queues << Sidekiq::Fetcher::TIMEOUT
    end
  end
end