# lib/sidekiq/fetch.rb
# Mirrored from https://github.com/mperham/sidekiq.git (synced 2022-11-09 13:52:34 -05:00)
# 102 lines, 2.7 KiB, Ruby

require 'sidekiq'
require 'celluloid'
module Sidekiq
##
# The Fetcher blocks on Redis, waiting for a message to process
# from the queues. It gets the message and hands it to the Manager
# to assign to a ready Processor.
class Fetcher
  include Celluloid
  include Sidekiq::Util

  # Seconds passed to the blocking Redis pop, and the back-off delay
  # applied after a fetch error before trying again.
  TIMEOUT = 1

  # mgr     - the Manager actor that receives fetched work
  # options - handed to the fetch strategy (queue list, strict flag, ...)
  #
  # The fetch strategy is pluggable through Sidekiq.options[:fetch];
  # BasicFetch is the default.
  def initialize(mgr, options)
    @mgr = mgr
    @strategy = (Sidekiq.options[:fetch] || BasicFetch).new(options)
  end

  # The Manager issues a fetch request for each idle processor at boot
  # and again whenever a Processor finishes a message.
  #
  # To allow a clean shutdown we can neither block nor loop forever;
  # when nothing is available we simply schedule another fetch through
  # the actor mailbox so control messages can interleave.
  def fetch
    watchdog('Fetcher#fetch died') do
      return if Sidekiq::Fetcher.done?

      begin
        unit = @strategy.retrieve_work
        if unit
          @mgr.async.assign(unit)
        else
          after(0) { fetch }
        end
      rescue => ex
        logger.error("Error fetching message: #{ex}")
        logger.error(ex.backtrace.first)
        # Back off briefly (Celluloid makes sleep actor-local), then retry.
        sleep(TIMEOUT)
        after(0) { fetch }
      end
    end
  end

  # Ugh. Say hello to a bloody hack.
  # There is no clean way to make the fetcher stop draining its mailbox
  # once shutdown starts, so we flip a class-level flag that turns any
  # in-flight fetch messages into no-ops.
  class << self
    def done!
      @done = true
    end

    def done?
      @done
    end
  end
end
# Default fetch strategy: a blocking multi-queue Redis pop (BRPOP)
# honoring the configured queue weights.
class BasicFetch
  # options[:strict] - when truthy, queues are always polled in the
  #                    configured order (no weighting).
  # options[:queues] - queue names; duplicates express relative weight.
  def initialize(options)
    @strictly_ordered_queues = !!options[:strict]
    @queues = options[:queues].map { |q| "queue:#{q}" }
    @unique_queues = @queues.uniq
  end

  # Returns a UnitOfWork, or nil when the blocking pop timed out with
  # nothing available.
  #
  # BUGFIX: brpop returns nil on timeout; splatting nil straight into
  # UnitOfWork.new produced a truthy-but-empty unit of work, so the
  # caller's `if work` check passed and empty work was handed to the
  # Manager. Return nil instead.
  def retrieve_work
    work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
    UnitOfWork.new(*work) if work
  end

  UnitOfWork = Struct.new(:queue, :message) do
    def acknowledge
      # nothing to do
    end

    # Queue name with any namespace and the "queue:" marker stripped.
    def queue_name
      queue.gsub(/.*queue:/, '')
    end

    # Push the message back onto the tail of its queue (e.g. on shutdown).
    def requeue
      Sidekiq.redis do |conn|
        conn.rpush(queue, message)
      end
    end
  end

  # Creating the Redis#brpop command takes into account any
  # configured queue weights. By default Redis#brpop returns
  # data from the first queue that has pending elements. We
  # recreate the queue command each time we invoke Redis#brpop
  # to honor weights and avoid queue starvation.
  def queues_cmd
    return @unique_queues.dup << Sidekiq::Fetcher::TIMEOUT if @strictly_ordered_queues
    queues = @queues.sample(@unique_queues.size).uniq
    queues.concat(@unique_queues - queues)
    queues << Sidekiq::Fetcher::TIMEOUT
  end
end
end