# encoding: utf-8
require 'sidekiq/util'
require 'sidekiq/actor'
require 'sidekiq/processor'
require 'sidekiq/fetch'

module Sidekiq

  ##
  # The main router in the system. This
  # manages the processor state and accepts messages
  # from Redis to be dispatched to an idle processor.
  #
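  # A rough sketch of how this actor gets wired up (illustrative only; the
  # real wiring lives in Sidekiq's launcher/CLI and may differ in detail):
  #
  #   condvar = Celluloid::Condition.new
  #   manager = Sidekiq::Manager.new(condvar, concurrency: 25, queues: ['default'])
  #   manager.fetcher = Sidekiq::Fetcher.new(manager, queues: ['default'])
  #   manager.async.start   # begin dispatching fetched work to the processor pool
  #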
  class Manager
    include Util
    include Actor
    trap_exit :processor_died

    attr_reader :ready
    attr_reader :busy
    attr_accessor :fetcher

    SPIN_TIME_FOR_GRACEFUL_SHUTDOWN = 1
    JVM_RESERVED_SIGNALS = ['USR1', 'USR2'] # Don't Process#kill if we get these signals via the API
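
    # condvar is a condition variable signaled once shutdown is complete, so
    # the caller can block until all in-progress work has been pushed back to
    # Redis. The pool of ready Processor actors is built up front, sized by
    # options[:concurrency] (default 25).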
    def initialize(condvar, options={})
      logger.debug { options.inspect }
      @options = options
      @count = options[:concurrency] || 25

      @done_callback = nil
      @finished = condvar

      @in_progress = {}
      @threads = {}
      @done = false
      @busy = []
      @ready = @count.times.map do
        p = Processor.new_link(current_actor)
        p.proxy_id = p.object_id
        p
      end
    end
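
    # Begin the shutdown sequence: stop dispatching, terminate the idle
    # (quiet) processors, and finish immediately if nothing is busy.
    # Otherwise keep polling for a graceful finish and, if options[:shutdown]
    # is set, force a hard shutdown after options[:timeout] seconds.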
    def stop(options={})
      watchdog('Manager#stop died') do
        should_shutdown = options[:shutdown]
        timeout = options[:timeout]

        @done = true

        logger.info { "Terminating #{@ready.size} quiet workers" }
        @ready.each { |x| x.terminate if x.alive? }
        @ready.clear

        return if clean_up_for_graceful_shutdown

        hard_shutdown_in timeout if should_shutdown
      end
    end
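
    # Poll every SPIN_TIME_FOR_GRACEFUL_SHUTDOWN second(s) until the busy set
    # drains, then perform the final shutdown. Returns true if shutdown
    # happened now, false if we are still waiting on busy processors.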
    def clean_up_for_graceful_shutdown
      if @busy.empty?
        shutdown
        return true
      end

      after(SPIN_TIME_FOR_GRACEFUL_SHUTDOWN) { clean_up_for_graceful_shutdown }
      false
    end

    def start
      @ready.each { dispatch }
    end

    def when_done(&blk)
      @done_callback = blk
    end
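
    # Called by a Processor when it finishes a unit of work. Clears the
    # bookkeeping for that processor, returns it to the ready pool (or
    # terminates it if we are stopping), and asks for more work via dispatch.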
    def processor_done(processor)
      watchdog('Manager#processor_done died') do
        @done_callback.call(processor) if @done_callback
        @in_progress.delete(processor.object_id)
        @threads.delete(processor.object_id)
        @busy.delete(processor)
        if stopped?
          processor.terminate if processor.alive?
          shutdown if @busy.empty?
        else
          @ready << processor if processor.alive?
        end
        dispatch
      end
    end
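
    # Invoked via trap_exit when a Processor crashes. The dead actor is
    # discarded and, unless we are stopping, replaced with a fresh Processor
    # so the pool stays at full strength.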
    def processor_died(processor, reason)
      watchdog("Manager#processor_died died") do
        @in_progress.delete(processor.object_id)
        @threads.delete(processor.object_id)
        @busy.delete(processor)

        unless stopped?
          p = Processor.new_link(current_actor)
          p.proxy_id = p.object_id
          @ready << p
          dispatch
        else
          shutdown if @busy.empty?
        end
      end
    end
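
    # Called by the Fetcher when it has pulled a unit of work off Redis:
    # hand it to an idle processor, or requeue it if shutdown has already
    # begun.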
    def assign(work)
      watchdog("Manager#assign died") do
        if stopped?
          # Race condition with Manager#stop: the Fetcher may be blocked
          # on Redis and deliver a message after all the ready Processors
          # have already been stopped. Push the message back to Redis.
          work.requeue
        else
          processor = @ready.pop
          @in_progress[processor.object_id] = work
          @busy << processor
          processor.async.process(work)
        end
      end
    end

    # A hack worthy of Rube Goldberg. We need to be able
    # to hard stop a working thread. But there's no way for us to
    # get a handle to the underlying thread performing work for a processor,
    # so we have it call us and tell us.
    def real_thread(proxy_id, thr)
      @threads[proxy_id] = thr
    end
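
    # Runs every 5 seconds: refreshes the process title, publishes this
    # process's heartbeat hash to Redis (see ❤ below), and handles any
    # signal the remote API pushed onto the "#{key}-signals" list.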
    def heartbeat(key, data, json)
      proctitle = ['sidekiq', Sidekiq::VERSION]
      proctitle << data['tag'] unless data['tag'].empty?
      proctitle << "[#{@busy.size} of #{data['concurrency']} busy]"
      proctitle << 'stopping' if stopped?
      $0 = proctitle.join(' ')

      ❤(key, json)
      after(5) do
        heartbeat(key, data, json)
      end
    end

    private

    def ❤(key, json)
      begin
        _, _, _, msg = Sidekiq.redis do |conn|
          conn.multi do
            conn.sadd('processes', key)
            conn.hmset(key, 'info', json, 'busy', @busy.size, 'beat', Time.now.to_f)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          end
        end

        return unless msg

        if JVM_RESERVED_SIGNALS.include?(msg)
          Sidekiq::CLI.instance.handle_signal(msg)
        else
          ::Process.kill(msg, $$)
        end
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
      end
    end
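
    # After `delay` seconds, forcibly stop any still-busy workers: push their
    # jobs back to Redis first, then raise Sidekiq::Shutdown inside each
    # underlying work thread (captured earlier via real_thread) and signal
    # that shutdown is complete.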
    def hard_shutdown_in(delay)
      logger.info { "Pausing up to #{delay} seconds to allow workers to finish..." }

      after(delay) do
        watchdog("Manager#hard_shutdown_in died") do
          # We've reached the timeout and we still have busy workers.
          # They must die but their messages shall live on.
          logger.warn { "Terminating #{@busy.size} busy worker threads" }
          logger.warn { "Work still in progress #{@in_progress.values.inspect}" }

          requeue

          @busy.each do |processor|
            if processor.alive? && t = @threads.delete(processor.object_id)
              t.raise Shutdown
            end
          end

          @finished.signal
        end
      end
    end
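
    # Ask the Fetcher for another unit of work unless we are shutting down.
    # The raises are sanity checks: there must always be at least one ready
    # processor whenever dispatch is called.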
    def dispatch
      return if stopped?
      # This is a safety check to ensure we haven't leaked
      # processors somehow.
      raise "BUG: No processors, cannot continue!" if @ready.empty? && @busy.empty?
      raise "No ready processor!?" if @ready.empty?

      @fetcher.async.fetch
    end

    def stopped?
      @done
    end

    def shutdown
      requeue
      @finished.signal
    end

    def requeue
      # Re-enqueue terminated jobs.
      # NOTE: we may push a job back to Redis before the worker thread is
      # terminated. This is OK because Sidekiq's contract says that jobs are
      # run AT LEAST once. Process termination is delayed until we're certain
      # the jobs are back in Redis, because it is worse to lose a job than to
      # run it twice.
      Sidekiq::Fetcher.strategy.bulk_requeue(@in_progress.values, @options)
      @in_progress.clear
    end
  end
end