# lib/sidekiq/processor.rb

require 'sidekiq/util'
require 'sidekiq/fetch'
require 'thread'
require 'concurrent/map'
require 'concurrent/atomic/atomic_fixnum'

module Sidekiq
  ##
  # The Processor is a standalone thread which:
  #
  # 1. fetches a job from Redis
  # 2. executes the job
  #    a. instantiate the Worker
  #    b. run the middleware chain
  #    c. call #perform
  #
  # A Processor can exit due to shutdown (processor_stopped)
  # or due to an error during job execution (processor_died).
  #
  # If an error occurs during job execution, the
  # Processor calls the Manager to create a new one
  # to replace itself and exits.
  #
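  # Rough lifecycle, as driven by the Manager (illustrative sketch only):
  #
  #   processor = Sidekiq::Processor.new(manager)
  #   processor.start       # spawn the thread and begin the fetch/execute loop
  #   processor.terminate   # graceful: exit after the current job finishes
  #   processor.kill        # forceful: raise Sidekiq::Shutdown inside the running job
  #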
  class Processor

    include Util

    attr_reader :thread
    attr_reader :job

    def initialize(mgr)
      @mgr = mgr
      @down = false
      @done = false
      @job = nil
      @thread = nil
      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
      @reloader = Sidekiq.options[:reloader]
    end
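
    # Request a graceful stop: the run loop exits once the current job
    # (if any) has finished. Pass wait=true to block until the processor
    # thread has terminated.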
    def terminate(wait=false)
      @done = true
      return if !@thread
      @thread.value if wait
    end

    def kill(wait=false)
      @done = true
      return if !@thread
      # unlike the other actors, terminate does not wait
      # for the thread to finish because we don't know how
      # long the job will take to finish. Instead we
      # provide a `kill` method to call after the shutdown
      # timeout passes.
      @thread.raise ::Sidekiq::Shutdown
      @thread.value if wait
    end
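
    # Spawn the processor thread; idempotent, so calling start again
    # will not create a second thread.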
    def start
      @thread ||= safe_thread("processor", &method(:run))
    end

    private unless $TESTING
    def run
      begin
        while !@done
          process_one
        end
        @mgr.processor_stopped(self)
      rescue Sidekiq::Shutdown
        @mgr.processor_stopped(self)
      rescue Exception => ex
        @mgr.processor_died(self, ex)
      end
    end
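
    # Fetch a single job and run it, tracking it in @job so the Manager
    # can see what this processor is currently working on.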
    def process_one
      @job = fetch
      process(@job) if @job
      @job = nil
    end
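
    # Ask the fetch strategy for one unit of work. Logs a recovery
    # message if Redis had been marked down; fetch errors are routed to
    # handle_fetch_exception.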
    def get_one
      begin
        work = @strategy.retrieve_work
        (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
        work
      rescue Sidekiq::Shutdown
      rescue => ex
        handle_fetch_exception(ex)
      end
    end
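
    # Like get_one, but if we are already shutting down, requeue the job
    # instead of processing it.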
    def fetch
      j = get_one
      if j && @done
        j.requeue
        nil
      else
        j
      end
    end
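
    # Log the first fetch error and mark Redis as down; further errors
    # are suppressed until the connection recovers. Sleep briefly so we
    # don't spin on a dead Redis.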
    def handle_fetch_exception(ex)
      if !@down
        @down = Time.now
        logger.error("Error fetching job: #{ex}")
        ex.backtrace.each do |bt|
          logger.error(bt)
        end
      end
      sleep(1)
      nil
    end
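
    # Decode the job payload, build the worker and run it through the
    # server middleware chain inside the reloader. `ack` tracks whether
    # the work should be acknowledged (removed from the queue) when we
    # are done with it.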
    def process(work)
      jobstr = work.job
      queue = work.queue_name

      @reloader.call do
        ack = false
        begin
          job = Sidekiq.load_json(jobstr)
          klass = job['class'.freeze].constantize
          worker = klass.new
          worker.jid = job['jid'.freeze]

          stats(worker, job, queue) do
            Sidekiq.server_middleware.invoke(worker, job, queue) do
              # Only ack if we either attempted to start this job or
              # successfully completed it. This prevents us from
              # losing jobs if a middleware raises an exception before yielding
              ack = true
              execute_job(worker, cloned(job['args'.freeze]))
            end
          end
          ack = true
        rescue Sidekiq::Shutdown
          # Had to force kill this job because it didn't finish
          # within the timeout. Don't acknowledge the work since
          # we didn't properly finish it.
          ack = false
        rescue Exception => ex
          handle_exception(ex, job || { :job => jobstr })
          raise
        ensure
          work.acknowledge if ack
        end
      end
    end
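
    # Call the worker's #perform with a deep copy of the job arguments.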
    def execute_job(worker, cloned_args)
      worker.perform(*cloned_args)
    end
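
    # A short, stable identifier for the current thread, used as the key
    # for this thread's entry in WORKER_STATE.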
    def thread_identity
      @str ||= Thread.current.object_id.to_s(36)
    end
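
    # Process-wide state shared by all Processor threads: the jobs
    # currently being worked on, plus processed/failure counters.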
    WORKER_STATE = Concurrent::Map.new
    PROCESSED = Concurrent::AtomicFixnum.new
    FAILURE = Concurrent::AtomicFixnum.new
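
    # Record this thread's in-progress job for the duration of the block
    # and bump the processed/failure counters when it completes.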
    def stats(worker, job, queue)
      tid = thread_identity
      WORKER_STATE[tid] = {:queue => queue, :payload => job, :run_at => Time.now.to_i }
      begin
        yield
      rescue Exception
        FAILURE.increment
        raise
      ensure
        WORKER_STATE.delete(tid)
        PROCESSED.increment
      end
    end

    # Deep clone the arguments passed to the worker so that if
    # the job fails, what is pushed back onto Redis hasn't
    # been mutated by the worker.
    def cloned(ary)
      Marshal.load(Marshal.dump(ary))
    end
  end
end