
require 'sidekiq/util'
require 'sidekiq/actor'
require 'sidekiq/middleware/server/retry_jobs'
require 'sidekiq/middleware/server/logging'
module Sidekiq
2012-06-13 00:55:06 -04:00
##
# The Processor receives a message from the Manager and actually
# processes it. It instantiates the worker, runs the middleware
# chain and then calls Sidekiq::Worker#perform.
class Processor
# To prevent a memory leak, ensure that stats expire. However, they should take up a minimal amount of storage
# so keep them around for a long time
STATS_TIMEOUT = 24 * 60 * 60 * 365 * 5
include Util
include Actor
# Builds the default server-side middleware chain: logging, retry
# handling, and — only when ActiveRecord has been loaded by the host
# application — the ActiveRecord connection-cleanup middleware.
#
# @return [Sidekiq::Middleware::Chain]
def self.default_middleware
  Middleware::Chain.new do |m|
    m.add Middleware::Server::Logging
    m.add Middleware::Server::RetryJobs
    if defined?(::ActiveRecord::Base)
      # Lazy require: don't force an ActiveRecord dependency on apps
      # that don't use it.
      require 'sidekiq/middleware/server/active_record'
      m.add Sidekiq::Middleware::Server::ActiveRecord
    end
  end
end
# Identifier assigned externally and passed back to the boss in
# real_thread registration (see #process).
attr_accessor :proxy_id
# @param boss [Object] the managing actor; #process notifies it
#   asynchronously via real_thread and processor_done.
def initialize(boss)
@boss = boss
end
# Decode and execute a single unit of work.
#
# The message is parsed as JSON, the worker class instantiated, and the
# job run through the server middleware chain. The work is acknowledged
# (removed from the queue) unless the job was force-killed mid-run, so
# an unfinished job can be recovered later.
#
# @param work [#message, #queue_name, #acknowledge] the fetched unit of work
def process(work)
  msgstr = work.message
  queue = work.queue_name

  # Register our real thread with the boss so it can be hard-killed on shutdown.
  @boss.async.real_thread(proxy_id, Thread.current)

  ack = true
  begin
    msg = Sidekiq.load_json(msgstr)
    klass = msg['class'].constantize
    worker = klass.new
    worker.jid = msg['jid']

    stats(worker, msg, queue) do
      Sidekiq.server_middleware.invoke(worker, msg, queue) do
        # Hand the worker a deep copy of the args so a failing job
        # re-enqueues the original, unmutated payload.
        worker.perform(*cloned(msg['args']))
      end
    end
  rescue Sidekiq::Shutdown
    # Had to force kill this job because it didn't finish
    # within the timeout. Don't acknowledge the work since
    # we didn't properly finish it.
    ack = false
  rescue Exception => ex
    # Deliberately rescue Exception (not StandardError) so even fatal
    # errors are reported before being re-raised to the supervisor.
    handle_exception(ex, msg || { :message => msgstr })
    raise
  ensure
    work.acknowledge if ack
  end

  @boss.async.processor_done(current_actor)
end
def inspect
"<Processor##{object_id.to_s(16)}>"
end
private

# Cached, compact identifier for the current thread (base-36 object id).
# Used as the field name in the per-process workers hash in Redis.
def thread_identity
  @str ||= Thread.current.object_id.to_s(36)
end
# Record job bookkeeping in Redis around the job's execution (the block).
#
# Before yielding, the in-progress payload is written to the
# "<identity>:workers" hash with a 4-hour expiry as a safety net.
# Afterwards the processed (and, on error, failed) counters are bumped
# and the in-progress entry removed. Every Redis update is wrapped in
# retry_and_suppress_exceptions so a stats failure never masks the
# job's own outcome.
def stats(worker, msg, queue)
  # Do not conflate errors from the job with errors caused by updating
  # stats so calling code can react appropriately
  retry_and_suppress_exceptions do
    hash = Sidekiq.dump_json({:queue => queue, :payload => msg, :run_at => Time.now.to_i })
    Sidekiq.redis do |conn|
      conn.multi do
        conn.hmset("#{identity}:workers", thread_identity, hash)
        conn.expire("#{identity}:workers", 60*60*4)
      end
    end
  end

  begin
    yield
  rescue Exception
    retry_and_suppress_exceptions do
      Sidekiq.redis do |conn|
        failed = "stat:failed:#{Time.now.utc.to_date}"
        result = conn.multi do
          conn.incrby("stat:failed", 1)
          conn.incrby(failed, 1)
        end
        # Set the expiry only when the day's counter was just created
        # (INCRBY returned 1), so it is not refreshed on every failure.
        conn.expire(failed, STATS_TIMEOUT) if result.last == 1
      end
    end
    raise
  ensure
    retry_and_suppress_exceptions do
      Sidekiq.redis do |conn|
        processed = "stat:processed:#{Time.now.utc.to_date}"
        result = conn.multi do
          conn.hdel("#{identity}:workers", thread_identity)
          conn.incrby("stat:processed", 1)
          conn.incrby(processed, 1)
        end
        conn.expire(processed, STATS_TIMEOUT) if result.last == 1
      end
    end
  end
end
# Deep-copy the worker arguments via a Marshal round-trip so that if
# the message fails, what is pushed back onto Redis hasn't been
# mutated by the worker.
def cloned(ary)
  serialized = Marshal.dump(ary)
  Marshal.load(serialized)
end
# Run the block, retrying up to max_retries additional times on
# StandardError with a one-second pause between attempts (to ride out
# transient Redis hiccups). The error is never propagated: once the
# retries are exhausted it is handed to handle_exception instead of
# being raised.
#
# @param max_retries [Integer] attempts allowed after the first failure
def retry_and_suppress_exceptions(max_retries = 2)
  retry_count = 0
  begin
    yield
  rescue => e
    retry_count += 1
    if retry_count <= max_retries
      Sidekiq.logger.debug {"Suppressing and retrying error: #{e.inspect}"}
      sleep(1)
      retry
    else
      handle_exception(e, { :message => "Exhausted #{max_retries} retries"})
    end
  end
end
end
end