2012-02-08 20:04:02 -05:00
|
|
|
require 'sidekiq/util'
|
2013-05-10 23:43:53 -04:00
|
|
|
require 'sidekiq/actor'
|
2012-02-08 20:04:02 -05:00
|
|
|
|
2012-02-19 16:02:32 -05:00
|
|
|
require 'sidekiq/middleware/server/active_record'
|
2012-03-17 16:41:53 -04:00
|
|
|
require 'sidekiq/middleware/server/retry_jobs'
|
2012-02-25 16:43:53 -05:00
|
|
|
require 'sidekiq/middleware/server/logging'
|
2012-02-19 16:02:32 -05:00
|
|
|
|
2012-01-25 16:32:51 -05:00
|
|
|
module Sidekiq
  ##
  # The Processor receives a message from the Manager and actually
  # processes it. It instantiates the worker, runs the middleware
  # chain and then calls Sidekiq::Worker#perform.
  class Processor
    # TTL (in seconds) for the per-day stat counter keys
    # (stat:failed:<date>, stat:processed:<date>): 180 days.
    STATS_TIMEOUT = 180 * 24 * 60 * 60

    # NOTE(review): Util presumably supplies redis/hostname/process_id/
    # handle_exception and the EXPIRY constant used below; Actor supplies
    # the Celluloid actor plumbing (defer, async, current_actor) — confirm.
    include Util
    include Actor

    # Builds the default server-side middleware chain, added in invocation
    # order: job logging, then automatic retry bookkeeping, then
    # ActiveRecord connection cleanup.
    def self.default_middleware
      Middleware::Chain.new do |m|
        m.add Middleware::Server::Logging
        m.add Middleware::Server::RetryJobs
        m.add Middleware::Server::ActiveRecord
      end
    end

    # store the actual working thread so we
    # can later kill it if necessary during
    # hard shutdown.
    attr_accessor :actual_work_thread

    # boss is the Manager actor that hands this processor units of work
    # and is notified via processor_done when each one finishes.
    def initialize(boss)
      @boss = boss
    end

    # Executes one unit of work: decodes the JSON payload, instantiates
    # the worker class and runs it through the server middleware chain,
    # wrapped in Redis stats bookkeeping (see #stats).
    #
    # Error handling:
    # * Sidekiq::Shutdown is swallowed — it is raised into this thread to
    #   force-kill a job that exceeded the shutdown timeout.
    # * Any other Exception is reported via handle_exception and re-raised.
    # * The work is acknowledged (ensure) in every case so it is not
    #   redelivered by this path.
    def process(work)
      msgstr = work.message
      queue = work.queue_name

      # Remember the thread doing the real work so a hard shutdown can
      # Thread#raise into it.
      # NOTE(review): this is captured before do_defer; when NEED_DEFER is
      # true the block may run on a different (deferred) thread — confirm
      # that is the intended thread to kill.
      @actual_work_thread = Thread.current
      do_defer do
        begin
          msg = Sidekiq.load_json(msgstr)
          klass = msg['class'].constantize
          worker = klass.new
          worker.jid = msg['jid']

          stats(worker, msg, queue) do
            Sidekiq.server_middleware.invoke(worker, msg, queue) do
              # Hand the worker cloned args so a retry re-pushes the
              # original, unmutated payload (see #cloned).
              worker.perform(*cloned(msg['args']))
            end
          end
        rescue Sidekiq::Shutdown
          # Had to force kill this job because it didn't finish
          # within the timeout.
        rescue Exception => ex
          # msg is nil if the JSON payload failed to parse; report the raw
          # string instead so the error context isn't lost.
          handle_exception(ex, msg || { :message => msgstr })
          raise
        ensure
          work.acknowledge
        end
      end

      # Tell the Manager (asynchronously) that this processor is free.
      @boss.async.processor_done(current_actor)
    end

    private

    # We use Celluloid's defer to workaround tiny little
    # Fiber stacks (4kb!) in MRI 1.9.
    #
    # For some reason, Celluloid's thread dispatch, TaskThread,
    # is unstable under heavy concurrency but TaskFiber has proven
    # itself stable.
    NEED_DEFER = (RUBY_ENGINE == 'ruby' && RUBY_VERSION < '2.0.0')

    # Runs the block via Celluloid's defer on MRI 1.9 (see NEED_DEFER);
    # everywhere else the block executes inline on the calling thread.
    def do_defer(&block)
      if NEED_DEFER
        defer(&block)
      else
        yield
      end
    end

    # Unique identity string for this processor thread, used as the Redis
    # member/key name for the 'workers' bookkeeping in #stats.
    # Memoized per instance; format: "<host>:<pid>-<thread id>:default".
    def identity
      @str ||= "#{hostname}:#{process_id}-#{Thread.current.object_id}:default"
    end

    # Wraps job execution (the yielded block) with Redis bookkeeping:
    #
    # * Before: atomically registers this processor in the 'workers' set
    #   and records what it is running (queue, payload, start time) under
    #   worker:<identity> keys with a TTL of EXPIRY.
    # * On failure: increments the lifetime and per-day failure counters,
    #   then re-raises.
    # * Always (ensure): deregisters the worker keys and increments the
    #   lifetime and per-day processed counters.
    #
    # The per-day counter keys get a STATS_TIMEOUT TTL exactly once — when
    # the MULTI's last INCRBY returns 1, i.e. the increment that created
    # the key that day.
    def stats(worker, msg, queue)
      redis do |conn|
        conn.multi do
          conn.sadd('workers', identity)
          conn.setex("worker:#{identity}:started", EXPIRY, Time.now.to_s)
          hash = {:queue => queue, :payload => msg, :run_at => Time.now.to_i }
          conn.setex("worker:#{identity}", EXPIRY, Sidekiq.dump_json(hash))
        end
      end

      begin
        yield
      rescue Exception
        # Deliberately broad: any failure (even non-StandardError) must be
        # counted before the error propagates to #process.
        redis do |conn|
          failed = "stat:failed:#{Time.now.utc.to_date}"
          result = conn.multi do
            conn.incrby("stat:failed", 1)
            conn.incrby(failed, 1)
          end
          conn.expire(failed, STATS_TIMEOUT) if result.last == 1
        end
        raise
      ensure
        redis do |conn|
          processed = "stat:processed:#{Time.now.utc.to_date}"
          result = conn.multi do
            conn.srem("workers", identity)
            conn.del("worker:#{identity}")
            conn.del("worker:#{identity}:started")
            conn.incrby("stat:processed", 1)
            conn.incrby(processed, 1)
          end
          conn.expire(processed, STATS_TIMEOUT) if result.last == 1
        end
      end
    end

    # Singleton classes are not clonable.
    # (Fixnum/Bignum are correct for the pre-2.4 Rubies this code targets.)
    SINGLETON_CLASSES = [ NilClass, TrueClass, FalseClass, Symbol, Fixnum, Float, Bignum ].freeze

    # Clone the arguments passed to the worker so that if
    # the message fails, what is pushed back onto Redis hasn't
    # been mutated by the worker.
    #
    # Note: #clone is shallow, so only top-level mutation is guarded
    # against.
    def cloned(ary)
      ary.map do |val|
        SINGLETON_CLASSES.include?(val.class) ? val : val.clone
      end
    end
  end
end
|