
Remove "worker" from codebase where possible

Mike Perham 2022-03-03 12:37:25 -08:00
parent 7672f05063
commit e7d154eeb5
26 changed files with 198 additions and 173 deletions


@ -2,6 +2,19 @@
[Sidekiq Changes](https://github.com/mperham/sidekiq/blob/main/Changes.md) | [Sidekiq Pro Changes](https://github.com/mperham/sidekiq/blob/main/Pro-Changes.md) | [Sidekiq Enterprise Changes](https://github.com/mperham/sidekiq/blob/main/Ent-Changes.md)
HEAD
---------
- Many under-the-hood changes to remove all usage of the term "worker"
from the Sidekiq codebase and APIs. This mostly involved RDoc and local variable
names, but a few constants and public APIs were changed. The old APIs will be removed
in Sidekiq 7.0.
```
Sidekiq::DEFAULT_WORKER_OPTIONS -> Sidekiq.default_job_options
Sidekiq.default_worker_options -> Sidekiq.default_job_options
Sidekiq::Queues["default"].jobs_by_worker(HardJob) -> Sidekiq::Queues["default"].jobs_by_class(HardJob)
```
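As a migration sketch for the renamed configuration API (the initializer path and option values are illustrative; per this diff, `default_worker_options=` remains as a deprecated alias and `jobs_by_worker` is aliased to `jobs_by_class`):
```
# config/initializers/sidekiq.rb
require "sidekiq"

# Before (deprecated, slated for removal in Sidekiq 7.0):
#   Sidekiq.default_worker_options = { retry: 10, backtrace: true }

# After: same semantics; keys are stringified and merged into the defaults.
Sidekiq.default_job_options = { retry: 10, backtrace: true }
Sidekiq.default_job_options["retry"] # => 10
```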
6.4.1
---------


@ -40,11 +40,6 @@ module Sidekiq
reloader: proc { |&block| block.call }
}
DEFAULT_WORKER_OPTIONS = {
"retry" => true,
"queue" => "default"
}
FAKE_INFO = {
"redis_version" => "9.9.9",
"uptime_in_days" => "9999",
@ -158,13 +153,20 @@ module Sidekiq
Middleware::Chain.new
end
def self.default_worker_options=(hash)
# stringify
@default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
def self.default_worker_options=(hash) # deprecated
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_worker_options
defined?(@default_worker_options) ? @default_worker_options : DEFAULT_WORKER_OPTIONS
def self.default_job_options=(hash)
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_worker_options # deprecated
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
def self.default_job_options
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
##
@ -258,12 +260,12 @@ module Sidekiq
options[:on_complex_arguments] = mode
end
# We are shutting down Sidekiq but what about workers that
# We are shutting down Sidekiq but what about threads that
# are working on some long job? This error is
# raised in workers that have not finished within the hard
# raised in jobs that have not finished within the hard
# timeout limit. This is needed to rollback db transactions,
# otherwise Ruby's Thread#kill will commit. See #377.
# DO NOT RESCUE THIS ERROR IN YOUR WORKERS
# DO NOT RESCUE THIS ERROR IN YOUR JOBS
class Shutdown < Interrupt; end
end
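For application code, the contract above amounts to wrapping transactional work normally and never rescuing this error; a minimal sketch (the job class, helper method, and use of ActiveRecord are illustrative, not part of this diff):
```
class SlowJob
  include Sidekiq::Job

  def perform(record_id)
    # If the process hits its hard shutdown timeout, Sidekiq::Shutdown is
    # raised inside this thread, so the open transaction rolls back and the
    # job is pushed back to Redis to run again later.
    ActiveRecord::Base.transaction do
      do_expensive_work(record_id) # hypothetical helper
    end
  rescue StandardError => e
    # Safe: Sidekiq::Shutdown subclasses Interrupt, so `rescue StandardError`
    # cannot swallow it. Never rescue Interrupt or Exception in a job.
    Sidekiq.logger.warn("SlowJob #{record_id} failed: #{e.message}")
    raise
  end
end
```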


@ -964,7 +964,7 @@ module Sidekiq
procs.sort.each do |key|
valid, workers = conn.pipelined { |pipeline|
pipeline.exists?(key)
pipeline.hgetall("#{key}:workers")
pipeline.hgetall("#{key}:work")
}
next unless valid
workers.each_pair do |tid, json|


@ -295,7 +295,7 @@ module Sidekiq
(File.directory?(options[:require]) && !File.exist?("#{options[:require]}/config/application.rb"))
logger.info "=================================================================="
logger.info " Please point Sidekiq to a Rails application or a Ruby file "
logger.info " to load your worker classes with -r [DIR|FILE]."
logger.info " to load your job classes with -r [DIR|FILE]."
logger.info "=================================================================="
logger.info @parser
die(1)
@ -336,7 +336,7 @@ module Sidekiq
parse_queue opts, queue, weight
end
o.on "-r", "--require [PATH|DIR]", "Location of Rails application with workers or file to require" do |arg|
o.on "-r", "--require [PATH|DIR]", "Location of Rails application with jobs or file to require" do |arg|
opts[:require] = arg
end


@ -15,7 +15,7 @@ module Sidekiq
# client.middleware do |chain|
# chain.use MyClientMiddleware
# end
# client.push('class' => 'SomeWorker', 'args' => [1,2,3])
# client.push('class' => 'SomeJob', 'args' => [1,2,3])
#
# All client instances default to the globally-defined
# Sidekiq.client_middleware but you can change as necessary.
@ -49,16 +49,16 @@ module Sidekiq
# The main method used to push a job to Redis. Accepts a number of options:
#
# queue - the named queue to use, default 'default'
# class - the worker class to call, required
# class - the job class to call, required
# args - an array of simple arguments to the perform method, must be JSON-serializable
# at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
# retry - whether to retry this job if it fails, default true or an integer number of retries
# backtrace - whether to save any error backtrace, default false
#
# If class is set to the class name, the jobs' options will be based on Sidekiq's default
# worker options. Otherwise, they will be based on the job class's options.
# job options. Otherwise, they will be based on the job class's options.
#
# Any options valid for a worker class's sidekiq_options are also available here.
# Any options valid for a job class's sidekiq_options are also available here.
#
# All options must be strings, not symbols. NB: because we are serializing to JSON, all
# symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
@ -67,7 +67,7 @@ module Sidekiq
# Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
#
# Example:
# push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
# push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
#
def push(item)
push_bulk(item.merge("args" => [item["args"]])).first
@ -116,8 +116,8 @@ module Sidekiq
#
# pool = ConnectionPool.new { Redis.new }
# Sidekiq::Client.via(pool) do
# SomeWorker.perform_async(1,2,3)
# SomeOtherWorker.perform_async(1,2,3)
# SomeJob.perform_async(1,2,3)
# SomeOtherJob.perform_async(1,2,3)
# end
#
# Generally this is only needed for very large Sidekiq installs processing
@ -142,10 +142,10 @@ module Sidekiq
end
# Resque compatibility helpers. Note all helpers
# should go through Worker#client_push.
# should go through Sidekiq::Job#client_push.
#
# Example usage:
# Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
# Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
#
# Messages are enqueued to the 'default' queue.
#
@ -154,14 +154,14 @@ module Sidekiq
end
# Example usage:
# Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
# Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_to(queue, klass, *args)
klass.client_push("queue" => queue, "class" => klass, "args" => args)
end
# Example usage:
# Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
# Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_to_in(queue, interval, klass, *args)
int = interval.to_f
@ -175,7 +175,7 @@ module Sidekiq
end
# Example usage:
# Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
# Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
#
def enqueue_in(interval, klass, *args)
klass.perform_in(interval, *args)
@ -226,10 +226,10 @@ module Sidekiq
end
end
def process_single(worker_class, item)
def process_single(job_class, item)
queue = item["queue"]
middleware.invoke(worker_class, item, queue, @redis_pool) do
middleware.invoke(job_class, item, queue, @redis_pool) do
item
end
end
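A usage sketch of the push options documented above (`SomeJob`, the queue name, and the arguments are placeholders; a running Redis is assumed):
```
require "sidekiq"

class SomeJob
  include Sidekiq::Job
  def perform(id, action); end
end

# Push a single job; returns a JID string, or nil if client middleware aborts it.
jid = Sidekiq::Client.push(
  "queue" => "critical",
  "class" => SomeJob,             # or the class name as a String
  "args"  => [42, "reset"],       # must be JSON-serializable
  "at"    => Time.now.to_f + 300, # optional: schedule ~5 minutes from now
  "retry" => 3                    # optional: per-job retry cap
)

# The Resque-compatibility helpers above route through the same code path:
Sidekiq::Client.enqueue(SomeJob, 42, "reset")
Sidekiq::Client.enqueue_to(:critical, SomeJob, 42, "reset")
```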


@ -25,11 +25,11 @@ module Sidekiq
#
# A job looks like:
#
# { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
#
# The 'retry' option also accepts a number (in place of 'true'):
#
# { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
#
# The job will be retried this number of times before giving up. (If simply
# 'true', Sidekiq retries 25 times)
@ -53,11 +53,11 @@ module Sidekiq
#
# Sidekiq.options[:max_retries] = 7
#
# or limit the number of retries for a particular worker and send retries to
# or limit the number of retries for a particular job and send retries to
# a low priority queue with:
#
# class MyWorker
# include Sidekiq::Worker
# class MyJob
# include Sidekiq::Job
# sidekiq_options retry: 10, retry_queue: 'low'
# end
#
@ -76,7 +76,7 @@ module Sidekiq
# The global retry handler requires only the barest of data.
# We want to be able to retry as much as possible so we don't
# require the worker to be instantiated.
# require the job to be instantiated.
def global(jobstr, queue)
yield
rescue Handled => ex
@ -103,14 +103,14 @@ module Sidekiq
end
# The local retry support means that any errors that occur within
# this block can be associated with the given worker instance.
# this block can be associated with the given job instance.
# This is required to support the `sidekiq_retries_exhausted` block.
#
# Note that any exception from the block is wrapped in the Skip
# exception so the global block does not reprocess the error. The
# Skip exception is unwrapped within Sidekiq::Processor#process before
# calling the handle_exception handlers.
def local(worker, jobstr, queue)
def local(jobinst, jobstr, queue)
yield
rescue Handled => ex
raise ex
@ -123,11 +123,11 @@ module Sidekiq
msg = Sidekiq.load_json(jobstr)
if msg["retry"].nil?
msg["retry"] = worker.class.get_sidekiq_options["retry"]
msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
end
raise e unless msg["retry"]
attempt_retry(worker, msg, queue, e)
attempt_retry(jobinst, msg, queue, e)
# We've handled this error associated with this job, don't
# need to handle it at the global level
raise Skip
@ -135,10 +135,10 @@ module Sidekiq
private
# Note that +worker+ can be nil here if an error is raised before we can
# instantiate the worker instance. All access must be guarded and
# Note that +jobinst+ can be nil here if an error is raised before we can
# instantiate the job instance. All access must be guarded and
# best effort.
def attempt_retry(worker, msg, queue, exception)
def attempt_retry(jobinst, msg, queue, exception)
max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
msg["queue"] = (msg["retry_queue"] || queue)
@ -170,7 +170,7 @@ module Sidekiq
end
if count < max_retry_attempts
delay = delay_for(worker, count, exception)
delay = delay_for(jobinst, count, exception)
# Logging here can break retries if the logging device raises ENOSPC #3979
# logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
retry_at = Time.now.to_f + delay
@ -180,13 +180,13 @@ module Sidekiq
end
else
# Goodbye dear message, you (re)tried your best I'm sure.
retries_exhausted(worker, msg, exception)
retries_exhausted(jobinst, msg, exception)
end
end
def retries_exhausted(worker, msg, exception)
def retries_exhausted(jobinst, msg, exception)
begin
block = worker&.sidekiq_retries_exhausted_block
block = jobinst&.sidekiq_retries_exhausted_block
block&.call(msg, exception)
rescue => e
handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@ -215,19 +215,19 @@ module Sidekiq
end
end
def delay_for(worker, count, exception)
def delay_for(jobinst, count, exception)
jitter = rand(10) * (count + 1)
if worker&.sidekiq_retry_in_block
custom_retry_in = retry_in(worker, count, exception).to_i
if jobinst&.sidekiq_retry_in_block
custom_retry_in = retry_in(jobinst, count, exception).to_i
return custom_retry_in + jitter if custom_retry_in > 0
end
(count**4) + 15 + jitter
end
def retry_in(worker, count, exception)
worker.sidekiq_retry_in_block.call(count, exception)
def retry_in(jobinst, count, exception)
jobinst.sidekiq_retry_in_block.call(count, exception)
rescue Exception => e
handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
nil
end
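For context, a sketch of the job-level hooks these handlers consult (`UnreliableApiJob` and its timing choices are illustrative):
```
class UnreliableApiJob
  include Sidekiq::Job
  sidekiq_options retry: 10, retry_queue: "low"

  # Consulted by delay_for/retry_in above; returning nil or 0 falls back to
  # the default (count**4 + 15 + jitter) schedule.
  sidekiq_retry_in do |count, exception|
    10 * (count + 1) # linear backoff instead of the default polynomial
  end

  # Called by retries_exhausted once the retry budget is spent.
  sidekiq_retries_exhausted do |msg, exception|
    Sidekiq.logger.warn("Giving up on #{msg['class']} #{msg['jid']}: #{exception.message}")
  end

  def perform(api_id)
    # ...
  end
end
```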


@ -48,10 +48,10 @@ module Sidekiq
def normalized_hash(item_class)
if item_class.is_a?(Class)
raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
item_class.get_sidekiq_options
else
Sidekiq.default_worker_options
Sidekiq.default_job_options
end
end


@ -15,7 +15,7 @@ module Sidekiq
proc { "sidekiq" },
proc { Sidekiq::VERSION },
proc { |me, data| data["tag"] },
proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
proc { |me, data| "stopping" if me.stopping? }
]
@ -43,9 +43,7 @@ module Sidekiq
@poller.terminate
end
# Shuts down the process. This method does not
# return until all work is complete and cleaned up.
# It can take up to the timeout to complete.
# Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
def stop
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
@ -55,7 +53,7 @@ module Sidekiq
@manager.stop(deadline)
# Requeue everything in case there was a worker who grabbed work while stopped
# Requeue everything in case there was a thread which fetched a job while the process was stopped.
# This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
strategy = @options[:fetch]
strategy.bulk_requeue([], @options)
@ -86,7 +84,7 @@ module Sidekiq
Sidekiq.redis do |conn|
conn.pipelined do |pipeline|
pipeline.srem("processes", identity)
pipeline.unlink("#{identity}:workers")
pipeline.unlink("#{identity}:work")
end
end
rescue
@ -132,9 +130,8 @@ module Sidekiq
begin
fails = Processor::FAILURE.reset
procd = Processor::PROCESSED.reset
curstate = Processor::WORKER_STATE.dup
curstate = Processor::WORK_STATE.dup
workers_key = "#{key}:workers"
nowdate = Time.now.utc.strftime("%Y-%m-%d")
Sidekiq.redis do |conn|
@ -146,12 +143,16 @@ module Sidekiq
transaction.incrby("stat:failed", fails)
transaction.incrby("stat:failed:#{nowdate}", fails)
transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
end
transaction.unlink(workers_key)
# work is the current set of executing jobs
work_key = "#{key}:work"
conn.pipelined do |transaction|
transaction.unlink(work_key)
curstate.each_pair do |tid, hash|
transaction.hset(workers_key, tid, Sidekiq.dump_json(hash))
transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
end
transaction.expire(workers_key, 60)
transaction.expire(work_key, 60)
end
end
@ -214,7 +215,7 @@ module Sidekiq
Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
Ensure Redis is running in the same AZ or datacenter as Sidekiq.
If these values are close to 100,000, that means your Sidekiq process may be
CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
EOM
RTT_READINGS.reset
end


@ -50,7 +50,7 @@ module Sidekiq
return if @done
@done = true
logger.info { "Terminating quiet workers" }
logger.info { "Terminating quiet threads" }
@workers.each { |x| x.terminate }
fire_event(:quiet, reverse: true)
end
@ -65,7 +65,7 @@ module Sidekiq
sleep PAUSE_TIME
return if @workers.empty?
logger.info { "Pausing to allow workers to finish..." }
logger.info { "Pausing to allow jobs to finish..." }
wait_for(deadline) { @workers.empty? }
return if @workers.empty?
@ -96,7 +96,7 @@ module Sidekiq
private
def hard_shutdown
# We've reached the timeout and we still have busy workers.
# We've reached the timeout and we still have busy threads.
# They must die but their jobs shall live on.
cleanup = nil
@plock.synchronize do
@ -106,12 +106,12 @@ module Sidekiq
if cleanup.size > 0
jobs = cleanup.map { |p| p.job }.compact
logger.warn { "Terminating #{cleanup.size} busy worker threads" }
logger.warn { "Work still in progress #{jobs.inspect}" }
logger.warn { "Terminating #{cleanup.size} busy threads" }
logger.warn { "Jobs still in progress #{jobs.inspect}" }
# Re-enqueue unfinished jobs
# NOTE: You may notice that we may push a job back to redis before
# the worker thread is terminated. This is ok because Sidekiq's
# the thread is terminated. This is ok because Sidekiq's
# contract says that jobs are run AT LEAST once. Process termination
# is delayed until we're certain the jobs are back in Redis because
# it is worse to lose a job than to run it twice.


@ -44,10 +44,10 @@ module Sidekiq
# This is an example of a minimal server middleware:
#
# class MyServerHook
# def call(worker_instance, msg, queue)
# puts "Before work"
# def call(job_instance, msg, queue)
# puts "Before job"
# yield
# puts "After work"
# puts "After job"
# end
# end
#
@ -56,7 +56,7 @@ module Sidekiq
# to Redis:
#
# class MyClientHook
# def call(worker_class, msg, queue, redis_pool)
# def call(job_class, msg, queue, redis_pool)
# puts "Before push"
# result = yield
# puts "After push"


@ -10,16 +10,16 @@ module Sidekiq::Middleware::I18n
# Get the current locale and store it in the message
# to be sent to Sidekiq.
class Client
def call(_worker, msg, _queue, _redis)
msg["locale"] ||= I18n.locale
def call(_jobclass, job, _queue, _redis)
job["locale"] ||= I18n.locale
yield
end
end
# Pull the msg locale out and set the current thread to use it.
class Server
def call(_worker, msg, _queue, &block)
I18n.with_locale(msg.fetch("locale", I18n.default_locale), &block)
def call(_jobclass, job, _queue, &block)
I18n.with_locale(job.fetch("locale", I18n.default_locale), &block)
end
end
end


@ -11,7 +11,7 @@ module Sidekiq
#
# 1. fetches a job from Redis
# 2. executes the job
# a. instantiate the Worker
# a. instantiate the job class
# b. run the middleware chain
# c. call #perform
#
@ -80,12 +80,12 @@ module Sidekiq
end
def get_one
work = @strategy.retrieve_work
uow = @strategy.retrieve_work
if @down
logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }
@down = nil
end
work
uow
rescue Sidekiq::Shutdown
rescue => ex
handle_fetch_exception(ex)
@ -130,10 +130,10 @@ module Sidekiq
# Effectively this block denotes a "unit of work" to Rails.
@reloader.call do
klass = constantize(job_hash["class"])
worker = klass.new
worker.jid = job_hash["jid"]
@retrier.local(worker, jobstr, queue) do
yield worker
inst = klass.new
inst.jid = job_hash["jid"]
@retrier.local(inst, jobstr, queue) do
yield inst
end
end
end
@ -142,9 +142,9 @@ module Sidekiq
end
end
def process(work)
jobstr = work.job
queue = work.queue_name
def process(uow)
jobstr = uow.job
queue = uow.queue_name
# Treat malformed JSON as a special case: job goes straight to the morgue.
job_hash = nil
@ -154,14 +154,14 @@ module Sidekiq
handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
# we can't notify because the job isn't a valid hash payload.
DeadSet.new.kill(jobstr, notify_failure: false)
return work.acknowledge
return uow.acknowledge
end
ack = false
begin
dispatch(job_hash, queue, jobstr) do |worker|
Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
execute_job(worker, job_hash["args"])
dispatch(job_hash, queue, jobstr) do |inst|
Sidekiq.server_middleware.invoke(inst, job_hash, queue) do
execute_job(inst, job_hash["args"])
end
end
ack = true
@ -186,14 +186,14 @@ module Sidekiq
if ack
# We don't want a shutdown signal to interrupt job acknowledgment.
Thread.handle_interrupt(Sidekiq::Shutdown => :never) do
work.acknowledge
uow.acknowledge
end
end
end
end
def execute_job(worker, cloned_args)
worker.perform(*cloned_args)
def execute_job(inst, cloned_args)
inst.perform(*cloned_args)
end
# Ruby doesn't provide atomic counters out of the box so we'll
@ -219,39 +219,39 @@ module Sidekiq
end
# jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
class SharedWorkerState
class SharedWorkState
def initialize
@worker_state = {}
@work_state = {}
@lock = Mutex.new
end
def set(tid, hash)
@lock.synchronize { @worker_state[tid] = hash }
@lock.synchronize { @work_state[tid] = hash }
end
def delete(tid)
@lock.synchronize { @worker_state.delete(tid) }
@lock.synchronize { @work_state.delete(tid) }
end
def dup
@lock.synchronize { @worker_state.dup }
@lock.synchronize { @work_state.dup }
end
def size
@lock.synchronize { @worker_state.size }
@lock.synchronize { @work_state.size }
end
def clear
@lock.synchronize { @worker_state.clear }
@lock.synchronize { @work_state.clear }
end
end
PROCESSED = Counter.new
FAILURE = Counter.new
WORKER_STATE = SharedWorkerState.new
WORK_STATE = SharedWorkState.new
def stats(jobstr, queue)
WORKER_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})
WORK_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})
begin
yield
@ -259,7 +259,7 @@ module Sidekiq
FAILURE.incr
raise
ensure
WORKER_STATE.delete(tid)
WORK_STATE.delete(tid)
PROCESSED.incr
end
end


@ -1,6 +1,6 @@
# frozen_string_literal: true
require "sidekiq/worker"
require "sidekiq/job"
module Sidekiq
class Rails < ::Rails::Engine
@ -33,13 +33,13 @@ module Sidekiq
# end
initializer "sidekiq.active_job_integration" do
ActiveSupport.on_load(:active_job) do
include ::Sidekiq::Worker::Options unless respond_to?(:sidekiq_options)
include ::Sidekiq::Job::Options unless respond_to?(:sidekiq_options)
end
end
initializer "sidekiq.rails_logger" do
Sidekiq.configure_server do |_|
# This is the integration code necessary so that if code uses `Rails.logger.info "Hello"`,
# This is the integration code necessary so that if a job uses `Rails.logger.info "Hello"`,
# it will appear in the Sidekiq console with all of the job context. See #5021 and
# https://github.com/rails/rails/blob/b5f2b550f69a99336482739000c58e4e04e033aa/railties/lib/rails/commands/server/server_command.rb#L82-L84
unless ::Rails.logger == ::Sidekiq.logger || ::ActiveSupport::Logger.logger_outputs_to?(::Rails.logger, $stdout)
@ -48,6 +48,13 @@ module Sidekiq
end
end
config.before_configuration do
dep = ActiveSupport::Deprecation.new("7.0", "Sidekiq")
dep.deprecate_methods(Sidekiq.singleton_class,
default_worker_options: :default_job_options,
"default_worker_options=": :default_job_options=)
end
# This hook happens after all initializers are run, just before returning
# from config/environment.rb back to sidekiq/cli.rb.
#


@ -38,7 +38,7 @@ module Sidekiq
private
# Sidekiq needs a lot of concurrent Redis connections.
# Sidekiq needs many concurrent Redis connections.
#
# We need a connection for each Processor.
# We need a connection for Pro's real-time change listener
@ -47,7 +47,7 @@ module Sidekiq
# - enterprise's leader election
# - enterprise's cron support
def verify_sizing(size, concurrency)
raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
end
def build_client(options)


@ -101,20 +101,20 @@ module Sidekiq
##
# The Queues class is only for testing the fake queue implementation.
# There are 2 data structures involved in tandem. This is due to the
# Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
# Rspec syntax of change(HardJob.jobs, :size). It keeps a reference
# to the array. Because the array was derived from a filter of the total
# jobs enqueued, it appeared as though the array didn't change.
#
# To solve this, we'll keep 2 hashes containing the jobs. One with keys based
# on the queue, and another with keys of the worker names, so the array for
# QueueWorker.jobs is a straight reference to a real array.
# on the queue, and another with keys of the job type, so the array for
# HardJob.jobs is a straight reference to a real array.
#
# Queue-based hash:
#
# {
# "default"=>[
# {
# "class"=>"TestTesting::QueueWorker",
# "class"=>"TestTesting::HardJob",
# "args"=>[1, 2],
# "retry"=>true,
# "queue"=>"default",
@ -124,12 +124,12 @@ module Sidekiq
# ]
# }
#
# Worker-based hash:
# Job-based hash:
#
# {
# "TestTesting::QueueWorker"=>[
# "TestTesting::HardJob"=>[
# {
# "class"=>"TestTesting::QueueWorker",
# "class"=>"TestTesting::HardJob",
# "args"=>[1, 2],
# "retry"=>true,
# "queue"=>"default",
@ -144,14 +144,14 @@ module Sidekiq
# require 'sidekiq/testing'
#
# assert_equal 0, Sidekiq::Queues["default"].size
# HardWorker.perform_async(:something)
# HardJob.perform_async(:something)
# assert_equal 1, Sidekiq::Queues["default"].size
# assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
#
# You can also clear all workers' jobs:
# You can also clear all jobs:
#
# assert_equal 0, Sidekiq::Queues["default"].size
# HardWorker.perform_async(:something)
# HardJob.perform_async(:something)
# Sidekiq::Queues.clear_all
# assert_equal 0, Sidekiq::Queues["default"].size
#
@ -170,35 +170,36 @@ module Sidekiq
def push(queue, klass, job)
jobs_by_queue[queue] << job
jobs_by_worker[klass] << job
jobs_by_class[klass] << job
end
def jobs_by_queue
@jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
end
def jobs_by_worker
@jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
def jobs_by_class
@jobs_by_class ||= Hash.new { |hash, key| hash[key] = [] }
end
alias_method :jobs_by_worker, :jobs_by_class
def delete_for(jid, queue, klass)
jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
jobs_by_class[klass].delete_if { |job| job["jid"] == jid }
end
def clear_for(queue, klass)
jobs_by_queue[queue].clear
jobs_by_worker[klass].clear
jobs_by_class[klass].clear
end
def clear_all
jobs_by_queue.clear
jobs_by_worker.clear
jobs_by_class.clear
end
end
end
module Worker
module Job
##
# The Sidekiq testing infrastructure overrides perform_async
# so that it does not actually touch the network. Instead it
@ -212,16 +213,16 @@ module Sidekiq
#
# require 'sidekiq/testing'
#
# assert_equal 0, HardWorker.jobs.size
# HardWorker.perform_async(:something)
# assert_equal 1, HardWorker.jobs.size
# assert_equal :something, HardWorker.jobs[0]['args'][0]
# assert_equal 0, HardJob.jobs.size
# HardJob.perform_async(:something)
# assert_equal 1, HardJob.jobs.size
# assert_equal :something, HardJob.jobs[0]['args'][0]
#
# assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
# MyMailer.delay.send_welcome_email('foo@example.com')
# assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
#
# You can also clear and drain all workers' jobs:
# You can also clear and drain all job types:
#
# assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
# assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
@ -241,14 +242,14 @@ module Sidekiq
#
# RSpec.configure do |config|
# config.before(:each) do
# Sidekiq::Worker.clear_all
# Sidekiq::Job.clear_all
# end
# end
#
# or for acceptance testing, i.e. with cucumber:
#
# AfterStep do
# Sidekiq::Worker.drain_all
# Sidekiq::Job.drain_all
# end
#
# When I sign up as "foo@example.com"
@ -262,7 +263,7 @@ module Sidekiq
# Jobs queued for this worker
def jobs
Queues.jobs_by_worker[to_s]
Queues.jobs_by_class[to_s]
end
# Clear all jobs for this worker
@ -288,11 +289,11 @@ module Sidekiq
end
def process_job(job)
worker = new
worker.jid = job["jid"]
worker.bid = job["bid"] if worker.respond_to?(:bid=)
Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do
execute_job(worker, job["args"])
inst = new
inst.jid = job["jid"]
inst.bid = job["bid"] if inst.respond_to?(:bid=)
Sidekiq::Testing.server_middleware.invoke(inst, job, job["queue"]) do
execute_job(inst, job["args"])
end
end
@ -306,18 +307,18 @@ module Sidekiq
Queues.jobs_by_queue.values.flatten
end
# Clear all queued jobs across all workers
# Clear all queued jobs
def clear_all
Queues.clear_all
end
# Drain all queued jobs across all workers
# Drain (execute) all queued jobs
def drain_all
while jobs.any?
worker_classes = jobs.map { |job| job["class"] }.uniq
job_classes = jobs.map { |job| job["class"] }.uniq
worker_classes.each do |worker_class|
Sidekiq::Testing.constantize(worker_class).drain
job_classes.each do |job_class|
Sidekiq::Testing.constantize(job_class).drain
end
end
end
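A short test sketch against the fake-queue API described above (Minitest is assumed; `HardJob` is a placeholder as in the comments):
```
require "minitest/autorun"
require "sidekiq/testing"

Sidekiq::Testing.fake!

class HardJob
  include Sidekiq::Job
  def perform(x, y); end
end

class HardJobTest < Minitest::Test
  def teardown
    Sidekiq::Job.clear_all # wipe every fake queue between tests
  end

  def test_enqueues_to_fake_queue
    assert_equal 0, HardJob.jobs.size
    HardJob.perform_async(1, 2)
    assert_equal 1, HardJob.jobs.size
    assert_equal 1, Sidekiq::Queues["default"].size

    HardJob.drain # executes and empties this class's jobs
    assert_equal 0, HardJob.jobs.size
  end
end
```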


@ -4,7 +4,7 @@ require "sidekiq/testing"
##
# The Sidekiq inline infrastructure overrides perform_async so that it
# actually calls perform instead. This allows workers to be run inline in a
# actually calls perform instead. This allows jobs to be run inline in a
# testing environment.
#
# This is similar to `Resque.inline = true` functionality.
@ -15,8 +15,8 @@ require "sidekiq/testing"
#
# $external_variable = 0
#
# class ExternalWorker
# include Sidekiq::Worker
# class ExternalJob
# include Sidekiq::Job
#
# def perform
# $external_variable = 1
@ -24,7 +24,7 @@ require "sidekiq/testing"
# end
#
# assert_equal 0, $external_variable
# ExternalWorker.perform_async
# ExternalJob.perform_async
# assert_equal 1, $external_variable
#
Sidekiq::Testing.inline!


@ -140,8 +140,8 @@ module Sidekiq
params[:direction] == "asc" ? "&uarr;" : "&darr;"
end
def workers
@workers ||= Sidekiq::Workers.new
def workset
@work ||= Sidekiq::WorkSet.new
end
def processes
@ -175,7 +175,7 @@ module Sidekiq
end
def current_status
workers.size == 0 ? "idle" : "active"
workset.size == 0 ? "idle" : "active"
end
def relative_time(time)


@ -82,7 +82,7 @@ module Sidekiq
end
def get_sidekiq_options # :nodoc:
self.sidekiq_options_hash ||= Sidekiq.default_worker_options
self.sidekiq_options_hash ||= Sidekiq.default_job_options
end
def sidekiq_class_attribute(*attrs)


@ -1,3 +1,4 @@
Sidekiq.default_worker_options = { queue: "something" }
Sidekiq.configure_client do |config|
config.redis = { :size => 2 }
end


@ -604,7 +604,7 @@ describe 'API' do
conn.hmset(key, 'info', Sidekiq.dump_json(pdata), 'busy', 0, 'beat', Time.now.to_f)
end
s = "#{key}:workers"
s = "#{key}:work"
data = Sidekiq.dump_json({ 'payload' => "{}", 'queue' => 'default', 'run_at' => Time.now.to_i })
Sidekiq.redis do |c|
c.hmset(s, '1234', data)
@ -618,7 +618,7 @@ describe 'API' do
assert_equal Time.now.year, Time.at(y['run_at']).year
end
s = "#{key}:workers"
s = "#{key}:work"
data = Sidekiq.dump_json({ 'payload' => {}, 'queue' => 'default', 'run_at' => (Time.now.to_i - 2*60*60) })
Sidekiq.redis do |c|
c.multi do |transaction|


@ -110,7 +110,7 @@ describe Sidekiq::Client do
it 'enqueues' do
Sidekiq.redis {|c| c.flushdb }
assert_equal Sidekiq.default_worker_options, MyWorker.get_sidekiq_options
assert_equal Sidekiq.default_job_options, MyWorker.get_sidekiq_options
assert MyWorker.perform_async(1, 2)
assert Sidekiq::Client.enqueue(MyWorker, 1, 2)
assert Sidekiq::Client.enqueue_to(:custom_queue, MyWorker, 1, 2)


@ -28,13 +28,13 @@ describe Sidekiq::Launcher do
@launcher.manager = @mgr
@id = @launcher.identity
Sidekiq::Processor::WORKER_STATE.set('a', {'b' => 1})
Sidekiq::Processor::WORK_STATE.set('a', {'b' => 1})
@proctitle = $0
end
after do
Sidekiq::Processor::WORKER_STATE.clear
Sidekiq::Processor::WORK_STATE.clear
$0 = @proctitle
end


@ -48,14 +48,14 @@ describe Sidekiq do
end
end
describe 'default_worker_options' do
describe 'default_job_options' do
it 'stringifies keys' do
@old_options = Sidekiq.default_worker_options
@old_options = Sidekiq.default_job_options
begin
Sidekiq.default_worker_options = { queue: 'cat'}
assert_equal 'cat', Sidekiq.default_worker_options['queue']
Sidekiq.default_job_options = { queue: 'cat'}
assert_equal 'cat', Sidekiq.default_job_options['queue']
ensure
Sidekiq.default_worker_options = @old_options
Sidekiq.default_job_options = @old_options
end
end
end


@ -66,11 +66,11 @@ describe Sidekiq::Web do
conn.incr('busy')
conn.sadd('processes', 'foo:1234')
conn.hmset('foo:1234', 'info', Sidekiq.dump_json('hostname' => 'foo', 'started_at' => Time.now.to_f, "queues" => [], 'concurrency' => 10), 'at', Time.now.to_f, 'busy', 4)
identity = 'foo:1234:workers'
identity = 'foo:1234:work'
hash = {:queue => 'critical', :payload => { 'class' => WebWorker.name, 'args' => [1,'abc'] }, :run_at => Time.now.to_i }
conn.hmset(identity, 1001, Sidekiq.dump_json(hash))
end
assert_equal ['1001'], Sidekiq::Workers.new.map { |pid, tid, data| tid }
assert_equal ['1001'], Sidekiq::WorkSet.new.map { |pid, tid, data| tid }
get '/busy'
assert_equal 200, last_response.status
@ -446,7 +446,7 @@ describe Sidekiq::Web do
pro = 'foo:1234'
conn.sadd('processes', pro)
conn.hmset(pro, 'info', Sidekiq.dump_json('started_at' => Time.now.to_f, 'labels' => ['frumduz'], 'queues' =>[], 'concurrency' => 10), 'busy', 1, 'beat', Time.now.to_f)
identity = "#{pro}:workers"
identity = "#{pro}:work"
hash = {:queue => 'critical', :payload => { 'class' => "FailWorker", 'args' => ["<a>hello</a>"] }, :run_at => Time.now.to_i }
conn.hmset(identity, 100001, Sidekiq.dump_json(hash))
conn.incr('busy')
@ -721,7 +721,7 @@ describe Sidekiq::Web do
conn.multi do |transaction|
transaction.sadd("processes", key)
transaction.hmset(key, 'info', Sidekiq.dump_json('hostname' => 'foo', 'started_at' => Time.now.to_f, "queues" => []), 'at', Time.now.to_f, 'busy', 4)
transaction.hmset("#{key}:workers", Time.now.to_f, msg)
transaction.hmset("#{key}:work", Time.now.to_f, msg)
end
end
end


@ -9,7 +9,7 @@
</li>
<li class="busy col-sm-1">
<a href="<%= root_path %>busy">
<span id="txtBusy" class="count"><%= number_with_delimiter(workers.size) %></span>
<span id="txtBusy" class="count"><%= number_with_delimiter(workset.size) %></span>
<span class="desc"><%= t('Busy') %></span>
</a>
</li>


@ -15,7 +15,7 @@
<p><%= t('Threads') %></p>
</div>
<div class="stat">
<h3><%= ws = workers.size; number_with_delimiter(ws) %></h3>
<h3><%= ws = workset.size; number_with_delimiter(ws) %></h3>
<p><%= t('Busy') %></p>
</div>
<div class="stat">
@ -109,7 +109,7 @@
<th><%= t('Arguments') %></th>
<th><%= t('Started') %></th>
</thead>
<% workers.each do |process, thread, msg| %>
<% workset.each do |process, thread, msg| %>
<% job = Sidekiq::JobRecord.new(msg['payload']) %>
<tr>
<td><%= process %></td>