1
0
Fork 0
mirror of https://github.com/mperham/sidekiq.git synced 2022-11-09 13:52:34 -05:00

Introduce Sidekiq::Capsule (#5487)

* Initial work on Sidekiq::Config

* Initial work on Sidekiq::Config

* reduce dependencies in deploy marks

* bare sidekiq and webapp

* Modify runtime to work with Capsules

* Cleanup

* Rename test files to remove test_ prefix

* Update test suite and standard rules to be more compliant

* Move constant definition outside code, per standard formatting

* Loads of changes for introduction of Capsules

* Remove Redis adapter abstraction

* update capsule overview

* Ensure Sidekiq.redis uses the correct pool for jobs running within a Capsule

* Use default_capsule for safety

* Slow down the beat to halve its Redis overhead

* move config fixtures into cfg/

* Add capsule middleware test

* use accessor
This commit is contained in:
Mike Perham 2022-08-25 10:15:11 -07:00 committed by GitHub
parent d0eb6f0f46
commit 29dca70e24
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
81 changed files with 2446 additions and 2068 deletions

View file

@ -6,8 +6,4 @@ ignore:
- Lint/InheritException
- '**/*':
- Lint/RescueException
- Security/YAMLLoad
- Style/GlobalVars
- 'test/test*.rb':
- Lint/ConstantDefinitionInBlock

View file

@ -5,6 +5,7 @@ gemspec
gem "rake"
gem "rails"
gem "redis-client"
# gem "debug"
# Required for Ruby 3.1
# https://github.com/mikel/mail/pull/1439

View file

@ -13,7 +13,7 @@ require "standard/rake"
Rake::TestTask.new(:test) do |test|
test.warning = true
test.pattern = "test/**/test_*.rb"
test.pattern = "test/**/*.rb"
end
task default: [:standard, :test]

View file

@ -3,4 +3,3 @@
source "https://rubygems.org"
gem "sidekiq", path: ".."
gem "rackup"

6
bare/bare.ru Normal file
View file

@ -0,0 +1,6 @@
# "Bare" rackup file: serves ONLY the Sidekiq Web UI, with no host application.
require "securerandom"
require "sidekiq/web"

# Sidekiq::Web needs a session middleware for CSRF protection.
# The secret is regenerated on every boot, so sessions do not survive restarts.
secret_key = SecureRandom.hex(32)
use Rack::Session::Cookie, secret: secret_key, same_site: true, max_age: 86400
run Sidekiq::Web

View file

@ -1 +1,7 @@
require "sidekiq/api"
Sidekiq.configure_server do |config|
  config.redis = {db: 14}
  # Extra capsule named "single": processes the "single_threaded" queue with
  # one worker thread so its jobs execute serially.
  config.capsule("single") do |cap|
    cap.concurrency = 1
    cap.queues = %w[single_threaded]
  end
end

View file

@ -10,7 +10,7 @@ def integrate_with_systemd
return unless ENV["NOTIFY_SOCKET"]
Sidekiq.configure_server do |config|
Sidekiq.logger.info "Enabling systemd notification integration"
config.logger.info "Enabling systemd notification integration"
require "sidekiq/sd_notify"
config.on(:startup) do
Sidekiq::SdNotify.ready
@ -31,12 +31,7 @@ begin
cli.run
rescue => e
raise e if $DEBUG
if Sidekiq.error_handlers.length == 0
warn e.message
warn e.backtrace.join("\n")
else
cli.handle_exception e
end
warn e.message
warn e.backtrace.join("\n")
exit 1
end

257
config.rb Normal file
View file

@ -0,0 +1,257 @@
require "forwardable"

require "sidekiq/redis_connection"

module Sidekiq
  # Sidekiq::Config represents the configuration for an instance of Sidekiq.
  class Config
    extend Forwardable

    # Default value for every supported option; #initialize merges user
    # overrides on top of this hash.
    DEFAULTS = {
      queues: ["default"],
      labels: [],
      concurrency: 10,
      require: ".",
      strict: true,
      environment: nil,
      timeout: 25,
      poll_interval_average: nil,
      average_scheduled_poll_interval: 5,
      on_complex_arguments: :raise,
      error_handlers: [],
      death_handlers: [],
      lifecycle_events: {
        startup: [],
        quiet: [],
        shutdown: [],
        heartbeat: []
      },
      dead_max_jobs: 10_000,
      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
      reloader: proc { |&block| block.call }
    }

    # Fallback error handler: logs the context hash (if any) and the
    # exception (class, message, backtrace) to the config's logger.
    # NOTE(review): `Sidekiq.config` is not defined anywhere in this changeset
    # (the module defines `default_configuration`) — verify the default value.
    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.config) {
      l = cfg.logger
      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
      l.warn("#{ex.class.name}: #{ex.message}")
      l.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
    }

    # @param options [Hash] overrides merged on top of DEFAULTS
    def initialize(options = {})
      @options = DEFAULTS.merge(options)
      # Guarantee at least one error handler is always registered.
      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
      # Simple name -> instance registry, see #register / #lookup.
      @directory = {}
    end

    # Hash-style access delegates straight to the underlying options hash.
    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
    attr_reader :options

    # config.concurrency = 5
    def concurrency=(val)
      self[:concurrency] = Integer(val)
    end

    # config.queues = %w( high default low ) # strict
    # config.queues = %w( high,3 default,2 low,1 ) # weighted
    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
    #
    # With weighted priority, queue will be checked first (weight / total) of the time.
    # high will be checked first (3/6) or 50% of the time.
    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
    # are ridiculous and unnecessarily expensive. You can get random queue ordering
    # by explicitly setting all weights to 1.
    def queues=(val)
      self[:queues] = Array(val).each_with_object([]) do |qstr, memo|
        name, weight = qstr.split(",")
        # Any explicit weight switches the process out of strict (ordered) mode.
        self[:strict] = false if weight.to_i > 0
        [weight.to_i, 1].max.times do
          memo << name
        end
      end
    end

    # Yield a Redis connection from the pool, retrying once on failover-style
    # errors. Raises ArgumentError if no block is given.
    def redis
      raise ArgumentError, "requires a block" unless block_given?
      redis_pool.with do |conn|
        retryable = true
        begin
          yield conn
        rescue RedisClientAdapter::BaseError => ex
          # 2550 Failover can cause the server to become a replica, need
          # to disconnect and reopen the socket to get back to the primary.
          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
          # 4985 Use the same logic when a blocking command is force-unblocked
          # The same retry logic is also used in client.rb
          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
            conn.close
            retryable = false
            retry
          end
          raise
        end
      end
    end

    # Store an arbitrary named instance for later lookup.
    def register(name, instance)
      @directory[name] = instance
    end

    def lookup(name)
      # JNDI is just a fancy name for a hash lookup
      @directory[name]
    end

    # Result of Redis INFO, or canned fake data when INFO has been renamed.
    def redis_info
      redis do |conn|
        conn.info
      rescue RedisClientAdapter::CommandError => ex
        # 2850 return fake version when INFO command has (probably) been renamed
        raise unless /unknown command/.match?(ex.message)
        {
          "redis_version" => "9.9.9",
          "uptime_in_days" => "9999",
          "connected_clients" => "9999",
          "used_memory_human" => "9P",
          "used_memory_peak_human" => "9P"
        }.freeze
      end
    end

    def redis_pool
      # connection pool is lazy, it will not create connections unless you actually need them
      # so don't be skimpy!
      @redis ||= RedisConnection.create(size: required_pool_size, logger: logger)
    end

    # Accepts either an existing ConnectionPool or a hash of options used to
    # create one. Rejects pools smaller than #required_pool_size.
    def redis=(hash)
      pool = if hash.is_a?(ConnectionPool)
        hash
      else
        RedisConnection.create(hash.merge(size: required_pool_size, logger: logger))
      end
      raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{pool.size} connections but must have at least #{required_pool_size}" if pool.size < required_pool_size
      @redis = pool
    end

    # Sidekiq needs many concurrent Redis connections.
    #
    # We need a connection for each Processor.
    # We need a connection for Pro's real-time change listener
    # We need a connection to various features to call Redis every few seconds:
    # - the process heartbeat.
    # - enterprise's leader election
    # - enterprise's cron support
    def required_pool_size
      if Sidekiq.server?
        self[:concurrency] + 3
      elsif ENV["RAILS_MAX_THREADS"]
        Integer(ENV["RAILS_MAX_THREADS"])
      else
        5
      end
    end

    # Global client middleware chain; yields it when a block is given.
    def client_middleware
      @client_chain ||= Middleware::Chain.new
      yield @client_chain if block_given?
      @client_chain
    end

    # Global server middleware chain; yields it when a block is given.
    def server_middleware
      @server_chain ||= Middleware::Chain.new
      yield @server_chain if block_given?
      @server_chain
    end

    ##
    # Death handlers are called when all retries for a job have been exhausted and
    # the job dies. It's the notification to your application
    # that this job will not succeed without manual intervention.
    #
    # Sidekiq.configure_server do |config|
    # config.death_handlers << ->(job, ex) do
    # end
    # end
    def death_handlers
      @options[:death_handlers]
    end

    # deprecated
    # NOTE(review): this deprecation message has an unmatched backtick.
    def log_formatter
      warn "config.log_formatter is deprecated, use `config.logger.formatter"
      logger.formatter
    end

    # deprecated
    def log_formatter=(log_formatter)
      warn "`config.log_formatter=` is deprecated, use `config.logger.formatter=`"
      logger.formatter = log_formatter
    end

    # Lazily-built default logger on $stdout at :info level.
    # On Heroku (DYNO set) the formatter omits timestamps since the platform adds them.
    def logger
      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
        log.formatter = if ENV["DYNO"]
          Sidekiq::Logger::Formatters::WithoutTimestamp.new
        else
          Sidekiq::Logger::Formatters::Pretty.new
        end
      end
    end

    # Assigning nil effectively silences logging (level raised to FATAL).
    def logger=(logger)
      if logger.nil?
        self.logger.level = Logger::FATAL
        return
      end
      logger.extend(Sidekiq::LoggingUtils)
      @logger = logger
    end

    # How frequently Redis should be checked by a random Sidekiq process for
    # scheduled and retriable jobs. Each individual process will take turns by
    # waiting some multiple of this value.
    #
    # See sidekiq/scheduled.rb for an in-depth explanation of this value
    def average_scheduled_poll_interval=(interval)
      @options[:average_scheduled_poll_interval] = interval
    end

    # Register a proc to handle any error which occurs within the Sidekiq process.
    #
    # Sidekiq.configure_server do |config|
    # config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
    # end
    #
    # The default error handler logs errors to @logger.
    def error_handlers
      @options[:error_handlers]
    end

    # Register a block to run at a point in the Sidekiq lifecycle.
    # :startup, :quiet or :shutdown are valid events.
    #
    # Sidekiq.configure_server do |config|
    # config.on(:shutdown) do
    # puts "Goodbye cruel world!"
    # end
    # end
    def on(event, &block)
      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
      @options[:lifecycle_events][event] << block
    end

    # INTERNAL USE ONLY
    # Run every registered error handler for ex; a handler that itself raises
    # is logged and does not prevent the remaining handlers from running.
    def handle_exception(ex, ctx = {})
      @options[:error_handlers].each do |handler|
        handler.call(ex, ctx, self)
      rescue => e
        l = logger
        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
        l.error e
        l.error e.backtrace.join("\n") unless e.backtrace.nil?
      end
    end
  end
end

140
docs/capsule.md Normal file
View file

@ -0,0 +1,140 @@
# Sidekiq 7.0 Capsules
Sidekiq 7.0 contains the largest internal refactoring since Sidekiq 4.0.
This refactoring is designed to improve deployment flexibility and allow
new use cases.
# The Problem
Before 7.0, Sidekiq used a large number of global methods on the Sidekiq module to access things
like the Redis connection pool, the logger, and process configuration, e.g.
```ruby
Sidekiq.logger.info "Hello world"
Sidekiq.redis {|c| c.sadd("some_set", "new_member") }
Sidekiq.configure_server {|config| config... }
```
The issue is that this pattern implies a global, mutable singleton.
It does not work with Ractors. It does not allow multiple instances in one process.
It does not allow embedding within another Ruby process (e.g. puma).
Today the only supported Sidekiq deployment pattern is running `bundle exec sidekiq`.
# The Solution
Sidekiq 7.0 aims to refactor Sidekiq internals to allow more flexibility in how
Sidekiq can be used.
## Sidekiq::Config
Before, all Sidekiq configuration went through the Sidekiq module and was stored in the top-level hash at `Sidekiq.options`.
Now Sidekiq::CLI creates a `Sidekiq::Config` object which holds the global configuration at, shockingly, `Sidekiq.default_configuration`.
This instance is now passed into `Sidekiq.configure_{client,server} do |config|`
## Sidekiq::Capsule
`Sidekiq::Capsule` represents the set of resources necessary to process a set of queues.
By default, Sidekiq::CLI creates one `Sidekiq::Capsule` instance and mutates it according to the command line parameters and the data in `config/sidekiq.yml`.
You create additional Capsules within your initializer, like so:
```ruby
Sidekiq.configure_server do |config|
config.capsule("single-threaded") do |cap|
cap.concurrency = 1
cap.queues = %w[single]
end
end
```
Capsules can have their own customized middleware chains but by default will inherit the global middleware configuration. Each Capsule will have its own Redis connection pool sized to the configured concurrency.
`Sidekiq::Launcher` is the top-level component which takes a `Sidekiq::Config` and launches the
tree of runtime components for each capsule. Once passed to Launcher, the global Config and each Capsule should be considered frozen and immutable.
Every internal component of Sidekiq takes a `Sidekiq::Capsule` instance and uses it. The Capsule
holds previously "global" state like the connection pool, error handlers, lifecycle callbacks, etc.
There is still one iron-clad rule: **a Sidekiq process only executes jobs from one Redis instance**; all Capsules within a process must use the same Redis instance.
If you want to process jobs from two separate Redis instances, you need to start two separate Sidekiq processes.
## Use Cases
With Capsules, you can programmatically tune how a Sidekiq process handles specific queues. One
Capsule can use 1 thread to process jobs within a `thread_unsafe` queue while another Capsule uses
10 threads to process `default` jobs.
```ruby
# within your initializer
Sidekiq.configure_server do |config|
config.capsule("unsafe") do |capsule|
capsule.queues = %w(thread_unsafe)
capsule.concurrency = 1
end
end
```
The contents of `config/sidekiq.yml` configure the default capsule.
## Redis Pools
Before 7.0, the Sidekiq process would create a redis pool sized to `concurrency + 3`.
Now Sidekiq will create multiple Redis pools: a global pool of **five** connections available to global components, a pool of **concurrency** for the job processors within each Capsule.
So for a Sidekiq process with a default Capsule and a single threaded Capsule, you should have three Redis pools of size 5, 10 and 1.
Remember that connection pools are lazy so it won't create all those connections unless they are actively needed.
All Sidekiq components and add-ons should avoid using `Sidekiq.redis` or `Sidekiq.logger`.
Instead use the implicit `redis` or `logger` methods available on `Sidekiq::Component`, `Sidekiq::Capsule` or `Sidekiq::{Client,Server}Middleware`.
## Sidekiq::Component
`Sidekiq::Component` is a module which provides helpful methods based on a `config` reader:
```ruby
module Sidekiq::Component
def config
@config
end
def redis(&block)
config.redis(&block)
end
def logger
config.logger
end
def handle_exception(ex, ctx)
# avoids calling `Sidekiq.error_handlers...`
config.handle_exception(ex, ctx)
end
end
class Sidekiq::Processor
include Sidekiq::Component
def initialize(capsule)
@config = capsule
end
def ...
# old
Sidekiq.redis {|c| ... }
Sidekiq.logger.info "Hello world!"
# new
redis {|c| ... }
logger.info "Hello world!"
rescue => ex
handle_exception(ex, ...)
end
end
```
Sidekiq::Capsule overrides Sidekiq::Config in order to provide Capsule-local resources so
you'll see places within Sidekiq where Capsule acts like a Config.
With this pattern, we greatly reduce the use of global APIs throughout Sidekiq internals.
Where before we'd call `Sidekiq.xyz`, we instead provide similar functionality like
`config.xyz`.

View file

@ -1,86 +0,0 @@
# Sidekiq 7.0 Components
Sidekiq 7.0 contains the largest internal refactoring since Sidekiq 4.0.
This refactoring is designed to improve deployment flexibility and allow
new use cases.
# The Problem
Sidekiq today uses a large number of class-level methods to access things
like the Redis connection pool, the logger, and process configuration, e.g.
```ruby
Sidekiq.logger.info "Hello world"
Sidekiq.redis {|c| c.sadd("some_set", "new_member") }
Sidekiq.configure_server {|config| config... }
```
The problem is that this pattern implies a global, mutable singleton.
It does not work with Ractors. It does not allow multiple instances in one process.
It does not allow embedding within another Ruby process (e.g. puma).
Today the only supported Sidekiq deployment pattern is running `bundle exec sidekiq`.
# The Solution
Sidekiq 7.0 aims to refactor Sidekiq internals to allow more flexibility in how
Sidekiq can be used.
## Sidekiq::Config
`Sidekiq::Config` represents the configuration for an instance of Sidekiq. Sidekiq::CLI
creates a `Sidekiq::Config` instance and mutates it according to the command line parameters
and the data in `config/sidekiq.yml`.
`Sidekiq::Launcher` is the top-level component which takes a `Sidekiq::Config` and creates the
tree of runtime components. Once passed to Launcher, the Config is frozen and immutable.
Every internal component of Sidekiq takes a `Sidekiq::Config` instance and uses it. The Config
holds previously "global" state like the connection pool, error handlers, lifecycle callbacks, etc.
## Sidekiq::Component
`Sidekiq::Component` is a module which provides helpful methods based on a `config` reader:
```ruby
module Sidekiq::Component
def config
@config
end
def redis(&block)
config.redis(&block)
end
def logger
config.logger
end
def handle_exception(ex, ctx)
# avoids calling `Sidekiq.error_handlers...`
config.handle_exception(ex, ctx)
end
end
class Sidekiq::Processor
include Sidekiq::Component
def initialize(config)
@config = config
end
def ...
# old
Sidekiq.redis {|c| ... }
Sidekiq.logger.info "Hello world!"
# new
redis {|c| ... }
logger.info "Hello world!"
rescue => ex
handle_exception(ex, ...)
end
end
```
With this pattern, we greatly reduce the use of global APIs throughout Sidekiq internals.
Where before we'd call `Sidekiq.xyz`, we instead provide similar functionality like
`config.xyz`.

View file

@ -3,6 +3,7 @@
require "sidekiq/version"
fail "Sidekiq #{Sidekiq::VERSION} does not support Ruby versions below 2.7.0." if RUBY_PLATFORM != "java" && Gem::Version.new(RUBY_VERSION) < Gem::Version.new("2.7.0")
require "sidekiq/config"
require "sidekiq/logger"
require "sidekiq/client"
require "sidekiq/transaction_aware_client"
@ -16,234 +17,14 @@ module Sidekiq
NAME = "Sidekiq"
LICENSE = "See LICENSE and the LGPL-3.0 for licensing details."
DEFAULTS = {
queues: [],
labels: [],
concurrency: 10,
require: ".",
strict: true,
environment: nil,
timeout: 25,
poll_interval_average: nil,
average_scheduled_poll_interval: 5,
on_complex_arguments: :raise,
error_handlers: [],
death_handlers: [],
lifecycle_events: {
startup: [],
quiet: [],
shutdown: [],
# triggers when we fire the first heartbeat on startup OR repairing a network partition
heartbeat: [],
# triggers on EVERY heartbeat call, every 10 seconds
beat: []
},
dead_max_jobs: 10_000,
dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
reloader: proc { |&block| block.call }
}
FAKE_INFO = {
"redis_version" => "9.9.9",
"uptime_in_days" => "9999",
"connected_clients" => "9999",
"used_memory_human" => "9P",
"used_memory_peak_human" => "9P"
}
def self.❨╯°□°❩╯︵┻━┻
puts "Take a deep breath and count to ten..."
end
# config.concurrency = 5
def self.concurrency=(val)
self[:concurrency] = Integer(val)
end
# config.queues = %w( high default low ) # strict
# config.queues = %w( high,3 default,2 low,1 ) # weighted
# config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
#
# With weighted priority, queue will be checked first (weight / total) of the time.
# high will be checked first (3/6) or 50% of the time.
# I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
# are ridiculous and unnecessarily expensive. You can get random queue ordering
# by explicitly setting all weights to 1.
def self.queues=(val)
self[:queues] = Array(val).each_with_object([]) do |qstr, memo|
name, weight = qstr.split(",")
self[:strict] = false if weight.to_i > 0
[weight.to_i, 1].max.times do
memo << name
end
end
end
### Private APIs
def self.default_error_handler(ex, ctx)
logger.warn(dump_json(ctx)) unless ctx.empty?
logger.warn("#{ex.class.name}: #{ex.message}")
logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
end
# DEFAULT_ERROR_HANDLER is a constant that allows the default error handler to
# be referenced. It must be defined here, after the default_error_handler
# method is defined.
DEFAULT_ERROR_HANDLER = method(:default_error_handler)
@config = DEFAULTS.dup
def self.options
logger.warn "`config.options[:key] = value` is deprecated, use `config[:key] = value`: #{caller(1..2)}"
@config
end
def self.options=(opts)
logger.warn "config.options = hash` is deprecated, use `config.merge!(hash)`: #{caller(1..2)}"
@config = opts
end
def self.[](key)
@config[key]
end
def self.[]=(key, val)
@config[key] = val
end
def self.merge!(hash)
@config.merge!(hash)
end
def self.fetch(...)
@config.fetch(...)
end
def self.handle_exception(ex, ctx = {})
self[:error_handlers].each do |handler|
handler.call(ex, ctx)
rescue => ex
logger.error "!!! ERROR HANDLER THREW AN ERROR !!!"
logger.error ex
logger.error ex.backtrace.join("\n") unless ex.backtrace.nil?
end
end
###
##
# Configuration for Sidekiq server, use like:
#
# Sidekiq.configure_server do |config|
# config.server_middleware do |chain|
# chain.add MyServerHook
# end
# end
def self.configure_server
yield self if server?
end
##
# Configuration for Sidekiq client, use like:
#
# Sidekiq.configure_client do |config|
# config.redis = { size: 1, url: 'redis://myhost:8877/0' }
# end
def self.configure_client
yield self unless server?
end
def self.server?
defined?(Sidekiq::CLI)
end
def self.redis
raise ArgumentError, "requires a block" unless block_given?
redis_pool.with do |conn|
retryable = true
begin
yield conn
rescue RedisConnection.adapter::BaseError => ex
# 2550 Failover can cause the server to become a replica, need
# to disconnect and reopen the socket to get back to the primary.
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary
# 4985 Use the same logic when a blocking command is force-unblocked
# The same retry logic is also used in client.rb
if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
conn.close
retryable = false
retry
end
raise
end
end
end
def self.redis_info
redis do |conn|
conn.info
rescue RedisConnection.adapter::CommandError => ex
# 2850 return fake version when INFO command has (probably) been renamed
raise unless /unknown command/.match?(ex.message)
FAKE_INFO
end
end
def self.redis_pool
@redis ||= RedisConnection.create
end
def self.redis=(hash)
@redis = if hash.is_a?(ConnectionPool)
hash
else
RedisConnection.create(hash)
end
end
def self.client_middleware
@client_chain ||= Middleware::Chain.new(self)
yield @client_chain if block_given?
@client_chain
end
def self.server_middleware
@server_chain ||= default_server_middleware
yield @server_chain if block_given?
@server_chain
end
def self.default_server_middleware
Middleware::Chain.new(self)
end
def self.default_worker_options=(hash) # deprecated
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_job_options=(hash)
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_worker_options # deprecated
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
def self.default_job_options
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
##
# Death handlers are called when all retries for a job have been exhausted and
# the job dies. It's the notification to your application
# that this job will not succeed without manual intervention.
#
# Sidekiq.configure_server do |config|
# config.death_handlers << ->(job, ex) do
# end
# end
def self.death_handlers
self[:death_handlers]
end
def self.load_json(string)
JSON.parse(string)
end
@ -252,34 +33,6 @@ module Sidekiq
JSON.generate(object)
end
def self.log_formatter
@log_formatter ||= if ENV["DYNO"]
Sidekiq::Logger::Formatters::WithoutTimestamp.new
else
Sidekiq::Logger::Formatters::Pretty.new
end
end
def self.log_formatter=(log_formatter)
@log_formatter = log_formatter
logger.formatter = log_formatter
end
def self.logger
@logger ||= Sidekiq::Logger.new($stdout, level: :info)
end
def self.logger=(logger)
if logger.nil?
self.logger.level = Logger::FATAL
return self.logger
end
logger.extend(Sidekiq::LoggingUtils)
@logger = logger
end
def self.pro?
defined?(Sidekiq::Pro)
end
@ -288,42 +41,36 @@ module Sidekiq
defined?(Sidekiq::Enterprise)
end
# How frequently Redis should be checked by a random Sidekiq process for
# scheduled and retriable jobs. Each individual process will take turns by
# waiting some multiple of this value.
#
# See sidekiq/scheduled.rb for an in-depth explanation of this value
def self.average_scheduled_poll_interval=(interval)
self[:average_scheduled_poll_interval] = interval
def self.redis_pool
(Thread.current[:sidekiq_capsule] || default_configuration).redis_pool
end
# Register a proc to handle any error which occurs within the Sidekiq process.
#
# Sidekiq.configure_server do |config|
# config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
# end
#
# The default error handler logs errors to Sidekiq.logger.
def self.error_handlers
self[:error_handlers]
end
# Register a block to run at a point in the Sidekiq lifecycle.
# :startup, :quiet or :shutdown are valid events.
#
# Sidekiq.configure_server do |config|
# config.on(:shutdown) do
# puts "Goodbye cruel world!"
# end
# end
def self.on(event, &block)
raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
raise ArgumentError, "Invalid event name: #{event}" unless self[:lifecycle_events].key?(event)
self[:lifecycle_events][event] << block
def self.redis(&block)
(Thread.current[:sidekiq_capsule] || default_configuration).redis(&block)
end
def self.strict_args!(mode = :raise)
self[:on_complex_arguments] = mode
Sidekiq::Config::DEFAULTS[:on_complex_arguments] = mode
end
def self.default_job_options=(hash)
@default_job_options = default_job_options.merge(hash.transform_keys(&:to_s))
end
def self.default_job_options
@default_job_options ||= {"retry" => true, "queue" => "default"}
end
def self.default_configuration
@config ||= Sidekiq::Config.new
end
def self.configure_server
yield default_configuration if server?
end
def self.configure_client
yield default_configuration unless server?
end
# We are shutting down Sidekiq but what about threads that

View file

@ -57,7 +57,18 @@ module Sidekiq
end
def queues
Sidekiq::Stats::Queues.new.lengths
Sidekiq.redis do |conn|
queues = conn.sscan_each("queues").to_a
lengths = conn.pipelined { |pipeline|
queues.each do |queue|
pipeline.llen("queue:#{queue}")
end
}
array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
array_of_arrays.to_h
end
end
# O(1) redis calls
@ -155,25 +166,8 @@ module Sidekiq
@stats[s] || raise(ArgumentError, "Unknown stat #{s}")
end
class Queues
def lengths
Sidekiq.redis do |conn|
queues = conn.sscan_each("queues").to_a
lengths = conn.pipelined { |pipeline|
queues.each do |queue|
pipeline.llen("queue:#{queue}")
end
}
array_of_arrays = queues.zip(lengths).sort_by { |_, size| -size }
array_of_arrays.to_h
end
end
end
class History
def initialize(days_previous, start_date = nil)
def initialize(days_previous, start_date = nil, pool: nil)
# we only store five years of data in Redis
raise ArgumentError if days_previous < 1 || days_previous > (5 * 365)
@days_previous = days_previous
@ -198,15 +192,10 @@ module Sidekiq
keys = dates.map { |datestr| "stat:#{stat}:#{datestr}" }
begin
Sidekiq.redis do |conn|
conn.mget(keys).each_with_index do |value, idx|
stat_hash[dates[idx]] = value ? value.to_i : 0
end
Sidekiq.redis do |conn|
conn.mget(keys).each_with_index do |value, idx|
stat_hash[dates[idx]] = value ? value.to_i : 0
end
rescue RedisConnection.adapter::CommandError
# mget will trigger a CROSSSLOT error when run against a Cluster
# TODO Someone want to add Cluster support?
end
stat_hash
@ -585,16 +574,20 @@ module Sidekiq
# @!attribute [r] Name
attr_reader :name
# Redis location
attr_accessor :pool
# :nodoc:
# @api private
def initialize(name)
@pool = Sidekiq.default_configuration.redis_pool
@name = name
@_size = size
end
# real-time size of the set, will change
def size
Sidekiq.redis { |c| c.zcard(name) }
@pool.with { |c| c.zcard(name) }
end
# Scan through each element of the sorted set, yielding each to the supplied block.
@ -607,7 +600,7 @@ module Sidekiq
return to_enum(:scan, match, count) unless block_given?
match = "*#{match}*" unless match.include?("*")
Sidekiq.redis do |conn|
@pool.with do |conn|
conn.zscan_each(name, match: match, count: count) do |entry, score|
yield SortedEntry.new(self, score, entry)
end
@ -616,7 +609,7 @@ module Sidekiq
# @return [Boolean] always true
def clear
Sidekiq.redis do |conn|
@pool.with do |conn|
conn.unlink(name)
end
true
@ -638,7 +631,7 @@ module Sidekiq
# @param timestamp [Time] the score for the job
# @param job [Hash] the job data
def schedule(timestamp, job)
Sidekiq.redis do |conn|
@pool.with do |conn|
conn.zadd(name, timestamp.to_f.to_s, Sidekiq.dump_json(job))
end
end
@ -652,7 +645,7 @@ module Sidekiq
loop do
range_start = page * page_size + offset_size
range_end = range_start + page_size - 1
elements = Sidekiq.redis { |conn|
elements = @pool.with { |conn|
conn.zrange name, range_start, range_end, withscores: true
}
break if elements.empty?
@ -679,7 +672,7 @@ module Sidekiq
[score, score]
end
elements = Sidekiq.redis { |conn|
elements = @pool.with { |conn|
conn.zrangebyscore(name, begin_score, end_score, withscores: true)
}
@ -697,7 +690,7 @@ module Sidekiq
# @param jid [String] the job identifier
# @return [SortedEntry] the record or nil
def find_job(jid)
Sidekiq.redis do |conn|
@pool.with do |conn|
conn.zscan_each(name, match: "*#{jid}*", count: 100) do |entry, score|
job = JSON.parse(entry)
matched = job["jid"] == jid
@ -710,7 +703,7 @@ module Sidekiq
# :nodoc:
# @api private
def delete_by_value(name, value)
Sidekiq.redis do |conn|
@pool.with do |conn|
ret = conn.zrem(name, value)
@_size -= 1 if ret
ret
@ -720,7 +713,7 @@ module Sidekiq
# :nodoc:
# @api private
def delete_by_jid(score, jid)
Sidekiq.redis do |conn|
@pool.with do |conn|
elements = conn.zrangebyscore(name, score, score)
elements.each do |element|
if element.index(jid)
@ -790,11 +783,11 @@ module Sidekiq
# @param message [String] the job data as JSON
def kill(message, opts = {})
now = Time.now.to_f
Sidekiq.redis do |conn|
@pool.with do |conn|
conn.multi do |transaction|
transaction.zadd(name, now.to_s, message)
transaction.zremrangebyscore(name, "-inf", now - self.class.timeout)
transaction.zremrangebyrank(name, 0, - self.class.max_jobs)
transaction.zremrangebyscore(name, "-inf", now - Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds])
transaction.zremrangebyrank(name, 0, - Sidekiq::Config::DEFAULTS[:dead_max_jobs])
end
end
@ -802,7 +795,7 @@ module Sidekiq
job = Sidekiq.load_json(message)
r = RuntimeError.new("Job killed by API")
r.set_backtrace(caller)
Sidekiq.death_handlers.each do |handle|
Sidekiq.default_configuration.death_handlers.each do |handle|
handle.call(job, r)
end
end
@ -813,18 +806,6 @@ module Sidekiq
def retry_all
each(&:retry) while size > 0
end
# The maximum size of the Dead set. Older entries will be trimmed
# to stay within this limit. Default value is 10,000.
def self.max_jobs
Sidekiq[:dead_max_jobs]
end
# The time limit for entries within the Dead set. Older entries will be thrown away.
# Default value is six months.
def self.timeout
Sidekiq[:dead_timeout_in_seconds]
end
end
##

105
lib/sidekiq/capsule.rb Normal file
View file

@ -0,0 +1,105 @@
require "sidekiq/component"
require "sidekiq/fetch"

module Sidekiq
  # A Capsule bundles everything needed to process one or more queues at a
  # given concurrency: queue list, thread count, fetch strategy, middleware
  # chains and a dedicated Redis pool. One "default" Capsule is started but
  # the user may declare additional Capsules in the initializer.
  #
  # To process a "single" queue with one thread so jobs are processed
  # serially, you can do this:
  #
  #   Sidekiq.configure_server do |config|
  #     config.capsule("single-threaded") do |cap|
  #       cap.concurrency = 1
  #       cap.queues = %w(single)
  #     end
  #   end
  class Capsule
    include Sidekiq::Component

    attr_reader :name
    attr_reader :queues
    attr_reader :strict
    attr_accessor :concurrency
    attr_accessor :fetch_class

    def initialize(name, config)
      @name = name
      @config = config
      @queues = ["default"]
      @concurrency = 10
      @strict = true
      @fetch_class = Sidekiq::BasicFetch
    end

    # Lazily instantiate this capsule's fetch strategy.
    def fetcher
      @fetcher ||= fetch_class.new(self)
    end

    # Give the fetcher a chance to requeue any in-flight work on shutdown.
    def stop
      fetcher&.bulk_requeue([], nil)
    end

    # Accepts queue names as "name" or "name,weight" strings (or pre-split
    # arrays); a weighted entry repeats the name and disables strict ordering.
    def queues=(val)
      @strict = true
      @queues = Array(val).flat_map do |entry|
        parts = entry.is_a?(String) ? entry.split(",") : entry
        qname, weight = parts
        @strict = false if weight.to_i > 0
        Array.new([weight.to_i, 1].max) { qname }
      end
    end

    # Allow the middleware to be different per-capsule.
    # Avoid if possible and add middleware globally so all
    # capsules share the same chains. Easier to debug that way.
    def client_middleware
      chain = (@client_chain ||= config.client_middleware.dup)
      yield chain if block_given?
      chain
    end

    def server_middleware
      chain = (@server_chain ||= config.server_middleware.dup)
      yield chain if block_given?
      chain
    end

    def redis_pool
      # connection pool is lazy, it will not create connections unless you actually need them
      # so don't be skimpy!
      @redis ||= config.new_redis_pool(@concurrency)
    end

    # Yield a connection from this capsule's pool, reconnecting and retrying
    # once when a failover-style error is seen.
    def redis
      raise ArgumentError, "requires a block" unless block_given?
      redis_pool.with do |conn|
        attempts = 0
        begin
          yield conn
        rescue RedisClientAdapter::BaseError => ex
          # Failover can leave us talking to a replica (READONLY), a primary
          # with too few replicas (NOREPLICAS), or a force-unblocked blocking
          # command (UNBLOCKED). Close the socket and retry once to get back
          # to the primary; the same retry logic is also used in client.rb.
          if attempts.zero? && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
            conn.close
            attempts += 1
            retry
          end
          raise
        end
      end
    end

    def logger
      config.logger
    end
  end
end

View file

@ -10,6 +10,7 @@ require "fileutils"
require "sidekiq"
require "sidekiq/component"
require "sidekiq/capsule"
require "sidekiq/launcher"
module Sidekiq # :nodoc:
@ -22,13 +23,14 @@ module Sidekiq # :nodoc:
attr_accessor :config
def parse(args = ARGV.dup)
@config = Sidekiq
@config[:error_handlers].clear
@config[:error_handlers] << @config.method(:default_error_handler)
@config ||= Sidekiq::Config.new
setup_options(args)
initialize_logger
validate!
# if you are changing this in user or app code, you have a bug.
Sidekiq.instance_variable_set(:@config, @config)
end
def jruby?
@ -41,7 +43,7 @@ module Sidekiq # :nodoc:
def run(boot_app: true)
boot_application if boot_app
if environment == "development" && $stdout.tty? && @config.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
if environment == "development" && $stdout.tty? && @config.logger.formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
print_banner
end
logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
@ -90,9 +92,9 @@ module Sidekiq # :nodoc:
# Since the user can pass us a connection pool explicitly in the initializer, we
# need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
cursize = @config.redis_pool.size
needed = @config[:concurrency] + 2
raise "Sidekiq's pool of #{cursize} Redis connections is too small, please increase the size to at least #{needed}" if cursize < needed
@config.capsules.each do |cap|
raise ArgumentError, "Pool size too small" if cap.redis_pool.size < cap.concurrency
end
# cache process identity
@config[:identity] = identity
@ -260,6 +262,11 @@ module Sidekiq # :nodoc:
# merge with defaults
@config.merge!(opts)
cap = Sidekiq::Capsule.new("default", @config)
cap.queues = opts[:queues]
cap.concurrency = opts[:concurrency] || 10
@config.capsules << cap
end
def boot_application
@ -332,8 +339,8 @@ module Sidekiq # :nodoc:
end
o.on "-q", "--queue QUEUE[,WEIGHT]", "Queues to process with optional weights" do |arg|
queue, weight = arg.split(",")
parse_queue opts, queue, weight
opts[:queues] ||= []
opts[:queues] << arg
end
o.on "-r", "--require [PATH|DIR]", "Location of Rails application with jobs or file to require" do |arg|
@ -382,7 +389,7 @@ module Sidekiq # :nodoc:
def parse_config(path)
erb = ERB.new(File.read(path))
erb.filename = File.expand_path(path)
opts = load_yaml(erb.result) || {}
opts = YAML.safe_load(erb.result, permitted_classes: [Symbol], aliases: true) || {}
if opts.respond_to? :deep_symbolize_keys!
opts.deep_symbolize_keys!
@ -393,31 +400,9 @@ module Sidekiq # :nodoc:
opts = opts.merge(opts.delete(environment.to_sym) || {})
opts.delete(:strict)
parse_queues(opts, opts.delete(:queues) || [])
opts
end
def load_yaml(src)
if Psych::VERSION > "4.0"
YAML.safe_load(src, permitted_classes: [Symbol], aliases: true)
else
YAML.load(src)
end
end
def parse_queues(opts, queues_and_weights)
queues_and_weights.each { |queue_and_weight| parse_queue(opts, *queue_and_weight) }
end
def parse_queue(opts, queue, weight = nil)
opts[:queues] ||= []
opts[:strict] = true if opts[:strict].nil?
raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
[weight.to_i, 1].max.times { opts[:queues] << queue.to_s }
opts[:strict] = false if weight.to_i > 0
end
def rails_app?
defined?(::Rails) && ::Rails.respond_to?(:application)
end

View file

@ -21,7 +21,6 @@ module Sidekiq
# Sidekiq.client_middleware but you can change as necessary.
#
def middleware(&block)
@chain ||= Sidekiq.client_middleware
if block
@chain = @chain.dup
yield @chain
@ -31,18 +30,31 @@ module Sidekiq
attr_accessor :redis_pool
# Sidekiq::Client normally uses the default Redis pool but you may
# pass a custom ConnectionPool if you want to shard your
# Sidekiq jobs across several Redis instances (for scalability
# reasons, e.g.)
# Sidekiq::Client is responsible for pushing job payloads to Redis.
# Requires the :pool or :config keyword argument.
#
# Sidekiq::Client.new(ConnectionPool.new { Redis.new })
# Sidekiq::Client.new(pool: Sidekiq::RedisConnection.create)
#
# Generally this is only needed for very large Sidekiq installs processing
# thousands of jobs per second. I don't recommend sharding unless you
# cannot scale any other way (e.g. splitting your app into smaller apps).
def initialize(redis_pool = nil)
@redis_pool = redis_pool || Thread.current[:sidekiq_via_pool] || Sidekiq.redis_pool
# Inside the Sidekiq process, you can reuse the configured resources:
#
# Sidekiq::Client.new(config: config)
#
# @param pool [ConnectionPool] explicit Redis pool to use
# @param config [Sidekiq::Config] use the pool and middleware from the given Sidekiq container
# @param chain [Sidekiq::Middleware::Chain] use the given middleware chain
def initialize(*args, **kwargs)
if args.size == 1 && kwargs.size == 0
warn "Sidekiq::Client.new(pool) is deprecated, please use Sidekiq::Client.new(pool: pool), #{caller(0..3)}"
# old calling method, accept 1 pool argument
@redis_pool = args[0]
@chain = Sidekiq.default_configuration.client_middleware
else
# new calling method: keyword arguments
config = kwargs[:config] || Sidekiq.default_configuration
@redis_pool = kwargs[:pool] || Thread.current[:sidekiq_via_pool] || config&.redis_pool
@chain = kwargs[:chain] || config&.client_middleware
raise ArgumentError, "No Redis pool available for Sidekiq::Client" unless @redis_pool
end
end
##
@ -201,7 +213,7 @@ module Sidekiq
conn.pipelined do |pipeline|
atomic_push(pipeline, payloads)
end
rescue RedisConnection.adapter::BaseError => ex
rescue RedisClient::Error => ex
# 2550 Failover can cause the server to become a replica, need
# to disconnect and reopen the socket to get back to the primary.
# 4495 Use the same logic if we have a "Not enough replicas" error from the primary

250
lib/sidekiq/config.rb Normal file
View file

@ -0,0 +1,250 @@
require "forwardable"

require "sidekiq/redis_connection"

module Sidekiq
  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
  class Config
    extend Forwardable

    DEFAULTS = {
      labels: [],
      require: ".",
      environment: nil,
      concurrency: 10,
      timeout: 25,
      poll_interval_average: nil,
      average_scheduled_poll_interval: 5,
      on_complex_arguments: :raise,
      error_handlers: [],
      death_handlers: [],
      lifecycle_events: {
        startup: [],
        quiet: [],
        shutdown: [],
        # triggers when we fire the first heartbeat on startup OR repairing a network partition
        heartbeat: [],
        # triggers on EVERY heartbeat call, every 10 seconds
        beat: []
      },
      dead_max_jobs: 10_000,
      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
      reloader: proc { |&block| block.call }
    }

    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
      l = cfg.logger
      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
      l.warn("#{ex.class.name}: #{ex.message}")
      l.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
    }

    def initialize(options = {})
      @options = DEFAULTS.merge(options)
      # DEFAULTS.merge is a *shallow* merge: without these dups, every Config
      # instance would share (and mutate!) the Array/Hash objects stored in
      # the DEFAULTS constant. Appending ERROR_HANDLER below, or registering
      # an :on lifecycle block, would leak into DEFAULTS and therefore into
      # every other Config instance.
      %i[labels error_handlers death_handlers].each do |key|
        @options[key] = @options[key].dup if @options[key].equal?(DEFAULTS[key])
      end
      if @options[:lifecycle_events].equal?(DEFAULTS[:lifecycle_events])
        # dup the hash AND each event array inside it
        @options[:lifecycle_events] = DEFAULTS[:lifecycle_events].transform_values(&:dup)
      end
      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
      @directory = {}
      @redis_config = {}
      @capsules = []
    end

    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
    attr_reader :options
    attr_reader :capsules

    # LEGACY: edits the default capsule
    # config.concurrency = 5
    def concurrency=(val)
      default_capsule.concurrency = Integer(val)
    end

    # Edit the default capsule.
    # config.queues = %w( high default low ) # strict
    # config.queues = %w( high,3 default,2 low,1 ) # weighted
    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
    #
    # With weighted priority, queue will be checked first (weight / total) of the time.
    # high will be checked first (3/6) or 50% of the time.
    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
    # are ridiculous and unnecessarily expensive. You can get random queue ordering
    # by explicitly setting all weights to 1.
    def queues=(val)
      default_capsule.queues = val
    end

    def queues
      default_capsule.queues
    end

    # Global client middleware chain, created on demand.
    def client_middleware
      @client_chain ||= Sidekiq::Middleware::Chain.new
      yield @client_chain if block_given?
      @client_chain
    end

    # Global server middleware chain, created on demand.
    def server_middleware
      @server_chain ||= Sidekiq::Middleware::Chain.new
      yield @server_chain if block_given?
      @server_chain
    end

    # The first capsule is treated as the "default" capsule;
    # create it lazily if none has been registered yet.
    def default_capsule
      @capsules.first || Sidekiq::Capsule.new("default", self).tap do |cap|
        @capsules << cap
      end
    end

    # register a new queue processing subsystem
    # Yields the new Capsule for configuration and returns it.
    def capsule(name)
      cap = Sidekiq::Capsule.new(name, self)
      yield cap
      @capsules << cap
      cap
    end

    # All capsules must use the same Redis configuration
    def redis=(hash)
      @redis_config = @redis_config.merge(hash)
    end

    def redis_pool
      # this is our global client/housekeeping pool. each capsule has its
      # own pool for executing threads.
      size = Integer(ENV["RAILS_MAX_THREADS"] || 5)
      @redis ||= new_redis_pool(size)
    end

    def new_redis_pool(size)
      # connection pool is lazy, it will not create connections unless you actually need them
      # so don't be skimpy!
      RedisConnection.create(@redis_config.merge(size: size, logger: logger))
    end

    def redis_info
      redis do |conn|
        conn.info
      rescue RedisClientAdapter::CommandError => ex
        # 2850 return fake version when INFO command has (probably) been renamed
        raise unless /unknown command/.match?(ex.message)
        {
          "redis_version" => "9.9.9",
          "uptime_in_days" => "9999",
          "connected_clients" => "9999",
          "used_memory_human" => "9P",
          "used_memory_peak_human" => "9P"
        }.freeze
      end
    end

    # Check out a connection from the global pool and yield it,
    # retrying once on failover-related errors.
    def redis
      raise ArgumentError, "requires a block" unless block_given?
      redis_pool.with do |conn|
        retryable = true
        begin
          yield conn
        rescue RedisClientAdapter::BaseError => ex
          # 2550 Failover can cause the server to become a replica, need
          # to disconnect and reopen the socket to get back to the primary.
          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
          # 4985 Use the same logic when a blocking command is force-unblocked
          # The same retry logic is also used in client.rb
          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
            conn.close
            retryable = false
            retry
          end
          raise
        end
      end
    end

    # register global singletons which can be accessed elsewhere
    def register(name, instance)
      @directory[name] = instance
    end

    # find a singleton
    def lookup(name)
      # JNDI is just a fancy name for a hash lookup
      @directory[name]
    end

    ##
    # Death handlers are called when all retries for a job have been exhausted and
    # the job dies. It's the notification to your application
    # that this job will not succeed without manual intervention.
    #
    # Sidekiq.configure_server do |config|
    #   config.death_handlers << ->(job, ex) do
    #   end
    # end
    def death_handlers
      @options[:death_handlers]
    end

    # How frequently Redis should be checked by a random Sidekiq process for
    # scheduled and retriable jobs. Each individual process will take turns by
    # waiting some multiple of this value.
    #
    # See sidekiq/scheduled.rb for an in-depth explanation of this value
    def average_scheduled_poll_interval=(interval)
      @options[:average_scheduled_poll_interval] = interval
    end

    # Register a proc to handle any error which occurs within the Sidekiq process.
    #
    #   Sidekiq.configure_server do |config|
    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
    #   end
    #
    # The default error handler logs errors to @logger.
    def error_handlers
      @options[:error_handlers]
    end

    # Register a block to run at a point in the Sidekiq lifecycle.
    # :startup, :quiet or :shutdown are valid events.
    #
    #   Sidekiq.configure_server do |config|
    #     config.on(:shutdown) do
    #       puts "Goodbye cruel world!"
    #     end
    #   end
    def on(event, &block)
      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
      @options[:lifecycle_events][event] << block
    end

    # Lazily-built default logger writing to stdout.
    def logger
      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
        log.level = Logger::INFO
        log.formatter = if ENV["DYNO"]
          Sidekiq::Logger::Formatters::WithoutTimestamp.new
        else
          Sidekiq::Logger::Formatters::Pretty.new
        end
      end
    end

    # Assigning nil effectively silences logging (FATAL-only).
    def logger=(logger)
      if logger.nil?
        self.logger.level = Logger::FATAL
        return
      end

      logger.extend(Sidekiq::LoggingUtils)
      @logger = logger
    end

    # INTERNAL USE ONLY
    # Run every registered error handler; a handler which itself raises
    # is logged but never allowed to take down the process.
    def handle_exception(ex, ctx = {})
      @options[:error_handlers].each do |handler|
        handler.call(ex, ctx, self)
      rescue => e
        l = logger
        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
        l.error e
        l.error e.backtrace.join("\n") unless e.backtrace.nil?
      end
    end
  end
end

View file

@ -26,11 +26,11 @@ module Sidekiq # :nodoc:
end
}
def initialize(config)
raise ArgumentError, "missing queue list" unless config[:queues]
@config = config
@strictly_ordered_queues = !!@config[:strict]
@queues = @config[:queues].map { |q| "queue:#{q}" }
def initialize(cap)
raise ArgumentError, "missing queue list" unless cap.queues
@config = cap
@strictly_ordered_queues = !!@config.strict
@queues = config.queues.map { |q| "queue:#{q}" }
if @strictly_ordered_queues
@queues.uniq!
@queues << TIMEOUT
@ -50,7 +50,7 @@ module Sidekiq # :nodoc:
UnitOfWork.new(queue, job, config) if queue
end
def bulk_requeue(inprogress, options)
def bulk_requeue(inprogress, _)
return if inprogress.empty?
logger.debug { "Re-queueing terminated jobs" }

View file

@ -209,7 +209,8 @@ module Sidekiq
queue = item["queue"]
# run client-side middleware
result = Sidekiq.client_middleware.invoke(item["class"], item, queue, Sidekiq.redis_pool) do
cfg = Sidekiq.default_configuration
result = cfg.client_middleware.invoke(item["class"], item, queue, cfg.redis_pool) do
item
end
return nil unless result
@ -224,7 +225,7 @@ module Sidekiq
job.bid = msg["bid"] if job.respond_to?(:bid)
# run the job through server-side middleware
result = Sidekiq.server_middleware.invoke(job, msg, msg["queue"]) do
result = cfg.server_middleware.invoke(job, msg, msg["queue"]) do
# perform it
job.perform(*msg["args"])
true
@ -358,9 +359,9 @@ module Sidekiq
end
def build_client # :nodoc:
pool = Thread.current[:sidekiq_via_pool] || get_sidekiq_options["pool"] || Sidekiq.redis_pool
pool = Thread.current[:sidekiq_via_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
client_class.new(pool)
client_class.new(pool: pool)
end
end
end

View file

@ -2,7 +2,7 @@
module Sidekiq
class JobLogger
def initialize(logger = Sidekiq.logger)
def initialize(logger)
@logger = logger
end

View file

@ -68,9 +68,9 @@ module Sidekiq
DEFAULT_MAX_RETRY_ATTEMPTS = 25
def initialize(options)
@config = options
@max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
def initialize(capsule)
@config = @capsule = capsule
@max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
end
# The global retry handler requires only the barest of data.
@ -91,7 +91,7 @@ module Sidekiq
if msg["retry"]
process_retry(nil, msg, queue, e)
else
Sidekiq.death_handlers.each do |handler|
@capsule.config.death_handlers.each do |handler|
handler.call(msg, e)
rescue => handler_ex
handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
@ -223,7 +223,7 @@ module Sidekiq
send_to_morgue(msg) unless msg["dead"] == false
config.death_handlers.each do |handler|
@capsule.config.death_handlers.each do |handler|
handler.call(msg, exception)
rescue => e
handle_exception(e, {context: "Error calling death handler", job: msg})
@ -235,11 +235,11 @@ module Sidekiq
payload = Sidekiq.dump_json(msg)
now = Time.now.to_f
config.redis do |conn|
redis do |conn|
conn.multi do |xa|
xa.zadd("dead", now.to_s, payload)
xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
end
end
end

View file

@ -17,14 +17,14 @@ module Sidekiq
def verify_json(item)
job_class = item["wrapped"] || item["class"]
if Sidekiq[:on_complex_arguments] == :raise
if Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :raise
msg = <<~EOM
Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
EOM
raise(ArgumentError, msg) unless json_safe?(item)
elsif Sidekiq[:on_complex_arguments] == :warn
Sidekiq.logger.warn <<~EOM unless json_safe?(item)
elsif Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :warn
warn <<~EOM unless json_safe?(item)
Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
by calling `Sidekiq.strict_args!` during Sidekiq initialization.

View file

@ -1,12 +1,12 @@
# frozen_string_literal: true
require "sidekiq/manager"
require "sidekiq/fetch"
require "sidekiq/capsule"
require "sidekiq/scheduled"
require "sidekiq/ring_buffer"
module Sidekiq
# The Launcher starts the Manager and Poller threads and provides the process heartbeat.
# The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
class Launcher
include Sidekiq::Component
@ -16,48 +16,51 @@ module Sidekiq
proc { "sidekiq" },
proc { Sidekiq::VERSION },
proc { |me, data| data["tag"] },
proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.capsules.map { |cap| cap.concurrency }.sum} busy]" },
proc { |me, data| "stopping" if me.stopping? }
]
attr_accessor :manager, :poller, :fetcher
attr_accessor :managers, :poller
def initialize(options)
@config = options
options[:fetch] ||= BasicFetch.new(options)
@manager = Sidekiq::Manager.new(options)
@poller = Sidekiq::Scheduled::Poller.new(options)
def initialize(config)
@config = config
@managers = config.capsules.map do |cap|
Sidekiq::Manager.new(cap)
end
@poller = Sidekiq::Scheduled::Poller.new(@config)
@done = false
end
def run
@thread = safe_thread("heartbeat", &method(:start_heartbeat))
@poller.start
@manager.start
@managers.each(&:start)
end
# Stops this instance from processing any more jobs,
#
def quiet
return if @done
@done = true
@manager.quiet
@managers.each(&:quiet)
@poller.terminate
fire_event(:quiet, reverse: true)
end
# Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
def stop
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
@done = true
@manager.quiet
@poller.terminate
quiet
stoppers = @managers.map do |mgr|
Thread.new do
mgr.stop(deadline)
end
end
@manager.stop(deadline)
# Requeue everything in case there was a thread which fetched a job while the process was stopped.
# This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
strategy = @config[:fetch]
strategy.bulk_requeue([], @config)
fire_event(:shutdown, reverse: true)
stoppers.each(&:join)
clear_heartbeat
end
@ -68,7 +71,7 @@ module Sidekiq
private unless $TESTING
BEAT_PAUSE = 5
BEAT_PAUSE = 10
def start_heartbeat
loop do
@ -107,7 +110,7 @@ module Sidekiq
nowdate = Time.now.utc.strftime("%Y-%m-%d")
begin
Sidekiq.redis do |conn|
redis do |conn|
conn.pipelined do |pipeline|
pipeline.incrby("stat:processed", procd)
pipeline.incrby("stat:processed:#{nowdate}", procd)
@ -119,9 +122,7 @@ module Sidekiq
end
end
rescue => ex
# we're exiting the process, things might be shut down so don't
# try to handle the exception
Sidekiq.logger.warn("Unable to flush stats: #{ex}")
logger.warn("Unable to flush stats: #{ex}")
end
end
@ -130,23 +131,10 @@ module Sidekiq
fails = procd = 0
begin
fails = Processor::FAILURE.reset
procd = Processor::PROCESSED.reset
flush_stats
curstate = Processor::WORK_STATE.dup
nowdate = Time.now.utc.strftime("%Y-%m-%d")
redis do |conn|
conn.multi do |transaction|
transaction.incrby("stat:processed", procd)
transaction.incrby("stat:processed:#{nowdate}", procd)
transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
transaction.incrby("stat:failed", fails)
transaction.incrby("stat:failed:#{nowdate}", fails)
transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
end
# work is the current set of executing jobs
work_key = "#{key}:work"
conn.pipelined do |transaction|
@ -251,8 +239,8 @@ module Sidekiq
"started_at" => Time.now.to_f,
"pid" => ::Process.pid,
"tag" => @config[:tag] || "",
"concurrency" => @config[:concurrency],
"queues" => @config[:queues].uniq,
"concurrency" => @config.capsules.map { |cap| cap.concurrency }.sum,
"queues" => @config.capsules.map { |cap| cap.queues }.flatten.uniq,
"labels" => @config[:labels],
"identity" => identity
}

View file

@ -31,7 +31,7 @@ module Sidekiq
"fatal" => 4
}
LEVELS.default_proc = proc do |_, level|
Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
puts("Invalid log level: #{level.inspect}")
nil
end
@ -75,11 +75,6 @@ module Sidekiq
class Logger < ::Logger
include LoggingUtils
def initialize(*args, **kwargs)
super
self.formatter = Sidekiq.log_formatter
end
module Formatters
class Base < ::Logger::Formatter
def tid

View file

@ -1,7 +1,6 @@
# frozen_string_literal: true
require "sidekiq/processor"
require "sidekiq/fetch"
require "set"
module Sidekiq
@ -23,19 +22,20 @@ module Sidekiq
include Sidekiq::Component
attr_reader :workers
attr_reader :capsule
def initialize(options = {})
@config = options
logger.debug { options.inspect }
@count = options[:concurrency] || 10
def initialize(capsule)
@config = @capsule = capsule
logger.debug { capsule }
@count = capsule.concurrency
raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
@done = false
@workers = Set.new
@plock = Mutex.new
@count.times do
@workers << Processor.new(@config, &method(:processor_result))
end
@plock = Mutex.new
end
def start
@ -46,14 +46,12 @@ module Sidekiq
return if @done
@done = true
logger.info { "Terminating quiet threads" }
logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
@workers.each(&:terminate)
fire_event(:quiet, reverse: true)
end
def stop(deadline)
quiet
fire_event(:shutdown, reverse: true)
# some of the shutdown events can be async,
# we don't have any way to know when they're done but
@ -66,6 +64,8 @@ module Sidekiq
return if @workers.empty?
hard_shutdown
ensure
capsule.stop
end
def processor_result(processor, reason = nil)
@ -105,8 +105,7 @@ module Sidekiq
# contract says that jobs are run AT LEAST once. Process termination
# is delayed until we're certain the jobs are back in Redis because
# it is worse to lose a job than to run it twice.
strategy = @config[:fetch]
strategy.bulk_requeue(jobs, @config)
capsule.fetcher.bulk_requeue(jobs, nil)
end
cleanup.each do |processor|

View file

@ -1,13 +1,13 @@
require "sidekiq"
require "sidekiq/redis_connection"
require "time"
# This file is designed to be required within the user's
# deployment script; it should need a bare minimum of dependencies.
#
# require "sidekiq/metrics/deploy"
# gitdesc = `git log -1 --format="%h %s"`.strip
# d = Sidekiq::Metrics::Deploy.new
# d.mark(label: gitdesc)
# require "sidekiq/metrics/deploy"
# gitdesc = `git log -1 --format="%h %s"`.strip
# d = Sidekiq::Metrics::Deploy.new
# d.mark(label: gitdesc)
#
# Note that you cannot mark more than once per minute. This is a feature, not a bug.
module Sidekiq
@ -15,7 +15,7 @@ module Sidekiq
class Deploy
MARK_TTL = 90 * 24 * 60 * 60 # 90 days
def initialize(pool = Sidekiq.redis_pool)
def initialize(pool = Sidekiq::RedisConnection.create)
@pool = pool
end

View file

@ -13,9 +13,9 @@ module Sidekiq
# NB: all metrics and times/dates are UTC only. We specifically do not
# support timezones.
class Query
def initialize(pool: Sidekiq.redis_pool, now: Time.now)
def initialize(pool: nil, now: Time.now)
@time = now.utc
@pool = pool
@pool = pool || Sidekiq.default_configuration.redis_pool
@klass = nil
end

View file

@ -152,6 +152,7 @@ module Sidekiq
def exists?(klass)
any? { |entry| entry.klass == klass }
end
alias_method :include?, :exists?
# @return [Boolean] if the chain contains no middleware
def empty?

View file

@ -50,14 +50,9 @@ module Sidekiq
end
end
def self.persist(klass)
Sidekiq.configure_client do |config|
config.client_middleware.add Save, klass
end
Sidekiq.configure_server do |config|
config.client_middleware.add Save, klass
config.server_middleware.add Load, klass
end
def self.persist(klass, config = Sidekiq.default_configuration)
config.client_middleware.add Save, klass
config.server_middleware.add Load, klass
end
end
end

7
lib/sidekiq/pool.rb Normal file
View file

@ -0,0 +1,7 @@
module Sidekiq
  # Mixin granting an object access to a Redis connection pool.
  module PoolAccess
    # Prefer the thread-local pool when one has been installed
    # (e.g. by a Capsule's processor thread); otherwise memoize
    # and return a lazily-created global pool.
    def redis_pool
      thread_pool = Thread.current[:sidekiq_redis_pool]
      return thread_pool if thread_pool

      @redis ||= Sidekiq::RedisConnection.create
    end
  end
end

View file

@ -26,18 +26,18 @@ module Sidekiq
attr_reader :thread
attr_reader :job
attr_reader :capsule
def initialize(options, &block)
def initialize(capsule, &block)
@config = @capsule = capsule
@callback = block
@down = false
@done = false
@job = nil
@thread = nil
@config = options
@strategy = options[:fetch]
@reloader = options[:reloader] || proc { |&block| block.call }
@job_logger = (options[:job_logger] || Sidekiq::JobLogger).new
@retrier = Sidekiq::JobRetry.new(options)
@reloader = Sidekiq.default_configuration[:reloader]
@job_logger = (capsule.config[:job_logger] || Sidekiq::JobLogger).new(logger)
@retrier = Sidekiq::JobRetry.new(capsule)
end
def terminate(wait = false)
@ -59,12 +59,16 @@ module Sidekiq
end
def start
@thread ||= safe_thread("processor", &method(:run))
@thread ||= safe_thread("#{config.name}/processor", &method(:run))
end
private unless $TESTING
def run
# By setting this thread-local, Sidekiq.redis will access +Sidekiq::Capsule#redis_pool+
# instead of the global pool in +Sidekiq::Config#redis_pool+.
Thread.current[:sidekiq_capsule] = @capsule
process_one until @done
@callback.call(self)
rescue Sidekiq::Shutdown
@ -80,7 +84,7 @@ module Sidekiq
end
def get_one
uow = @strategy.retrieve_work
uow = capsule.fetcher.retrieve_work
if @down
logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }
@down = nil
@ -153,11 +157,11 @@ module Sidekiq
rescue => ex
handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
now = Time.now.to_f
config.redis do |conn|
redis do |conn|
conn.multi do |xa|
xa.zadd("dead", now.to_s, jobstr)
xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
end
end
return uow.acknowledge
@ -166,7 +170,7 @@ module Sidekiq
ack = false
begin
dispatch(job_hash, queue, jobstr) do |inst|
@config.server_middleware.invoke(inst, job_hash, queue) do
config.server_middleware.invoke(inst, job_hash, queue) do
execute_job(inst, job_hash["args"])
end
end

View file

@ -1,10 +1,7 @@
# frozen_string_literal: true
require "connection_pool"
require "redis_client"
require "redis_client/decorator"
require "uri"
require "sidekiq/redis_connection"
module Sidekiq
class RedisClientAdapter
@ -112,9 +109,8 @@ module Sidekiq
opts = options.dup
if opts[:namespace]
Sidekiq.logger.error("Your Redis configuration uses the namespace '#{opts[:namespace]}' but this feature isn't supported by redis-client. " \
"Either use the redis adapter or remove the namespace.")
Kernel.exit(-127)
raise ArgumentError, "Your Redis configuration uses the namespace '#{opts[:namespace]}' but this feature isn't supported by redis-client. " \
"Either use the redis adapter or remove the namespace."
end
opts.delete(:size)
@ -144,5 +140,3 @@ module Sidekiq
end
end
end
Sidekiq::RedisConnection.adapter = Sidekiq::RedisClientAdapter

View file

@ -2,49 +2,24 @@
require "connection_pool"
require "uri"
require "sidekiq/redis_client_adapter"
module Sidekiq
module RedisConnection
class << self
attr_reader :adapter
def adapter=(adapter)
raise "no" if adapter == self
result = case adapter
when Class
adapter
else
require "sidekiq/#{adapter}_adapter"
nil
end
@adapter = result if result
end
def create(options = {})
symbolized_options = options.transform_keys(&:to_sym)
symbolized_options[:url] ||= determine_redis_provider
if !symbolized_options[:url] && (u = determine_redis_provider)
symbolized_options[:url] = u
size = symbolized_options.delete(:size) || 5
pool_timeout = symbolized_options.delete(:pool_timeout) || 1
if symbolized_options[:logger]
log_info(symbolized_options)
symbolized_options.delete(:logger)
end
size = if symbolized_options[:size]
symbolized_options[:size]
elsif Sidekiq.server?
# Give ourselves plenty of connections. pool is lazy
# so we won't create them until we need them.
Sidekiq[:concurrency] + 5
elsif ENV["RAILS_MAX_THREADS"]
Integer(ENV["RAILS_MAX_THREADS"])
else
5
end
verify_sizing(size, Sidekiq[:concurrency]) if Sidekiq.server?
pool_timeout = symbolized_options[:pool_timeout] || 1
log_info(symbolized_options)
redis_config = adapter.new(symbolized_options)
redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
ConnectionPool.new(timeout: pool_timeout, size: size) do
redis_config.new_client
end
@ -52,18 +27,6 @@ module Sidekiq
private
# Sidekiq needs many concurrent Redis connections.
#
# We need a connection for each Processor.
# We need a connection for Pro's real-time change listener
# We need a connection to various features to call Redis every few seconds:
# - the process heartbeat.
# - enterprise's leader election
# - enterprise's cron support
def verify_sizing(size, concurrency)
raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
end
def log_info(options)
redacted = "REDACTED"
@ -82,11 +45,7 @@ module Sidekiq
scrubbed_options[:sentinels]&.each do |sentinel|
sentinel[:password] = redacted if sentinel[:password]
end
if Sidekiq.server?
Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with #{adapter.name} options #{scrubbed_options}")
else
Sidekiq.logger.debug("#{Sidekiq::NAME} client with #{adapter.name} options #{scrubbed_options}")
end
options[:logger].info("Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrubbed_options}")
end
def determine_redis_provider

View file

@ -8,6 +8,8 @@ module Sidekiq
SETS = %w[retry schedule]
class Enq
include Sidekiq::Component
LUA_ZPOPBYSCORE = <<~LUA
local key, now = KEYS[1], ARGV[1]
local jobs = redis.call("zrangebyscore", key, "-inf", now, "limit", 0, 1)
@ -17,7 +19,9 @@ module Sidekiq
end
LUA
def initialize
def initialize(container)
@config = container
@client = Sidekiq::Client.new(config: container)
@done = false
@lua_zpopbyscore_sha = nil
end
@ -25,15 +29,15 @@ module Sidekiq
def enqueue_jobs(sorted_sets = SETS)
# A job's "score" in Redis is the time at which it should be processed.
# Just check Redis for the set of jobs with a timestamp before now.
Sidekiq.redis do |conn|
redis do |conn|
sorted_sets.each do |sorted_set|
# Get next item in the queue with score (time to execute) <= now.
# We need to go through the list one at a time to reduce the risk of something
# going wrong between the time jobs are popped from the scheduled queue and when
# they are pushed onto a work queue and losing the jobs.
while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
Sidekiq::Client.push(Sidekiq.load_json(job))
Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
@client.push(Sidekiq.load_json(job))
logger.debug { "enqueued #{sorted_set}: #{job}" }
end
end
end
@ -52,7 +56,7 @@ module Sidekiq
end
conn.evalsha(@lua_zpopbyscore_sha, keys, argv)
rescue RedisConnection.adapter::CommandError => e
rescue RedisClient::CommandError => e
raise unless e.message.start_with?("NOSCRIPT")
@lua_zpopbyscore_sha = nil
@ -70,9 +74,9 @@ module Sidekiq
INITIAL_WAIT = 10
def initialize(options)
@config = options
@enq = (options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
def initialize(config)
@config = config
@enq = (config[:scheduled_enq] || Sidekiq::Scheduled::Enq).new(config)
@sleeper = ConnectionPool::TimedStack.new
@done = false
@thread = nil

View file

@ -304,7 +304,7 @@ module Sidekiq
def jobs_for(klass)
jobs.select do |job|
marshalled = job["args"][0]
marshalled.index(klass.to_s) && YAML.load(marshalled)[0] == klass
marshalled.index(klass.to_s) && YAML.safe_load(marshalled)[0] == klass
end
end
end

View file

@ -5,8 +5,8 @@ require "sidekiq/client"
module Sidekiq
class TransactionAwareClient
def initialize(redis_pool)
@redis_client = Client.new(redis_pool)
def initialize(pool: nil, config: nil)
@redis_client = Client.new(pool: pool, config: config)
end
def push(item)
@ -34,11 +34,10 @@ module Sidekiq
begin
require "after_commit_everywhere"
rescue LoadError
Sidekiq.logger.error("You need to add after_commit_everywhere to your Gemfile to use Sidekiq's transactional client")
raise
raise %q(You need to add `gem "after_commit_everywhere"` to your Gemfile to use Sidekiq's transactional client)
end
default_job_options["client_class"] = Sidekiq::TransactionAwareClient
Sidekiq.default_job_options["client_class"] = Sidekiq::TransactionAwareClient
Sidekiq::JobUtil::TRANSIENT_ATTRIBUTES << "client_class"
true
end

View file

@ -307,7 +307,7 @@ module Sidekiq
end
get "/stats/queues" do
json Sidekiq::Stats::Queues.new.lengths
json Sidekiq::Stats.new.queues
end
def call(env)

View file

@ -176,13 +176,13 @@ module Sidekiq
end
def redis_url
Sidekiq.redis do |conn|
Sidekiq.default_configuration.redis do |conn|
conn._config.server_url
end
end
def redis_info
Sidekiq.redis_info
Sidekiq.default_configuration.redis_info
end
def root_path
@ -320,7 +320,7 @@ module Sidekiq
end
def environment_title_prefix
environment = Sidekiq[:environment] || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
environment = Sidekiq.default_configuration[:environment] || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
"[#{environment.upcase}] " unless environment == "production"
end

View file

@ -54,6 +54,21 @@ Sidekiq.configure_server do |config|
Sidekiq::Metrics::Deploy.new.mark(label: label)
end
class Singler
include Sidekiq::ServerMiddleware
def call(w, j, q)
puts q
end
end
Sidekiq.configure_server do |config|
config.capsule("single_threaded") do |cap|
cap.concurrency = 1
cap.queues = %w[single default]
cap.server_middleware.add Singler
end
end
# helper jobs for seeding metrics data
# you will need to restart if you change any of these
class FooJob

View file

@ -5,6 +5,7 @@ require "sidekiq/cli"
require "sidekiq/fetch"
require "sidekiq/scheduled"
require "sidekiq/processor"
require "sidekiq/api"
class JoeWorker
include Sidekiq::Job
@ -17,13 +18,8 @@ end
describe "Actors" do
before do
Sidekiq.reset!
Sidekiq.redis { |c| c.flushdb }
@config = Sidekiq
@config[:queues] = %w[default]
@config[:fetch] = Sidekiq::BasicFetch.new(@config)
@config[:error_handlers] << Sidekiq.method(:default_error_handler)
# @config.logger.level = Logger::DEBUG
@config = reset!
@cap = @config.default_capsule
end
describe "scheduler" do
@ -74,14 +70,14 @@ describe "Actors" do
end
it "can start and stop" do
f = Sidekiq::Processor.new(@config) { |p, ex| raise "should not raise!" }
f = Sidekiq::Processor.new(@cap) { |p, ex| raise "should not raise!" }
f.terminate
end
it "can process" do
q = Sidekiq::Queue.new
assert_equal 0, q.size
p = Sidekiq::Processor.new(@config) do |pr, ex|
p = Sidekiq::Processor.new(@cap) do |pr, ex|
result(pr, ex)
end
JoeWorker.perform_async(0)
@ -100,9 +96,10 @@ describe "Actors" do
end
it "deals with errors" do
@config.logger.level = Logger::ERROR
q = Sidekiq::Queue.new
assert_equal 0, q.size
p = Sidekiq::Processor.new(@config) do |pr, ex|
p = Sidekiq::Processor.new(@cap) do |pr, ex|
result(pr, ex)
end
jid = JoeWorker.perform_async("boom")
@ -125,11 +122,12 @@ describe "Actors" do
it "gracefully kills" do
q = Sidekiq::Queue.new
assert_equal 0, q.size
p = Sidekiq::Processor.new(@config) do |pr, ex|
p = Sidekiq::Processor.new(@cap) do |pr, ex|
result(pr, ex)
end
jid = JoeWorker.perform_async(1)
jid = JoeWorker.perform_async(2)
assert jid, jid
# debugger if q.size == 0
assert_equal 1, q.size
a = $count

View file

@ -5,9 +5,39 @@ require "sidekiq/api"
require "active_job"
require "action_mailer"
class ApiMailer < ActionMailer::Base
def test_email(*)
end
end
class ApiJob < ActiveJob::Base
def perform(*)
end
end
class ApiWorker
include Sidekiq::Job
end
class WorkerWithTags
include Sidekiq::Job
sidekiq_options tags: ["foo"]
end
SERIALIZED_JOBS = {
"5.x" => [
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ApiJob","queue":"default","args":[{"job_class":"ApiJob","job_id":"f1bde53f-3852-4ae4-a879-c12eacebbbb0","provider_job_id":null,"queue_name":"default","priority":null,"arguments":[1,2,3],"executions":0,"locale":"en"}],"retry":true,"jid":"099eee72911085a511d0e312","created_at":1568305542.339916,"enqueued_at":1568305542.339947}',
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::DeliveryJob","queue":"mailers","args":[{"job_class":"ActionMailer::DeliveryJob","job_id":"19cc0115-3d1c-4bbe-a51e-bfa1385895d1","provider_job_id":null,"queue_name":"mailers","priority":null,"arguments":["ApiMailer","test_email","deliver_now",1,2,3],"executions":0,"locale":"en"}],"retry":true,"jid":"37436e5504936400e8cf98db","created_at":1568305542.370133,"enqueued_at":1568305542.370241}'
],
"6.x" => [
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ApiJob","queue":"default","args":[{"job_class":"ApiJob","job_id":"ff2b48d4-bdce-4825-af6b-ef8c11ab651e","provider_job_id":null,"queue_name":"default","priority":null,"arguments":[1,2,3],"executions":0,"exception_executions":{},"locale":"en","timezone":"UTC","enqueued_at":"2019-09-12T16:28:37Z"}],"retry":true,"jid":"ce121bf77b37ae81fe61b6dc","created_at":1568305717.9469702,"enqueued_at":1568305717.947005}',
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::MailDeliveryJob","queue":"mailers","args":[{"job_class":"ActionMailer::MailDeliveryJob","job_id":"2f967da1-a389-479c-9a4e-5cc059e6d65c","provider_job_id":null,"queue_name":"mailers","priority":null,"arguments":["ApiMailer","test_email","deliver_now",{"args":[1,2,3],"_aj_symbol_keys":["args"]}],"executions":0,"exception_executions":{},"locale":"en","timezone":"UTC","enqueued_at":"2019-09-12T16:28:37Z"}],"retry":true,"jid":"469979df52bb9ef9f48b49e1","created_at":1568305717.9457421,"enqueued_at":1568305717.9457731}'
]
}
describe "API" do
before do
Sidekiq.redis { |c| c.flushdb }
@cfg = reset!
end
describe "stats" do
@ -22,7 +52,7 @@ describe "API" do
describe "processed" do
it "returns number of processed jobs" do
Sidekiq.redis { |conn| conn.set("stat:processed", 5) }
@cfg.redis { |conn| conn.set("stat:processed", 5) }
s = Sidekiq::Stats.new
assert_equal 5, s.processed
end
@ -30,7 +60,7 @@ describe "API" do
describe "failed" do
it "returns number of failed jobs" do
Sidekiq.redis { |conn| conn.set("stat:failed", 5) }
@cfg.redis { |conn| conn.set("stat:failed", 5) }
s = Sidekiq::Stats.new
assert_equal 5, s.failed
end
@ -38,7 +68,7 @@ describe "API" do
describe "reset" do
before do
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.set("stat:processed", 5)
conn.set("stat:failed", 10)
end
@ -75,7 +105,7 @@ describe "API" do
describe "workers_size" do
it "retrieves the number of busy workers" do
Sidekiq.redis do |c|
@cfg.redis do |c|
c.sadd("processes", "process_1")
c.sadd("processes", "process_2")
c.hset("process_1", "busy", 1)
@ -88,12 +118,12 @@ describe "API" do
describe "queues" do
it "is initially empty" do
s = Sidekiq::Stats::Queues.new
assert_equal 0, s.lengths.size
s = Sidekiq::Stats.new
assert_equal 0, s.queues.size
end
it "returns a hash of queue and size in order" do
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.rpush "queue:foo", "{}"
conn.sadd "queues", "foo"
@ -101,17 +131,13 @@ describe "API" do
conn.sadd "queues", "bar"
end
s = Sidekiq::Stats::Queues.new
assert_equal ({"foo" => 1, "bar" => 3}), s.lengths
assert_equal "bar", s.lengths.first.first
assert_equal Sidekiq::Stats.new.queues, Sidekiq::Stats::Queues.new.lengths
assert_equal({"foo" => 1, "bar" => 3}, Sidekiq::Stats.new.queues)
end
end
describe "enqueued" do
it "handles latency for good jobs" do
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.rpush "queue:default", "{\"enqueued_at\": #{Time.now.to_f}}"
conn.sadd "queues", "default"
end
@ -122,7 +148,7 @@ describe "API" do
end
it "handles latency for incomplete jobs" do
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.rpush "queue:default", "{}"
conn.sadd "queues", "default"
end
@ -133,7 +159,7 @@ describe "API" do
end
it "returns total enqueued jobs" do
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.rpush "queue:foo", "{}"
conn.sadd "queues", "foo"
@ -168,7 +194,7 @@ describe "API" do
describe "processed" do
it "retrieves hash of dates" do
Sidekiq.redis do |c|
@cfg.redis do |c|
c.incrby("stat:processed:2012-12-24", 4)
c.incrby("stat:processed:2012-12-25", 1)
c.incrby("stat:processed:2012-12-26", 6)
@ -189,7 +215,7 @@ describe "API" do
describe "failed" do
it "retrieves hash of dates" do
Sidekiq.redis do |c|
@cfg.redis do |c|
c.incrby("stat:failed:2012-12-24", 4)
c.incrby("stat:failed:2012-12-25", 1)
c.incrby("stat:failed:2012-12-26", 6)
@ -222,25 +248,6 @@ describe "API" do
ActiveJob::Base.logger = nil
end
class ApiMailer < ActionMailer::Base
def test_email(*)
end
end
class ApiJob < ActiveJob::Base
def perform(*)
end
end
class ApiWorker
include Sidekiq::Worker
end
class WorkerWithTags
include Sidekiq::Worker
sidekiq_options tags: ["foo"]
end
it "can enumerate jobs" do
q = Sidekiq::Queue.new
Time.stub(:now, Time.new(2012, 12, 26)) do
@ -275,16 +282,7 @@ describe "API" do
end
describe "Rails unwrapping" do
SERIALIZED_JOBS = {
"5.x" => [
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ApiJob","queue":"default","args":[{"job_class":"ApiJob","job_id":"f1bde53f-3852-4ae4-a879-c12eacebbbb0","provider_job_id":null,"queue_name":"default","priority":null,"arguments":[1,2,3],"executions":0,"locale":"en"}],"retry":true,"jid":"099eee72911085a511d0e312","created_at":1568305542.339916,"enqueued_at":1568305542.339947}',
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::DeliveryJob","queue":"mailers","args":[{"job_class":"ActionMailer::DeliveryJob","job_id":"19cc0115-3d1c-4bbe-a51e-bfa1385895d1","provider_job_id":null,"queue_name":"mailers","priority":null,"arguments":["ApiMailer","test_email","deliver_now",1,2,3],"executions":0,"locale":"en"}],"retry":true,"jid":"37436e5504936400e8cf98db","created_at":1568305542.370133,"enqueued_at":1568305542.370241}'
],
"6.x" => [
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ApiJob","queue":"default","args":[{"job_class":"ApiJob","job_id":"ff2b48d4-bdce-4825-af6b-ef8c11ab651e","provider_job_id":null,"queue_name":"default","priority":null,"arguments":[1,2,3],"executions":0,"exception_executions":{},"locale":"en","timezone":"UTC","enqueued_at":"2019-09-12T16:28:37Z"}],"retry":true,"jid":"ce121bf77b37ae81fe61b6dc","created_at":1568305717.9469702,"enqueued_at":1568305717.947005}',
'{"class":"ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper","wrapped":"ActionMailer::MailDeliveryJob","queue":"mailers","args":[{"job_class":"ActionMailer::MailDeliveryJob","job_id":"2f967da1-a389-479c-9a4e-5cc059e6d65c","provider_job_id":null,"queue_name":"mailers","priority":null,"arguments":["ApiMailer","test_email","deliver_now",{"args":[1,2,3],"_aj_symbol_keys":["args"]}],"executions":0,"exception_executions":{},"locale":"en","timezone":"UTC","enqueued_at":"2019-09-12T16:28:37Z"}],"retry":true,"jid":"469979df52bb9ef9f48b49e1","created_at":1568305717.9457421,"enqueued_at":1568305717.9457731}'
]
}.each_pair do |ver, jobs|
SERIALIZED_JOBS.each_pair do |ver, jobs|
it "unwraps ActiveJob #{ver} jobs" do
# ApiJob.perform_later(1,2,3)
# puts Sidekiq::Queue.new.first.value
@ -538,7 +536,7 @@ describe "API" do
}
time = Time.now.to_f
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.multi do |transaction|
transaction.sadd("processes", odata["key"])
transaction.hmset(odata["key"], "info", Sidekiq.dump_json(odata), "busy", 10, "beat", time)
@ -556,8 +554,8 @@ describe "API" do
data.quiet!
data.stop!
signals_string = "#{odata["key"]}-signals"
assert_equal "TERM", Sidekiq.redis { |c| c.lpop(signals_string) }
assert_equal "TSTP", Sidekiq.redis { |c| c.lpop(signals_string) }
assert_equal "TERM", @cfg.redis { |c| c.lpop(signals_string) }
assert_equal "TSTP", @cfg.redis { |c| c.lpop(signals_string) }
end
it "can enumerate workers" do
@ -570,14 +568,14 @@ describe "API" do
hn = Socket.gethostname
key = "#{hn}:#{$$}"
pdata = {"pid" => $$, "hostname" => hn, "started_at" => Time.now.to_i}
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.sadd("processes", key)
conn.hmset(key, "info", Sidekiq.dump_json(pdata), "busy", 0, "beat", Time.now.to_f)
end
s = "#{key}:work"
data = Sidekiq.dump_json({"payload" => "{}", "queue" => "default", "run_at" => Time.now.to_i})
Sidekiq.redis do |c|
@cfg.redis do |c|
c.hmset(s, "1234", data)
end
@ -591,7 +589,7 @@ describe "API" do
s = "#{key}:work"
data = Sidekiq.dump_json({"payload" => {}, "queue" => "default", "run_at" => (Time.now.to_i - 2 * 60 * 60)})
Sidekiq.redis do |c|
@cfg.redis do |c|
c.multi do |transaction|
transaction.hmset(s, "5678", data)
transaction.hmset("b#{s}", "5678", data)
@ -622,7 +620,7 @@ describe "API" do
it "prunes processes which have died" do
data = {"pid" => rand(10_000), "hostname" => "app#{rand(1_000)}", "started_at" => Time.now.to_f}
key = "#{data["hostname"]}:#{data["pid"]}"
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.sadd("processes", key)
conn.hmset(key, "info", Sidekiq.dump_json(data), "busy", 0, "beat", Time.now.to_f)
end
@ -631,7 +629,7 @@ describe "API" do
assert_equal 1, ps.size
assert_equal 1, ps.to_a.size
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.sadd("processes", "bar:987")
conn.sadd("processes", "bar:986")
conn.del("process_cleanup")
@ -644,7 +642,7 @@ describe "API" do
def add_retry(jid = "bob", at = Time.now.to_f)
payload = Sidekiq.dump_json("class" => "ApiWorker", "args" => [1, "mike"], "queue" => "default", "jid" => jid, "retry_count" => 2, "failed_at" => Time.now.to_f, "error_backtrace" => ["line1", "line2"])
Sidekiq.redis do |conn|
@cfg.redis do |conn|
conn.zadd("retry", at.to_s, payload)
end
end

80
test/capsule.rb Normal file
View file

@ -0,0 +1,80 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/capsule"
describe Sidekiq::Capsule do
before do
@config = reset!
@cap = @config.default_capsule
end
it "provides its own redis pool" do
one = @cap
one.concurrency = 2
two = Sidekiq::Capsule.new("foo", @config)
two.concurrency = 3
# the pool is cached
assert_equal one.redis_pool, one.redis_pool
assert_equal two.redis_pool, two.redis_pool
# they are sized correctly
assert_equal 2, one.redis_pool.size
assert_equal 3, two.redis_pool.size
refute_equal one.redis_pool, two.redis_pool
# they point to the same Redis
assert one.redis { |c| c.set("hello", "world") }
assert_equal "world", two.redis { |c| c.get("hello") }
end
it "parses queues correctly" do
cap = @cap
assert_equal ["default"], cap.queues
assert cap.strict
cap.queues = %w[foo bar,2]
assert_equal %w[foo bar bar], cap.queues
refute cap.strict
cap.queues = ["default"]
assert_equal %w[default], cap.queues
assert cap.strict
# config/sidekiq.yml input will look like this
cap.queues = [["foo"], ["baz", 3]]
assert_equal %w[foo baz baz baz], cap.queues
refute cap.strict
end
it "can have customized middleware chains" do
one = Object.new
two = Object.new
@config.client_middleware.add one
@config.server_middleware.add one
assert_includes @config.client_middleware, one
assert_includes @config.server_middleware, one
@config.capsule("testy") do |cap|
cap.concurrency = 2
cap.queues = %w[foo bar,2]
cap.server_middleware do |chain|
chain.add two
end
cap.client_middleware do |chain|
chain.add two
end
end
assert_equal 2, @config.capsules.size
cap = @config.capsules[1]
assert_equal "testy", cap.name
assert_equal 2, cap.concurrency
assert_includes cap.server_middleware, one
assert_includes cap.client_middleware, one
assert_includes cap.server_middleware, two
assert_includes cap.client_middleware, two
refute_includes @config.server_middleware, two
refute_includes @config.client_middleware, two
end
end

569
test/cli.rb Normal file
View file

@ -0,0 +1,569 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/cli"
describe Sidekiq::CLI do
before do
@logdev = StringIO.new
@config = reset!
@config.logger = Logger.new(@logdev)
@cli = Sidekiq::CLI.new.tap { |c| c.config = config }
end
attr_reader :config
attr_reader :logdev
def queues
@cli.config.capsules.first.queues
end
def concurrency
@cli.config.capsules.first.concurrency
end
def strict
@cli.config.capsules.first.strict
end
describe "#parse" do
describe "options" do
it "accepts -r" do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal "./test/fake_env.rb", config[:require]
end
describe "concurrency" do
it "accepts with -c" do
@cli.parse(%w[sidekiq -c 60 -r ./test/fake_env.rb])
assert_equal 60, concurrency
end
describe "when concurrency is empty and RAILS_MAX_THREADS env var is set" do
before do
ENV["RAILS_MAX_THREADS"] = "9"
end
after do
ENV.delete("RAILS_MAX_THREADS")
end
it "sets concurrency from RAILS_MAX_THREADS env var" do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal 9, concurrency
end
it "option overrides RAILS_MAX_THREADS env var" do
@cli.parse(%w[sidekiq -c 60 -r ./test/fake_env.rb])
assert_equal 60, concurrency
end
end
end
describe "setting internal options via the config file" do
describe "setting the `strict` option via the config file" do
it "discards the `strict` option specified via the config file" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_with_internal_options.yml])
assert_equal true, !!strict
end
end
end
describe "queues" do
it "accepts with -q" do
@cli.parse(%w[sidekiq -q foo -r ./test/fake_env.rb])
assert_equal ["foo"], queues
end
describe "when weights are not present" do
it "accepts queues without weights" do
@cli.parse(%w[sidekiq -q foo -q bar -r ./test/fake_env.rb])
assert_equal ["foo", "bar"], queues
end
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -q foo -q bar -r ./test/fake_env.rb])
assert_equal true, !!strict
end
end
describe "when weights are present" do
it "accepts queues with weights" do
@cli.parse(%w[sidekiq -q foo,3 -q bar -r ./test/fake_env.rb])
assert_equal ["foo", "foo", "foo", "bar"], queues
end
it "does not set strictly ordered queues" do
@cli.parse(%w[sidekiq -q foo,3 -q bar -r ./test/fake_env.rb])
assert_equal false, !!strict
end
end
it "accepts queues with multi-word names" do
@cli.parse(%w[sidekiq -q queue_one -q queue-two -r ./test/fake_env.rb])
assert_equal ["queue_one", "queue-two"], queues
end
it "accepts queues with dots in the name" do
@cli.parse(%w[sidekiq -q foo.bar -r ./test/fake_env.rb])
assert_equal ["foo.bar"], queues
end
describe "when queues are empty" do
describe "when no queues are specified via -q" do
it "sets 'default' queue" do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal ["default"], queues
end
end
describe "when no queues are specified via the config file" do
it "sets 'default' queue" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_empty.yml -r ./test/fake_env.rb])
assert_equal ["default"], queues
end
end
end
end
describe "timeout" do
it "accepts with -t" do
@cli.parse(%w[sidekiq -t 30 -r ./test/fake_env.rb])
assert_equal 30, config[:timeout]
end
end
describe "verbose" do
it "accepts with -v" do
@cli.parse(%w[sidekiq -v -r ./test/fake_env.rb])
assert_equal Logger::DEBUG, @config.logger.level
end
end
describe "config file" do
it "accepts with -C" do
@cli.parse(%w[sidekiq -C ./test/config.yml])
assert_equal "./test/config.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_nil config[:environment]
assert_equal 50, concurrency
assert_equal 2, queues.count { |q| q == "very_often" }
assert_equal 1, queues.count { |q| q == "seldom" }
end
it "accepts stringy keys" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_string.yml])
assert_equal "./test/cfg/config_string.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_nil config[:environment]
assert_equal 50, concurrency
assert_equal 2, queues.count { |q| q == "very_often" }
assert_equal 1, queues.count { |q| q == "seldom" }
end
it "accepts environment specific config" do
@cli.parse(%w[sidekiq -e staging -C ./test/cfg/config_environment.yml])
assert_equal "./test/cfg/config_environment.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "staging", config[:environment]
assert_equal 50, concurrency
assert_equal 2, queues.count { |q| q == "very_often" }
assert_equal 1, queues.count { |q| q == "seldom" }
end
it "accepts environment specific config with alias" do
@cli.parse(%w[sidekiq -e staging -C ./test/cfg/config_with_alias.yml])
assert_equal "./test/cfg/config_with_alias.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "staging", config[:environment]
assert_equal 50, concurrency
assert_equal 2, queues.count { |q| q == "very_often" }
assert_equal 1, queues.count { |q| q == "seldom" }
@cli.parse(%w[sidekiq -e production -C ./test/cfg/config_with_alias.yml])
assert_equal "./test/cfg/config_with_alias.yml", config[:config_file]
assert config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "production", config[:environment]
assert_equal 50, concurrency
assert_equal 2, queues.count { |q| q == "very_often" }
assert_equal 1, queues.count { |q| q == "seldom" }
end
it "exposes ERB expected __FILE__ and __dir__" do
given_path = "./test/cfg/config__FILE__and__dir__.yml"
expected_file = File.expand_path(given_path)
# As per Ruby's Kernel module docs, __dir__ is equivalent to File.dirname(File.realpath(__FILE__))
expected_dir = File.dirname(File.realpath(expected_file))
@cli.parse(%W[sidekiq -C #{given_path}])
assert_equal(expected_file, config.fetch(:__FILE__))
assert_equal(expected_dir, config.fetch(:__dir__))
end
end
describe "default config file" do
describe "when required path is a directory" do
it "tries config/sidekiq.yml from required diretory" do
@cli.parse(%w[sidekiq -r ./test/dummy])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, concurrency
end
end
describe "when required path is a file" do
it "tries config/sidekiq.yml from current diretory" do
config[:require] = "./test/dummy" # stub current dir ./
@cli.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, concurrency
end
end
describe "without any required path" do
it "tries config/sidekiq.yml from current diretory" do
config[:require] = "./test/dummy" # stub current dir ./
@cli.parse(%w[sidekiq])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, concurrency
end
end
describe "when config file and flags" do
it "merges options" do
@cli.parse(%w[sidekiq -C ./test/config.yml
-e snoop
-c 100
-r ./test/fake_env.rb
-q often,7
-q seldom,3])
assert_equal "./test/config.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "snoop", config[:environment]
assert_equal 100, concurrency
assert_equal 7, queues.count { |q| q == "often" }
assert_equal 3, queues.count { |q| q == "seldom" }
end
describe "when the config file specifies queues with weights" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!strict
end
end
describe "when -q specifies no queues" do
it "does not set strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb])
assert_equal false, !!strict
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!strict
end
end
end
describe "when the config file specifies queues without weights" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_queues_without_weights.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!strict
end
end
describe "when -q specifies no queues" do
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_queues_without_weights.yml
-r ./test/fake_env.rb])
assert_equal true, !!strict
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_queues_without_weights.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!strict
end
end
end
describe "when the config file specifies no queues" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_empty.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!strict
end
end
describe "when -q specifies no queues" do
it "sets strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_empty.yml
-r ./test/fake_env.rb])
assert_equal true, !!strict
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
@cli.parse(%w[sidekiq -C ./test/cfg/config_empty.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!strict
end
end
end
end
describe "default config file" do
describe "when required path is a directory" do
it "tries config/sidekiq.yml" do
@cli.parse(%w[sidekiq -r ./test/dummy])
assert_equal "sidekiq.yml", File.basename(config[:config_file])
assert_equal 25, concurrency
end
end
end
end
end
describe "validation" do
describe "when required application path does not exist" do
it "exits with status 1" do
exit = assert_raises(SystemExit) { @cli.parse(%w[sidekiq -r /non/existent/path]) }
assert_equal 1, exit.status
end
end
describe "when required path is a directory without config/application.rb" do
it "exits with status 1" do
exit = assert_raises(SystemExit) { @cli.parse(%w[sidekiq -r ./test/fixtures]) }
assert_equal 1, exit.status
end
describe "when config file path does not exist" do
it "raises argument error" do
assert_raises(ArgumentError) do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb -C /non/existent/path])
end
end
end
end
describe "when concurrency is not valid" do
describe "when set to 0" do
it "raises argument error" do
assert_raises(ArgumentError) do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb -c 0])
end
end
end
describe "when set to a negative number" do
it "raises argument error" do
assert_raises(ArgumentError) do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb -c -2])
end
end
end
end
describe "when timeout is not valid" do
describe "when set to 0" do
it "raises argument error" do
assert_raises(ArgumentError) do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb -t 0])
end
end
end
describe "when set to a negative number" do
it "raises argument error" do
assert_raises(ArgumentError) do
@cli.parse(%w[sidekiq -r ./test/fake_env.rb -t -2])
end
end
end
end
end
describe "#run" do
before do
@cli.config[:require] = "./test/fake_env.rb"
end
describe "require workers" do
describe "when path is a rails directory" do
before do
@cli.config[:require] = "./test/dummy"
@cli.environment = "test"
end
it "requires sidekiq railtie and rails application with environment" do
@cli.stub(:launch, nil) do
@cli.run
end
assert defined?(Sidekiq::Rails)
assert defined?(Dummy::Application)
end
it "tags with the app directory name" do
@cli.stub(:launch, nil) do
@cli.run
end
assert_equal "dummy", @cli.config[:tag]
end
end
describe "when path is file" do
it "requires application" do
@cli.stub(:launch, nil) do
@cli.run
end
assert $LOADED_FEATURES.any? { |x| x =~ /test\/fake_env/ }
end
end
end
it "prints rails info" do
@cli.stub(:environment, "production") do
@cli.stub(:launch, nil) do
@cli.run
end
assert_includes @logdev.string, "Booted Rails #{::Rails.version} application in production environment"
end
end
describe "checking maxmemory policy" do
it "warns if the policy is not noeviction" do
redis_info = {"maxmemory_policy" => "allkeys-lru", "redis_version" => "6.2.1"}
@cli.config.stub(:redis_info, redis_info) do
@cli.stub(:launch, nil) do
@cli.run
end
end
assert_includes @logdev.string, "allkeys-lru"
end
it "silent if the policy is noeviction" do
redis_info = {"maxmemory_policy" => "noeviction", "redis_version" => "6.2.1"}
@cli.config.stub(:redis_info, redis_info) do
@cli.stub(:launch, nil) do
@cli.run
end
end
refute_includes @logdev.string, "noeviction"
end
end
end
describe "signal handling" do
%w[INT TERM].each do |sig|
describe sig do
it "raises interrupt error" do
assert_raises Interrupt do
@cli.handle_signal(sig)
end
end
end
end
describe "TSTP" do
it "quiets with a corresponding event" do
quiet = false
@cli.config.on(:quiet) do
quiet = true
end
@cli.launcher = Sidekiq::Launcher.new(@cli.config)
@cli.handle_signal("TSTP")
assert_match(/Got TSTP signal/, logdev.string)
assert_equal true, quiet
end
end
describe "TTIN" do
it "prints backtraces for all threads in the process to the logfile" do
@cli.handle_signal("TTIN")
assert_match(/Got TTIN signal/, logdev.string)
assert_match(/\bbacktrace\b/, logdev.string)
end
end
describe "UNKNOWN" do
it "logs about" do
# @cli.parse(%w[sidekiq -r ./test/fake_env.rb])
@cli.handle_signal("UNKNOWN")
assert_match(/Got UNKNOWN signal/, logdev.string)
assert_match(/No signal handler registered/, logdev.string)
end
end
end
end
end

View file

@ -5,7 +5,69 @@ require "active_job"
require "sidekiq/api"
require "sidekiq/rails"
class MyWorker
include Sidekiq::Job
end
class QueuedWorker
include Sidekiq::Job
sidekiq_options queue: :flimflam
end
class InterestingWorker
include Sidekiq::Job
def perform(an_argument)
end
end
class TestActiveJob < ActiveJob::Base
def perform(arg)
end
end
class BaseWorker
include Sidekiq::Job
sidekiq_options "retry" => "base"
end
class AWorker < BaseWorker
end
class BWorker < BaseWorker
sidekiq_options "retry" => "b"
end
class CWorker < BaseWorker
sidekiq_options "retry" => 2
end
class Stopper
def call(worker_class, job, queue, r)
raise ArgumentError unless r
yield if job["args"].first.odd?
end
end
class MiddlewareArguments
def call(worker_class, job, queue, redis)
$arguments_worker_class = worker_class
$arguments_job = job
$arguments_queue = queue
$arguments_redis = redis
yield
end
end
class DWorker < BaseWorker
end
describe Sidekiq::Client do
before do
@config = reset!
@client = Sidekiq::Client.new(config: @config)
end
describe "errors" do
it "raises ArgumentError with invalid params" do
assert_raises ArgumentError do
@ -46,8 +108,7 @@ describe Sidekiq::Client do
end
it "can push" do
client = Sidekiq::Client.new
jid = client.push("class" => "Blah", "args" => [1, 2, 3])
jid = @client.push("class" => "Blah", "args" => [1, 2, 3])
assert_equal 24, jid.size
end
@ -57,13 +118,12 @@ describe Sidekiq::Client do
msg["args"][0] == 1 ? yield : false
end
end
client = Sidekiq::Client.new
client.middleware do |chain|
@client.middleware do |chain|
chain.add mware
end
q = Sidekiq::Queue.new
q.clear
result = client.push_bulk("class" => "Blah", "args" => [[1], [2], [3]])
result = @client.push_bulk("class" => "Blah", "args" => [[1], [2], [3]])
assert_equal 1, result.size
assert_equal 1, q.size
end
@ -76,15 +136,13 @@ describe Sidekiq::Client do
msg
end
}
client = Sidekiq::Client.new
client.middleware do |chain|
@client.middleware do |chain|
chain.add mware
end
client.push("class" => "Blah", "args" => [1, 2, 3])
@client.push("class" => "Blah", "args" => [1, 2, 3])
assert $called
assert client.middleware.exists?(mware)
refute Sidekiq.client_middleware.exists?(mware)
assert @client.middleware.exists?(mware)
end
end
@ -107,17 +165,7 @@ describe Sidekiq::Client do
assert_equal pre + 1, q.size
end
class MyWorker
include Sidekiq::Worker
end
class QueuedWorker
include Sidekiq::Worker
sidekiq_options queue: :flimflam
end
it "enqueues" do
Sidekiq.redis { |c| c.flushdb }
assert_equal Sidekiq.default_job_options, MyWorker.get_sidekiq_options
assert MyWorker.perform_async(1, 2)
assert Sidekiq::Client.enqueue(MyWorker, 1, 2)
@ -140,13 +188,6 @@ describe Sidekiq::Client do
Sidekiq.strict_args!(:raise)
end
class InterestingWorker
include Sidekiq::Worker
def perform(an_argument)
end
end
it "enqueues jobs with a symbol as an argument" do
InterestingWorker.perform_async(:symbol)
end
@ -248,11 +289,6 @@ describe Sidekiq::Client do
ActiveJob::Base.logger = nil
end
class TestActiveJob < ActiveJob::Base
def perform(arg)
end
end
it "raises error with correct class name" do
error = assert_raises ArgumentError do
TestActiveJob.perform_later(BigDecimal("1.1212"))
@ -362,47 +398,13 @@ describe Sidekiq::Client do
end
end
class BaseWorker
include Sidekiq::Worker
sidekiq_options "retry" => "base"
end
class AWorker < BaseWorker
end
class BWorker < BaseWorker
sidekiq_options "retry" => "b"
end
class CWorker < BaseWorker
sidekiq_options "retry" => 2
end
describe "client middleware" do
class Stopper
def call(worker_class, job, queue, r)
raise ArgumentError unless r
yield if job["args"].first.odd?
end
end
class MiddlewareArguments
def call(worker_class, job, queue, redis)
$arguments_worker_class = worker_class
$arguments_job = job
$arguments_queue = queue
$arguments_redis = redis
yield
end
end
it "push sends correct arguments to middleware" do
minimum_job_args = ["args", "class", "created_at", "enqueued_at", "jid", "queue"]
client = Sidekiq::Client.new
client.middleware do |chain|
@client.middleware do |chain|
chain.add MiddlewareArguments
end
client.push("class" => MyWorker, "args" => [0])
@client.push("class" => MyWorker, "args" => [0])
assert_equal($arguments_worker_class, MyWorker)
assert((minimum_job_args & $arguments_job.keys) == minimum_job_args)
@ -411,11 +413,10 @@ describe Sidekiq::Client do
it "push bulk sends correct arguments to middleware" do
minimum_job_args = ["args", "class", "created_at", "enqueued_at", "jid", "queue"]
client = Sidekiq::Client.new
client.middleware do |chain|
@client.middleware do |chain|
chain.add MiddlewareArguments
end
client.push_bulk("class" => MyWorker, "args" => [[0]])
@client.push_bulk("class" => MyWorker, "args" => [[0]])
assert_equal($arguments_worker_class, MyWorker)
assert((minimum_job_args & $arguments_job.keys) == minimum_job_args)
@ -423,14 +424,13 @@ describe Sidekiq::Client do
end
it "can stop some of the jobs from pushing" do
client = Sidekiq::Client.new
client.middleware do |chain|
@client.middleware do |chain|
chain.add Stopper
end
assert_nil client.push("class" => MyWorker, "args" => [0])
assert_match(/[0-9a-f]{12}/, client.push("class" => MyWorker, "args" => [1]))
client.push_bulk("class" => MyWorker, "args" => [[0], [1]]).each do |jid|
assert_nil @client.push("class" => MyWorker, "args" => [0])
assert_match(/[0-9a-f]{12}/, @client.push("class" => MyWorker, "args" => [1]))
@client.push_bulk("class" => MyWorker, "args" => [[0], [1]]).each do |jid|
assert_match(/[0-9a-f]{12}/, jid)
end
end
@ -444,9 +444,6 @@ describe Sidekiq::Client do
end
describe "sharding" do
class DWorker < BaseWorker
end
it "allows sidekiq_options to point to different Redi" do
conn = MiniTest::Mock.new
conn.expect(:pipelined, [0, 1])
@ -468,7 +465,7 @@ describe Sidekiq::Client do
end
it "allows #via to point to different Redi" do
default = Sidekiq::Client.new.redis_pool
default = @client.redis_pool
moo = MiniTest::Mock.new
moo.expect(:pipelined, [0, 1])

View file

@ -11,6 +11,10 @@ module Myapp
end
describe "Current attributes" do
before do
@config = reset!
end
it "saves" do
cm = Sidekiq::CurrentAttributes::Save.new(Myapp::Current)
job = {}
@ -33,10 +37,10 @@ describe "Current attributes" do
end
it "persists" do
Sidekiq::CurrentAttributes.persist(Myapp::Current)
Sidekiq::CurrentAttributes.persist(Myapp::Current, @config)
job_hash = {}
with_context(:user_id, 16) do
Sidekiq.client_middleware.invoke(nil, job_hash, nil, nil) do
@config.client_middleware.invoke(nil, job_hash, nil, nil) do
assert_equal 16, job_hash["cattr"][:user_id]
end
end

View file

@ -16,32 +16,32 @@ describe "DeadSet" do
end
it "should remove dead jobs older than Sidekiq::DeadSet.timeout" do
Sidekiq::DeadSet.stub(:timeout, 10) do
Time.stub(:now, Time.now - 11) do
dead_set.kill(Sidekiq.dump_json(jid: "000103", class: "MyWorker3", args: [])) # the oldest
end
Time.stub(:now, Time.now - 9) do
dead_set.kill(Sidekiq.dump_json(jid: "000102", class: "MyWorker2", args: []))
end
dead_set.kill(Sidekiq.dump_json(jid: "000101", class: "MyWorker1", args: []))
old, Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds] = Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds], 10
Time.stub(:now, Time.now - 11) do
dead_set.kill(Sidekiq.dump_json(jid: "000103", class: "MyWorker3", args: [])) # the oldest
end
Time.stub(:now, Time.now - 9) do
dead_set.kill(Sidekiq.dump_json(jid: "000102", class: "MyWorker2", args: []))
end
dead_set.kill(Sidekiq.dump_json(jid: "000101", class: "MyWorker1", args: []))
assert_nil dead_set.find_job("000103")
assert dead_set.find_job("000102")
assert dead_set.find_job("000101")
ensure
Sidekiq::Config::DEFAULTS[:dead_timeout_in_seconds] = old
end
it "should remove all but last Sidekiq::DeadSet.max_jobs-1 jobs" do
Sidekiq::DeadSet.stub(:max_jobs, 3) do
dead_set.kill(Sidekiq.dump_json(jid: "000101", class: "MyWorker1", args: []))
dead_set.kill(Sidekiq.dump_json(jid: "000102", class: "MyWorker2", args: []))
dead_set.kill(Sidekiq.dump_json(jid: "000103", class: "MyWorker3", args: []))
end
old, Sidekiq::Config::DEFAULTS[:dead_max_jobs] = Sidekiq::Config::DEFAULTS[:dead_max_jobs], 3
dead_set.kill(Sidekiq.dump_json(jid: "000101", class: "MyWorker1", args: []))
dead_set.kill(Sidekiq.dump_json(jid: "000102", class: "MyWorker2", args: []))
dead_set.kill(Sidekiq.dump_json(jid: "000103", class: "MyWorker3", args: []))
assert_nil dead_set.find_job("000101")
assert dead_set.find_job("000102")
assert dead_set.find_job("000103")
ensure
Sidekiq::Config::DEFAULTS[:dead_max_jobs] = old
end
end

View file

@ -26,20 +26,16 @@ end
describe Sidekiq::Component do
describe "with mock logger" do
before do
@config = Sidekiq
@config[:error_handlers] << Sidekiq.method(:default_error_handler)
end
after do
@config[:error_handlers].clear
@config = reset!
end
it "logs the exception to Sidekiq.logger" do
output = capture_logging do
output = capture_logging(@config) do
Thing.new(@config).invoke_exception(a: 1)
end
assert_match(/"a":1/, output, "didn't include the context")
assert_match(/Something didn't work!/, output, "didn't include the exception message")
assert_match(/test\/test_exception_handler.rb/, output, "didn't include the backtrace")
assert_match(/test\/exception_handler.rb/, output, "didn't include the backtrace")
end
describe "when the exception does not have a backtrace" do

View file

@ -2,25 +2,23 @@
require_relative "helper"
require "sidekiq/fetch"
require "sidekiq/capsule"
require "sidekiq/api"
describe Sidekiq::BasicFetch do
before do
Sidekiq.redis do |conn|
conn.flushdb
@config = reset!
@cap = @config.default_capsule
@config.redis do |conn|
conn.rpush("queue:basic", "msg")
end
Sidekiq.reset!
@config = Sidekiq
end
def fetcher(options)
@config.merge!(options)
Sidekiq::BasicFetch.new(@config)
end
it "retrieves" do
fetch = fetcher(queues: ["basic", "bar"])
@cap.queues = ["basic", "bar,3"]
refute @cap.strict
fetch = Sidekiq::BasicFetch.new(@cap)
uow = fetch.retrieve_work
refute_nil uow
assert_equal "basic", uow.queue_name
@ -33,13 +31,15 @@ describe Sidekiq::BasicFetch do
end
it "retrieves with strict setting" do
fetch = fetcher(queues: ["basic", "bar", "bar"], strict: true)
@cap.queues = ["basic", "bar"]
assert @cap.strict
fetch = Sidekiq::BasicFetch.new(@cap)
cmd = fetch.queues_cmd
assert_equal cmd, ["queue:basic", "queue:bar", Sidekiq::BasicFetch::TIMEOUT]
end
it "bulk requeues" do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.rpush("queue:foo", ["bob", "bar"])
conn.rpush("queue:bar", "widget")
end
@ -49,18 +49,20 @@ describe Sidekiq::BasicFetch do
assert_equal 2, q1.size
assert_equal 1, q2.size
fetch = fetcher(queues: ["foo", "bar"])
@cap.queues = ["foo", "bar"]
fetch = Sidekiq::BasicFetch.new(@cap)
works = 3.times.map { fetch.retrieve_work }
assert_equal 0, q1.size
assert_equal 0, q2.size
fetch.bulk_requeue(works, {queues: []})
fetch.bulk_requeue(works, nil)
assert_equal 2, q1.size
assert_equal 1, q2.size
end
it "sleeps when no queues are active" do
fetch = fetcher(queues: [])
@cap.queues = []
fetch = Sidekiq::BasicFetch.new(@cap)
mock = Minitest::Mock.new
mock.expect(:call, nil, [Sidekiq::BasicFetch::TIMEOUT])
fetch.stub(:sleep, mock) { assert_nil fetch.retrieve_work }

View file

@ -25,30 +25,26 @@ end
ENV["REDIS_URL"] ||= "redis://localhost/15"
Sidekiq.logger = ::Logger.new($stdout)
Sidekiq.logger.level = Logger::ERROR
if ENV["SIDEKIQ_REDIS_CLIENT"]
Sidekiq::RedisConnection.adapter = :redis_client
def reset!
RedisClient.new(url: ENV["REDIS_URL"]).call("flushall")
cfg = Sidekiq::Config.new
cfg.logger = ::Logger.new("/dev/null")
cfg.logger.level = Logger::WARN
Sidekiq.instance_variable_set :@config, cfg
cfg
end
def capture_logging(lvl = Logger::INFO)
old = Sidekiq.logger
def capture_logging(cfg, lvl = Logger::INFO)
old = cfg.logger
begin
out = StringIO.new
logger = ::Logger.new(out)
logger.level = lvl
Sidekiq.logger = logger
yield
cfg.logger = logger
yield logger
out.string
ensure
Sidekiq.logger = old
end
end
module Sidekiq
def self.reset!
@config = DEFAULTS.dup
cfg.logger = old
end
end

View file

@ -1,19 +1,43 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/api"
require "active_support/core_ext/numeric/time"
class MySetJob
include Sidekiq::Job
queue_as :foo
sidekiq_options "retry" => 12
def perform
end
end
class MyCustomJob
include Sidekiq::Job
def perform(recorder)
$my_recorder << ["work_performed"]
end
end
class MyCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << "#{@name}-before"
response = yield
@recorder << "#{@name}-after"
response
end
end
describe Sidekiq::Job do
describe "#set" do
class MySetJob
include Sidekiq::Job
queue_as :foo
sidekiq_options "retry" => 12
def perform
end
end
def setup
Sidekiq.redis { |c| c.flushdb }
before do
@cfg = reset!
end
it "provides basic ActiveJob compatibilility" do
@ -115,35 +139,13 @@ describe Sidekiq::Job do
describe "#perform_inline" do
$my_recorder = []
class MyCustomJob
include Sidekiq::Job
def perform(recorder)
$my_recorder << ["work_performed"]
end
end
class MyCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << "#{@name}-before"
response = yield
@recorder << "#{@name}-after"
response
end
end
it "executes middleware & runs job inline" do
server_chain = Sidekiq::Middleware::Chain.new
server_chain.add MyCustomMiddleware, "1-server", $my_recorder
client_chain = Sidekiq::Middleware::Chain.new
client_chain.add MyCustomMiddleware, "1-client", $my_recorder
Sidekiq.stub(:server_middleware, server_chain) do
Sidekiq.stub(:client_middleware, client_chain) do
Sidekiq.default_configuration.stub(:server_middleware, server_chain) do
Sidekiq.default_configuration.stub(:client_middleware, client_chain) do
MyCustomJob.perform_inline($my_recorder)
assert_equal $my_recorder.flatten, %w[1-client-before 1-client-after 1-server-before work_performed 1-server-after]
end

View file

@ -10,6 +10,11 @@ class JobGeneratorTest < Rails::Generators::TestCase
destination File.expand_path("../../tmp", __FILE__)
setup :prepare_destination
setup do
# TODO what's the proper way to silence the generator output?
Rails.logger.level = Logger::WARN
end
test "all files are properly created" do
run_generator ["foo"]
assert_file "app/sidekiq/foo_job.rb"

View file

@ -5,10 +5,12 @@ require "sidekiq/job_logger"
describe "Job logger" do
before do
@old = Sidekiq.logger
@output = StringIO.new
@logger = Sidekiq::Logger.new(@output, level: :info)
Sidekiq.logger = @logger
@logger.formatter = Sidekiq::Logger::Formatters::Pretty.new
@cfg = reset!
@cfg.logger = @logger
Thread.current[:sidekiq_context] = nil
Thread.current[:sidekiq_tid] = nil
@ -17,7 +19,6 @@ describe "Job logger" do
after do
Thread.current[:sidekiq_context] = nil
Thread.current[:sidekiq_tid] = nil
Sidekiq.logger = @old
end
it "tests pretty output" do
@ -79,21 +80,6 @@ describe "Job logger" do
assert_match(/INFO: done/, c)
end
it "tests custom log level uses default log level for invalid value" do
jl = Sidekiq::JobLogger.new(@logger)
job = {"class" => "FooWorker", "log_level" => "non_existent"}
assert @logger.info?
jl.prepare(job) do
jl.call(job, "queue") do
assert @logger.info?
end
end
assert @logger.info?
log_level_warning = @output.string.lines[0]
assert_match(/WARN: Invalid log level/, log_level_warning)
end
it "tests custom logger with non numeric levels" do
logger_class = Class.new(Logger) do
def level
@ -110,7 +96,7 @@ describe "Job logger" do
end
@logger = logger_class.new(@output, level: :info)
Sidekiq.logger = @logger
@cfg.logger = @logger
jl = Sidekiq::JobLogger.new(@logger)
job = {"class" => "FooWorker", "log_level" => "debug"}

View file

@ -9,15 +9,10 @@ describe Sidekiq::Launcher do
end
before do
Sidekiq.redis { |c| c.flushdb }
Sidekiq.reset!
@config = Sidekiq
@config = reset!
@config.capsules << Sidekiq::Capsule.new("default", @config)
@config.capsules.first.concurrency = 3
@config[:tag] = "myapp"
@config[:concurrency] = 3
end
def new_manager(opts)
Sidekiq::Manager.new(opts)
end
describe "memory collection" do
@ -30,10 +25,7 @@ describe Sidekiq::Launcher do
describe "heartbeat" do
before do
@mgr = new_manager(@config)
@launcher = Sidekiq::Launcher.new(@config)
@launcher.manager = @mgr
@id = @launcher.identity
@id = subject.identity
Sidekiq::Processor::WORK_STATE.set("a", {"b" => 1})
@ -56,13 +48,13 @@ describe Sidekiq::Launcher do
it "stores process info in redis" do
subject.heartbeat
workers, rtt = Sidekiq.redis { |c| c.hmget(subject.identity, "busy", "rtt_us") }
workers, rtt = @config.redis { |c| c.hmget(subject.identity, "busy", "rtt_us") }
assert_equal "1", workers
refute_nil rtt
assert_in_delta 1000, rtt.to_i, 1000
expires = Sidekiq.redis { |c| c.pttl(subject.identity) }
expires = @config.redis { |c| c.pttl(subject.identity) }
assert_in_delta 60000, expires, 500
end
@ -71,7 +63,7 @@ describe Sidekiq::Launcher do
before do
@cnt = 0
Sidekiq.on(:heartbeat) do
@config.on(:heartbeat) do
@cnt += 1
end
end
@ -100,11 +92,11 @@ describe Sidekiq::Launcher do
it "stores process info in redis" do
subject.heartbeat
info = Sidekiq.redis { |c| c.hmget(subject.identity, "busy") }
info = @config.redis { |c| c.hmget(subject.identity, "busy") }
assert_equal ["1"], info
expires = Sidekiq.redis { |c| c.pttl(subject.identity) }
expires = @config.redis { |c| c.pttl(subject.identity) }
assert_in_delta 60000, expires, 50
end
@ -112,20 +104,20 @@ describe Sidekiq::Launcher do
it "fires new heartbeat events" do
i = 0
Sidekiq.on(:heartbeat) do
@config.on(:heartbeat) do
i += 1
end
assert_equal 0, i
@launcher.heartbeat
subject.heartbeat
assert_equal 1, i
@launcher.heartbeat
subject.heartbeat
assert_equal 1, i
end
describe "when manager is active" do
before do
Sidekiq::Launcher::PROCTITLES << proc { "xyz" }
@launcher.heartbeat
subject.heartbeat
Sidekiq::Launcher::PROCTITLES.pop
end
@ -134,9 +126,9 @@ describe Sidekiq::Launcher do
end
it "stores process info in redis" do
info = Sidekiq.redis { |c| c.hmget(@id, "busy") }
info = @config.redis { |c| c.hmget(@id, "busy") }
assert_equal ["1"], info
expires = Sidekiq.redis { |c| c.pttl(@id) }
expires = @config.redis { |c| c.pttl(@id) }
assert_in_delta 60000, expires, 500
end
end
@ -144,8 +136,8 @@ describe Sidekiq::Launcher do
describe "when manager is stopped" do
before do
@launcher.quiet
@launcher.heartbeat
subject.quiet
subject.heartbeat
end
# after do
@ -157,9 +149,9 @@ describe Sidekiq::Launcher do
end
it "stores process info in redis" do
info = Sidekiq.redis { |c| c.hmget(@id, "busy") }
info = @config.redis { |c| c.hmget(@id, "busy") }
assert_equal ["1"], info
expires = Sidekiq.redis { |c| c.pttl(@id) }
expires = @config.redis { |c| c.pttl(@id) }
assert_in_delta 60000, expires, 50
end
end

View file

@ -7,35 +7,29 @@ describe "logger" do
before do
@output = StringIO.new
@logger = Sidekiq::Logger.new(@output)
@logger.formatter = Sidekiq::Logger::Formatters::Pretty.new
@config = Sidekiq::Config.new
Sidekiq.log_formatter = nil
Thread.current[:sidekiq_context] = nil
Thread.current[:sidekiq_tid] = nil
end
after do
Sidekiq.log_formatter = nil
Thread.current[:sidekiq_context] = nil
Thread.current[:sidekiq_tid] = nil
end
it "tests default logger format" do
assert_kind_of Sidekiq::Logger::Formatters::Pretty, Sidekiq::Logger.new(@output).formatter
assert_kind_of Sidekiq::Logger::Formatters::Pretty, @config.logger.formatter
end
it "tests heroku logger formatter" do
ENV["DYNO"] = "dyno identifier"
assert_kind_of Sidekiq::Logger::Formatters::WithoutTimestamp, Sidekiq::Logger.new(@output).formatter
assert_kind_of Sidekiq::Logger::Formatters::WithoutTimestamp, @config.logger.formatter
ensure
ENV["DYNO"] = nil
end
it "tests json logger formatter" do
Sidekiq.log_formatter = Sidekiq::Logger::Formatters::JSON.new
assert_kind_of Sidekiq::Logger::Formatters::JSON, Sidekiq::Logger.new(@output).formatter
end
it "tests with context" do
subject = Sidekiq::Context
assert_equal({}, subject.current)

View file

@ -5,23 +5,23 @@ require "sidekiq/manager"
describe Sidekiq::Manager do
before do
Sidekiq.redis { |c| c.flushdb }
Sidekiq.reset!
@config = Sidekiq
@config[:fetch] = Sidekiq::BasicFetch.new(@config)
@config = reset!
@cap = Sidekiq::Capsule.new("default", @config)
@config.capsules << @cap
end
def new_manager
Sidekiq::Manager.new(@config)
Sidekiq::Manager.new(@cap)
end
it "creates N processor instances" do
mgr = new_manager
assert_equal @config[:concurrency], mgr.workers.size
assert_equal @cap.concurrency, mgr.workers.size
end
it "shuts down the system" do
mgr = new_manager
mgr.start
mgr.stop(::Process.clock_gettime(::Process::CLOCK_MONOTONIC))
end

View file

@ -9,7 +9,7 @@ require "sidekiq/api"
describe Sidekiq::Metrics do
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
end
def fixed_time
@ -17,7 +17,7 @@ describe Sidekiq::Metrics do
end
def create_known_metrics(time = fixed_time)
smet = Sidekiq::Metrics::ExecutionTracker.new(Sidekiq)
smet = Sidekiq::Metrics::ExecutionTracker.new(@config)
smet.track("critical", "App::SomeJob") { sleep 0.001 }
smet.track("critical", "App::FooJob") { sleep 0.001 }
assert_raises RuntimeError do
@ -73,7 +73,7 @@ describe Sidekiq::Metrics do
h.record_time(302)
h.record_time(300000000)
assert_equal 8, h.sum
key = Sidekiq.redis do |conn|
key = @config.redis do |conn|
h.persist(conn, fixed_time)
end
assert_equal 0, h.sum
@ -81,7 +81,7 @@ describe Sidekiq::Metrics do
assert_equal "App::FooJob-22-22:3", key
h = Sidekiq::Metrics::Histogram.new("App::FooJob")
data = Sidekiq.redis { |c| h.fetch(c, fixed_time) }
data = @config.redis { |c| h.fetch(c, fixed_time) }
{0 => 1, 3 => 3, 7 => 3, 25 => 1}.each_pair do |idx, val|
assert_equal val, data[idx]
end

View file

@ -3,23 +3,83 @@
require_relative "helper"
require "sidekiq/middleware/chain"
require "sidekiq/processor"
require "sidekiq/capsule"
class CustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
end
class CustomWorker
$recorder = []
include Sidekiq::Worker
def perform(recorder)
$recorder << ["work_performed"]
end
end
class NonYieldingMiddleware
def call(*args)
end
end
class ArgumentYieldingMiddleware
def call(*args)
yield 1
end
end
class AnotherCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
end
class YetAnotherCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
end
class FooC
include Sidekiq::ClientMiddleware
def initialize(*args)
@args = args
end
def call(w, j, q, rp)
redis { |c| c.incr(self.class.name) }
logger.info { |c| [self.class.name, @args].inspect }
yield
end
end
describe Sidekiq::Middleware do
before do
$errors = []
end
class CustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
@config = reset!
end
it "supports custom middleware" do
@ -29,55 +89,9 @@ describe Sidekiq::Middleware do
assert_equal CustomMiddleware, chain.entries.last.klass
end
class CustomWorker
$recorder = []
include Sidekiq::Worker
def perform(recorder)
$recorder << ["work_performed"]
end
end
class NonYieldingMiddleware
def call(*args)
end
end
class ArgumentYieldingMiddleware
def call(*args)
yield 1
end
end
class AnotherCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
end
class YetAnotherCustomMiddleware
def initialize(name, recorder)
@name = name
@recorder = recorder
end
def call(*args)
@recorder << [@name, "before"]
yield
@recorder << [@name, "after"]
end
end
it "executes middleware in the proper order" do
msg = Sidekiq.dump_json({"class" => CustomWorker.to_s, "args" => [$recorder]})
@config = Sidekiq
@config.server_middleware do |chain|
# should only add once, second should replace the first
2.times { |i| chain.add CustomMiddleware, i.to_s, $recorder }
@ -85,7 +99,7 @@ describe Sidekiq::Middleware do
chain.insert_after AnotherCustomMiddleware, YetAnotherCustomMiddleware, "3", $recorder
end
processor = Sidekiq::Processor.new(@config) { |pr, ex| }
processor = Sidekiq::Processor.new(@config.default_capsule) { |pr, ex| }
processor.process(Sidekiq::BasicFetch::UnitOfWork.new("queue:default", msg))
assert_equal %w[2 before 3 before 1 before work_performed 1 after 3 after 2 after], $recorder.flatten
end
@ -166,23 +180,9 @@ describe Sidekiq::Middleware do
end
end
class FooC
include Sidekiq::ClientMiddleware
def initialize(*args)
@args = args
end
def call(w, j, q, rp)
redis { |c| c.incr(self.class.name) }
logger.info { |c| [self.class.name, @args].inspect }
yield
end
end
describe "configuration" do
it "gets an object which provides redis and logging" do
cfg = Sidekiq
chain = Sidekiq::Middleware::Chain.new(cfg)
chain = Sidekiq::Middleware::Chain.new(@config.default_capsule)
chain.add FooC, foo: "bar"
final_action = nil
chain.invoke(nil, nil, nil, nil) { final_action = true }

View file

@ -3,26 +3,67 @@
require_relative "helper"
require "sidekiq/fetch"
require "sidekiq/cli"
require "sidekiq/api"
require "sidekiq/processor"
describe Sidekiq::Processor do
TestProcessorException = Class.new(StandardError)
TEST_PROC_EXCEPTION = TestProcessorException.new("kerboom!")
TestProcessorException = Class.new(StandardError)
TEST_PROC_EXCEPTION = TestProcessorException.new("kerboom!")
before do
$invokes = 0
@config = Sidekiq
@config[:fetch] = Sidekiq::BasicFetch.new(@config)
@processor = ::Sidekiq::Processor.new(@config) { |*args| }
class MockWorker
include Sidekiq::Worker
def perform(args)
raise TEST_PROC_EXCEPTION if args.to_s == "boom"
args.pop if args.is_a? Array
$invokes += 1
end
end
class ExceptionRaisingMiddleware
def initialize(raise_before_yield, raise_after_yield, skip)
@raise_before_yield = raise_before_yield
@raise_after_yield = raise_after_yield
@skip = skip
end
class MockWorker
include Sidekiq::Worker
def perform(args)
raise TEST_PROC_EXCEPTION if args.to_s == "boom"
args.pop if args.is_a? Array
$invokes += 1
def call(worker, item, queue)
raise TEST_PROC_EXCEPTION if @raise_before_yield
yield unless @skip
raise TEST_PROC_EXCEPTION if @raise_after_yield
end
end
class ArgsMutatingServerMiddleware
def call(worker, item, queue)
item["args"] = item["args"].map do |arg|
arg.to_sym if arg.is_a?(String)
end
yield
end
end
class ArgsMutatingClientMiddleware
def call(worker, item, queue, redis_pool)
item["args"] = item["args"].map do |arg|
arg.to_s if arg.is_a?(Symbol)
end
yield
end
end
class CustomJobLogger < Sidekiq::JobLogger
def call(item, queue)
$invokes += 1
yield
rescue Exception
raise
end
end
describe Sidekiq::Processor do
before do
$invokes = 0
@config = reset!
@processor = ::Sidekiq::Processor.new(@config.default_capsule) { |*args| }
end
def work(msg, queue = "queue:default")
@ -72,11 +113,11 @@ describe Sidekiq::Processor do
end
before do
Sidekiq.error_handlers << error_handler
@config.error_handlers << error_handler
end
after do
Sidekiq.error_handlers.pop
@config.error_handlers.pop
end
it "handles invalid JSON" do
@ -127,8 +168,8 @@ describe Sidekiq::Processor do
it "handles exceptions raised during fetch" do
fetch_stub = lambda { raise StandardError, "fetch exception" }
# swallow logging because actually care about the added exception handler
capture_logging do
@processor.instance_variable_get(:@strategy).stub(:retrieve_work, fetch_stub) do
capture_logging(@config) do
@processor.capsule.fetcher.stub(:retrieve_work, fetch_stub) do
@processor.process_one
end
end
@ -138,20 +179,6 @@ describe Sidekiq::Processor do
end
describe "acknowledgement" do
class ExceptionRaisingMiddleware
def initialize(raise_before_yield, raise_after_yield, skip)
@raise_before_yield = raise_before_yield
@raise_after_yield = raise_after_yield
@skip = skip
end
def call(worker, item, queue)
raise TEST_PROC_EXCEPTION if @raise_before_yield
yield unless @skip
raise TEST_PROC_EXCEPTION if @raise_after_yield
end
end
let(:raise_before_yield) { false }
let(:raise_after_yield) { false }
let(:skip_job) { false }
@ -161,15 +188,12 @@ describe Sidekiq::Processor do
before do
work.expect(:queue_name, "queue:default")
work.expect(:job, Sidekiq.dump_json({"class" => MockWorker.to_s, "args" => worker_args}))
Sidekiq.server_middleware do |chain|
@config.server_middleware do |chain|
chain.prepend ExceptionRaisingMiddleware, raise_before_yield, raise_after_yield, skip_job
end
end
after do
Sidekiq.server_middleware do |chain|
chain.remove ExceptionRaisingMiddleware
end
work.verify
end
@ -179,6 +203,7 @@ describe Sidekiq::Processor do
it "acks the job" do
work.expect(:acknowledge, nil)
begin
@processor.config.logger.level = Logger::ERROR
@processor.process(work)
flunk "Expected #process to raise exception"
rescue TestProcessorException
@ -192,6 +217,7 @@ describe Sidekiq::Processor do
it "acks the job" do
work.expect(:acknowledge, nil)
begin
@processor.config.logger.level = Logger::ERROR
@processor.process(work)
flunk "Expected #process to raise exception"
rescue TestProcessorException
@ -214,6 +240,7 @@ describe Sidekiq::Processor do
it "acks the job" do
work.expect(:acknowledge, nil)
begin
@processor.config.logger.level = Logger::ERROR
@processor.process(work)
flunk "Expected #process to raise exception"
rescue TestProcessorException
@ -230,42 +257,15 @@ describe Sidekiq::Processor do
end
describe "retry" do
class ArgsMutatingServerMiddleware
def call(worker, item, queue)
item["args"] = item["args"].map do |arg|
arg.to_sym if arg.is_a?(String)
end
yield
end
end
class ArgsMutatingClientMiddleware
def call(worker, item, queue, redis_pool)
item["args"] = item["args"].map do |arg|
arg.to_s if arg.is_a?(Symbol)
end
yield
end
end
before do
Sidekiq.server_middleware do |chain|
@config.server_middleware do |chain|
chain.prepend ArgsMutatingServerMiddleware
end
Sidekiq.client_middleware do |chain|
@config.client_middleware do |chain|
chain.prepend ArgsMutatingClientMiddleware
end
end
after do
Sidekiq.server_middleware do |chain|
chain.remove ArgsMutatingServerMiddleware
end
Sidekiq.client_middleware do |chain|
chain.remove ArgsMutatingClientMiddleware
end
end
describe "middleware mutates the job args and then fails" do
it "requeues with original arguments" do
job_data = {"class" => MockWorker.to_s, "args" => ["boom"]}
@ -291,25 +291,53 @@ describe Sidekiq::Processor do
end
describe "custom job logger class" do
class CustomJobLogger < Sidekiq::JobLogger
def call(item, queue)
yield
rescue Exception
raise
end
end
before do
opts = Sidekiq
opts[:job_logger] = CustomJobLogger
opts[:fetch] = Sidekiq::BasicFetch.new(opts)
@processor = ::Sidekiq::Processor.new(opts) { |pr, ex| }
@config[:job_logger] = CustomJobLogger
@processor = ::Sidekiq::Processor.new(@config.capsules.first) { |pr, ex| }
end
it "is called instead default Sidekiq::JobLogger" do
msg = Sidekiq.dump_json({"class" => MockWorker.to_s, "args" => ["myarg"]})
@processor.process(work(msg))
assert_equal 1, $invokes
assert_equal 2, $invokes
end
end
describe "stats" do
before do
@config.redis { |c| c.flushdb }
end
describe "when successful" do
let(:processed_today_key) { "stat:processed:#{Time.now.utc.strftime("%Y-%m-%d")}" }
def successful_job
msg = Sidekiq.dump_json({"class" => MockWorker.to_s, "args" => ["myarg"]})
@processor.process(work(msg))
end
it "increments processed stat" do
Sidekiq::Processor::PROCESSED.reset
successful_job
assert_equal 1, Sidekiq::Processor::PROCESSED.reset
end
end
end
describe "stats" do
before do
@config.redis { |c| c.flushdb }
end
def successful_job
msg = Sidekiq.dump_json({"class" => MockWorker.to_s, "args" => ["myarg"]})
@processor.process(work(msg))
end
it "increments processed stat" do
Sidekiq::Processor::PROCESSED.reset
successful_job
assert_equal 1, Sidekiq::Processor::PROCESSED.reset
end
end
end

View file

@ -1,12 +1,13 @@
# frozen_string_literal: true
require_relative "helper"
require "active_job"
require "sidekiq/rails"
require "sidekiq/api"
describe "ActiveJob" do
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
# need to force this since we aren't booting a Rails app
ActiveJob::Base.queue_adapter = :sidekiq
ActiveJob::Base.logger = nil
@ -16,19 +17,19 @@ describe "ActiveJob" do
it "does not allow Sidekiq::Job in AJ::Base classes" do
ex = assert_raises ArgumentError do
Class.new(ActiveJob::Base) do
include Sidekiq::Worker
include Sidekiq::Job
end
end
assert_includes ex.message, "Sidekiq::Job cannot be included"
end
it "loads Sidekiq::Worker::Options in AJ::Base classes" do
it "loads Sidekiq::Job::Options in AJ::Base classes" do
aj = Class.new(ActiveJob::Base) do
queue_as :bar
sidekiq_options retry: 4, queue: "foo", backtrace: 5
sidekiq_retry_in { |count, _exception| count * 10 }
sidekiq_retries_exhausted do |msg, _exception|
Sidekiq.logger.warn "Failed #{msg["class"]} with #{msg["args"]}: #{msg["error_message"]}"
@config.logger.warn "Failed #{msg["class"]} with #{msg["args"]}: #{msg["error_message"]}"
end
end

View file

@ -1,18 +1,15 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/cli"
require "sidekiq/redis_connection"
require "sidekiq/capsule"
describe Sidekiq::RedisConnection do
describe "create" do
before do
Sidekiq.reset!
@old = ENV["REDIS_URL"]
ENV["REDIS_URL"] = "redis://localhost/15"
end
after do
ENV["REDIS_URL"] = @old
@config = reset!
@config.capsules << Sidekiq::Capsule.new("default", @config)
@config.capsules.first.concurrency = 12
end
def client_for(redis)
@ -29,6 +26,7 @@ describe Sidekiq::RedisConnection do
it "creates a pooled redis connection" do
pool = Sidekiq::RedisConnection.create
assert_equal 5, pool.size
assert_equal client_class, pool.checkout.class
end
@ -36,54 +34,38 @@ describe Sidekiq::RedisConnection do
# `connection_pool`, until then we need to reach into the internal state to
# verify the setting.
describe "size" do
def client_connection(*args)
def client_connection(args = {})
Sidekiq.stub(:server?, nil) do
Sidekiq::RedisConnection.create(*args)
@config.redis = args
@config.redis_pool
end
end
def server_connection(*args)
def server_connection(args = {})
Sidekiq.stub(:server?, "constant") do
Sidekiq::RedisConnection.create(*args)
@config.redis = args
@config.redis_pool
end
end
it "uses the specified custom pool size" do
pool = client_connection(size: 42)
assert_equal 42, pool.instance_eval { @size }
assert_equal 42, pool.instance_eval { @available.length }
pool = server_connection(size: 42)
assert_equal 42, pool.instance_eval { @size }
assert_equal 42, pool.instance_eval { @available.length }
end
it "defaults server pool sizes based on concurrency with padding" do
_expected_padding = 5
config = Sidekiq
prev_concurrency = config[:concurrency]
config[:concurrency] = 6
it "sizes default pool" do
pool = server_connection
assert_equal 11, pool.instance_eval { @size }
assert_equal 11, pool.instance_eval { @available.length }
ensure
config[:concurrency] = prev_concurrency
assert_equal 5, pool.size
end
it "defaults client pool sizes to 5" do
pool = client_connection
assert_equal 5, pool.size
end
assert_equal 5, pool.instance_eval { @size }
assert_equal 5, pool.instance_eval { @available.length }
it "sizes capsule pools based on concurrency" do
assert_equal 12, @config.capsules.first.redis_pool.size
end
it "changes client pool sizes with ENV" do
ENV["RAILS_MAX_THREADS"] = "9"
pool = client_connection
assert_equal 9, pool.instance_eval { @size }
assert_equal 9, pool.instance_eval { @available.length }
assert_equal 9, pool.size
ensure
ENV.delete("RAILS_MAX_THREADS")
end
@ -113,15 +95,10 @@ describe Sidekiq::RedisConnection do
describe "namespace" do
it "isn't supported" do
Kernel.stub(:exit, ->(code) { raise "Exited #{code}" }) do
output = capture_logging do
error = assert_raises RuntimeError do
Sidekiq::RedisConnection.create(namespace: "xxx")
end
assert_includes error.message, "Exited -127"
end
assert_includes output, "Your Redis configuration uses the namespace 'xxx' but this feature isn't supported by redis-client"
error = assert_raises ArgumentError do
Sidekiq::RedisConnection.create(namespace: "xxx")
end
assert_includes error.message, "Your Redis configuration uses the namespace 'xxx' but this feature isn't supported by redis-client"
end
end
@ -176,8 +153,8 @@ describe Sidekiq::RedisConnection do
password: "secret"
}
output = capture_logging do
Sidekiq::RedisConnection.create(options)
output = capture_logging(@config) do |logger|
Sidekiq::RedisConnection.create(options.merge(logger: logger))
end
refute_includes(options.inspect, "REDACTED")
@ -188,17 +165,17 @@ describe Sidekiq::RedisConnection do
end
it "prunes SSL parameters from the logging" do
options = {
ssl_params: {
cert_store: OpenSSL::X509::Store.new
output = capture_logging(@config) do |logger|
options = {
ssl_params: {
cert_store: OpenSSL::X509::Store.new
},
logger: logger
}
}
output = capture_logging do
Sidekiq::RedisConnection.create(options)
assert_includes(options.inspect, "ssl_params")
end
assert_includes(options.inspect, "ssl_params")
refute_includes(output, "ssl_params")
end
end

View file

@ -4,9 +4,10 @@ require_relative "helper"
require "sidekiq/scheduled"
require "sidekiq/job_retry"
require "sidekiq/api"
require "sidekiq/capsule"
class SomeWorker
include Sidekiq::Worker
include Sidekiq::Job
end
class BadErrorMessage < StandardError
@ -15,12 +16,47 @@ class BadErrorMessage < StandardError
end
end
class CustomWorkerWithoutException
include Sidekiq::Worker
sidekiq_retry_in do |count|
count * 2
end
end
class SpecialError < StandardError
end
class CustomWorkerWithException
include Sidekiq::Worker
sidekiq_retry_in do |count, exception|
case exception
when RuntimeError
:kill
when Interrupt
:discard
when SpecialError
nil
when ArgumentError
count * 4
else
count * 2
end
end
end
class ErrorWorker
include Sidekiq::Worker
sidekiq_retry_in do |count|
count / 0
end
end
describe Sidekiq::JobRetry do
before do
Sidekiq.redis { |c| c.flushdb }
@config = Sidekiq
@config[:max_retries] = 25
@config[:error_handlers] << Sidekiq.method(:default_error_handler)
@config = reset!
end
describe "middleware" do
@ -29,7 +65,7 @@ describe Sidekiq::JobRetry do
end
def handler
@handler ||= Sidekiq::JobRetry.new(@config)
@handler ||= Sidekiq::JobRetry.new(@config.default_capsule)
end
def jobstr(options = {})
@ -253,44 +289,6 @@ describe Sidekiq::JobRetry do
end
describe "custom retry delay" do
class CustomWorkerWithoutException
include Sidekiq::Worker
sidekiq_retry_in do |count|
count * 2
end
end
class SpecialError < StandardError
end
class CustomWorkerWithException
include Sidekiq::Worker
sidekiq_retry_in do |count, exception|
case exception
when RuntimeError
:kill
when Interrupt
:discard
when SpecialError
nil
when ArgumentError
count * 4
else
count * 2
end
end
end
class ErrorWorker
include Sidekiq::Worker
sidekiq_retry_in do |count|
count / 0
end
end
it "retries with a default delay" do
strat, count = handler.__send__(:delay_for, worker, 2, StandardError.new)
assert_equal :default, strat
@ -335,7 +333,7 @@ describe Sidekiq::JobRetry do
end
it "falls back to the default retry on exception" do
output = capture_logging do
output = capture_logging(@config) do
strat, count = handler.__send__(:delay_for, ErrorWorker, 2, StandardError.new)
assert_equal :default, strat
refute_equal 4, count

View file

@ -2,7 +2,7 @@ require_relative "helper"
require "sidekiq/job_retry"
class NewWorker
include Sidekiq::Worker
include Sidekiq::Job
sidekiq_class_attribute :exhausted_called, :exhausted_job, :exhausted_exception
@ -14,7 +14,7 @@ class NewWorker
end
class OldWorker
include Sidekiq::Worker
include Sidekiq::Job
sidekiq_class_attribute :exhausted_called, :exhausted_job, :exhausted_exception
@ -24,6 +24,10 @@ class OldWorker
end
end
class Foobar
include Sidekiq::Job
end
describe "sidekiq_retries_exhausted" do
def cleanup
[NewWorker, OldWorker].each do |worker_class|
@ -34,7 +38,7 @@ describe "sidekiq_retries_exhausted" do
end
before do
@config = Sidekiq
@config = reset!
cleanup
end
@ -51,7 +55,7 @@ describe "sidekiq_retries_exhausted" do
end
def handler
@handler ||= Sidekiq::JobRetry.new(@config)
@handler ||= Sidekiq::JobRetry.new(@config.default_capsule)
end
def job(options = {})
@ -121,14 +125,10 @@ describe "sidekiq_retries_exhausted" do
end
it "allows global failure handlers" do
class Foobar
include Sidekiq::Worker
end
exhausted_job = nil
exhausted_exception = nil
Sidekiq.death_handlers.clear
Sidekiq.death_handlers << proc do |job, ex|
@config.death_handlers.clear
@config.death_handlers << proc do |job, ex|
exhausted_job = job
exhausted_exception = ex
end
@ -142,7 +142,5 @@ describe "sidekiq_retries_exhausted" do
assert exhausted_job
assert_equal raised_error, exhausted_exception
ensure
Sidekiq.death_handlers.clear
end
end

View file

@ -2,17 +2,24 @@
require_relative "helper"
require "sidekiq/scheduled"
require "sidekiq/api"
class ScheduledWorker
include Sidekiq::Job
def perform(x)
end
end
class MyStopper
def call(worker_class, job, queue, r)
yield if job["args"].first.odd?
end
end
describe Sidekiq::Scheduled do
class ScheduledWorker
include Sidekiq::Worker
def perform(x)
end
end
describe "poller" do
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
@error_1 = {"class" => ScheduledWorker.name, "args" => [0], "queue" => "queue_1"}
@error_2 = {"class" => ScheduledWorker.name, "args" => [1], "queue" => "queue_2"}
@error_3 = {"class" => ScheduledWorker.name, "args" => [2], "queue" => "queue_3"}
@ -20,35 +27,28 @@ describe Sidekiq::Scheduled do
@future_2 = {"class" => ScheduledWorker.name, "args" => [4], "queue" => "queue_5"}
@future_3 = {"class" => ScheduledWorker.name, "args" => [5], "queue" => "queue_6"}
@config = Sidekiq
@retry = Sidekiq::RetrySet.new
@scheduled = Sidekiq::ScheduledSet.new
@poller = Sidekiq::Scheduled::Poller.new(@config)
end
class MyStopper
def call(worker_class, job, queue, r)
yield if job["args"].first.odd?
end
# @config.logger = ::Logger.new($stdout)
# @config.logger.level = Logger::DEBUG
end
it "executes client middleware" do
Sidekiq.client_middleware.add MyStopper
begin
@retry.schedule (Time.now - 60).to_f, @error_1
@retry.schedule (Time.now - 60).to_f, @error_2
@scheduled.schedule (Time.now - 60).to_f, @future_2
@scheduled.schedule (Time.now - 60).to_f, @future_3
@config.client_middleware.add MyStopper
@poller.enqueue
@retry.schedule (Time.now - 60).to_f, @error_1
@retry.schedule (Time.now - 60).to_f, @error_2
@scheduled.schedule (Time.now - 60).to_f, @future_2
@scheduled.schedule (Time.now - 60).to_f, @future_3
assert_equal 0, Sidekiq::Queue.new("queue_1").size
assert_equal 1, Sidekiq::Queue.new("queue_2").size
assert_equal 0, Sidekiq::Queue.new("queue_5").size
assert_equal 1, Sidekiq::Queue.new("queue_6").size
ensure
Sidekiq.client_middleware.remove MyStopper
end
@poller.enqueue
assert_equal 0, Sidekiq::Queue.new("queue_1").size
assert_equal 1, Sidekiq::Queue.new("queue_2").size
assert_equal 0, Sidekiq::Queue.new("queue_5").size
assert_equal 1, Sidekiq::Queue.new("queue_6").size
end
it "should empty the retry and scheduled queues up to the current time" do
@ -67,7 +67,7 @@ describe Sidekiq::Scheduled do
Time.stub(:now, enqueued_time) do
@poller.enqueue
Sidekiq.redis do |conn|
@config.redis do |conn|
%w[queue:queue_1 queue:queue_2 queue:queue_4 queue:queue_5].each do |queue_name|
assert_equal 1, conn.llen(queue_name)
job = Sidekiq.load_json(conn.lrange(queue_name, 0, -1)[0])
@ -94,7 +94,7 @@ describe Sidekiq::Scheduled do
@poller.terminate
@poller.enqueue
Sidekiq.redis do |conn|
@config.redis do |conn|
%w[queue:queue_1 queue:queue_4].each do |queue_name|
assert_equal 0, conn.llen(queue_name)
end
@ -106,11 +106,11 @@ describe Sidekiq::Scheduled do
end
def with_sidekiq_option(name, value)
original, Sidekiq[name] = Sidekiq[name], value
original, @config[name] = @config[name], value
begin
yield
ensure
Sidekiq[name] = original
@config[name] = original
end
end
@ -128,7 +128,7 @@ describe Sidekiq::Scheduled do
it "calculates an average poll interval based on the number of known Sidekiq processes" do
with_sidekiq_option(:average_scheduled_poll_interval, 10) do
3.times do |i|
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.sadd("processes", "process-#{i}")
conn.hset("process-#{i}", "info", "")
end

View file

@ -1,25 +1,25 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/scheduled"
require "sidekiq/api"
require "active_support/core_ext/integer/time"
class SomeScheduledWorker
include Sidekiq::Job
sidekiq_options queue: :custom_queue
def perform(x)
end
end
# Assume we can pass any class as time to perform_in
class TimeDuck
def to_f
42.0
end
end
describe "job scheduling" do
describe "middleware" do
class SomeScheduledWorker
include Sidekiq::Worker
sidekiq_options queue: :custom_queue
def perform(x)
end
end
# Assume we can pass any class as time to perform_in
class TimeDuck
def to_f
42.0
end
end
it "schedules jobs" do
ss = Sidekiq::ScheduledSet.new
ss.clear

View file

@ -4,7 +4,7 @@ require_relative "helper"
describe Sidekiq do
before do
@config = Sidekiq
@config = reset!
end
describe "json processing" do
@ -24,40 +24,39 @@ describe Sidekiq do
describe "lifecycle events" do
it "handles invalid input" do
config = @config
config[:lifecycle_events][:startup].clear
@config[:lifecycle_events][:startup].clear
e = assert_raises ArgumentError do
config.on(:startp)
@config.on(:startp)
end
assert_match(/Invalid event name/, e.message)
e = assert_raises ArgumentError do
config.on("startup")
@config.on("startup")
end
assert_match(/Symbols only/, e.message)
config.on(:startup) do
@config.on(:startup) do
1 + 1
end
assert_equal 2, config[:lifecycle_events][:startup].first.call
assert_equal 2, @config[:lifecycle_events][:startup].first.call
end
end
describe "default_job_options" do
it "stringifies keys" do
@old_options = @config.default_job_options
@old_options = Sidekiq.default_job_options
begin
@config.default_job_options = {queue: "cat"}
assert_equal "cat", @config.default_job_options["queue"]
Sidekiq.default_job_options = {queue: "cat"}
assert_equal "cat", Sidekiq.default_job_options["queue"]
ensure
@config.default_job_options = @old_options
Sidekiq.default_job_options = @old_options
end
end
end
describe "error handling" do
it "deals with user-specified error handlers which raise errors" do
output = capture_logging do
output = capture_logging(@config) do
@config.error_handlers << proc { |x, hash|
raise "boom"
}
@ -72,9 +71,9 @@ describe Sidekiq do
describe "redis connection" do
it "does not continually retry" do
assert_raises Sidekiq::RedisConnection.adapter::CommandError do
assert_raises Sidekiq::RedisClientAdapter::CommandError do
@config.redis do |c|
raise Sidekiq::RedisConnection.adapter::CommandError, "READONLY You can't write against a replica."
raise Sidekiq::RedisClientAdapter::CommandError, "READONLY You can't write against a replica."
end
end
end
@ -83,7 +82,7 @@ describe Sidekiq do
counts = []
@config.redis do |c|
counts << c.info["total_connections_received"].to_i
raise Sidekiq::RedisConnection.adapter::CommandError, "READONLY You can't write against a replica." if counts.size == 1
raise Sidekiq::RedisClientAdapter::CommandError, "READONLY You can't write against a replica." if counts.size == 1
end
assert_equal 2, counts.size
assert_equal counts[0] + 1, counts[1]
@ -93,7 +92,7 @@ describe Sidekiq do
counts = []
@config.redis do |c|
counts << c.info["total_connections_received"].to_i
raise Sidekiq::RedisConnection.adapter::CommandError, "UNBLOCKED force unblock from blocking operation, instance state changed (master -> replica?)" if counts.size == 1
raise Sidekiq::RedisClientAdapter::CommandError, "UNBLOCKED force unblock from blocking operation, instance state changed (master -> replica?)" if counts.size == 1
end
assert_equal 2, counts.size
assert_equal counts[0] + 1, counts[1]

View file

@ -19,7 +19,7 @@ end
describe Sidekiq::Monitor do
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
end
describe "status" do

View file

@ -1,598 +0,0 @@
# frozen_string_literal: true
require_relative "helper"
require "sidekiq/cli"
describe Sidekiq::CLI do
describe "#parse" do
before do
Sidekiq.reset!
@logger = Sidekiq.logger
@logdev = StringIO.new
Sidekiq.logger = Logger.new(@logdev)
@config = Sidekiq
end
attr_reader :config
after do
Sidekiq.logger = @logger
end
subject do
Sidekiq::CLI.new.tap { |c| c.config = config }
end
def logdev
@logdev ||= StringIO.new
end
describe "#parse" do
describe "options" do
describe "require" do
it "accepts with -r" do
subject.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal "./test/fake_env.rb", config[:require]
end
end
describe "concurrency" do
it "accepts with -c" do
subject.parse(%w[sidekiq -c 60 -r ./test/fake_env.rb])
assert_equal 60, config[:concurrency]
end
describe "when concurrency is empty and RAILS_MAX_THREADS env var is set" do
before do
ENV["RAILS_MAX_THREADS"] = "9"
end
after do
ENV.delete("RAILS_MAX_THREADS")
end
it "sets concurrency from RAILS_MAX_THREADS env var" do
subject.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal 9, config[:concurrency]
end
it "option overrides RAILS_MAX_THREADS env var" do
subject.parse(%w[sidekiq -c 60 -r ./test/fake_env.rb])
assert_equal 60, config[:concurrency]
end
end
end
describe "setting internal options via the config file" do
describe "setting the `strict` option via the config file" do
it "discards the `strict` option specified via the config file" do
subject.parse(%w[sidekiq -C ./test/config_with_internal_options.yml])
assert_equal true, !!config[:strict]
end
end
end
describe "queues" do
it "accepts with -q" do
subject.parse(%w[sidekiq -q foo -r ./test/fake_env.rb])
assert_equal ["foo"], config[:queues]
end
describe "when weights are not present" do
it "accepts queues without weights" do
subject.parse(%w[sidekiq -q foo -q bar -r ./test/fake_env.rb])
assert_equal ["foo", "bar"], config[:queues]
end
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -q foo -q bar -r ./test/fake_env.rb])
assert_equal true, !!config[:strict]
end
end
describe "when weights are present" do
it "accepts queues with weights" do
subject.parse(%w[sidekiq -q foo,3 -q bar -r ./test/fake_env.rb])
assert_equal ["foo", "foo", "foo", "bar"], config[:queues]
end
it "does not set strictly ordered queues" do
subject.parse(%w[sidekiq -q foo,3 -q bar -r ./test/fake_env.rb])
assert_equal false, !!config[:strict]
end
end
it "accepts queues with multi-word names" do
subject.parse(%w[sidekiq -q queue_one -q queue-two -r ./test/fake_env.rb])
assert_equal ["queue_one", "queue-two"], config[:queues]
end
it "accepts queues with dots in the name" do
subject.parse(%w[sidekiq -q foo.bar -r ./test/fake_env.rb])
assert_equal ["foo.bar"], config[:queues]
end
describe "when duplicate queue names" do
it "raises an argument error" do
assert_raises(ArgumentError) { subject.parse(%w[sidekiq -q foo -q foo -r ./test/fake_env.rb]) }
assert_raises(ArgumentError) { subject.parse(%w[sidekiq -q foo,3 -q foo,1 -r ./test/fake_env.rb]) }
end
end
describe "when queues are empty" do
describe "when no queues are specified via -q" do
it "sets 'default' queue" do
subject.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal ["default"], config[:queues]
end
end
describe "when no queues are specified via the config file" do
it "sets 'default' queue" do
subject.parse(%w[sidekiq -C ./test/config_empty.yml -r ./test/fake_env.rb])
assert_equal ["default"], config[:queues]
end
end
end
end
describe "timeout" do
it "accepts with -t" do
subject.parse(%w[sidekiq -t 30 -r ./test/fake_env.rb])
assert_equal 30, config[:timeout]
end
end
describe "verbose" do
it "accepts with -v" do
subject.parse(%w[sidekiq -v -r ./test/fake_env.rb])
assert_equal Logger::DEBUG, Sidekiq.logger.level
end
end
describe "config file" do
it "accepts with -C" do
subject.parse(%w[sidekiq -C ./test/config.yml])
assert_equal "./test/config.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_nil config[:environment]
assert_equal 50, config[:concurrency]
assert_equal 2, config[:queues].count { |q| q == "very_often" }
assert_equal 1, config[:queues].count { |q| q == "seldom" }
end
it "accepts stringy keys" do
subject.parse(%w[sidekiq -C ./test/config_string.yml])
assert_equal "./test/config_string.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_nil config[:environment]
assert_equal 50, config[:concurrency]
assert_equal 2, config[:queues].count { |q| q == "very_often" }
assert_equal 1, config[:queues].count { |q| q == "seldom" }
end
it "accepts environment specific config" do
subject.parse(%w[sidekiq -e staging -C ./test/config_environment.yml])
assert_equal "./test/config_environment.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "staging", config[:environment]
assert_equal 50, config[:concurrency]
assert_equal 2, config[:queues].count { |q| q == "very_often" }
assert_equal 1, config[:queues].count { |q| q == "seldom" }
end
it "accepts environment specific config with alias" do
subject.parse(%w[sidekiq -e staging -C ./test/config_with_alias.yml])
assert_equal "./test/config_with_alias.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "staging", config[:environment]
assert_equal 50, config[:concurrency]
assert_equal 2, config[:queues].count { |q| q == "very_often" }
assert_equal 1, config[:queues].count { |q| q == "seldom" }
subject.parse(%w[sidekiq -e production -C ./test/config_with_alias.yml])
assert_equal "./test/config_with_alias.yml", config[:config_file]
assert config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "production", config[:environment]
assert_equal 50, config[:concurrency]
assert_equal 2, config[:queues].count { |q| q == "very_often" }
assert_equal 1, config[:queues].count { |q| q == "seldom" }
end
it "exposes ERB expected __FILE__ and __dir__" do
given_path = "./test/config__FILE__and__dir__.yml"
expected_file = File.expand_path(given_path)
# As per Ruby's Kernel module docs, __dir__ is equivalent to File.dirname(File.realpath(__FILE__))
expected_dir = File.dirname(File.realpath(expected_file))
subject.parse(%W[sidekiq -C #{given_path}])
assert_equal(expected_file, config.fetch(:__FILE__))
assert_equal(expected_dir, config.fetch(:__dir__))
end
end
describe "default config file" do
describe "when required path is a directory" do
it "tries config/sidekiq.yml from required diretory" do
subject.parse(%w[sidekiq -r ./test/dummy])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, config[:concurrency]
end
end
describe "when required path is a file" do
it "tries config/sidekiq.yml from current diretory" do
config[:require] = "./test/dummy" # stub current dir ./
subject.parse(%w[sidekiq -r ./test/fake_env.rb])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, config[:concurrency]
end
end
describe "without any required path" do
it "tries config/sidekiq.yml from current diretory" do
config[:require] = "./test/dummy" # stub current dir ./
subject.parse(%w[sidekiq])
assert_equal "./test/dummy/config/sidekiq.yml", config[:config_file]
assert_equal 25, config[:concurrency]
end
end
describe "when config file and flags" do
it "merges options" do
subject.parse(%w[sidekiq -C ./test/config.yml
-e snoop
-c 100
-r ./test/fake_env.rb
-q often,7
-q seldom,3])
assert_equal "./test/config.yml", config[:config_file]
refute config[:verbose]
assert_equal "./test/fake_env.rb", config[:require]
assert_equal "snoop", config[:environment]
assert_equal 100, config[:concurrency]
assert_equal 7, config[:queues].count { |q| q == "often" }
assert_equal 3, config[:queues].count { |q| q == "seldom" }
end
describe "when the config file specifies queues with weights" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!config[:strict]
end
end
describe "when -q specifies no queues" do
it "does not set strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb])
assert_equal false, !!config[:strict]
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!config[:strict]
end
end
end
describe "when the config file specifies queues without weights" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_queues_without_weights.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!config[:strict]
end
end
describe "when -q specifies no queues" do
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_queues_without_weights.yml
-r ./test/fake_env.rb])
assert_equal true, !!config[:strict]
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_queues_without_weights.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!config[:strict]
end
end
end
describe "when the config file specifies no queues" do
describe "when -q specifies queues without weights" do
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_empty.yml
-r ./test/fake_env.rb
-q foo -q bar])
assert_equal true, !!config[:strict]
end
end
describe "when -q specifies no queues" do
it "sets strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_empty.yml
-r ./test/fake_env.rb])
assert_equal true, !!config[:strict]
end
end
describe "when -q specifies queues with weights" do
it "does not set strictly ordered queues" do
subject.parse(%w[sidekiq -C ./test/config_empty.yml
-r ./test/fake_env.rb
-q foo,2 -q bar,3])
assert_equal false, !!config[:strict]
end
end
end
end
describe "default config file" do
describe "when required path is a directory" do
it "tries config/sidekiq.yml" do
subject.parse(%w[sidekiq -r ./test/dummy])
assert_equal "sidekiq.yml", File.basename(config[:config_file])
assert_equal 25, config[:concurrency]
end
end
end
end
end
describe "validation" do
describe "when required application path does not exist" do
it "exits with status 1" do
exit = assert_raises(SystemExit) { subject.parse(%w[sidekiq -r /non/existent/path]) }
assert_equal 1, exit.status
end
end
describe "when required path is a directory without config/application.rb" do
it "exits with status 1" do
exit = assert_raises(SystemExit) { subject.parse(%w[sidekiq -r ./test/fixtures]) }
assert_equal 1, exit.status
end
describe "when config file path does not exist" do
it "raises argument error" do
assert_raises(ArgumentError) do
subject.parse(%w[sidekiq -r ./test/fake_env.rb -C /non/existent/path])
end
end
end
end
describe "when concurrency is not valid" do
describe "when set to 0" do
it "raises argument error" do
assert_raises(ArgumentError) do
subject.parse(%w[sidekiq -r ./test/fake_env.rb -c 0])
end
end
end
describe "when set to a negative number" do
it "raises argument error" do
assert_raises(ArgumentError) do
subject.parse(%w[sidekiq -r ./test/fake_env.rb -c -2])
end
end
end
end
describe "when timeout is not valid" do
describe "when set to 0" do
it "raises argument error" do
assert_raises(ArgumentError) do
subject.parse(%w[sidekiq -r ./test/fake_env.rb -t 0])
end
end
end
describe "when set to a negative number" do
it "raises argument error" do
assert_raises(ArgumentError) do
subject.parse(%w[sidekiq -r ./test/fake_env.rb -t -2])
end
end
end
end
end
end
describe "#run" do
before do
subject.config = Sidekiq
subject.config[:concurrency] = 2
subject.config[:require] = "./test/fake_env.rb"
end
describe "require workers" do
describe "when path is a rails directory" do
before do
subject.config[:require] = "./test/dummy"
subject.environment = "test"
end
it "requires sidekiq railtie and rails application with environment" do
subject.stub(:launch, nil) do
subject.run
end
assert defined?(Sidekiq::Rails)
assert defined?(Dummy::Application)
end
it "tags with the app directory name" do
subject.stub(:launch, nil) do
subject.run
end
assert_equal "dummy", subject.config[:tag]
end
end
describe "when path is file" do
it "requires application" do
subject.stub(:launch, nil) do
subject.run
end
assert $LOADED_FEATURES.any? { |x| x =~ /test\/fake_env/ }
end
end
end
describe "when development environment and stdout tty" do
it "prints banner" do
subject.stub(:environment, "development") do
assert_output(/#{Regexp.escape(Sidekiq::CLI.banner)}/) do
$stdout.stub(:tty?, true) do
subject.stub(:launch, nil) do
subject.run
end
end
end
end
end
end
it "prints rails info" do
subject.stub(:environment, "production") do
subject.stub(:launch, nil) do
subject.run
end
assert_includes @logdev.string, "Booted Rails #{::Rails.version} application in production environment"
end
end
describe "checking maxmemory policy" do
it "warns if the policy is not noeviction" do
redis_info = {"maxmemory_policy" => "allkeys-lru", "redis_version" => "6.2.1"}
Sidekiq.stub(:redis_info, redis_info) do
subject.stub(:launch, nil) do
subject.run
end
end
assert_includes @logdev.string, "allkeys-lru"
end
it "silent if the policy is noeviction" do
redis_info = {"maxmemory_policy" => "noeviction", "redis_version" => "6.2.1"}
Sidekiq.stub(:redis_info, redis_info) do
subject.stub(:launch, nil) do
subject.run
end
end
refute_includes @logdev.string, "noeviction"
end
end
end
describe "signal handling" do
%w[INT TERM].each do |sig|
describe sig do
it "raises interrupt error" do
assert_raises Interrupt do
subject.handle_signal(sig)
end
end
end
end
describe "TSTP" do
it "quiets with a corresponding event" do
quiet = false
subject.config = Sidekiq
subject.config.on(:quiet) do
quiet = true
end
subject.launcher = Sidekiq::Launcher.new(subject.config)
subject.handle_signal("TSTP")
assert_match(/Got TSTP signal/, logdev.string)
assert_equal true, quiet
end
end
describe "TTIN" do
it "prints backtraces for all threads in the process to the logfile" do
subject.handle_signal("TTIN")
assert_match(/Got TTIN signal/, logdev.string)
assert_match(/\bbacktrace\b/, logdev.string)
end
end
describe "UNKNOWN" do
it "logs about" do
# subject.parse(%w[sidekiq -r ./test/fake_env.rb])
subject.handle_signal("UNKNOWN")
assert_match(/Got UNKNOWN signal/, logdev.string)
assert_match(/No signal handler registered/, logdev.string)
end
end
end
end
end

View file

@ -2,6 +2,24 @@
require_relative "helper"
class AttributeWorker
include Sidekiq::Job
sidekiq_class_attribute :count
self.count = 0
attr_accessor :foo
def perform
self.class.count += 1 if foo == :bar
end
end
class AttributeMiddleware
def call(worker, msg, queue)
worker.foo = :bar if worker.respond_to?(:foo=)
yield
end
end
describe "Sidekiq::Testing" do
describe "require/load sidekiq/testing.rb" do
before do
@ -89,24 +107,6 @@ describe "Sidekiq::Testing" do
Sidekiq::Testing.disable!
end
class AttributeWorker
include Sidekiq::Worker
sidekiq_class_attribute :count
self.count = 0
attr_accessor :foo
def perform
self.class.count += 1 if foo == :bar
end
end
class AttributeMiddleware
def call(worker, msg, queue)
worker.foo = :bar if worker.respond_to?(:foo=)
yield
end
end
it "wraps the inlined worker with middleware" do
Sidekiq::Testing.server_middleware do |chain|
chain.add AttributeMiddleware

View file

@ -1,31 +1,82 @@
# frozen_string_literal: true
require_relative "helper"
class PerformError < RuntimeError; end
class DirectWorker
include Sidekiq::Job
def perform(a, b)
a + b
end
end
class EnqueuedWorker
include Sidekiq::Job
def perform(a, b)
a + b
end
end
class StoredWorker
include Sidekiq::Job
def perform(error)
raise PerformError if error
end
end
class SpecificJidWorker
include Sidekiq::Job
sidekiq_class_attribute :count
self.count = 0
def perform(worker_jid)
return unless worker_jid == jid
self.class.count += 1
end
end
class FirstWorker
include Sidekiq::Job
sidekiq_class_attribute :count
self.count = 0
def perform
self.class.count += 1
end
end
class SecondWorker
include Sidekiq::Job
sidekiq_class_attribute :count
self.count = 0
def perform
self.class.count += 1
end
end
class ThirdWorker
include Sidekiq::Job
sidekiq_class_attribute :count
def perform
FirstWorker.perform_async
SecondWorker.perform_async
end
end
class QueueWorker
include Sidekiq::Job
def perform(a, b)
a + b
end
end
class AltQueueWorker
include Sidekiq::Job
sidekiq_options queue: :alt
def perform(a, b)
a + b
end
end
describe "Sidekiq::Testing.fake" do
class PerformError < RuntimeError; end
class DirectWorker
include Sidekiq::Worker
def perform(a, b)
a + b
end
end
class EnqueuedWorker
include Sidekiq::Worker
def perform(a, b)
a + b
end
end
class StoredWorker
include Sidekiq::Worker
def perform(error)
raise PerformError if error
end
end
before do
require "sidekiq/testing"
Sidekiq::Testing.fake!
@ -48,7 +99,8 @@ describe "Sidekiq::Testing.fake" do
assert_equal 2, DirectWorker.jobs.size
assert DirectWorker.perform_at(10, 1, 2)
assert_equal 3, DirectWorker.jobs.size
assert_in_delta 10.seconds.from_now.to_f, DirectWorker.jobs.last["at"], 0.1
soon = (Time.now.to_f + 10)
assert_in_delta soon, DirectWorker.jobs.last["at"], 0.1
end
it "stubs the enqueue call" do
@ -74,16 +126,6 @@ describe "Sidekiq::Testing.fake" do
assert_equal 0, StoredWorker.jobs.size
end
# In-describe fixture (legacy Sidekiq::Worker include): bumps its
# class-level counter only when the argument equals the running job's jid.
class SpecificJidWorker
include Sidekiq::Worker
sidekiq_class_attribute :count
self.count = 0
def perform(worker_jid)
# Ignore jobs enqueued with some other job's jid.
return unless worker_jid == jid
self.class.count += 1
end
end
it "execute only jobs with assigned JID" do
4.times do |i|
jid = SpecificJidWorker.perform_async(nil)
@ -136,33 +178,6 @@ describe "Sidekiq::Testing.fake" do
end
end
# In-describe counting fixtures (legacy Sidekiq::Worker include), mirroring
# the top-level FirstWorker/SecondWorker/ThirdWorker definitions.
class FirstWorker
include Sidekiq::Worker
sidekiq_class_attribute :count
self.count = 0
def perform
self.class.count += 1
end
end
# Second counting fixture, identical in shape to FirstWorker.
class SecondWorker
include Sidekiq::Worker
sidekiq_class_attribute :count
self.count = 0
def perform
self.class.count += 1
end
end
# Fan-out fixture: enqueues one FirstWorker and one SecondWorker job.
# NOTE(review): :count is declared but never used by this class.
class ThirdWorker
include Sidekiq::Worker
sidekiq_class_attribute :count
def perform
FirstWorker.perform_async
SecondWorker.perform_async
end
end
it "clears jobs across all workers" do
Sidekiq::Worker.jobs.clear
FirstWorker.count = 0
@ -271,21 +286,6 @@ describe "Sidekiq::Testing.fake" do
Sidekiq::Queues.clear_all
end
# In-describe queue fixtures (legacy Sidekiq::Worker include) used by the
# Sidekiq::Queues lookup specs.
class QueueWorker
include Sidekiq::Worker
def perform(a, b)
a + b
end
end
# Same job but routed to the non-default "alt" queue.
class AltQueueWorker
include Sidekiq::Worker
sidekiq_options queue: :alt
def perform(a, b)
a + b
end
end
it "finds enqueued jobs" do
assert_equal 0, Sidekiq::Queues["default"].size

View file

@ -2,26 +2,26 @@
require_relative "helper"
# Raised by InlineWorker when told to fail.
InlineError = Class.new(RuntimeError)
# Raised by InlineWorkerWithTimeParam when its argument is neither a
# String nor a Numeric.
ParameterIsNotString = Class.new(RuntimeError)
# Job fixture verifying that inline execution assigns a jid and that
# in-job errors propagate to the caller.
class InlineWorker
  include Sidekiq::Job

  def perform(succeed)
    raise ArgumentError, "no jid" if jid.nil?
    raise InlineError unless succeed
  end
end
# Job fixture asserting that time arguments arrive round-tripped through
# JSON as a String or a Numeric, never a Time object.
class InlineWorkerWithTimeParam
  include Sidekiq::Job

  def perform(time)
    case time
    when String, Numeric
      # acceptable serialized forms — nothing to do
    else
      raise ParameterIsNotString
    end
  end
end
describe "Sidekiq::Testing.inline" do
# In-describe duplicates of the inline-test fixtures, using the legacy
# Sidekiq::Worker include rather than Sidekiq::Job.
class InlineError < RuntimeError; end
class ParameterIsNotString < RuntimeError; end
# Verifies jid assignment and error propagation under inline execution.
class InlineWorker
include Sidekiq::Worker
def perform(pass)
raise ArgumentError, "no jid" unless jid
raise InlineError unless pass
end
end
# Verifies that time arguments arrive as String or Numeric after JSON
# round-tripping.
class InlineWorkerWithTimeParam
include Sidekiq::Worker
def perform(time)
raise ParameterIsNotString unless time.is_a?(String) || time.is_a?(Numeric)
end
end
before do
require "sidekiq/testing/inline"
Sidekiq::Testing.inline!

View file

@ -52,7 +52,7 @@ end
describe Sidekiq::TransactionAwareClient do
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
@app = Dummy::Application.new
Post.delete_all
end

View file

@ -4,6 +4,14 @@ require_relative "helper"
require "sidekiq/web"
require "rack/test"
# Simple additive job fixture for the web UI specs.
class WebWorker
  # Consistency: every other fixture hoisted to the top level in this
  # suite includes Sidekiq::Job; Sidekiq::Worker is the legacy alias.
  include Sidekiq::Job

  # Returns the sum of its two arguments.
  def perform(a, b)
    a + b
  end
end
describe Sidekiq::Web do
include Rack::Test::Methods
@ -16,18 +24,10 @@ describe Sidekiq::Web do
end
before do
Sidekiq.redis { |c| c.flushdb }
@config = reset!
app.middlewares.clear
end
# In-describe duplicate of the WebWorker fixture (legacy Sidekiq::Worker
# include); a simple additive job used by the web UI specs.
class WebWorker
include Sidekiq::Worker
def perform(a, b)
a + b
end
end
it "can show text with any locales" do
rackenv = {"HTTP_ACCEPT_LANGUAGE" => "ru,en"}
get "/", {}, rackenv
@ -60,7 +60,7 @@ describe Sidekiq::Web do
describe "busy" do
it "can display workers" do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.incr("busy")
conn.sadd("processes", "foo:1234")
conn.hmset("foo:1234", "info", Sidekiq.dump_json("hostname" => "foo", "started_at" => Time.now.to_f, "queues" => [], "concurrency" => 10), "at", Time.now.to_f, "busy", 4)
@ -81,20 +81,20 @@ describe Sidekiq::Web do
identity = "identity"
signals_key = "#{identity}-signals"
assert_nil Sidekiq.redis { |c| c.lpop signals_key }
assert_nil @config.redis { |c| c.lpop signals_key }
post "/busy", "quiet" => "1", "identity" => identity
assert_equal 302, last_response.status
assert_equal "TSTP", Sidekiq.redis { |c| c.lpop signals_key }
assert_equal "TSTP", @config.redis { |c| c.lpop signals_key }
end
it "can stop a process" do
identity = "identity"
signals_key = "#{identity}-signals"
assert_nil Sidekiq.redis { |c| c.lpop signals_key }
assert_nil @config.redis { |c| c.lpop signals_key }
post "/busy", "stop" => "1", "identity" => identity
assert_equal 302, last_response.status
assert_equal "TERM", Sidekiq.redis { |c| c.lpop signals_key }
assert_equal "TERM", @config.redis { |c| c.lpop signals_key }
end
end
@ -134,7 +134,7 @@ describe Sidekiq::Web do
end
it "can sort on enqueued_at column" do
Sidekiq.redis do |conn|
@config.redis do |conn|
(1000..1005).each do |i|
conn.lpush("queue:default", Sidekiq.dump_json(args: [i], enqueued_at: Time.now.to_i + i))
end
@ -150,7 +150,7 @@ describe Sidekiq::Web do
end
it "can delete a queue" do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.rpush("queue:foo", "{\"args\":[],\"enqueued_at\":1567894960}")
conn.sadd("queues", "foo")
end
@ -161,7 +161,7 @@ describe Sidekiq::Web do
post "/queues/foo"
assert_equal 302, last_response.status
Sidekiq.redis do |conn|
@config.redis do |conn|
refute conn.smembers("queues").include?("foo")
refute conn.exists?("queue:foo")
end
@ -240,7 +240,7 @@ describe Sidekiq::Web do
end
it "can delete a job" do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.rpush("queue:foo", '{"args":[],"enqueued_at":1567894960}')
conn.rpush("queue:foo", '{"foo":"bar","args":[],"enqueued_at":1567894960}')
conn.rpush("queue:foo", '{"foo2":"bar2","args":[],"enqueued_at":1567894960}')
@ -252,7 +252,7 @@ describe Sidekiq::Web do
post "/queues/foo/delete", key_val: "{\"foo\":\"bar\"}"
assert_equal 302, last_response.status
Sidekiq.redis do |conn|
@config.redis do |conn|
refute conn.lrange("queue:foo", 0, -1).include?("{\"foo\":\"bar\"}")
end
end
@ -386,7 +386,7 @@ describe Sidekiq::Web do
it "can delete scheduled" do
params = add_scheduled
Sidekiq.redis do |conn|
@config.redis do |conn|
assert_equal 1, conn.zcard("schedule")
post "/scheduled", "key" => [job_params(*params)], "delete" => "Delete"
assert_equal 302, last_response.status
@ -398,7 +398,7 @@ describe Sidekiq::Web do
it "can move scheduled to default queue" do
q = Sidekiq::Queue.new
params = add_scheduled
Sidekiq.redis do |conn|
@config.redis do |conn|
assert_equal 1, conn.zcard("schedule")
assert_equal 0, q.size
post "/scheduled", "key" => [job_params(*params)], "add_to_queue" => "AddToQueue"
@ -440,7 +440,7 @@ describe Sidekiq::Web do
assert !last_response.body.include?("args\"><a>hello</a><")
# on /workers page
Sidekiq.redis do |conn|
@config.redis do |conn|
pro = "foo:1234"
conn.sadd("processes", pro)
conn.hmset(pro, "info", Sidekiq.dump_json("started_at" => Time.now.to_f, "labels" => ["frumduz"], "queues" => [], "concurrency" => 10), "busy", 1, "beat", Time.now.to_f)
@ -514,7 +514,7 @@ describe Sidekiq::Web do
describe "stats" do
before do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.set("stat:processed", 5)
conn.set("stat:failed", 2)
conn.sadd("queues", "default")
@ -550,6 +550,7 @@ describe Sidekiq::Web do
describe "bad JSON" do
it "displays without error" do
s = Sidekiq::DeadSet.new
assert_equal 0, s.size
(_, score) = kill_bad
assert_equal 1, s.size
@ -567,7 +568,7 @@ describe Sidekiq::Web do
describe "stats/queues" do
before do
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.set("stat:processed", 5)
conn.set("stat:failed", 2)
conn.sadd("queues", "default")
@ -640,7 +641,7 @@ describe Sidekiq::Web do
"args" => ["bob", 1, Time.now.to_f],
"jid" => SecureRandom.hex(12),
"tags" => ["tag1", "tag2"]}
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.zadd("schedule", score, Sidekiq.dump_json(msg))
end
[msg, score]
@ -656,7 +657,7 @@ describe Sidekiq::Web do
"failed_at" => Time.now.to_f,
"jid" => SecureRandom.hex(12)}
score = Time.now.to_f
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.zadd("retry", score, Sidekiq.dump_json(msg))
end
@ -673,7 +674,7 @@ describe Sidekiq::Web do
"failed_at" => Time.now.utc,
"jid" => jid}
score = Time.now.to_f
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.zadd("dead", score, Sidekiq.dump_json(msg))
end
[msg, score]
@ -682,7 +683,7 @@ describe Sidekiq::Web do
def kill_bad
job = "{ something bad }"
score = Time.now.to_f
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.zadd("dead", score, job)
end
[job, score]
@ -698,7 +699,7 @@ describe Sidekiq::Web do
"failed_at" => Time.now.to_f,
"jid" => SecureRandom.hex(12)}
score = Time.now.to_f
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.zadd("retry", score, Sidekiq.dump_json(msg))
end
@ -712,7 +713,7 @@ describe Sidekiq::Web do
def add_worker
key = "#{hostname}:#{$$}"
msg = "{\"queue\":\"default\",\"payload\":{\"retry\":true,\"queue\":\"default\",\"timeout\":20,\"backtrace\":5,\"class\":\"HardWorker\",\"args\":[\"bob\",10,5],\"jid\":\"2b5ad2b016f5e063a1c62872\"},\"run_at\":1361208995}"
Sidekiq.redis do |conn|
@config.redis do |conn|
conn.multi do |transaction|
transaction.sadd("processes", key)
transaction.hmset(key, "info", Sidekiq.dump_json("hostname" => "foo", "started_at" => Time.now.to_f, "queues" => []), "at", Time.now.to_f, "busy", 4)

View file

@ -3,40 +3,40 @@
require_relative "helper"
require "sidekiq/web"
# Bare host object that mixes in Sidekiq::WebHelpers so the helper
# methods can be exercised outside a real web request cycle.
class Helpers
  include Sidekiq::WebHelpers

  # +params+ are merged over the default (empty) rack-env-like hash.
  def initialize(params = {})
    @thehash = default.merge(params)
  end

  # The helpers expect a Sinatra-style surface where both request and
  # settings are reachable; this object stands in for both.
  def request
    self
  end
  alias_method :settings, :request

  # Locale directories scanned by the web helpers.
  def locales
    ["web/locales"]
  end

  # Acts as the rack env consulted for e.g. HTTP_ACCEPT_LANGUAGE.
  def env
    @thehash
  end

  # Baseline env contents; empty by default.
  def default
    {}
  end
end
describe "Web helpers" do
before do
Sidekiq.redis { |c| c.flushdb }
end
# In-describe duplicate of the Helpers host object: mixes in
# Sidekiq::WebHelpers and fakes the Sinatra-ish surface (request,
# settings, env) the helpers rely on.
class Helpers
include Sidekiq::WebHelpers
def initialize(params = {})
@thehash = default.merge(params)
end
def request
self
end
def settings
self
end
def locales
["web/locales"]
end
# Acts as the rack env consulted for e.g. HTTP_ACCEPT_LANGUAGE.
def env
@thehash
end
def default
{
}
end
end
it "tests locale determination" do
obj = Helpers.new
assert_equal "en", obj.locale