# frozen_string_literal: true
require " securerandom "
require " sidekiq/middleware/chain "
2012-02-08 20:04:02 -05:00
2012-01-21 19:42:21 -05:00
module Sidekiq
  class Client
    ##
    # Define client-side middleware:
    #
    #   client = Sidekiq::Client.new
    #   client.middleware do |chain|
    #     chain.use MyClientMiddleware
    #   end
    #   client.push('class' => 'SomeWorker', 'args' => [1,2,3])
    #
    # All client instances default to the globally-defined
    # Sidekiq.client_middleware but you can change as necessary.
    #
    def middleware(&block)
      @chain ||= Sidekiq.client_middleware
      if block
        @chain = @chain.dup
        yield @chain
      end
      @chain
    end

    attr_accessor :redis_pool

    # Sidekiq::Client normally uses the default Redis pool but you may
    # pass a custom ConnectionPool if you want to shard your
    # Sidekiq jobs across several Redis instances (for scalability
    # reasons, e.g.)
    #
    #   Sidekiq::Client.new(ConnectionPool.new { Redis.new })
    #
    # Generally this is only needed for very large Sidekiq installs processing
    # thousands of jobs per second. I don't recommend sharding unless you
    # cannot scale any other way (e.g. splitting your app into smaller apps).
    def initialize(redis_pool = nil)
      @redis_pool = redis_pool || Thread.current[:sidekiq_via_pool] || Sidekiq.redis_pool
    end

    ##
    # The main method used to push a job to Redis. Accepts a number of options:
    #
    #   queue - the named queue to use, default 'default'
    #   class - the worker class to call, required
    #   args - an array of simple arguments to the perform method, must be JSON-serializable
    #   at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
    #   retry - whether to retry this job if it fails, default true or an integer number of retries
    #   backtrace - whether to save any error backtrace, default false
    #
    # If class is set to the class name (a String), the job's options will be based on
    # Sidekiq's default worker options. Otherwise (a Class), they will be based on the
    # job class's sidekiq_options.
    #
    # Any options valid for a worker class's sidekiq_options are also available here.
    #
    # All options must be strings, not symbols. NB: because we are serializing to JSON, all
    # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite
    # a bit of space in Redis; a large volume of failing jobs can start Redis swapping if you
    # aren't careful.
    #
    # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
    #
    # Example:
    #   push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
    #
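    # For instance, to schedule a job roughly five minutes from now ('at' takes a
    # Numeric epoch timestamp, so any Float works; an illustrative sketch):
    #
    #   push('class' => MyWorker, 'args' => [1], 'at' => Time.now.to_f + 300)
    #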
    def push(item)
      normed = normalize_item(item)
      payload = process_single(item["class"], normed)

      if payload
        raw_push([payload])
        payload["jid"]
      end
    end

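    ##
    # Runs a job inline, in the calling process, instead of pushing it to Redis.
    # The item is normalized, run through client middleware, round-tripped
    # through JSON (mimicking a real push), and then executed through the
    # server middleware chain. A hedged usage sketch:
    #
    #   Sidekiq::Client.new.perform_inline('class' => MyWorker, 'args' => [1, 2, 3])
    #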
    def perform_inline(item)
      normed = normalize_item(item)
      payload = process_single(item["class"], normed)
      return unless payload # client middleware may halt the job

      # mimic the JSON round trip a job normally takes through Redis;
      # normalize_item has already assigned the 'jid'
      msg = Sidekiq.load_json(Sidekiq.dump_json(payload))
      klass = msg["class"].constantize
      job = klass.new
      job.jid = msg["jid"]
      Sidekiq.server_middleware.invoke(job, msg, msg["queue"]) do
        job.perform(*msg["args"])
      end
    end

    ##
    # Push a large number of jobs to Redis. This method cuts out the redis
    # network round trip latency. I wouldn't recommend pushing more than
    # 1000 per call but YMMV based on network quality, size of job args, etc.
    # A large number of jobs can cause a bit of Redis command processing latency.
    #
    # Takes the same arguments as #push except that args is expected to be
    # an Array of Arrays. All other keys are duplicated for each job. Each job
    # is run through the client middleware pipeline and each job gets its own Job ID
    # as normal.
    #
    # Returns an array of the pushed jobs' jids. The number of jobs pushed can be less
    # than the number given if the middleware stopped processing for one or more jobs.
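    #
    # Example, pushing three jobs at once ('at' may also be given, either one
    # Numeric timestamp for all jobs or an Array of Numerics, one per job):
    #
    #   Sidekiq::Client.push_bulk('class' => MyWorker, 'args' => [[1], [2], [3]])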
    def push_bulk(items)
      args = items["args"]
      raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
      return [] if args.empty? # no jobs to push

      at = items.delete("at")
      raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
      raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size

      normed = normalize_item(items)
      payloads = args.map.with_index { |job_args, index|
        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
        copy["at"] = (at.is_a?(Array) ? at[index] : at) if at

        result = process_single(items["class"], copy)
        result || nil
      }.compact

      raw_push(payloads) unless payloads.empty?
      payloads.collect { |payload| payload["jid"] }
    end

    # Allows sharding of jobs across any number of Redis instances. All jobs
    # defined within the block will use the given Redis connection pool.
    #
    #   pool = ConnectionPool.new { Redis.new }
    #   Sidekiq::Client.via(pool) do
    #     SomeWorker.perform_async(1,2,3)
    #     SomeOtherWorker.perform_async(1,2,3)
    #   end
    #
    # Generally this is only needed for very large Sidekiq installs processing
    # thousands of jobs per second. I do not recommend sharding unless
    # you cannot scale any other way (e.g. splitting your app into smaller apps).
    def self.via(pool)
      raise ArgumentError, "No pool given" if pool.nil?

      current_sidekiq_pool = Thread.current[:sidekiq_via_pool]
      Thread.current[:sidekiq_via_pool] = pool
      yield
    ensure
      Thread.current[:sidekiq_via_pool] = current_sidekiq_pool
    end

    class << self
      def push(item)
        new.push(item)
      end

      def push_bulk(items)
        new.push_bulk(items)
      end

      # Resque compatibility helpers. Note all helpers
      # should go through Worker#client_push.
      #
      # Example usage:
      #   Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
      #
      # Messages are enqueued to the 'default' queue.
      #
      def enqueue(klass, *args)
        klass.client_push("class" => klass, "args" => args)
      end

      # Example usage:
      #   Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
      #
      def enqueue_to(queue, klass, *args)
        klass.client_push("queue" => queue, "class" => klass, "args" => args)
      end

      # Example usage:
      #   Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
      #
      def enqueue_to_in(queue, interval, klass, *args)
        int = interval.to_f
        now = Time.now.to_f
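        # anything under ~1e9 (an epoch time before Sept 2001) is treated as a
        # relative interval in seconds; larger values are assumed to already be
        # absolute epoch timestamps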
        ts = (int < 1_000_000_000 ? now + int : int)

        item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
        item.delete("at") if ts <= now

        klass.client_push(item)
      end

      # Example usage:
      #   Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
      #
      def enqueue_in(interval, klass, *args)
        klass.perform_in(interval, *args)
      end
    end

    private

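    # Pushes one or more job payloads to Redis. All payloads are written inside
    # a single pipeline, so a bulk push costs roughly one network round trip
    # regardless of the number of jobs.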
    def raw_push(payloads)
      @redis_pool.with do |conn|
        conn.pipelined do
          atomic_push(conn, payloads)
        end
      end
      true
    end

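    # Writes the payloads to the appropriate Redis structures: jobs with an
    # 'at' timestamp go into the 'schedule' sorted set, scored by timestamp;
    # everything else is LPUSHed onto its queue's list, and the queue name is
    # registered in the 'queues' set.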
    def atomic_push(conn, payloads)
      if payloads.first.key?("at")
        conn.zadd("schedule", payloads.map { |hash|
          at = hash.delete("at").to_s
          [at, Sidekiq.dump_json(hash)]
        })
      else
        queue = payloads.first["queue"]
        now = Time.now.to_f
        to_push = payloads.map { |entry|
          entry["enqueued_at"] = now
          Sidekiq.dump_json(entry)
        }
        conn.sadd("queues", queue)
        conn.lpush("queue:#{queue}", to_push)
      end
    end

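    # Runs a single job payload through the client middleware chain. Returns
    # the (possibly modified) payload, or nil if a middleware halted the chain
    # and the job should not be pushed.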
    def process_single(worker_class, item)
      queue = item["queue"]

      middleware.invoke(worker_class, item, queue, @redis_pool) do
        item
      end
    end

    def validate(item)
      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
    end

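    # Validates the job payload, merges in the default sidekiq_options for the
    # job class (and for the wrapped class, in the ActiveJob case), and stamps
    # the job with a jid and created_at timestamp if not already present.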
    def normalize_item(item)
      validate(item)
      # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']

      # merge in the default sidekiq_options for the item's class and/or wrapped element
      # this allows ActiveJobs to control sidekiq_options too.
      defaults = normalized_hash(item["class"])
      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
      item = defaults.merge(item)

      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""

      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
      item["jid"] ||= SecureRandom.hex(12)
      item["created_at"] ||= Time.now.to_f

      item
    end

    def normalized_hash(item_class)
      if item_class.is_a?(Class)
        raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?("get_sidekiq_options")
        item_class.get_sidekiq_options
      else
        Sidekiq.default_worker_options
      end
    end
  end
end