mirror of
https://github.com/mperham/sidekiq.git
synced 2022-11-09 13:52:34 -05:00
Sidekiq will now shut down successfully if Redis is down.
This commit is contained in:
parent
b04f7ac033
commit
f4c54ff5e2
3 changed files with 24 additions and 11 deletions
|
@@ -1,8 +1,9 @@
|
|||
2.11.3
|
||||
-----------
|
||||
|
||||
- Better handling for Redis downtime when fetching jobs, don't print
|
||||
exceptions every second and print success message when Redis is back.
|
||||
- Better handling for Redis downtime when fetching jobs and shutting
|
||||
down, don't print exceptions every second and print success message
|
||||
when Redis is back.
|
||||
- Fix unclean shutdown leading to duplicate jobs [#897]
|
||||
- Add Korean locale [#890]
|
||||
|
||||
|
|
|
@@ -41,17 +41,25 @@ module Sidekiq
|
|||
after(0) { fetch }
|
||||
end
|
||||
rescue => ex
|
||||
if !@down
|
||||
logger.error("Error fetching message: #{ex}")
|
||||
ex.backtrace.each do |bt|
|
||||
logger.error(bt)
|
||||
end
|
||||
end
|
||||
@down ||= Time.now
|
||||
sleep(TIMEOUT)
|
||||
after(0) { fetch }
|
||||
handle_exception(ex)
|
||||
end
|
||||
|
||||
end
|
||||
end
|
||||
|
||||
# Redis (or the fetch path) blew up: report the failure once per outage,
# mark when the outage started, back off briefly, then schedule another fetch.
def handle_exception(ex)
  unless @down
    # Only log on the first failure so we don't spam the log every second
    # while Redis stays unreachable.
    logger.error("Error fetching message: #{ex}")
    ex.backtrace.each { |frame| logger.error(frame) }
  end
  @down ||= Time.now
  sleep(TIMEOUT)
  after(0) { fetch }
rescue Task::TerminatedError
  # If redis is down when we try to shut down, all the fetch backlog
  # raises these errors. Haven't been able to figure out what I'm doing wrong.
end
|
||||
|
||||
# Ugh. Say hello to a bloody hack.
|
||||
|
@@ -96,6 +104,8 @@ module Sidekiq
|
|||
end
|
||||
end
|
||||
Sidekiq.logger.info("Pushed #{inprogress.size} messages back to Redis")
|
||||
rescue => ex
|
||||
Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
|
||||
end
|
||||
|
||||
UnitOfWork = Struct.new(:queue, :message) do
|
||||
|
|
|
@@ -122,6 +122,8 @@ module Sidekiq
|
|||
end
|
||||
conn.srem('workers', workers_to_remove) if !workers_to_remove.empty?
|
||||
end
|
||||
rescue => ex
|
||||
Sidekiq.logger.warn("Unable to clear worker set while shutting down: #{ex.message}")
|
||||
end
|
||||
|
||||
def hard_shutdown_in(delay)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue