Merge pull request #25344 from matthewd/debug-locks
ActionDispatch::DebugLocks
commit ad95b6fc72
4 changed files with 172 additions and 5 deletions
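
For reference, the new middleware documents its own setup (see the debug_locks.rb file below). A minimal sketch of enabling it; the application class name here is a placeholder:

    # config/application.rb
    module MyApp                    # placeholder application name
      class Application < Rails::Application
        # Insert near the top of the middleware stack, as recommended by the
        # DebugLocks documentation below.
        config.middleware.insert_before Rack::Sendfile, ActionDispatch::DebugLocks
      end
    end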
@@ -50,6 +50,7 @@ module ActionDispatch
     autoload :Callbacks
     autoload :Cookies
     autoload :DebugExceptions
+    autoload :DebugLocks
     autoload :ExceptionWrapper
     autoload :Executor
     autoload :Flash
actionpack/lib/action_dispatch/middleware/debug_locks.rb  (new file, 122 additions)
@@ -0,0 +1,122 @@
+module ActionDispatch
+  # This middleware can be used to diagnose deadlocks in the autoload interlock.
+  #
+  # To use it, insert it near the top of the middleware stack, using
+  # <tt>config/application.rb</tt>:
+  #
+  #   config.middleware.insert_before Rack::Sendfile, ActionDispatch::DebugLocks
+  #
+  # After restarting the application and re-triggering the deadlock condition,
+  # <tt>/rails/locks</tt> will show a summary of all threads currently known to
+  # the interlock, which lock level they are holding or awaiting, and their
+  # current backtrace.
+  #
+  # Generally a deadlock will be caused by the interlock conflicting with some
+  # other external lock or blocking I/O call. These cannot be automatically
+  # identified, but should be visible in the displayed backtraces.
+  #
+  # NOTE: The formatting and content of this middleware's output is intended for
+  # human consumption, and should be expected to change between releases.
+  #
+  # This middleware exposes operational details of the server, with no access
+  # control. It should only be enabled when in use, and removed thereafter.
+  class DebugLocks
+    def initialize(app, path = '/rails/locks')
+      @app = app
+      @path = path
+    end
+
+    def call(env)
+      req = ActionDispatch::Request.new env
+
+      if req.get?
+        path = req.path_info.chomp('/'.freeze)
+        if path == @path
+          return render_details(req)
+        end
+      end
+
+      @app.call(env)
+    end
+
+    private
+      def render_details(req)
+        threads = ActiveSupport::Dependencies.interlock.raw_state do |threads|
+          # The Interlock itself comes to a complete halt as long as this block
+          # is executing. That gives us a more consistent picture of everything,
+          # but creates a pretty strong Observer Effect.
+          #
+          # Most directly, that means we need to do as little as possible in
+          # this block. More widely, it means this middleware should remain a
+          # strictly diagnostic tool (to be used when something has gone wrong),
+          # and not for any sort of general monitoring.
+
+          threads.each.with_index do |(thread, info), idx|
+            info[:index] = idx
+            info[:backtrace] = thread.backtrace
+          end
+
+          threads
+        end
+
+        str = threads.map do |thread, info|
+          if info[:exclusive]
+            lock_state = 'Exclusive'
+          elsif info[:sharing] > 0
+            lock_state = 'Sharing'
+            lock_state << " x#{info[:sharing]}" if info[:sharing] > 1
+          else
+            lock_state = 'No lock'
+          end
+
+          if info[:waiting]
+            lock_state << ' (yielded share)'
+          end
+
+          msg = "Thread #{info[:index]} [0x#{thread.__id__.to_s(16)} #{thread.status || 'dead'}] #{lock_state}\n"
+
+          if info[:sleeper]
+            msg << "  Waiting in #{info[:sleeper]}"
+            msg << " to #{info[:purpose].to_s.inspect}" unless info[:purpose].nil?
+            msg << "\n"
+
+            if info[:compatible]
+              compat = info[:compatible].map { |c| c == false ? "share" : c.to_s.inspect }
+              msg << "    may be pre-empted for: #{compat.join(', ')}\n"
+            end
+
+            blockers = threads.values.select { |binfo| blocked_by?(info, binfo, threads.values) }
+            msg << "    blocked by: #{blockers.map {|i| i[:index] }.join(', ')}\n" if blockers.any?
+          end
+
+          blockees = threads.values.select { |binfo| blocked_by?(binfo, info, threads.values) }
+          msg << "    blocking: #{blockees.map {|i| i[:index] }.join(', ')}\n" if blockees.any?
+
+          msg << "\n#{info[:backtrace].join("\n")}\n" if info[:backtrace]
+        end.join("\n\n---\n\n\n")
+
+        [200, { "Content-Type" => "text/plain", "Content-Length" => str.size }, [str]]
+      end
+
+      def blocked_by?(victim, blocker, all_threads)
+        return false if victim.equal?(blocker)
+
+        case victim[:sleeper]
+        when :start_sharing
+          blocker[:exclusive] ||
+            (!victim[:waiting] && blocker[:compatible] && !blocker[:compatible].include?(false))
+        when :start_exclusive
+          blocker[:sharing] > 0 ||
+            blocker[:exclusive] ||
+            (blocker[:compatible] && !blocker[:compatible].include?(victim[:purpose]))
+        when :yield_shares
+          blocker[:exclusive]
+        when :stop_exclusive
+          blocker[:exclusive] ||
+            victim[:compatible] &&
+            victim[:compatible].include?(blocker[:purpose]) &&
+            all_threads.all? { |other| !other[:compatible] || blocker.equal?(other) || other[:compatible].include?(blocker[:purpose]) }
+        end
+      end
+  end
+end
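Once mounted, the report is served as plain text from the configured path. A hedged sketch of fetching it from a script; the localhost:3000 address is an assumption about a local development server:

    require 'net/http'
    require 'uri'

    # Assumes the application is running locally on port 3000 and DebugLocks is
    # mounted at its default '/rails/locks' path.
    puts Net::HTTP.get(URI('http://localhost:3000/rails/locks'))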
@@ -14,6 +14,38 @@ module ActiveSupport
       # to upgrade share locks to exclusive.

+      def raw_state # :nodoc:
+        synchronize do
+          threads = @sleeping.keys | @sharing.keys | @waiting.keys
+          threads |= [@exclusive_thread] if @exclusive_thread
+
+          data = {}
+
+          threads.each do |thread|
+            purpose, compatible = @waiting[thread]
+
+            data[thread] = {
+              thread: thread,
+              sharing: @sharing[thread],
+              exclusive: @exclusive_thread == thread,
+              purpose: purpose,
+              compatible: compatible,
+              waiting: !!@waiting[thread],
+              sleeper: @sleeping[thread],
+            }
+          end
+
+          # NB: Yields while holding our *internal* synchronize lock,
+          # which is supposed to be used only for a few instructions at
+          # a time. This allows the caller to inspect additional state
+          # without things changing out from underneath, but would have
+          # disastrous effects upon normal operation. Fortunately, this
+          # method is only intended to be called when things have
+          # already gone wrong.
+          yield data
+        end
+      end
+
       def initialize
         super()
@@ -21,6 +53,7 @@ module ActiveSupport

         @sharing = Hash.new(0)
         @waiting = {}
+        @sleeping = {}
         @exclusive_thread = nil
         @exclusive_depth = 0
       end
@@ -46,7 +79,7 @@ module ActiveSupport
               return false if no_wait

               yield_shares(purpose: purpose, compatible: compatible, block_share: true) do
-                @cv.wait_while { busy_for_exclusive?(purpose) }
+                wait_for(:start_exclusive) { busy_for_exclusive?(purpose) }
               end
             end
             @exclusive_thread = Thread.current
@@ -69,7 +102,7 @@ module ActiveSupport

             if eligible_waiters?(compatible)
               yield_shares(compatible: compatible, block_share: true) do
-                @cv.wait_while { @exclusive_thread || eligible_waiters?(compatible) }
+                wait_for(:stop_exclusive) { @exclusive_thread || eligible_waiters?(compatible) }
               end
             end
             @cv.broadcast
@@ -84,11 +117,11 @@ module ActiveSupport
           elsif @waiting[Thread.current]
             # We're nested inside a +yield_shares+ call: we'll resume as
             # soon as there isn't an exclusive lock in our way
-            @cv.wait_while { @exclusive_thread }
+            wait_for(:start_sharing) { @exclusive_thread }
           else
             # This is an initial / outermost share call: any outstanding
             # requests for an exclusive lock get to go first
-            @cv.wait_while { busy_for_sharing?(false) }
+            wait_for(:start_sharing) { busy_for_sharing?(false) }
           end
           @sharing[Thread.current] += 1
         end
@@ -153,7 +186,7 @@ module ActiveSupport
           yield
         ensure
           synchronize do
-            @cv.wait_while { @exclusive_thread && @exclusive_thread != Thread.current }
+            wait_for(:yield_shares) { @exclusive_thread && @exclusive_thread != Thread.current }

             if previous_wait
               @waiting[Thread.current] = previous_wait
@@ -181,6 +214,13 @@ module ActiveSupport
         def eligible_waiters?(compatible)
           @waiting.any? { |t, (p, _)| compatible.include?(p) && @waiting.all? { |t2, (_, c2)| t == t2 || c2.include?(p) } }
         end
+
+        def wait_for(method)
+          @sleeping[Thread.current] = method
+          @cv.wait_while { yield }
+        ensure
+          @sleeping.delete Thread.current
+        end
     end
   end
 end
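Apart from the new wait_for helper, the share-lock changes above are mechanical: each sleep site that previously called @cv.wait_while directly now goes through wait_for, which records why the thread is about to sleep in @sleeping and clears the entry when it wakes. A standalone sketch of that bookkeeping pattern; TinySleepTracker and all of its names are illustrative, not Rails code:

    require 'monitor'

    class TinySleepTracker
      include MonitorMixin

      def initialize
        super()
        @cv = new_cond
        @sleeping = {} # Thread => Symbol reason, mirroring the @sleeping hash above
        @ready = false
      end

      # Like the wait_for in the patch, this must run while the monitor is held.
      def wait_for(reason)
        @sleeping[Thread.current] = reason
        @cv.wait_while { yield }
      ensure
        @sleeping.delete Thread.current
      end

      def wait_until_ready
        synchronize { wait_for(:ready) { !@ready } }
      end

      def ready!
        synchronize { @ready = true; @cv.broadcast }
      end

      def sleepers
        synchronize { @sleeping.dup }
      end
    end

    tracker = TinySleepTracker.new
    t = Thread.new { tracker.wait_until_ready }
    sleep 0.1
    p tracker.sleepers # => likely { #<Thread ...> => :ready }
    tracker.ready!
    t.join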
@@ -46,6 +46,10 @@ module ActiveSupport #:nodoc:
           yield
         end
       end
+
+      def raw_state(&block) # :nodoc:
+        @lock.raw_state(&block)
+      end
     end
   end
 end
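
With the interlock delegating raw_state to its underlying lock, the same state that DebugLocks renders can also be inspected directly, for example from a console attached to a wedged process. A hedged sketch; the keys per thread follow the data hash built in raw_state above:

    state = ActiveSupport::Dependencies.interlock.raw_state do |threads|
      # The whole interlock is halted while this block runs, so keep it minimal.
      threads
    end

    state.each do |thread, info|
      puts "#{thread.inspect}: sharing=#{info[:sharing]} exclusive=#{info[:exclusive]} " \
           "purpose=#{info[:purpose].inspect} waiting=#{info[:waiting]} sleeper=#{info[:sleeper].inspect}"
    end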