# frozen_string_literal: true

require 'stringio'

require 'puma/thread_pool'
require 'puma/const'
require 'puma/events'
require 'puma/null_io'
require 'puma/reactor'
require 'puma/client'
require 'puma/binder'
require 'puma/util'
require 'puma/io_buffer'
require 'puma/request'

require 'socket'
require 'forwardable'

module Puma
# The HTTP Server itself. Serves out a single Rack app.
#
# This class is used by the `Puma::Single` and `Puma::Cluster` classes
# to generate one or more `Puma::Server` instances capable of handling requests.
# Each Puma process will contain one `Puma::Server` instance.
#
# The `Puma::Server` instance pulls requests off the socket, adds them to a
# `Puma::Reactor`, and the reactor eventually hands them to a `Puma::ThreadPool`.
#
# Each `Puma::Server` will have one reactor and one thread pool.
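#
# A rough usage sketch (host, port, and thread counts are illustrative,
# not taken from this file):
#
#   app    = ->(env) { [200, {}, ["OK"]] }
#   server = Puma::Server.new(app, Puma::Events.stdio, {min_threads: 0, max_threads: 4})
#   server.add_tcp_listener '127.0.0.1', 9292
#   server.run.join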
class Server
include Puma::Const
include Request
extend Forwardable

attr_reader :thread
attr_reader :events
attr_reader :min_threads, :max_threads # for #stats
attr_reader :requests_count # @version 5.0.0

# @todo the following may be deprecated in the future
attr_reader :auto_trim_time, :early_hints, :first_data_timeout,
  :leak_stack_on_error,
  :persistent_timeout, :reaping_time

# @deprecated v6.0.0
attr_writer :auto_trim_time, :early_hints, :first_data_timeout,
  :leak_stack_on_error, :min_threads, :max_threads,
  :persistent_timeout, :reaping_time

attr_accessor :app
attr_accessor :binder

def_delegators :@binder, :add_tcp_listener, :add_ssl_listener,
  :add_unix_listener, :connected_ports

ThreadLocalKey = :puma_server

# Create a server for the rack app +app+.
#
# +events+ is an object that will be called when certain error events occur,
# so that they can be handled. See Puma::Events for the list of methods to implement.
#
# Server#run returns a thread that you can join on to wait for the server
# to do its work.
#
# @note Several instance variables exist so they are available for testing,
# and have default values set via +fetch+. Normally the values are set via
# `::Puma::Configuration.puma_default_options`.
#
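# A hypothetical direct construction (the options hash is normally built by
# Puma::Configuration; the keys shown are ones this initializer reads, the
# values are illustrative):
#
#   Puma::Server.new(app, Puma::Events.stdio,
#     {min_threads: 0, max_threads: 16, queue_requests: true})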
def initialize(app, events = Events.stdio, options = {})
@app = app
@events = events
@check, @notify = nil
@status = :stop

@auto_trim_time = 30
@reaping_time = 1

@thread = nil
@thread_pool = nil

@options = options

@early_hints = options.fetch :early_hints, nil
@first_data_timeout = options.fetch :first_data_timeout, FIRST_DATA_TIMEOUT
@min_threads = options.fetch :min_threads, 0
@max_threads = options.fetch :max_threads, (Puma.mri? ? 5 : 16)
@persistent_timeout = options.fetch :persistent_timeout, PERSISTENT_TIMEOUT
@queue_requests = options.fetch :queue_requests, true
@max_fast_inline = options.fetch :max_fast_inline, MAX_FAST_INLINE

temp = !!(@options[:environment] =~ /\A(development|test)\z/)
@leak_stack_on_error = @options[:environment] ? temp : true

@binder = Binder.new(events)

ENV['RACK_ENV'] ||= "development"

@mode = :http

@precheck_closing = true

@requests_count = 0
end

def inherit_binder(bind)
@binder = bind
end

class << self
# @!attribute [r] current
def current
Thread.current[ThreadLocalKey]
end

# :nodoc:
# @version 5.0.0
def tcp_cork_supported?
RbConfig::CONFIG['host_os'] =~ /linux/ &&
  Socket.const_defined?(:IPPROTO_TCP) &&
  Socket.const_defined?(:TCP_CORK)
end
# :nodoc:
# @version 5.0.0
def closed_socket_supported?
RbConfig::CONFIG['host_os'] =~ /linux/ &&
  Socket.const_defined?(:IPPROTO_TCP) &&
  Socket.const_defined?(:TCP_INFO)
end
private :tcp_cork_supported?
private :closed_socket_supported?
end

# On Linux, use TCP_CORK to better control how the TCP stack
# packetizes our stream. This improves both latency and throughput.
#
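# Roughly, the cork_socket / uncork_socket pair defined below is used like
# this (an illustrative sketch, not a call sequence taken from this file):
#
#   cork_socket io     # hold small frames while the response is written
#   # ... write status, headers, body ...
#   uncork_socket io   # flush the assembled packets to the client
#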
if tcp_cork_supported?
UNPACK_TCP_STATE_FROM_TCP_INFO = "C".freeze

# 6 == Socket::IPPROTO_TCP
# 3 == TCP_CORK
# 1/0 == turn on/off
def cork_socket(socket)
begin
socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 1) if socket.kind_of? TCPSocket
rescue IOError, SystemCallError
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
end
end

def uncork_socket(socket)
begin
socket.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_CORK, 0) if socket.kind_of? TCPSocket
rescue IOError, SystemCallError
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
end
end
else
def cork_socket(socket)
end

def uncork_socket(socket)
end
end

if closed_socket_supported?
def closed_socket?(socket)
return false unless socket.kind_of? TCPSocket
return false unless @precheck_closing

begin
tcp_info = socket.getsockopt(Socket::IPPROTO_TCP, Socket::TCP_INFO)
rescue IOError, SystemCallError
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
@precheck_closing = false
false
else
state = tcp_info.unpack(UNPACK_TCP_STATE_FROM_TCP_INFO)[0]
# TIME_WAIT: 6, CLOSE: 7, CLOSE_WAIT: 8, LAST_ACK: 9, CLOSING: 11
(state >= 6 && state <= 9) || state == 11
end
end
else
def closed_socket?(socket)
false
end
end

# @!attribute [r] backlog
def backlog
@thread_pool and @thread_pool.backlog
end

# @!attribute [r] running
def running
@thread_pool and @thread_pool.spawned
end

# This number represents the number of requests that
# the server is capable of taking right now.
#
# For example, if the number is 5, there are 5 threads
# sitting idle and ready to take a request. If one request
# comes in, the value would be 4 until it finishes processing.
# @!attribute [r] pool_capacity
def pool_capacity
@thread_pool and @thread_pool.pool_capacity
end

# Runs the server.
#
# If +background+ is true (the default) then a thread is spun
# up in the background to handle requests. Otherwise requests
# are handled synchronously.
#
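# For example (an illustrative sketch, assuming a configured +server+):
#
#   thread = server.run   # handle requests on a background thread
#   thread.join           # block until the server finishes
#   # or:
#   server.run(false)     # handle requests in the calling thread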
def run(background = true, thread_name: 'server')
BasicSocket.do_not_reverse_lookup = true

@events.fire :state, :booting

@status = :run

@thread_pool = ThreadPool.new(
@min_threads,
@max_threads,
::Puma::IOBuffer,
&method(:process_client)
)

@thread_pool.out_of_band_hook = @options[:out_of_band]
@thread_pool.clean_thread_locals = @options[:clean_thread_locals]

if @queue_requests
@reactor = Reactor.new(&method(:reactor_wakeup))
@reactor.run
end

if @reaping_time
@thread_pool.auto_reap!(@reaping_time)
end

if @auto_trim_time
@thread_pool.auto_trim!(@auto_trim_time)
end

@check, @notify = Puma::Util.pipe unless @notify

@events.fire :state, :running

if background
@thread = Thread.new do
Puma.set_thread_name thread_name
handle_servers
end
return @thread
else
handle_servers
end
end

# This method is called from the Reactor thread when a queued Client receives data,
# times out, or when the Reactor is shutting down.
#
# It is responsible for ensuring that a request has been completely received
# before it starts to be processed by the ThreadPool. This may be known as read buffering.
# If read buffering is not done here, and no other read buffering is performed
# (such as by a reverse proxy like nginx), then the application is subject to
# a slow client attack.
#
# For a graphical representation of how the request buffer works see [architecture.md](https://github.com/puma/puma/blob/master/docs/architecture.md#connection-pipeline).
#
# The method checks to see if it has the full header and body with
# the `Puma::Client#try_to_finish` method. If the full request has been sent,
# then the request is passed to the ThreadPool (`@thread_pool << client`)
# so that a "worker thread" can pick up the request and begin to execute application logic.
# The Client is then removed from the reactor (return `true`).
#
# If a client object times out, a 408 response is written, its connection is closed,
# and the object is removed from the reactor (return `true`).
#
# If the Reactor is shutting down, all Clients are either timed out or passed to the
# ThreadPool, depending on their current state (#can_close?).
#
# Otherwise, if the full request is not ready then the client will remain in the reactor
# (return `false`). When the client sends more data to the socket the `Puma::Client` object
# will wake up and again be checked to see if it's ready to be passed to the thread pool.
def reactor_wakeup(client)
shutdown = !@queue_requests
if client.try_to_finish || (shutdown && !client.can_close?)
@thread_pool << client
elsif shutdown || client.timeout == 0
client.timeout!
end
rescue StandardError => e
client_error(e, client)
client.close
true
end

def handle_servers
begin
check = @check
sockets = [check] + @binder.ios
pool = @thread_pool
queue_requests = @queue_requests

remote_addr_value = nil
remote_addr_header = nil

case @options[:remote_address]
when :value
remote_addr_value = @options[:remote_address_value]
when :header
remote_addr_header = @options[:remote_address_header]
end

while @status == :run
begin
ios = IO.select sockets
ios.first.each do |sock|
if sock == check
break if handle_check
else
pool.wait_until_not_full
pool.wait_for_less_busy_worker(
  @options[:wait_for_less_busy_worker].to_f)

io = begin
sock.accept_nonblock
rescue IO::WaitReadable
next
end

client = Client.new io, @binder.env(sock)
if remote_addr_value
client.peerip = remote_addr_value
elsif remote_addr_header
client.remote_addr_header = remote_addr_header
end
pool << client
end
end
rescue Object => e
@events.unknown_error e, nil, "Listen loop"
end
end

@events.fire :state, @status

if queue_requests
@queue_requests = false
@reactor.shutdown
end
graceful_shutdown if @status == :stop || @status == :restart
rescue Exception => e
@events.unknown_error e, nil, "Exception handling servers"
ensure
begin
@check.close unless @check.closed?
rescue Errno::EBADF, RuntimeError
# RuntimeError is Ruby 2.2 issue, can't modify frozen IOError
# Errno::EBADF is infrequently raised
end
@notify.close
@notify = nil
@check = nil
end

@events.fire :state, :done
end

# :nodoc:
def handle_check
cmd = @check.read(1)

case cmd
when STOP_COMMAND
@status = :stop
return true
when HALT_COMMAND
@status = :halt
return true
when RESTART_COMMAND
@status = :restart
return true
end

return false
end

# Given a connection on +client+, handle the incoming requests,
# or queue the connection in the Reactor if no request is available.
#
# This method is called from a ThreadPool worker thread.
#
# This method supports HTTP Keep-Alive, so it may, depending on whether the
# client indicates that it supports keep-alive, wait for another request
# before returning.
#
# Returns true if one or more requests were processed.
def process_client(client, buffer)
# Advertise this server into the thread
Thread.current[ThreadLocalKey] = self

clean_thread_locals = @options[:clean_thread_locals]
close_socket = true

requests = 0

begin
if @queue_requests &&
!client.eagerly_finish

client.set_timeout(@first_data_timeout)
if @reactor.add client
close_socket = false
return false
end
end

with_force_shutdown(client) do
client.finish(@first_data_timeout)
end

while true
@requests_count += 1
case handle_request(client, buffer)
when false
break
when :async
close_socket = false
break
when true
buffer.reset

ThreadPool.clean_thread_locals if clean_thread_locals

requests += 1

check_for_more_data = @status == :run

if requests >= @max_fast_inline
# Reset will only try to use the data it already has buffered and
# won't try to read more from the socket. The effect is that every
# client, independent of its request speed, gets treated like a slow
# one once every max_fast_inline requests.
check_for_more_data = false
end

next_request_ready = with_force_shutdown(client) do
client.reset(check_for_more_data)
end

unless next_request_ready
break unless @queue_requests
client.set_timeout @persistent_timeout
if @reactor.add client
close_socket = false
break
end
end
end
end
true
rescue StandardError => e
client_error(e, client)
# The ensure tries to close +client+ down
requests > 0
ensure
buffer.reset

begin
client.close if close_socket
rescue IOError, SystemCallError
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
# Already closed
rescue StandardError => e
@events.unknown_error e, nil, "Client"
end
end
end

# Triggers a client timeout if the thread-pool shuts down
# during execution of the provided block.
def with_force_shutdown(client, &block)
@thread_pool.with_force_shutdown(&block)
rescue ThreadPool::ForceShutdown
client.timeout!
end

# :nocov:

# Given the request +env+ from +client+ and the partial body +body+
# plus a potential Content-Length value +cl+, finish reading
# the body and return it.
#
# If the body is larger than MAX_BODY, a Tempfile object is used
# for the body, otherwise a StringIO is used.
# @deprecated 6.0.0
#
def read_body(env, client, body, cl)
content_length = cl.to_i

remain = content_length - body.bytesize

return StringIO.new(body) if remain <= 0

# Use a Tempfile if there is a lot of data left
if remain > MAX_BODY
stream = Tempfile.new(Const::PUMA_TMP_BASE)
stream.binmode
else
# The body[0,0] trick is to get an empty string in the same
# encoding as body.
stream = StringIO.new body[0,0]
end

stream.write body

# Read an odd sized chunk so we can read even sized ones
# after this
chunk = client.readpartial(remain % CHUNK_SIZE)

# No chunk means a closed socket
unless chunk
stream.close
return nil
end

remain -= stream.write(chunk)

# Read the rest of the chunks
while remain > 0
chunk = client.readpartial(CHUNK_SIZE)
unless chunk
stream.close
return nil
end

remain -= stream.write(chunk)
end

stream.rewind

return stream
end
# :nocov:

# Handle various error types thrown by Client I/O operations.
def client_error(e, client)
# Swallow, do not log
return if [ConnectionError, EOFError].include?(e.class)

lowlevel_error(e, client.env)
case e
when MiniSSL::SSLError
@events.ssl_error e, client.io
when HttpParserError
client.write_error(400)
@events.parse_error e, client
else
client.write_error(500)
@events.unknown_error e, nil, "Read"
end
end

# A fallback rack response if +@app+ raises an exception.
#
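# The handler may take 1, 2, or 3 arguments, matched against its arity below.
# A hypothetical config-level handler (the `lowlevel_error_handler` DSL lives
# in Puma's configuration, not in this file):
#
#   lowlevel_error_handler do |error, env, status|
#     [status, {}, ["Something went wrong\n"]]
#   end
#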
def lowlevel_error(e, env, status = 500)
if handler = @options[:lowlevel_error_handler]
if handler.arity == 1
return handler.call(e)
elsif handler.arity == 2
return handler.call(e, env)
else
return handler.call(e, env, status)
end
end

if @leak_stack_on_error
[status, {}, ["Puma caught this error: #{e.message} (#{e.class})\n#{e.backtrace.join("\n")}"]]
else
[status, {}, ["An unhandled lowlevel error occurred. The application logs may have details.\n"]]
end
end

# Wait for all outstanding requests to finish.
#
def graceful_shutdown
if @options[:shutdown_debug]
threads = Thread.list
total = threads.size
pid = Process.pid
$stdout.syswrite "#{pid}: === Begin thread backtrace dump ===\n"
threads.each_with_index do |t, i|
$stdout.syswrite "#{pid}: Thread #{i + 1}/#{total}: #{t.inspect}\n"
$stdout.syswrite "#{pid}: #{t.backtrace.join("\n#{pid}: ")}\n\n"
end
$stdout.syswrite "#{pid}: === End thread backtrace dump ===\n"
end

if @options[:drain_on_shutdown]
count = 0

while true
ios = IO.select @binder.ios, nil, nil, 0
break unless ios

ios.first.each do |sock|
begin
if io = sock.accept_nonblock
count += 1
client = Client.new io, @binder.env(sock)
@thread_pool << client
end
rescue SystemCallError
end
end
end

@events.debug "Drained #{count} additional connections."
end

if @status != :restart
@binder.close
end

if @thread_pool
if timeout = @options[:force_shutdown_after]
@thread_pool.shutdown timeout.to_f
else
@thread_pool.shutdown
end
end
end

def notify_safely(message)
@notify << message
rescue IOError, NoMethodError, Errno::EPIPE
# The server, in another thread, is shutting down
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
rescue RuntimeError => e
# Temporary workaround for https://bugs.ruby-lang.org/issues/13239
if e.message.include?('IOError')
Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
else
raise e
end
end
private :notify_safely

# Stops the acceptor thread and then causes the worker threads to finish
# off the request queue before finally exiting.
def stop(sync = false)
notify_safely(STOP_COMMAND)
@thread.join if @thread && sync
end

def halt(sync = false)
notify_safely(HALT_COMMAND)
@thread.join if @thread && sync
end

def begin_restart(sync = false)
notify_safely(RESTART_COMMAND)
@thread.join if @thread && sync
end
def shutting_down?
@status == :stop || @status == :restart
end

# List of methods invoked by #stats.
# @version 5.0.0
STAT_METHODS = [:backlog, :running, :pool_capacity, :max_threads, :requests_count].freeze

# Returns a hash of stats about the running server for reporting purposes.
# @version 5.0.0
# @!attribute [r] stats
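# For example (illustrative values):
#
#   server.stats
#   # => { backlog: 0, running: 5, pool_capacity: 4, max_threads: 5, requests_count: 42 }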
2020-04-14 00:06:30 -04:00
def stats
STAT_METHODS . map { | name | [ name , send ( name ) || 0 ] } . to_h
end
end
end