# frozen_string_literal: true

class IO
  # We need to use this for a JRuby workaround on both 1.8 and 1.9.
  # So this either creates the constant (on 1.8), or harmlessly
  # reopens it (on 1.9).
  module WaitReadable
  end
end

require 'puma/detect'
require 'tempfile'
require 'forwardable'

if Puma::IS_JRUBY
  # We have to work around some OpenSSL buffer/io-readiness bugs,
  # so we pull it in regardless of whether the user is binding
  # to an SSL socket.
  require 'openssl'
end

module Puma

  class ConnectionError < RuntimeError; end

  class HttpParserError501 < IOError; end

  # An instance of this class represents a unique request from a client.
  # For example, this could be a web request from a browser or from CURL.
  #
  # An instance of `Puma::Client` can be used as if it were an IO object
  # by the reactor. The reactor is expected to call `#to_io`
  # on any non-IO objects it polls. For example, nio4r internally calls
  # `IO::try_convert` (which may call `#to_io`) when a new socket is
  # registered.
  #
  # Instances of this class are responsible for knowing if
  # the header and body are fully buffered via the `try_to_finish` method.
  # They can be used to "time out" a response via the `timeout_at` reader.
  #
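  # As a rough illustration (not Puma's actual reactor code), a polling loop
  # could treat a client like this, where `selector` and `handle_request`
  # are assumed:
  #
  #   selector.register(client.to_io, :r)            # poll the raw socket
  #   handle_request(client) if client.try_to_finish # header and body buffered
  #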
  class Client

    # this tests all values but the last, which must be chunked
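    # (for example, "gzip,chunked" passes, while "chunked,gzip", a repeated
    # "chunked", or a bare "gzip" are rejected in #setup_body)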
    ALLOWED_TRANSFER_ENCODING = %w[compress deflate gzip].freeze

    # chunked body validation
    CHUNK_SIZE_INVALID = /[^\h]/.freeze
    CHUNK_VALID_ENDING = "\r\n".freeze

    # Content-Length header value validation
    CONTENT_LENGTH_VALUE_INVALID = /[^\d]/.freeze

    TE_ERR_MSG = 'Invalid Transfer-Encoding'

    # The object used for a request with no body. All requests with
    # no body share this one object since it has no state.
    EmptyBody = NullIO.new

    include Puma::Const
    extend Forwardable

    def initialize(io, env=nil)
      @io = io
      @to_io = io.to_io
      @proto_env = env
      if !env
        @env = nil
      else
        @env = env.dup
      end

      @parser = HttpParser.new
      @parsed_bytes = 0
      @read_header = true
      @read_proxy = false
      @ready = false

      @body = nil
      @body_read_start = nil
      @buffer = nil
      @tempfile = nil

      @timeout_at = nil

      @requests_served = 0
      @hijacked = false

      @peerip = nil
      @peer_family = nil
      @listener = nil
      @remote_addr_header = nil
      @expect_proxy_proto = false

      @body_remain = 0

      @in_last_chunk = false
    end

    attr_reader :env, :to_io, :body, :io, :timeout_at, :ready, :hijacked,
                :tempfile

    attr_writer :peerip

    attr_accessor :remote_addr_header, :listener

    def_delegators :@io, :closed?

    # Test to see if io meets a bare minimum of functioning; @to_io needs to
    # be used for MiniSSL::Socket.
    def io_ok?
      @to_io.is_a?(::BasicSocket) && !closed?
    end

    # @!attribute [r] inspect
    def inspect
      "#<Puma::Client:0x#{object_id.to_s(16)} @ready=#{@ready.inspect}>"
    end

    # For the hijack protocol (allows us to just put the Client object
    # into the env)
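    #
    # A rough sketch of the full-hijack flow this enables from a Rack app
    # (the variable names are illustrative):
    #
    #   env['rack.hijack'].call        # invokes Client#call below
    #   socket = env['rack.hijack_io'] # the raw client IO
    #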
    def call
      @hijacked = true
      env[HIJACK_IO] ||= @io
    end

    # @!attribute [r] in_data_phase
    def in_data_phase
      !(@read_header || @read_proxy)
    end

    def set_timeout(val)
      @timeout_at = Process.clock_gettime(Process::CLOCK_MONOTONIC) + val
    end

    # Number of seconds until the timeout elapses.
    def timeout
      [@timeout_at - Process.clock_gettime(Process::CLOCK_MONOTONIC), 0].max
    end

    def reset(fast_check=true)
      @parser.reset
      @read_header = true
      @read_proxy = !!@expect_proxy_proto
      @env = @proto_env.dup
      @body = nil
      @tempfile = nil
      @parsed_bytes = 0
      @ready = false
      @body_remain = 0
      @peerip = nil if @remote_addr_header
      @in_last_chunk = false

      if @buffer
        return false unless try_to_parse_proxy_protocol

        @parsed_bytes = @parser.execute(@env, @buffer, @parsed_bytes)

        if @parser.finished?
          return setup_body
        elsif @parsed_bytes >= MAX_HEADER
          raise HttpParserError,
            "HEADER is longer than allowed, aborting client early."
        end

        return false
      else
        begin
          if fast_check && @to_io.wait_readable(FAST_TRACK_KA_TIMEOUT)
            return try_to_finish
          end
        rescue IOError
          # swallow it
        end
      end
    end

    def close
      begin
        @io.close
      rescue IOError, Errno::EBADF
        Puma::Util.purge_interrupt_queue
      end
    end

    # If necessary, read the PROXY protocol from the buffer. Returns
    # false if more data is needed.
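    #
    # A v1 preamble looks like, for example:
    #
    #   PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n
    #
    # and is followed immediately by the normal HTTP request.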
    def try_to_parse_proxy_protocol
      if @read_proxy
        if @expect_proxy_proto == :v1
          if @buffer.include? "\r\n"
            if md = PROXY_PROTOCOL_V1_REGEX.match(@buffer)
              if md[1]
                @peerip = md[1].split(" ")[0]
              end
              @buffer = md.post_match
            end
            # if the buffer has a \r\n but doesn't have a PROXY protocol
            # request, this is just HTTP from a non-PROXY client; move on
            @read_proxy = false
            return @buffer.size > 0
          else
            return false
          end
        end
      end
      true
    end

    def try_to_finish
      return read_body if in_data_phase

      begin
        data = @io.read_nonblock(CHUNK_SIZE)
      rescue IO::WaitReadable
        return false
      rescue EOFError
        # Swallow error, don't log
      rescue SystemCallError, IOError
        raise ConnectionError, "Connection error detected during read"
      end

      # No data means a closed socket
      unless data
        @buffer = nil
        set_ready
        raise EOFError
      end

      if @buffer
        @buffer << data
      else
        @buffer = data
      end

      return false unless try_to_parse_proxy_protocol

      @parsed_bytes = @parser.execute(@env, @buffer, @parsed_bytes)

      if @parser.finished?
        return setup_body
      elsif @parsed_bytes >= MAX_HEADER
        raise HttpParserError,
          "HEADER is longer than allowed, aborting client early."
      end

      false
    end

    def eagerly_finish
      return true if @ready
      return false unless @to_io.wait_readable(0)
      try_to_finish
    end

    def finish(timeout)
      return if @ready
      @to_io.wait_readable(timeout) || timeout! until try_to_finish
    end

    def timeout!
      write_error(408) if in_data_phase
      raise ConnectionError
    end

    def write_error(status_code)
      begin
        @io << ERROR_RESPONSE[status_code]
      rescue StandardError
      end
    end

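    # The address of the remote side of the connection. When a remote-address
    # header has been configured (for example via `set_remote_address header: ...`
    # in the config, so @remote_addr_header is set), the first address listed
    # there wins: "203.0.113.9, 10.0.0.2" yields "203.0.113.9". Otherwise the
    # socket's peer address is used.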
    def peerip
      return @peerip if @peerip

      if @remote_addr_header
        hdr = (@env[@remote_addr_header] || @io.peeraddr.last).split(/[\s,]/).first
        @peerip = hdr
        return hdr
      end

      @peerip ||= @io.peeraddr.last
    end

    def peer_family
      return @peer_family if @peer_family

      @peer_family ||= begin
        @io.local_address.afamily
      rescue
        Socket::AF_INET
      end
    end

    # Returns true if the persistent connection can be closed immediately
    # without waiting for the configured idle/shutdown timeout.
    # @version 5.0.0
    #
    def can_close?
      # Allow connection to close if we're not in the middle of parsing a request.
      @parsed_bytes == 0
    end

    def expect_proxy_proto=(val)
      if val
        if @read_header
          @read_proxy = true
        end
      else
        @read_proxy = false
      end
      @expect_proxy_proto = val
    end

    private

    def setup_body
      @body_read_start = Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond)

      if @env[HTTP_EXPECT] == CONTINUE
        # TODO allow a hook here to check the headers before
        # going forward
        @io << HTTP_11_100
        @io.flush
      end

      @read_header = false

      body = @parser.body

      te = @env[TRANSFER_ENCODING2]
      if te
        te_lwr = te.downcase
        if te.include? ','
          te_ary = te_lwr.split ','
          te_count = te_ary.count CHUNKED
          te_valid = te_ary[0..-2].all? { |e| ALLOWED_TRANSFER_ENCODING.include? e }
          if te_ary.last == CHUNKED && te_count == 1 && te_valid
            @env.delete TRANSFER_ENCODING2
            return setup_chunked_body body
          elsif te_count >= 1
            raise HttpParserError, "#{TE_ERR_MSG}, multiple chunked: '#{te}'"
          elsif !te_valid
            raise HttpParserError501, "#{TE_ERR_MSG}, unknown value: '#{te}'"
          end
        elsif te_lwr == CHUNKED
          @env.delete TRANSFER_ENCODING2
          return setup_chunked_body body
        elsif ALLOWED_TRANSFER_ENCODING.include? te_lwr
          raise HttpParserError, "#{TE_ERR_MSG}, single value must be chunked: '#{te}'"
        else
          raise HttpParserError501, "#{TE_ERR_MSG}, unknown value: '#{te}'"
        end
      end

      @chunked_body = false

      cl = @env[CONTENT_LENGTH]

      if cl
        # cannot contain characters that are not \d
        if cl =~ CONTENT_LENGTH_VALUE_INVALID
          raise HttpParserError, "Invalid Content-Length: #{cl.inspect}"
        end
      else
        @buffer = body.empty? ? nil : body
        @body = EmptyBody
        set_ready
        return true
      end

      remain = cl.to_i - body.bytesize

      if remain <= 0
        @body = StringIO.new(body)
        @buffer = nil
        set_ready
        return true
      end

      if remain > MAX_BODY
        @body = Tempfile.new(Const::PUMA_TMP_BASE)
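        # Unlink the file immediately after creation (POSIX unlink-after-create)
        # so the buffered request body is reclaimed even if the process is
        # killed, e.g. by SIGKILL, mid-upload. On Windows the file may linger.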
        @body.unlink
        @body.binmode
        @tempfile = @body
      else
        # The body[0,0] trick is to get an empty string in the same
        # encoding as body.
        @body = StringIO.new body[0,0]
      end

      @body.write body

      @body_remain = remain

      false
    end

    def read_body
      if @chunked_body
        return read_chunked_body
      end

      # Read an odd-sized chunk so we can read even-sized ones
      # after this
      remain = @body_remain

      if remain > CHUNK_SIZE
        want = CHUNK_SIZE
      else
        want = remain
      end

      begin
        chunk = @io.read_nonblock(want)
      rescue IO::WaitReadable
        return false
      rescue SystemCallError, IOError
        raise ConnectionError, "Connection error detected during read"
      end

      # No chunk means a closed socket
      unless chunk
        @body.close
        @buffer = nil
        set_ready
        raise EOFError
      end

      remain -= @body.write(chunk)

      if remain <= 0
        @body.rewind
        @buffer = nil
        set_ready
        return true
      end

      @body_remain = remain

      false
    end

    def read_chunked_body
      while true
        begin
          chunk = @io.read_nonblock(4096)
        rescue IO::WaitReadable
          return false
        rescue SystemCallError, IOError
          raise ConnectionError, "Connection error detected during read"
        end

        # No chunk means a closed socket
        unless chunk
          @body.close
          @buffer = nil
          set_ready
          raise EOFError
        end

        if decode_chunk(chunk)
          @env[CONTENT_LENGTH] = @chunked_content_length.to_s
          return true
        end
      end
    end

    def setup_chunked_body(body)
      @chunked_body = true
      @partial_part_left = 0
      @prev_chunk = ""

      @body = Tempfile.new(Const::PUMA_TMP_BASE)
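      # Unlink immediately after creation; see the note in #setup_body.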
      @body.unlink
      @body.binmode
      @tempfile = @body
      @chunked_content_length = 0

      if decode_chunk(body)
        @env[CONTENT_LENGTH] = @chunked_content_length.to_s
        return true
      end
    end

    # @version 5.0.0
    def write_chunk(str)
      @chunked_content_length += @body.write(str)
    end

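    # Incrementally parses a buffer in chunked transfer encoding, which on the
    # wire looks like, for example:
    #
    #   "5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n"
    #
    # i.e. a hex size line, that many bytes plus a trailing CRLF per chunk, and
    # a zero-size chunk to terminate. Reads may split a chunk at any point,
    # which is what @partial_part_left and @prev_chunk keep track of.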
    def decode_chunk(chunk)
      if @partial_part_left > 0
        if @partial_part_left <= chunk.size
          if @partial_part_left > 2
            write_chunk(chunk[0..(@partial_part_left-3)]) # skip the \r\n
          end
          chunk = chunk[@partial_part_left..-1]
          @partial_part_left = 0
        else
          if @partial_part_left > 2
            if @partial_part_left == chunk.size + 1
              # Don't include the last \r
              write_chunk(chunk[0..(@partial_part_left-3)])
            else
              # don't include the last \r\n
              write_chunk(chunk)
            end
          end
          @partial_part_left -= chunk.size
          return false
        end
      end

      if @prev_chunk.empty?
        io = StringIO.new(chunk)
      else
        io = StringIO.new(@prev_chunk+chunk)
        @prev_chunk = ""
      end

      while !io.eof?
        line = io.gets
        if line.end_with?("\r\n")
          # Puma doesn't process chunk extensions, but should parse them if
          # they're present, which is the reason for the semicolon regex
          chunk_hex = line.strip[/\A[^;]+/]
          if chunk_hex =~ CHUNK_SIZE_INVALID
            raise HttpParserError, "Invalid chunk size: '#{chunk_hex}'"
          end
          len = chunk_hex.to_i(16)
          if len == 0
            @in_last_chunk = true
            @body.rewind
            rest = io.read
            last_crlf_size = "\r\n".bytesize
            if rest.bytesize < last_crlf_size
              @buffer = nil
              @partial_part_left = last_crlf_size - rest.bytesize
              return false
            else
              @buffer = rest[last_crlf_size..-1]
              @buffer = nil if @buffer.empty?
              set_ready
              return true
            end
          end

          len += 2

          part = io.read(len)

          unless part
            @partial_part_left = len
            next
          end

          got = part.size

          case
          when got == len
            # proper chunked segment must end with "\r\n"
            if part.end_with? CHUNK_VALID_ENDING
              write_chunk(part[0..-3]) # to skip the ending \r\n
            else
              raise HttpParserError, "Chunk size mismatch"
            end
          when got <= len - 2
            write_chunk(part)
            @partial_part_left = len - part.size
          when got == len - 1 # edge where we get just \r but not \n
            write_chunk(part[0..-2])
            @partial_part_left = len - part.size
          end
        else
          @prev_chunk = line
          return false
        end
      end

      if @in_last_chunk
        set_ready
        true
      else
        false
      end
    end

    def set_ready
      if @body_read_start
        @env['puma.request_body_wait'] = Process.clock_gettime(Process::CLOCK_MONOTONIC, :millisecond) - @body_read_start
      end
      @requests_served += 1
      @ready = true
    end
  end
end