2011-09-27 14:07:56 -04:00
|
|
|
require 'rubygems'

require 'rack'
require 'stringio'
require 'tempfile'
require 'uri'

require 'puma/thread_pool'
require 'puma/const'
require 'puma/events'

require 'puma_http11'

require 'socket'
|
2011-09-18 16:02:34 -04:00
|
|
|
|
2011-09-22 22:24:43 -04:00
|
|
|
module Puma
|
2011-09-18 16:02:34 -04:00
|
|
|
class Server
|
|
|
|
|
2011-09-22 22:24:43 -04:00
|
|
|
include Puma::Const
|
2011-09-18 16:02:34 -04:00
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
attr_reader :thread
|
|
|
|
attr_reader :events
|
2011-09-23 01:14:39 -04:00
|
|
|
attr_accessor :app
|
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
attr_accessor :min_threads
|
|
|
|
attr_accessor :max_threads
|
2011-10-05 00:11:10 -04:00
|
|
|
attr_accessor :persistent_timeout
|
2011-09-23 01:14:39 -04:00
|
|
|
|
2011-09-18 16:02:34 -04:00
|
|
|
# Creates a working server on host:port (strange things happen if port
|
|
|
|
# isn't a Number).
|
|
|
|
#
|
2011-09-24 02:26:13 -04:00
|
|
|
# Use HttpServer#run to start the server and HttpServer#acceptor.join to
|
2011-09-18 16:02:34 -04:00
|
|
|
# join the thread that's processing incoming requests on the socket.
|
|
|
|
#
|
2011-09-27 17:33:17 -04:00
|
|
|
def initialize(app, events=Events::DEFAULT)
|
|
|
|
@app = app
|
|
|
|
@events = events
|
2011-09-18 16:02:34 -04:00
|
|
|
|
|
|
|
@check, @notify = IO.pipe
|
2011-09-27 13:53:45 -04:00
|
|
|
@ios = [@check]
|
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
@running = false
|
2011-09-23 01:14:39 -04:00
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
@min_threads = 0
|
|
|
|
@max_threads = 16
|
2011-09-24 03:26:17 -04:00
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
@thread = nil
|
|
|
|
@thread_pool = nil
|
2011-09-27 14:29:20 -04:00
|
|
|
|
2011-10-05 00:11:10 -04:00
|
|
|
@persistent_timeout = PERSISTENT_TIMEOUT
|
|
|
|
|
2011-09-27 14:29:20 -04:00
|
|
|
@proto_env = {
|
|
|
|
"rack.version".freeze => Rack::VERSION,
|
2011-09-27 16:52:50 -04:00
|
|
|
"rack.errors".freeze => events.stderr,
|
2011-09-27 14:29:20 -04:00
|
|
|
"rack.multithread".freeze => true,
|
|
|
|
"rack.multiprocess".freeze => false,
|
|
|
|
"rack.run_once".freeze => true,
|
|
|
|
"SCRIPT_NAME".freeze => "",
|
|
|
|
"CONTENT_TYPE".freeze => "",
|
|
|
|
"QUERY_STRING".freeze => "",
|
|
|
|
SERVER_PROTOCOL => HTTP_11,
|
|
|
|
SERVER_SOFTWARE => PUMA_VERSION,
|
|
|
|
GATEWAY_INTERFACE => CGI_VER
|
|
|
|
}
|
2011-09-18 16:02:34 -04:00
|
|
|
end
|
|
|
|
|
2011-09-27 13:53:45 -04:00
|
|
|
def add_tcp_listener(host, port)
|
|
|
|
@ios << TCPServer.new(host, port)
|
|
|
|
end
|
|
|
|
|
|
|
|
def add_unix_listener(path)
|
|
|
|
@ios << UNIXServer.new(path)
|
|
|
|
end
|
|
|
|
|
2011-09-24 03:19:22 -04:00
|
|
|
# Runs the server. It returns the thread used so you can "join" it.
|
|
|
|
# You can also access the HttpServer#acceptor attribute to get the
|
|
|
|
# thread later.
|
|
|
|
def run
|
|
|
|
BasicSocket.do_not_reverse_lookup = true
|
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
@running = true
|
|
|
|
|
|
|
|
@thread_pool = ThreadPool.new(@min_threads, @max_threads) do |client|
|
|
|
|
process_client(client)
|
|
|
|
end
|
|
|
|
|
|
|
|
@thread = Thread.new do
|
2011-09-24 03:19:22 -04:00
|
|
|
begin
|
|
|
|
check = @check
|
2011-09-27 13:53:45 -04:00
|
|
|
sockets = @ios
|
2011-09-24 03:19:22 -04:00
|
|
|
pool = @thread_pool
|
|
|
|
|
|
|
|
while @running
|
|
|
|
begin
|
|
|
|
ios = IO.select sockets
|
|
|
|
ios.first.each do |sock|
|
|
|
|
if sock == check
|
|
|
|
break if handle_check
|
|
|
|
else
|
2011-09-27 13:53:45 -04:00
|
|
|
pool << sock.accept
|
2011-09-24 03:19:22 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
rescue Errno::ECONNABORTED
|
|
|
|
# client closed the socket even before accept
|
|
|
|
client.close rescue nil
|
|
|
|
rescue Object => e
|
2011-09-27 16:52:50 -04:00
|
|
|
@events.unknown_error self, env, e, "Listen loop"
|
2011-09-24 03:19:22 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
graceful_shutdown
|
|
|
|
ensure
|
2011-09-27 13:53:45 -04:00
|
|
|
@ios.each { |i| i.close }
|
2011-09-18 16:02:34 -04:00
|
|
|
end
|
|
|
|
end
|
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
return @thread
|
2011-09-24 03:19:22 -04:00
|
|
|
end
|
2011-09-18 16:02:34 -04:00
|
|
|
|
2011-09-24 03:19:22 -04:00
|
|
|
def handle_check
|
|
|
|
cmd = @check.read(1)
|
|
|
|
|
|
|
|
case cmd
|
|
|
|
when STOP_COMMAND
|
|
|
|
@running = false
|
|
|
|
return true
|
|
|
|
end
|
|
|
|
|
|
|
|
return false
|
2011-09-18 16:02:34 -04:00
|
|
|
end
|
|
|
|
|
|
|
|
    # Reads requests from +client+ and dispatches them, supporting HTTP
    # keep-alive and pipelining: after a request is answered the loop
    # either parses already-buffered bytes of the next request or waits
    # up to @persistent_timeout seconds for more data.
    #
    # Returns when the connection should not be kept alive, on timeout,
    # or on a read/parse error.  The socket is always closed by the
    # ensure block.
    def process_client(client)
      begin
        while true
          parser = HttpParser.new
          env = @proto_env.dup    # per-request copy of the baseline rack env
          data = client.readpartial(CHUNK_SIZE)
          nparsed = 0

          # Assumption: nparsed will always be less since data will get filled
          # with more after each parsing.  If it doesn't get more then there was
          # a problem with the read operation on the client socket.
          # Effect is to stop processing when the socket can't fill the buffer
          # for further parsing.
          while nparsed < data.length
            nparsed = parser.execute(env, data, nparsed)

            if parser.finished?
              cl = env[CONTENT_LENGTH]

              # handle_request returns falsy when the connection must
              # not be kept alive; bail out (ensure closes the socket).
              return unless handle_request(env, client, parser.body, cl)

              nparsed += parser.body.size if cl

              if data.size > nparsed
                # Bytes of a pipelined request are already buffered:
                # shift out the consumed request and parse again.
                data.slice!(0, nparsed)
                parser = HttpParser.new
                env = @proto_env.dup
                nparsed = 0
              else
                # Keep-alive: wait for the next request or time out.
                unless IO.select([client], nil, nil, @persistent_timeout)
                  raise EOFError, "Timed out persistent connection"
                end
              end
            else
              # Parser is not done, queue up more data to read and continue parsing
              chunk = client.readpartial(CHUNK_SIZE)
              # NOTE(review): IO#readpartial raises EOFError rather than
              # returning nil, so this guard looks like dead code -- confirm.
              return if !chunk or chunk.length == 0 # read failed, stop processing

              data << chunk
              if data.length >= MAX_HEADER
                raise HttpParserError,
                  "HEADER is longer than allowed, aborting client early."
              end
            end
          end
        end
      rescue EOFError, SystemCallError
        # Client went away (or the keep-alive wait timed out); nothing
        # worth reporting.
        client.close rescue nil

      rescue HttpParserError => e
        @events.parse_error self, env, e

      rescue StandardError => e
        @events.unknown_error self, env, e, "Read"

      ensure
        begin
          client.close
        rescue IOError, SystemCallError
          # Already closed
        rescue StandardError => e
          @events.unknown_error self, env, e, "Client"
        end
      end
    end
|
|
|
|
|
2011-09-27 14:29:20 -04:00
|
|
|
    # Fills in the derived rack env entries the parser does not set
    # directly: SERVER_NAME/SERVER_PORT (from the Host header),
    # REQUEST_PATH/PATH_INFO, and REMOTE_ADDR.
    #
    # Raises if no request path can be determined.
    def normalize_env(env, client)
      if host = env[HTTP_HOST]
        # NOTE(review): splits on the first ":", which would mis-handle
        # an IPv6 literal Host such as "[::1]:8080" -- confirm whether
        # IPv6 Host headers are expected here.
        if colon = host.index(":")
          env[SERVER_NAME] = host[0, colon]
          env[SERVER_PORT] = host[colon+1, host.size]
        else
          env[SERVER_NAME] = host
          env[SERVER_PORT] = PORT_80
        end
      end

      unless env[REQUEST_PATH]
        # The client may have sent an absolute URI as the request
        # target; extract the path component from it.
        uri = URI.parse(env[REQUEST_URI])
        env[REQUEST_PATH] = uri.path

        raise "No REQUEST PATH" unless env[REQUEST_PATH]
      end

      env[PATH_INFO] = env[REQUEST_PATH]

      # From http://www.ietf.org/rfc/rfc3875 :
      # "Script authors should be aware that the REMOTE_ADDR and
      # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
      # may not identify the ultimate source of the request.
      # They identify the client for the immediate request to the
      # server; that client may be a proxy, gateway, or other
      # intermediary acting on behalf of the actual source client."
      #
      env[REMOTE_ADDR] = client.peeraddr.last
    end
|
|
|
|
|
2011-11-22 13:45:58 -05:00
|
|
|
    # Handles one fully-parsed request: finishes the rack env, reads the
    # request body, calls the app and streams the response to +client+.
    #
    # body is the part of the request body already read while parsing
    # the headers; cl is the raw Content-Length header value (may be nil).
    #
    # Returns true if the connection may be kept alive for another
    # request, false if it must be closed.
    def handle_request(env, client, body, cl)
      normalize_env env, client

      body = read_body env, client, body, cl

      # A nil body means the client disconnected mid-body; drop the
      # connection.
      return false unless body

      env["rack.input"] = body
      env["rack.url_scheme"] = env["HTTPS"] ? "https" : "http"

      # Chunked transfer encoding is only legal for HTTP/1.1 clients.
      allow_chunked = false

      if env['HTTP_VERSION'] == 'HTTP/1.1'
        allow_chunked = true
        http_version = "HTTP/1.1 "
        keep_alive = env["HTTP_CONNECTION"] != "close"       # 1.1 defaults to keep-alive
      else
        http_version = "HTTP/1.0 "
        keep_alive = env["HTTP_CONNECTION"] == "Keep-Alive"  # 1.0 defaults to close
      end

      chunked = false

      # Callbacks the app may register to run after the response is sent.
      after_reply = env['rack.after_reply'] = []

      begin
        begin
          status, headers, res_body = @app.call(env)
        rescue => e
          # The app blew up: serve the canned 500 instead of killing
          # this worker thread.
          status, headers, res_body = lowlevel_error(e)
        end

        content_length = nil

        # A single-part Array body lets us compute Content-Length up front.
        if res_body.kind_of? Array and res_body.size == 1
          content_length = res_body[0].size
        end

        # Status line.
        client.write http_version
        client.write status.to_s
        client.write " "
        client.write HTTP_STATUS_CODES[status]
        client.write "\r\n"

        colon = ": "
        line_ending = "\r\n"

        headers.each do |k, vs|
          case k
          when "Content-Length"
            # Trust the app's Content-Length; it is emitted separately below.
            content_length = vs
            next
          when "Transfer-Encoding"
            # The app chose its own transfer encoding; don't chunk on
            # top of it and don't claim a length.
            allow_chunked = false
            content_length = nil
          end

          # Rack allows multi-value headers joined by newlines.
          vs.split("\n").each do |v|
            client.write k
            client.write colon
            client.write v
            client.write line_ending
          end
        end

        client.write "Connection: close\r\n" unless keep_alive

        if content_length
          client.write "Content-Length: #{content_length}\r\n"
        elsif allow_chunked
          client.write "Transfer-Encoding: chunked\r\n"
          chunked = true
        end

        client.write line_ending

        # Body: each part is either a chunked frame (size in hex, CRLF,
        # data, CRLF) or raw data.
        res_body.each do |part|
          if chunked
            client.write part.size.to_s(16)
            client.write line_ending
            client.write part
            client.write line_ending
          else
            client.write part
          end

          client.flush
        end

        # A zero-length chunk terminates a chunked body.
        if chunked
          client.write "0"
          client.write line_ending
          client.write line_ending
          client.flush
        end

      ensure
        body.close
        res_body.close if res_body.respond_to? :close

        after_reply.each { |o| o.call }
      end

      return keep_alive
    end
|
|
|
|
|
2011-11-22 13:45:58 -05:00
|
|
|
def read_body(env, client, body, cl)
|
|
|
|
content_length = cl.to_i
|
2011-09-24 03:19:22 -04:00
|
|
|
|
|
|
|
remain = content_length - body.size
|
|
|
|
|
|
|
|
return StringIO.new(body) if remain <= 0
|
|
|
|
|
|
|
|
# Use a Tempfile if there is a lot of data left
|
|
|
|
if remain > MAX_BODY
|
|
|
|
stream = Tempfile.new(Const::PUMA_TMP_BASE)
|
|
|
|
stream.binmode
|
|
|
|
else
|
|
|
|
stream = StringIO.new
|
|
|
|
end
|
|
|
|
|
|
|
|
stream.write body
|
|
|
|
|
|
|
|
# Read an odd sized chunk so we can read even sized ones
|
|
|
|
# after this
|
|
|
|
chunk = client.readpartial(remain % CHUNK_SIZE)
|
|
|
|
|
|
|
|
# No chunk means a closed socket
|
|
|
|
unless chunk
|
|
|
|
stream.close
|
|
|
|
return nil
|
|
|
|
end
|
|
|
|
|
|
|
|
remain -= stream.write(chunk)
|
|
|
|
|
|
|
|
# Raed the rest of the chunks
|
|
|
|
while remain > 0
|
|
|
|
chunk = client.readpartial(CHUNK_SIZE)
|
|
|
|
unless chunk
|
|
|
|
stream.close
|
|
|
|
return nil
|
|
|
|
end
|
|
|
|
|
|
|
|
remain -= stream.write(chunk)
|
|
|
|
end
|
|
|
|
|
|
|
|
stream.rewind
|
|
|
|
|
|
|
|
return stream
|
|
|
|
end
|
|
|
|
|
2011-09-24 03:26:17 -04:00
|
|
|
def lowlevel_error(e)
|
|
|
|
[500, {}, ["No application configured"]]
|
|
|
|
end
|
|
|
|
|
2011-09-24 03:19:22 -04:00
|
|
|
# Wait for all outstanding requests to finish.
|
|
|
|
def graceful_shutdown
|
2011-09-27 17:33:17 -04:00
|
|
|
@thread_pool.shutdown if @thread_pool
|
2011-09-24 03:19:22 -04:00
|
|
|
end
|
2011-09-23 01:14:39 -04:00
|
|
|
|
|
|
|
# Stops the acceptor thread and then causes the worker threads to finish
|
|
|
|
# off the request queue before finally exiting.
|
|
|
|
def stop(sync=false)
|
|
|
|
@notify << STOP_COMMAND
|
|
|
|
|
2011-09-27 17:33:17 -04:00
|
|
|
@thread.join if @thread && sync
|
2011-09-23 01:14:39 -04:00
|
|
|
end
|
2011-09-29 16:20:19 -04:00
|
|
|
|
|
|
|
def attempt_bonjour(name)
|
|
|
|
begin
|
|
|
|
require 'dnssd'
|
2011-10-21 19:46:22 -04:00
|
|
|
rescue LoadError
|
2011-09-29 16:20:19 -04:00
|
|
|
return false
|
|
|
|
end
|
|
|
|
|
|
|
|
@bonjour_registered = false
|
|
|
|
announced = false
|
|
|
|
|
|
|
|
@ios.each do |io|
|
|
|
|
if io.kind_of? TCPServer
|
2011-09-30 11:30:37 -04:00
|
|
|
fixed_name = name.gsub(/\./, "-")
|
2011-09-29 16:20:19 -04:00
|
|
|
|
|
|
|
DNSSD.announce io, "puma - #{fixed_name}", "http" do |r|
|
|
|
|
@bonjour_registered = true
|
|
|
|
end
|
|
|
|
|
|
|
|
announced = true
|
|
|
|
end
|
|
|
|
end
|
|
|
|
|
|
|
|
return announced
|
|
|
|
end
|
|
|
|
|
|
|
|
def bonjour_registered?
|
|
|
|
@bonjour_registered ||= false
|
|
|
|
end
|
2011-09-18 16:02:34 -04:00
|
|
|
end
|
|
|
|
end
|