2006-01-28 14:03:53 -05:00
|
|
|
require 'socket'
|
|
|
|
require 'http11'
|
|
|
|
require 'thread'
|
2006-01-28 14:34:12 -05:00
|
|
|
require 'stringio'
|
2006-02-20 19:55:39 -05:00
|
|
|
require 'mongrel/cgi'
|
|
|
|
require 'mongrel/handlers'
|
2006-02-28 02:04:41 -05:00
|
|
|
require 'mongrel/command'
|
|
|
|
require 'timeout'
|
2006-01-28 14:03:53 -05:00
|
|
|
|
|
|
|
# Mongrel module containing all of the classes (include C extensions) for running
|
|
|
|
# a Mongrel web server. It contains a minimalist HTTP server with just enough
|
|
|
|
# functionality to service web application requests as fast as possible.
|
|
|
|
module Mongrel
|
|
|
|
|
2006-02-28 00:17:23 -05:00
|
|
|
class URIClassifier
  # Lists every URI prefix currently registered with this classifier.
  # Treat the result as read-only: mutating the returned URIs causes a
  # memory leak in the underlying extension.  Handy for inspecting what
  # the URIClassifier currently knows about.
  def uris
    @handler_map.keys
  end

  # Renders the classifier the same way a plain Hash would render,
  # which makes debugging output familiar.
  def inspect
    @handler_map.inspect
  end
end
|
|
|
|
|
|
|
|
|
2006-02-20 17:39:37 -05:00
|
|
|
# Used to stop the HttpServer via Thread.raise.
# NOTE(review): inherits from Exception rather than StandardError, presumably
# so that the generic `rescue => details` handlers in request processing can
# never swallow a stop request -- confirm before changing the superclass.
class StopServer < Exception
end
|
|
|
|
|
2006-02-28 02:04:41 -05:00
|
|
|
# Used to timeout worker threads that have taken too long.
# NOTE(review): nothing in this file raises this; the worker loop in
# HttpServer uses Timeout::timeout (raising Timeout::Error) instead.
# Kept for API compatibility.
class TimeoutWorker < Exception
end
|
|
|
|
|
2006-02-03 00:42:08 -05:00
|
|
|
# Every standard HTTP code mapped to the appropriate message.  These are
# used so frequently that they are placed directly in Mongrel for easy
# access rather than Mongrel::Const.
# Frozen so the shared lookup table cannot be mutated at runtime.
HTTP_STATUS_CODES = {
  100 => 'Continue',
  101 => 'Switching Protocols',
  200 => 'OK',
  201 => 'Created',
  202 => 'Accepted',
  203 => 'Non-Authoritative Information',
  204 => 'No Content',
  205 => 'Reset Content',
  206 => 'Partial Content',
  300 => 'Multiple Choices',
  301 => 'Moved Permanently',
  302 => 'Moved Temporarily',
  303 => 'See Other',
  304 => 'Not Modified',
  305 => 'Use Proxy',
  400 => 'Bad Request',
  401 => 'Unauthorized',
  402 => 'Payment Required',
  403 => 'Forbidden',
  404 => 'Not Found',
  405 => 'Method Not Allowed',
  406 => 'Not Acceptable',
  407 => 'Proxy Authentication Required',
  408 => 'Request Time-out',
  409 => 'Conflict',
  410 => 'Gone',
  411 => 'Length Required',
  412 => 'Precondition Failed',
  413 => 'Request Entity Too Large',
  414 => 'Request-URI Too Large',
  415 => 'Unsupported Media Type',
  500 => 'Internal Server Error',
  501 => 'Not Implemented',
  502 => 'Bad Gateway',
  503 => 'Service Unavailable',
  504 => 'Gateway Time-out',
  505 => 'HTTP Version not supported'
}.freeze
|
|
|
|
|
2006-02-09 21:38:18 -05:00
|
|
|
|
|
|
|
|
2006-02-03 00:42:08 -05:00
|
|
|
# Frequently used constants when constructing requests or responses.  Many times
# the constant just refers to a string with the same contents.  Using these constants
# gave about a 3% to 10% performance improvement over using the strings directly.
# Symbols did not really improve things much compared to constants.
# All string constants are frozen so shared header/param keys cannot be
# mutated by handlers.
#
# While Mongrel does try to emulate the CGI/1.2 protocol, it does not use the REMOTE_IDENT,
# REMOTE_USER, or REMOTE_HOST parameters since those are either a security problem or
# too taxing on performance.
module Const
  # This is the part of the path after the SCRIPT_NAME.  URIClassifier will determine this.
  PATH_INFO='PATH_INFO'.freeze

  # This is the initial part that your handler is identified as by URIClassifier.
  SCRIPT_NAME='SCRIPT_NAME'.freeze

  # The original URI requested by the client.  Passed to URIClassifier to build PATH_INFO and SCRIPT_NAME.
  REQUEST_URI='REQUEST_URI'.freeze

  # Content length (also available as HTTP_CONTENT_LENGTH).
  CONTENT_LENGTH='CONTENT_LENGTH'.freeze

  # Content length (also available as CONTENT_LENGTH).
  HTTP_CONTENT_LENGTH='HTTP_CONTENT_LENGTH'.freeze

  # Content type (also available as HTTP_CONTENT_TYPE).
  CONTENT_TYPE='CONTENT_TYPE'.freeze

  # Content type (also available as CONTENT_TYPE).
  HTTP_CONTENT_TYPE='HTTP_CONTENT_TYPE'.freeze

  # Gateway interface key in the HttpRequest parameters.
  GATEWAY_INTERFACE='GATEWAY_INTERFACE'.freeze
  # We claim to support CGI/1.2.
  GATEWAY_INTERFACE_VALUE='CGI/1.2'.freeze

  # Host's remote IP address.  Mongrel does not do DNS resolves since that slows
  # processing down considerably.
  REMOTE_ADDR='REMOTE_ADDR'.freeze

  # This is not given since Mongrel does not do DNS resolves.  It is only here for
  # completeness for the CGI standard.
  REMOTE_HOST='REMOTE_HOST'.freeze

  # The name/host of our server as given by the HttpServer.new(host,port) call.
  SERVER_NAME='SERVER_NAME'.freeze

  # The port of our server as given by the HttpServer.new(host,port) call.
  SERVER_PORT='SERVER_PORT'.freeze

  # SERVER_NAME and SERVER_PORT come from this.
  HTTP_HOST='HTTP_HOST'.freeze

  # Official server protocol key in the HttpRequest parameters.
  SERVER_PROTOCOL='SERVER_PROTOCOL'.freeze
  # Mongrel claims to support HTTP/1.1.
  SERVER_PROTOCOL_VALUE='HTTP/1.1'.freeze

  # The actual server software being used (it's Mongrel man).
  SERVER_SOFTWARE='SERVER_SOFTWARE'.freeze

  # Current Mongrel version (used for SERVER_SOFTWARE and other response headers).
  MONGREL_VERSION='Mongrel 0.3.10'.freeze

  # The standard empty 404 response for bad requests.  Use Error404Handler for custom stuff.
  ERROR_404_RESPONSE="HTTP/1.1 404 Not Found\r\nConnection: close\r\nServer: #{MONGREL_VERSION}\r\n\r\nNOT FOUND".freeze

  # A common header for indicating the server is too busy.  Not used yet.
  ERROR_503_RESPONSE="HTTP/1.1 503 Service Unavailable\r\n\r\nBUSY".freeze

  # The basic max request size we'll try to read.
  CHUNK_SIZE=(16 * 1024)
end
|
|
|
|
|
|
|
|
|
2006-01-28 14:03:53 -05:00
|
|
|
# When a handler is found for a registered URI then this class is constructed
# and passed to your HttpHandler::process method.  You should assume that
# *one* handler processes all requests.  Included in the HttpRequest is a
# HttpRequest.params Hash that matches common CGI params, and a HttpRequest.body
# which is a string containing the request body (raw for now).
#
# Mongrel really only supports small-ish request bodies right now since really
# huge ones have to be completely read off the wire and put into a string.
# Later there will be several options for efficiently handling large file
# uploads.
class HttpRequest
  attr_reader :body, :params

  # You don't really call this.  It's made for you.
  # Main thing it does is hook up the params, and store any remaining
  # body data into the HttpRequest.body attribute.
  #
  # params is the Hash produced by the HTTP parser, initial_body is whatever
  # request body bytes were already read alongside the headers (may be nil),
  # and socket is the client connection used to read any remaining body.
  def initialize(params, initial_body, socket)
    @body = initial_body || ""
    @params = params
    @socket = socket

    # fix up the CGI requirements
    params[Const::CONTENT_LENGTH] = params[Const::HTTP_CONTENT_LENGTH] || 0
    params[Const::CONTENT_TYPE] = params[Const::HTTP_CONTENT_TYPE] if params[Const::HTTP_CONTENT_TYPE]
    params[Const::GATEWAY_INTERFACE] = Const::GATEWAY_INTERFACE_VALUE
    params[Const::REMOTE_ADDR] = socket.peeraddr[3]

    # HTTP/1.0 clients (and malformed requests) may omit the Host header
    # entirely, so guard against a nil HTTP_HOST before splitting it --
    # previously this raised NoMethodError on nil.
    if params[Const::HTTP_HOST]
      host, port = params[Const::HTTP_HOST].split(":")
      params[Const::SERVER_NAME] = host
      params[Const::SERVER_PORT] = port || 80
    end

    params[Const::SERVER_PROTOCOL] = Const::SERVER_PROTOCOL_VALUE
    params[Const::SERVER_SOFTWARE] = Const::MONGREL_VERSION

    # now, if the initial_body isn't long enough for the content length we have to fill it
    # TODO: adapt for big ass stuff by writing to a temp file
    clen = params[Const::HTTP_CONTENT_LENGTH].to_i
    if @body.length < clen
      # socket.read returns nil at EOF (client hung up early); appending
      # nil would raise TypeError, so keep whatever body we did read.
      remainder = @socket.read(clen - @body.length)
      @body << remainder if remainder
    end
  end
end
|
|
|
|
|
2006-01-28 14:34:12 -05:00
|
|
|
|
2006-02-03 00:42:08 -05:00
|
|
|
# This class implements a simple way of constructing the HTTP headers dynamically
# via a Hash syntax.  Think of it as a write-only Hash.  Refer to HttpResponse for
# information on how this is used.
#
# One consequence of this write-only nature is that you can write multiple headers
# by just doing them twice (which is sometimes needed in HTTP), but that the normal
# semantics for Hash (where doing an insert replaces) is not there.
class HeaderOut
  attr_reader :out

  def initialize(out)
    @out = out
  end

  # Appends "#{key}: #{value}\r\n" to the underlying output buffer.
  def []=(key, value)
    @out.write("#{key}: #{value}\r\n")
  end
end
|
|
|
|
|
2006-02-03 00:42:08 -05:00
|
|
|
# Writes and controls your response to the client using the HTTP/1.1 specification.
# You use it by simply doing:
#
#   response.start(200) do |head,out|
#     head['Content-Type'] = 'text/plain'
#     out.write("hello\n")
#   end
#
# The parameter to start is the response code--which Mongrel will translate for you
# based on HTTP_STATUS_CODES.  The head parameter is how you write custom headers.
# The out parameter is where you write your body.  The default status code for
# HttpResponse.start is 200 so the above example is redundant.
#
# As you can see, it's just like using a Hash and as you do this it writes the proper
# header to the output on the fly.  You can even intermix specifying headers and
# writing content.  The HttpResponse class will write the things in the proper order
# once the HttpResponse.block is ended.
#
# You may also work the HttpResponse object directly using the various attributes available
# for the raw socket, body, header, and status codes.  If you do this you're on your own.
# A design decision was made to force the client to not pipeline requests.  HTTP/1.1
# pipelining really kills the performance due to how it has to be handled and how
# unclear the standard is.  To fix this the HttpResponse gives a "Connection: close"
# header which forces the client to close right away.  The bonus for this is that it
# gives a pretty nice speed boost to most clients since they can close their connection
# immediately.
#
# One additional caveat is that you don't have to specify the Content-length header
# as the HttpResponse will write this for you based on the out length.
class HttpResponse
  attr_reader :socket
  attr_reader :body
  attr_reader :header
  # status was previously declared with separate attr_reader/attr_writer;
  # attr_accessor is the equivalent consolidated form.
  attr_accessor :status

  def initialize(socket)
    @socket = socket
    @body = StringIO.new
    @status = 404  # default until start (or the caller) sets a real status
    @header = HeaderOut.new(StringIO.new)
  end

  # Receives a block passing it the header and body for you to work with.
  # When the block is finished it writes everything you've done to
  # the socket in the proper order.  This lets you intermix header and
  # body content as needed.
  def start(status=200)
    @status = status.to_i
    yield @header, @body
    finished
  end

  # Primarily used in exception handling to reset the response output in order to write
  # an alternative response.
  def reset
    # Truncate as well as rewind: rewinding alone only moves the write
    # position, so a shorter replacement response would be followed by
    # stale bytes left over from the first attempt.
    @header.out.truncate(0)
    @header.out.rewind
    @body.truncate(0)
    @body.rewind
  end

  # Writes the status line plus the Content-Length and Connection: close
  # headers.  Connection: close ensures the client does not pipeline.
  def send_status
    status = "HTTP/1.1 #{@status} #{HTTP_STATUS_CODES[@status]}\r\nContent-Length: #{@body.length}\r\nConnection: close\r\n"
    @socket.write(status)
  end

  # Writes the accumulated custom headers followed by the blank line that
  # terminates the header section.
  def send_header
    @header.out.rewind
    @socket.write(@header.out.read)
    @socket.write("\r\n")
  end

  # Writes the accumulated body to the socket.
  def send_body
    @body.rewind
    @socket.write(@body.read)
  end

  # This takes whatever has been done to header and body and then writes it in the
  # proper format to make an HTTP/1.1 response.
  def finished
    send_status
    send_header
    send_body
  end
end
|
|
|
|
|
2006-01-28 14:34:12 -05:00
|
|
|
|
2006-01-28 14:03:53 -05:00
|
|
|
# This is the main driver of Mongrel, while the Mongrel::HttpParser and Mongrel::URIClassifier
# make up the majority of how the server functions.  It's a very simple class that just
# has a thread accepting connections and a simple HttpServer.process_client function
# to do the heavy lifting with the IO and Ruby.
#
# You use it by doing the following:
#
#   server = HttpServer.new("0.0.0.0", 3000)
#   server.register("/stuff", MyNifterHandler.new)
#   server.run.join
#
# The last line can be just server.run if you don't want to join the thread used.
# If you don't though Ruby will mysteriously just exit on you.
#
# Ruby's thread implementation is "interesting" to say the least.  Experiments with
# *many* different types of IO processing simply cannot make a dent in it.  Future
# releases of Mongrel will find other creative ways to make threads faster, but don't
# hold your breath until Ruby 1.9 is actually finally useful.
class HttpServer
  attr_reader :acceptor

  # Creates a working server on host:port (strange things happen if port isn't a Number).
  # Use HttpServer::run to start the server.
  #
  # The num_processors variable has varying affects on how requests are processed.  You'd
  # think adding more processing threads (processors) would make the server faster, but
  # that's just not true.  There's actually an effect of how Ruby does threads such that
  # the more processors waiting on the request queue, the slower the system is to handle
  # each request.  But, the lower the number of processors the fewer concurrent responses
  # the server can make.
  #
  # 20 is the default number of processors and is based on experimentation on a few
  # systems.  If you find that you overload Mongrel too much
  # try changing it higher.  If you find that responses are way too slow
  # try lowering it (after you've tuned your stuff of course).
  #
  # Each request is given at most +timeout+ seconds to complete before the
  # worker gives up on it and closes the connection.
  def initialize(host, port, num_processors=20, timeout=120)
    @socket = TCPServer.new(host, port)
    @classifier = URIClassifier.new
    @req_queue = Queue.new
    @host = host
    @port = port
    @processors = []

    # create the worker threads; each blocks on the queue until run pushes
    # a client socket, or false as the shutdown signal
    num_processors.times do |i|
      @processors << Thread.new do
        while client = @req_queue.deq
          begin
            Timeout::timeout(timeout) do
              process_client(client)
            end
          rescue Timeout::Error
            # Without this rescue a timed-out request raises out of the
            # worker loop and kills the thread, silently shrinking the pool
            # (Timeout::Error is not a StandardError on older Rubies, so
            # the rescue inside process_client cannot catch it there).
            STDERR.puts "Client request took longer than #{timeout} seconds. Killed."
            client.close unless client.closed?
          end
        end
      end
    end
  end

  # Does the majority of the IO processing.  It has been written in Ruby using
  # about 7 different IO processing strategies and no matter how it's done
  # the performance just does not improve.  It is currently carefully constructed
  # to make sure that it gets the best possible performance, but anyone who
  # thinks they can make it faster is more than welcome to take a crack at it.
  def process_client(client)
    begin
      parser = HttpParser.new
      params = {}
      data = client.readpartial(Const::CHUNK_SIZE)

      while true
        nread = parser.execute(params, data)

        if parser.finished?
          script_name, path_info, handler = @classifier.resolve(params[Const::REQUEST_URI])

          if handler
            params[Const::PATH_INFO] = path_info
            params[Const::SCRIPT_NAME] = script_name
            request = HttpRequest.new(params, data[nread ... data.length], client)
            response = HttpResponse.new(client)
            handler.process(request, response)
          else
            client.write(Const::ERROR_404_RESPONSE)
          end

          break #done
        else
          # gotta stream and read again until we can get the parser to be character safe
          # TODO: make this more efficient since this means we're parsing a lot repeatedly
          parser.reset
          data << client.readpartial(Const::CHUNK_SIZE)
        end
      end
    rescue EOFError, Errno::ECONNRESET, Errno::EPIPE
      # clients that hang up mid-request are routine; nothing worth logging
    rescue => details
      STDERR.puts "ERROR(#{details.class}): #{details}"
      STDERR.puts details.backtrace.join("\n")
    ensure
      # guard against double-close: the handler (or the timeout path) may
      # have closed the socket already, and close on a closed IO raises.
      client.close unless client.closed?
    end
  end

  # Runs the thing.  It returns the thread used so you can "join" it.  You can also
  # access the HttpServer::acceptor attribute to get the thread later.
  def run
    BasicSocket.do_not_reverse_lookup = true

    @acceptor = Thread.new do
      Thread.current[:stopped] = false

      while not Thread.current[:stopped]
        begin
          @req_queue << @socket.accept
        rescue StopServer
          STDERR.puts "Server stopped. Exiting."
          @socket.close if not @socket.closed?
          break
        rescue Errno::EMFILE
          STDERR.puts "Too many open files. Try increasing ulimits."
          sleep 0.5
        end
      end

      # now that processing is done we feed enough false onto the request queue to get
      # each processor to exit and stop processing.
      @processors.length.times { @req_queue << false }

      # finally we wait until the queue is empty
      while @req_queue.length > 0
        STDERR.puts "Shutdown waiting for #{@req_queue.length} requests" if @req_queue.length > 0
        sleep 1
      end
    end

    # boost the acceptor so pending connections are picked up promptly
    @acceptor.priority = 1

    return @acceptor
  end

  # Simply registers a handler with the internal URIClassifier.  When the URI is
  # found in the prefix of a request then your handler's HttpHandler::process method
  # is called.  See Mongrel::URIClassifier#register for more information.
  def register(uri, handler)
    @classifier.register(uri, handler)
  end

  # Removes any handler registered at the given URI.  See Mongrel::URIClassifier#unregister
  # for more information.
  def unregister(uri)
    @classifier.unregister(uri)
  end

  # Stops the acceptor thread and then causes the worker threads to finish
  # off the request queue before finally exiting.
  def stop
    stopper = Thread.new do
      @acceptor[:stopped] = true
      exc = StopServer.new
      @acceptor.raise(exc)
    end
    stopper.priority = 10
  end
end
|
2006-02-03 00:42:08 -05:00
|
|
|
|
2006-02-20 19:55:39 -05:00
|
|
|
end
|
2006-02-03 00:42:08 -05:00
|
|
|
|
2006-02-16 01:41:47 -05:00
|
|
|
|