2009-12-17 22:13:29 -05:00
|
|
|
module CoffeeScript
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# The lexer reads a stream of CoffeeScript and divvys it up into tagged
|
|
|
|
# tokens. A minor bit of the ambiguity in the grammar has been avoided by
|
|
|
|
# pushing some extra smarts into the Lexer.
|
2009-12-17 22:13:29 -05:00
|
|
|
class Lexer
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# The list of keywords passed verbatim to the parser.
KEYWORDS = %w[
  if else then unless
  true false yes no on off
  and or is isnt not
  new return
  try catch finally throw
  break continue
  for in of by where while
  delete instanceof typeof
  switch when
  super extends
  arguments
]
|
2009-12-17 22:13:29 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Token matching regexes. Each is anchored at \A so it can only match at
# the head of the current @chunk.
IDENTIFIER = /\A([a-zA-Z$_](\w|\$)*)/ # variable, keyword and property names
NUMBER = /\A(\b((0(x|X)[0-9a-fA-F]+)|([0-9]+(\.[0-9]+)?(e[+\-]?[0-9]+)?)))\b/i # hex, decimal and exponential literals
STRING = /\A(""|''|"(.*?)([^\\]|\\\\)"|'(.*?)([^\\]|\\\\)')/m # quoted strings; closing quote must not be escaped
HEREDOC = /\A("{6}|'{6}|"{3}\n?(.*?)\n?([ \t]*)"{3}|'{3}\n?(.*?)\n?([ \t]*)'{3})/m # triple-quoted heredocs; groups 2/4 capture the body
JS = /\A(``|`(.*?)([^\\]|\\\\)`)/m # backtick-quoted passthrough JavaScript
OPERATOR = /\A([+\*&|\/\-%=<>:!?]+)/ # a run of operator characters
WHITESPACE = /\A([ \t]+)/ # non-newline whitespace
COMMENT = /\A(((\n?[ \t]*)?#.*$)+)/ # one or more consecutive comment lines
CODE = /\A(=?=>)/ # function arrows: => and ==>
REGEX = /\A(\/(.*?)([^\\]|\\\\)\/[imgy]{0,4})/ # regex literals with optional flags
MULTI_DENT = /\A((\n([ \t]*))+)(\.)?/ # newline run plus indentation; group 4 notes a leading '.'
LAST_DENT = /\n([ \t]*)/ # indentation of a single line within a newline run
ASSIGNMENT = /\A(:|=)\Z/ # a whole token that is exactly ':' or '='
|
2009-12-17 22:13:29 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Token cleaning regexes, used to normalize matched text before it is
# recorded as a token.
JS_CLEANER = /(\A`|`\Z)/ # strips the wrapping backticks from passthrough JS
MULTILINER = /\n/ # a single newline (used for counting lines)
STRING_NEWLINES = /\n[ \t]*/ # a newline plus following indentation inside a string
COMMENT_CLEANER = /(^[ \t]*#|\n[ \t]*$)/ # leading '#' markers and trailing blank space
NO_NEWLINE = /\A([+\*&|\/\-%=<>:!.\\][<>=&|]*|and|or|is|isnt|not|delete|typeof|instanceof)\Z/ # tokens after which a newline is suppressed
HEREDOC_INDENT = /^[ \t]+/ # leading indentation of each heredoc line
|
2009-12-17 22:13:29 -05:00
|
|
|
|
2010-01-01 12:08:36 -05:00
|
|
|
# Tokens which a regular expression will never immediately follow, but which
# a division operator might.
# See: http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
# (regex_token checks the previous token's tag against this list.)
NOT_REGEX = [
  :IDENTIFIER, :NUMBER, :REGEX, :STRING,
  ')', '++', '--', ']', '}',
  :FALSE, :NULL, :TRUE
]
|
2010-01-01 09:49:18 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Scan by attempting to match tokens one character at a time. Slow and steady.
# Returns the rewritten token stream produced by the Rewriter.
def tokenize(code)
  @code    = code.chomp  # cleanup: drop the trailing line break, if any
  @i       = 0           # current character position we're parsing
  @line    = 1           # the current line
  @indent  = 0           # the current indent level
  @indents = []          # the stack of all indent levels we are currently within
  @tokens  = []          # all parsed tokens, each in the form [:TOKEN_TYPE, value]
  until @i >= @code.length
    @chunk = @code[@i..-1]
    extract_next_token
  end
  puts "original stream: #{@tokens.inspect}" if ENV['VERBOSE']
  close_indentation
  Rewriter.new.rewrite(@tokens)
end
|
|
|
|
|
2010-01-01 12:11:35 -05:00
|
|
|
# At every position, run through this list of attempted matches,
# short-circuiting as soon as one of the tokenizers succeeds.
# The order of attempts matters and must not change.
def extract_next_token
  identifier_token and return
  number_token     and return
  heredoc_token    and return
  string_token     and return
  js_token         and return
  regex_token      and return
  indent_token     and return
  comment_token    and return
  whitespace_token and return
  literal_token
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2010-01-01 12:11:35 -05:00
|
|
|
# Tokenizers ==========================================================
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches identifying literals: variables, keywords, method names, etc.
# Returns false when the chunk does not start with an identifier.
def identifier_token
  return false unless identifier = @chunk[IDENTIFIER, 1]
  # Keywords are special identifiers tagged with their own name,
  # 'if' will result in an [:IF, "if"] token.
  tag = KEYWORDS.include?(identifier) ? identifier.upcase.to_sym : :IDENTIFIER
  # A 'when' that starts its own line is tagged separately to help the grammar.
  tag = :LEADING_WHEN if tag == :WHEN && [:OUTDENT, :INDENT, "\n"].include?(last_tag)
  # An identifier following '::' is a prototype access.
  @tokens[-1][0] = :PROTOTYPE_ACCESS if tag == :IDENTIFIER && last_value == '::'
  if tag == :IDENTIFIER && last_value == '.' && !(@tokens[-2] && @tokens[-2][1] == '.')
    # @tokens[-2] can be nil when '.' is the very first token; the extra
    # nil check below avoids the NoMethodError the old code raised there.
    if @tokens[-2] && @tokens[-2][0] == "?"
      # 'a?.b' soaks up the nil check: fold the '?' into the access token.
      @tokens[-1][0] = :SOAK_ACCESS
      @tokens.delete_at(-2)
    else
      @tokens[-1][0] = :PROPERTY_ACCESS
    end
  end
  token(tag, identifier)
  @i += identifier.length
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches numbers, including decimals, hex, and exponential notation.
# Returns false when the chunk does not start with a number.
def number_token
  match = NUMBER.match(@chunk)
  return false unless match
  number = match[1]
  token(:NUMBER, number)
  @i += number.length
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches strings, including multi-line strings.
# Newlines inside the string are escaped with a trailing backslash.
def string_token
  string = @chunk[STRING, 1]
  return false if string.nil?
  token(:STRING, string.gsub(STRING_NEWLINES, " \\\n"))
  @line += string.count("\n")
  @i += string.length
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2010-01-11 23:53:50 -05:00
|
|
|
# Matches heredocs, adjusting indentation to the correct level.
# Emits a single-line :STRING token with newlines escaped as \n.
def heredoc_token
  return false unless match = @chunk.match(HEREDOC)
  # Groups 2 and 4 hold the body of double- and single-quoted heredocs;
  # an empty heredoc ("""""" or '''''') captures neither, so default to ''
  # (the old code crashed on nil here).
  doc = match[2] || match[4] || ''
  # Strip the common leading indentation from every line of the body.
  # NOTE(review): String#min is lexicographic — assumes indentation uses
  # spaces or tabs consistently.
  indent = doc.scan(HEREDOC_INDENT).min
  doc = doc.gsub(/^#{indent}/, "")
  # Escape newlines and double quotes so the result is a valid JS string.
  doc = doc.gsub("\n", "\\n")
  doc = doc.gsub('"', '\\"')
  token(:STRING, "\"#{doc}\"")
  @line += match[1].count("\n")
  @i += match[1].length
end
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches interpolated JavaScript, passed through in backticks.
def js_token
  script = @chunk[JS, 1]
  return false if script.nil?
  # Strip the wrapping backticks before recording the token.
  token(:JS, script.gsub(JS_CLEANER, ''))
  @i += script.length
end
|
2009-12-15 09:11:27 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches regular expression literals, unless the previous token makes
# a division operator the more likely reading.
def regex_token
  regex = @chunk[REGEX, 1]
  return false if regex.nil? || NOT_REGEX.include?(last_tag)
  token(:REGEX, regex)
  @i += regex.length
end
|
2009-12-13 18:37:29 -05:00
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches and consumes comments, emitting a :COMMENT token holding the
# cleaned lines, followed by a newline token.
def comment_token
  comment = @chunk[COMMENT, 1]
  return false if comment.nil?
  @line += comment.scan(MULTILINER).length
  cleaned = comment.gsub(COMMENT_CLEANER, '').split(MULTILINER)
  token(:COMMENT, cleaned)
  token("\n", "\n")
  @i += comment.length
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2009-12-24 17:51:53 -05:00
|
|
|
# Record tokens for indentation differing from the previous line.
def indent_token
  return false unless indent = @chunk[MULTI_DENT, 1]
  @line += indent.scan(MULTILINER).size
  @i += indent.size
  # Group 4 of MULTI_DENT captures a '.' immediately after the newline(s).
  next_character = @chunk[MULTI_DENT, 4]
  prev = @tokens[-2]
  # Newlines are suppressed after a leading '.', or after an operator/keyword
  # that cannot end a statement — unless that token follows a '.' or is a
  # function arrow. 'prev' is nil when only one token has been emitted; the
  # old code crashed on that, so treat nil as "previous token is not a dot".
  no_newlines = next_character == '.' ||
    (last_value.to_s.match(NO_NEWLINE) && (prev.nil? || prev[0] != '.') && !last_value.match(CODE))
  return suppress_newlines(indent) if no_newlines
  # The indentation of the last line in the run decides the new level.
  size = indent.scan(LAST_DENT).last.last.length
  return newline_token(indent) if size == @indent
  if size > @indent
    token(:INDENT, size - @indent)
    @indents << (size - @indent)
  else
    outdent_token(@indent - size)
  end
  @indent = size
end
|
|
|
|
|
2010-01-01 12:11:35 -05:00
|
|
|
# Record an outdent token or tokens, if we're moving back inwards past
# multiple recorded indents.
def outdent_token(move_out)
  until move_out <= 0 || @indents.empty?
    last_indent = @indents.pop
    token(:OUTDENT, last_indent)
    move_out -= last_indent
  end
  token("\n", "\n")
end
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# Matches and consumes non-meaningful whitespace without emitting a token.
def whitespace_token
  whitespace = @chunk[WHITESPACE, 1]
  return false if whitespace.nil?
  @i += whitespace.length
end
|
2009-12-13 17:07:16 -05:00
|
|
|
|
2009-12-26 13:49:11 -05:00
|
|
|
# Multiple newlines get merged together.
# Use a trailing \ to escape newlines.
def newline_token(newlines)
  token("\n", "\n") if last_value != "\n"
  true
end
|
2009-12-31 13:22:33 -05:00
|
|
|
|
2009-12-30 22:49:25 -05:00
|
|
|
# Tokens to explicitly escape newlines are removed once their job is done.
def suppress_newlines(newlines)
  if last_value == "\\"
    @tokens.pop
  end
  true
end
|
2009-12-26 13:49:11 -05:00
|
|
|
|
2009-12-17 22:13:29 -05:00
|
|
|
# We treat all other single characters as a token. Eg.: ( ) , . !
# Multi-character operators are also literal tokens, so that Racc can assign
# the proper order of operations.
def literal_token
  value = @chunk[OPERATOR, 1]
  # A function arrow means the identifiers before it were parameters.
  tag_parameters if value && value.match(CODE)
  value = @chunk[0, 1] if value.nil?
  tag = value.match(ASSIGNMENT) ? :ASSIGN : value
  token(tag, value)
  @i += value.length
end
|
|
|
|
|
2010-01-01 12:11:35 -05:00
|
|
|
# Helpers ==========================================================
|
|
|
|
|
2009-12-22 10:48:58 -05:00
|
|
|
# Add a token to the results, wrapping the value so it carries its
# line number along with it.
def token(tag, value)
  @tokens.push([tag, Value.new(value, @line)])
end
|
2009-12-17 09:29:49 -05:00
|
|
|
|
2010-01-01 09:49:18 -05:00
|
|
|
# Peek at the previous token's value; nil when no tokens exist yet.
def last_value
  last = @tokens.last
  last ? last[1] : nil
end
|
2009-12-17 09:29:49 -05:00
|
|
|
|
2010-01-01 09:49:18 -05:00
|
|
|
# Peek at the previous token's tag; nil when no tokens exist yet.
def last_tag
  last = @tokens.last
  last ? last[0] : nil
end
|
|
|
|
|
2009-12-17 22:54:24 -05:00
|
|
|
# A source of ambiguity in our grammar was parameter lists in function
# definitions (as opposed to argument lists in function calls). Walk
# backwards from the arrow, retagging identifiers as :PARAM, skipping
# over '.' and ',' separators, and stopping at the first other token.
def tag_parameters
  index = 0
  marked = false
  while true
    index -= 1
    current = @tokens[index]
    return unless current
    case current[0]
    when '.', ','
      # A separator: the token before it may be another parameter.
      marked = false
    else
      return if marked || current[0] != :IDENTIFIER
      current[0] = :PARAM
      marked = true
    end
  end
end
|
|
|
|
|
2010-01-04 22:51:02 -05:00
|
|
|
# Close up all remaining open blocks at the end of the token stream by
# emitting outdents for the full current indentation.
def close_indentation
  outdent_token @indent
end
|
|
|
|
|
2009-12-16 20:48:37 -05:00
|
|
|
end
|
2009-12-13 17:07:16 -05:00
|
|
|
end
|