mirror of
https://github.com/jashkenas/coffeescript.git
synced 2022-11-09 12:23:24 -05:00
passing through comments as tags on Values, but not printing them out quite yet...
This commit is contained in:
parent
9b8f018646
commit
d45643c527
5 changed files with 24 additions and 17 deletions
TODO (12 changes)
|
@@ -1,19 +1,9 @@
|
|||
TODO:
|
||||
|
||||
* Finish the examples.
|
||||
* Finish the doc page.
|
||||
|
||||
* Write a test suite that checks the JS evaluation.
|
||||
|
||||
* Figure out a generic way to transform statements into expressions, and
|
||||
use it recursively for returns and assigns on whiles, fors, ifs, etc.
|
||||
|
||||
* Create the documentation page. (amy, idle)
|
||||
uv -c . -s coffeescript -t amy --no-lines examples/code.cs > code.html
|
||||
|
||||
* Object comprehensions would be easy to add to array comprehensions -- if
|
||||
we knew that the variable in question is, indeed, an object. Check for
|
||||
length? Special syntax to tag it?
|
||||
|
||||
* Is it possible to close blocks (functions, ifs, trys) without an explicit
|
||||
block delimiter or significant whitespace?
|
||||
|
||||
|
|
|
@@ -32,6 +32,7 @@ module CoffeeScript
|
|||
# Token cleaning regexes.
|
||||
JS_CLEANER = /(\A`|`\Z)/
|
||||
MULTILINER = /[\r\n]/
|
||||
COMMENT_CLEANER = /^\s*#\s*/
|
||||
|
||||
# Tokens that always constitute the start of an expression.
|
||||
EXP_START = ['{', '(', '[']
|
||||
|
@@ -111,6 +112,8 @@ module CoffeeScript
|
|||
# Matches and consumes comments.
|
||||
def remove_comment
|
||||
return false unless comment = @chunk[COMMENT, 1]
|
||||
cleaned = comment.gsub(COMMENT_CLEANER, '')
|
||||
@prev_comment ? @prev_comment << cleaned : @prev_comment = [cleaned]
|
||||
@i += comment.length
|
||||
end
|
||||
|
||||
|
@@ -139,10 +142,12 @@ module CoffeeScript
|
|||
@i += value.length
|
||||
end
|
||||
|
||||
# Add a token to the results, taking note of the line number for syntax
|
||||
# errors later in the parse.
|
||||
# Add a token to the results, taking note of the line number, and
|
||||
# immediately-preceding comment.
|
||||
def token(tag, value)
|
||||
@tokens << [tag, Value.new(value, @line)]
|
||||
comment = @prev_comment
|
||||
@prev_comment = nil
|
||||
@tokens << [tag, Value.new(value, @line, comment)]
|
||||
end
|
||||
|
||||
# Peek at the previous token.
|
||||
|
|
|
@@ -3,10 +3,10 @@ module CoffeeScript
|
|||
# Instead of producing raw Ruby objects, the Lexer produces values of this
|
||||
# class, wrapping native objects tagged with line number information.
|
||||
class Value
|
||||
attr_reader :line
|
||||
attr_reader :line, :comment
|
||||
|
||||
def initialize(value, line)
|
||||
@value, @line = value, line
|
||||
def initialize(value, line, comment=nil)
|
||||
@value, @line, @comment = value, line, comment
|
||||
end
|
||||
|
||||
def to_str
|
||||
|
|
|
@@ -35,6 +35,12 @@ class LexerTest < Test::Unit::TestCase
|
|||
[")", ")"], [:IF, "if"], [:IDENTIFIER, "happy"]]
|
||||
end
|
||||
|
||||
def test_lexing_comment
|
||||
code = "a: 1\n # comment\n # on two lines\nb: 2"
|
||||
token = @lex.tokenize(code).detect {|t| t[1].comment }
|
||||
assert token[1].comment == ['comment', 'on two lines']
|
||||
end
|
||||
|
||||
def test_lexing
|
||||
tokens = @lex.tokenize(File.read('test/fixtures/each.cs'))
|
||||
assert tokens.inspect == File.read('test/fixtures/each.tokens')
|
||||
|
|
|
@@ -52,6 +52,12 @@ class ParserTest < Test::Unit::TestCase
|
|||
assert nodes.first.source.literal.objects.last.value == "5"
|
||||
end
|
||||
|
||||
def test_parsing_comment
|
||||
nodes = @par.parse("a: 1\n # comment\nb: 2").expressions
|
||||
# Comments are being passed through to the raw values,
|
||||
# but are not yet properly exposed within the nodes.
|
||||
end
|
||||
|
||||
def test_parsing
|
||||
nodes = @par.parse(File.read('test/fixtures/each.cs'))
|
||||
assign = nodes.expressions.first
|
||||
|
|
Loading…
Add table
Reference in a new issue