merging Stan's recursive tokenizing fix for interpolations.

Jeremy Ashkenas 2010-04-11 09:26:21 -04:00
parent 8de2fb9009
commit 2e842f0146
2 changed files with 9 additions and 5 deletions

lib/lexer.js

@@ -60,6 +60,9 @@
       this.extract_next_token();
     }
     this.close_indentation();
+    if (o.rewrite === false) {
+      return this.tokens;
+    }
     return (new Rewriter()).rewrite(this.tokens);
   };
   // At every position, run through this list of attempted matches,
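For context, a minimal usage sketch of the new `rewrite` flag (the require path and input snippet are illustrative assumptions, not part of this commit): passing `rewrite: false` hands back the lexer's raw token stream, while the default path still runs the tokens through the Rewriter.

// Hypothetical caller of Lexer#tokenize exercising the new option.
var Lexer = require('./lexer').Lexer;   // require path assumed for illustration
var lexer = new Lexer();
// With rewrite disabled: the raw token stream, no Rewriter pass.
var raw = lexer.tokenize('print "hello"', {rewrite: false});
// Default behavior is unchanged: tokens go through (new Rewriter()).rewrite(...).
var cooked = (new Lexer()).tokenize('print "hello"', {});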
@@ -461,7 +464,7 @@
   // new Lexer, tokenize the interpolated contents, and merge them into the
   // token stream.
   Lexer.prototype.interpolate_string = function interpolate_string(str, escape_quotes) {
-    var _a, _b, _c, _d, _e, _f, _g, escaped, expr, group, i, index, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, token, tokens, value;
+    var _a, _b, _c, _d, _e, _f, _g, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
     if (str.length < 3 || !starts(str, '"')) {
       return this.token('STRING', str);
     } else {
@@ -497,9 +500,9 @@
         line: this.line
       });
       _c = nested;
-      for (index = 0, _d = _c.length; index < _d; index++) {
-        value = _c[index];
-        value[0] === 'CALL_END' ? (nested[index][0] = ')') : null;
+      for (idx = 0, _d = _c.length; idx < _d; idx++) {
+        tok = _c[idx];
+        tok[0] === 'CALL_END' ? (tok[0] = ')') : null;
       }
       nested.pop();
       tokens.push(['TOKENS', nested]);
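An illustrative sketch of the retagging loop in the hunk above (the token triples here are invented for the example; real lexer tokens share the same [tag, value, line] shape): each CALL_END tag in the nested stream is rewritten in place to a plain ')', then the trailing token is popped before the nested tokens are spliced into the outer string's stream.

// Hypothetical nested token stream for an interpolated expression wrapped as "(x)".
var tokens = [];
var nested = [
  ['(', '(', 1],
  ['IDENTIFIER', 'x', 1],
  ['CALL_END', ')', 1],      // a close-paren the Rewriter tagged as a call end
  ['TERMINATOR', '\n', 1]
];
for (var idx = 0; idx < nested.length; idx++) {
  var tok = nested[idx];
  if (tok[0] === 'CALL_END') tok[0] = ')';   // retag in place
}
nested.pop();                      // drop the trailing token
tokens.push(['TOKENS', nested]);   // merge into the outer token stream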

src/lexer.coffee

@@ -56,6 +56,7 @@ exports.Lexer: class Lexer
       @chunk: @code.slice @i
       @extract_next_token()
     @close_indentation()
+    return @tokens if o.rewrite is off
     (new Rewriter()).rewrite @tokens

   # At every position, run through this list of attempted matches,
@@ -366,7 +367,7 @@ exports.Lexer: class Lexer
         inner: expr.substring(2, expr.length - 1)
         if inner.length
           nested: lexer.tokenize "($inner)", {line: @line}
-          (nested[index][0]: ')') for value, index in nested when value[0] is 'CALL_END'
+          (tok[0]: ')') for tok, idx in nested when tok[0] is 'CALL_END'
           nested.pop()
           tokens.push ['TOKENS', nested]
         else