From 2e842f0146854f1d742ceb292c2fa9ebe1103542 Mon Sep 17 00:00:00 2001
From: Jeremy Ashkenas
Date: Sun, 11 Apr 2010 09:26:21 -0400
Subject: [PATCH] merging Stan's recursive tokenizing fix for interpolations.

---
 lib/lexer.js     | 11 +++++++----
 src/lexer.coffee |  3 ++-
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/lib/lexer.js b/lib/lexer.js
index 2782d3f6..9a0827a1 100644
--- a/lib/lexer.js
+++ b/lib/lexer.js
@@ -60,6 +60,9 @@
       this.extract_next_token();
     }
     this.close_indentation();
+    if (o.rewrite === false) {
+      return this.tokens;
+    }
     return (new Rewriter()).rewrite(this.tokens);
   };
   // At every position, run through this list of attempted matches,
@@ -461,7 +464,7 @@
   // new Lexer, tokenize the interpolated contents, and merge them into the
   // token stream.
   Lexer.prototype.interpolate_string = function interpolate_string(str, escape_quotes) {
-    var _a, _b, _c, _d, _e, _f, _g, escaped, expr, group, i, index, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, token, tokens, value;
+    var _a, _b, _c, _d, _e, _f, _g, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
     if (str.length < 3 || !starts(str, '"')) {
       return this.token('STRING', str);
     } else {
@@ -497,9 +500,9 @@
             line: this.line
           });
           _c = nested;
-          for (index = 0, _d = _c.length; index < _d; index++) {
-            value = _c[index];
-            value[0] === 'CALL_END' ? (nested[index][0] = ')') : null;
+          for (idx = 0, _d = _c.length; idx < _d; idx++) {
+            tok = _c[idx];
+            tok[0] === 'CALL_END' ? (tok[0] = ')') : null;
           }
           nested.pop();
           tokens.push(['TOKENS', nested]);
diff --git a/src/lexer.coffee b/src/lexer.coffee
index 20a6bdfc..54972767 100644
--- a/src/lexer.coffee
+++ b/src/lexer.coffee
@@ -56,6 +56,7 @@ exports.Lexer: class Lexer
       @chunk: @code.slice @i
       @extract_next_token()
     @close_indentation()
+    return @tokens if o.rewrite is off
     (new Rewriter()).rewrite @tokens

   # At every position, run through this list of attempted matches,
@@ -366,7 +367,7 @@ exports.Lexer: class Lexer
         inner: expr.substring(2, expr.length - 1)
         if inner.length
           nested: lexer.tokenize "($inner)", {line: @line}
-          (nested[index][0]: ')') for value, index in nested when value[0] is 'CALL_END'
+          (tok[0]: ')') for tok, idx in nested when tok[0] is 'CALL_END'
          nested.pop()
          tokens.push ['TOKENS', nested]
        else
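
The patch above gives tokenize a `rewrite` option: when it is off, the
lexer returns its raw token stream before the Rewriter pass runs (the
pass that, among other things, inserts implicit-call tokens such as
CALL_END, which the interpolation code then has to remap to ')'). A
minimal usage sketch in the era's CoffeeScript dialect; the require path
and the sample source string are assumptions for illustration, not part
of the patch:

  # Hypothetical example: compare raw and rewritten token streams.
  Lexer: require('./lexer').Lexer   # assumed require path
  lexer: new Lexer()

  # With rewrite off, tokenize returns tokens straight from the lexer,
  # so no Rewriter-inserted tokens (e.g. CALL_START/CALL_END for the
  # implicit call to alert) appear yet.
  raw: lexer.tokenize 'alert "total: $amount"', {rewrite: off}

  # Default behavior is unchanged: the Rewriter still runs.
  rewritten: lexer.tokenize 'alert "total: $amount"'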