diff --git a/lib/lexer.js b/lib/lexer.js
index a4954b2b..f9f9a009 100644
--- a/lib/lexer.js
+++ b/lib/lexer.js
@@ -149,7 +149,9 @@
     doc = this.sanitizeHeredoc(match[2] || match[4], {
       quote: quote
     });
-    this.interpolateString(("" + quote + doc + quote), false, true);
+    this.interpolateString(("" + quote + doc + quote), {
+      heredoc: true
+    });
     this.line += count(match[1], "\n");
     this.i += match[1].length;
     return true;
@@ -205,7 +207,9 @@
       return '\\' + escaped;
     });
     this.tokens = this.tokens.concat([['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]);
-    this.interpolateString(("\"" + str + "\""), true);
+    this.interpolateString(("\"" + str + "\""), {
+      escapeQuotes: true
+    });
     if (flags) {
       this.tokens.splice(this.tokens.length, 0, [',', ','], ['STRING', ("\"" + flags + "\"")]);
     }
@@ -453,8 +457,9 @@
     }
     return !i ? false : str.substring(0, i);
   };
-  Lexer.prototype.interpolateString = function(str, escapeQuotes, heredoc) {
+  Lexer.prototype.interpolateString = function(str, options) {
     var _d, _e, _f, _g, _h, _i, _j, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
+    options = options || {};
     if (str.length < 3 || !starts(str, '"')) {
       return this.token('STRING', str);
     } else {
@@ -486,7 +491,7 @@
       }
       inner = expr.substring(2, expr.length - 1);
       if (inner.length) {
-        if (heredoc) {
+        if (options.heredoc) {
           inner = inner.replace(new RegExp('\\\\' + quote, 'g'), quote);
         }
         nested = lexer.tokenize(("(" + inner + ")"), {
@@ -525,7 +530,7 @@
       value = _j[1];
       if (tag === 'TOKENS') {
         this.tokens = this.tokens.concat(value);
-      } else if (tag === 'STRING' && escapeQuotes) {
+      } else if (tag === 'STRING' && options.escapeQuotes) {
        escaped = value.substring(1, value.length - 1).replace(/"/g, '\\"');
        this.token(tag, ("\"" + escaped + "\""));
      } else {
diff --git a/src/lexer.coffee b/src/lexer.coffee
index ae224796..80da3fef 100644
--- a/src/lexer.coffee
+++ b/src/lexer.coffee
@@ -132,7 +132,7 @@ exports.Lexer: class Lexer
     return false unless match: @chunk.match(HEREDOC)
     quote: match[1].substr 0, 1
     doc: @sanitizeHeredoc match[2] or match[4], {quote}
-    @interpolateString "$quote$doc$quote", no, yes
+    @interpolateString "$quote$doc$quote", {heredoc: yes}
     @line: + count match[1], "\n"
     @i: + match[1].length
     true
@@ -170,7 +170,7 @@ exports.Lexer: class Lexer
       str: regex.substring(1).split('/')[0]
       str: str.replace REGEX_ESCAPE, (escaped) -> '\\' + escaped
       @tokens: @tokens.concat [['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]
-      @interpolateString "\"$str\"", yes
+      @interpolateString "\"$str\"", {escapeQuotes: yes}
       @tokens.splice @tokens.length, 0, [',', ','], ['STRING', "\"$flags\""] if flags
       @tokens.splice @tokens.length, 0, [')', ')'], [')', ')']
     else
@@ -389,7 +389,8 @@ exports.Lexer: class Lexer
   # If it encounters an interpolation, this method will recursively create a
   # new Lexer, tokenize the interpolated contents, and merge them into the
   # token stream.
-  interpolateString: (str, escapeQuotes, heredoc) ->
+  interpolateString: (str, options) ->
+    options: or {}
     if str.length < 3 or not starts str, '"'
       @token 'STRING', str
     else
@@ -411,7 +412,7 @@ exports.Lexer: class Lexer
         tokens.push ['STRING', "$quote${ str.substring(pi, i) }$quote"] if pi < i
         inner: expr.substring(2, expr.length - 1)
         if inner.length
-          inner: inner.replace new RegExp('\\\\' + quote, 'g'), quote if heredoc
+          inner: inner.replace new RegExp('\\\\' + quote, 'g'), quote if options.heredoc
           nested: lexer.tokenize "($inner)", {line: @line}
           (tok[0]: ')') for tok, idx in nested when tok[0] is 'CALL_END'
           nested.pop()
@@ -429,7 +430,7 @@ exports.Lexer: class Lexer
       [tag, value]: token
       if tag is 'TOKENS'
         @tokens: @tokens.concat value
-      else if tag is 'STRING' and escapeQuotes
+      else if tag is 'STRING' and options.escapeQuotes
         escaped: value.substring(1, value.length - 1).replace(/"/g, '\\"')
         @token tag, "\"$escaped\""
       else
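
Call-site effect of the refactor (a sketch drawn from the hunks above, not an
additional part of the patch): interpolateString now takes a single options
hash, defaulted with "options: or {}", in place of two positional booleans.

    # Before: bare booleans, easy to misread at the call site.
    @interpolateString "$quote$doc$quote", no, yes
    @interpolateString "\"$str\"", yes

    # After: each flag is named where it is passed; an omitted flag is
    # simply absent from the hash and reads as undefined.
    @interpolateString "$quote$doc$quote", {heredoc: yes}
    @interpolateString "\"$str\"", {escapeQuotes: yes}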