Fix TODO in interpolateString.
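
The old interpolateString took a single offsetInChunk option and made it do
double duty: it marked where the quoted literal began in the chunk, but it was
also used to position the string contents, even though the opening ", """, or
/// had already been stripped from str and would never end up in any token.
Split it into three explicit options: offsetInChunk (start of the whole
literal, quotes included), strOffset (start of the stripped contents, relative
to the chunk), and lexedLength (length of the whole literal, quotes included).
All three default sensibly, so the heregex path can omit them entirely.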

Jason Walton 2012-11-20 12:05:59 -05:00
parent df6c497ab0
commit 969e45a599
2 changed files with 46 additions and 40 deletions

lib/coffee-script/lexer.js

@@ -169,7 +169,8 @@
           }
           if (0 < string.indexOf('#{', 1)) {
             this.interpolateString(string.slice(1, -1), {
-              offsetInChunk: 1
+              strOffset: 1,
+              lexedLength: string.length
             });
           } else {
             this.token('STRING', this.escapeLines(string, 0, string.length));
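
As an illustration (hypothetical values, not part of the commit), the new
offsets for a small double-quoted string, assuming the literal sits at the
start of the chunk:

    # chunk           "a#{b}"     7 characters, quotes included
    # string[1...-1]  a#{b}       what interpolateString receives
    # strOffset       1           contents begin after the opening quote
    # lexedLength     7           string.length, both quotes included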
@@ -198,7 +199,8 @@
       if (quote === '"' && 0 <= doc.indexOf('#{')) {
         this.interpolateString(doc, {
           heredoc: true,
-          offsetInChunk: 3
+          strOffset: 3,
+          lexedLength: heredoc.length
         });
       } else {
         this.token('STRING', this.makeString(doc, quote, true), 0, heredoc.length);
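
The heredoc case is the same idea: the contents begin after the three opening
quote characters, hence strOffset: 3, while lexedLength spans the full heredoc
including both """ delimiters. Note in the interpolateString hunk below that
clipping a heredoc's leading newline now bumps strOffset rather than
offsetInChunk: the newline is part of the literal, but not of the emitted
string contents.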
@@ -272,8 +274,7 @@
       this.token('CALL_START', '(', 0, 0);
       tokens = [];
       _ref2 = this.interpolateString(body, {
-        regex: true,
-        offsetInChunk: 3
+        regex: true
       });
       for (_i = 0, _len = _ref2.length; _i < _len; _i++) {
         token = _ref2[_i];
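
Here offsetInChunk: 3 is simply dropped: when regex is true, interpolateString
returns the raw NEOSTRING/TOKENS list for the heregex code to rewrite, and,
per the new doc comment below, offsetInChunk and lexedLength are ignored on
that path.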
@@ -574,17 +575,17 @@
     };

     Lexer.prototype.interpolateString = function(str, options) {
-      var column, expr, heredoc, i, inner, interpolated, len, letter, lexedLength, line, locationToken, nested, offsetInChunk, originalOffsetInChunk, pi, plusToken, popped, regex, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
+      var column, expr, heredoc, i, inner, interpolated, len, letter, lexedLength, line, locationToken, nested, offsetInChunk, pi, plusToken, popped, regex, strOffset, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
       if (options == null) {
         options = {};
       }
-      heredoc = options.heredoc, regex = options.regex, offsetInChunk = options.offsetInChunk;
-      originalOffsetInChunk = offsetInChunk;
-      lexedLength = str.length;
-      offsetInChunk = offsetInChunk || 0;
+      heredoc = options.heredoc, regex = options.regex, offsetInChunk = options.offsetInChunk, strOffset = options.strOffset, lexedLength = options.lexedLength;
+      offsetInChunk = offsetInChunk || 0;
+      strOffset = strOffset || 0;
+      lexedLength = lexedLength || str.length;
       if (heredoc && str.length > 0 && str[0] === '\n') {
         str = str.slice(1);
-        offsetInChunk++;
+        strOffset++;
       }
       tokens = [];
       pi = 0;
@@ -598,11 +599,11 @@
           continue;
         }
         if (pi < i) {
-          tokens.push(this.makeToken('NEOSTRING', str.slice(pi, i), offsetInChunk + pi));
+          tokens.push(this.makeToken('NEOSTRING', str.slice(pi, i), strOffset + pi));
         }
         inner = expr.slice(1, -1);
         if (inner.length) {
-          _ref2 = this.getLineAndColumnFromChunk(offsetInChunk + i + 1), line = _ref2[0], column = _ref2[1];
+          _ref2 = this.getLineAndColumnFromChunk(strOffset + i + 1), line = _ref2[0], column = _ref2[1];
           nested = new Lexer().tokenize(inner, {
             line: line,
             column: column,
@@ -614,8 +615,8 @@
           }
           if (len = nested.length) {
             if (len > 1) {
-              nested.unshift(this.makeToken('(', '(', offsetInChunk + i + 1, 0));
-              nested.push(this.makeToken(')', ')', offsetInChunk + i + 1 + inner.length, 0));
+              nested.unshift(this.makeToken('(', '(', strOffset + i + 1, 0));
+              nested.push(this.makeToken(')', ')', strOffset + i + 1 + inner.length, 0));
            }
            tokens.push(['TOKENS', nested]);
          }
@@ -624,19 +625,19 @@
         pi = i + 1;
       }
       if ((i > pi && pi < str.length)) {
-        tokens.push(this.makeToken('NEOSTRING', str.slice(pi), offsetInChunk + pi));
+        tokens.push(this.makeToken('NEOSTRING', str.slice(pi), strOffset + pi));
       }
       if (regex) {
         return tokens;
       }
       if (!tokens.length) {
-        return this.token('STRING', '""', originalOffsetInChunk, lexedLength);
+        return this.token('STRING', '""', offsetInChunk, lexedLength);
       }
       if (tokens[0][0] !== 'NEOSTRING') {
-        tokens.unshift(this.makeToken('NEOSTRING', '', originalOffsetInChunk));
+        tokens.unshift(this.makeToken('NEOSTRING', '', offsetInChunk));
       }
       if (interpolated = tokens.length > 1) {
-        this.token('(', '(', originalOffsetInChunk, 0);
+        this.token('(', '(', offsetInChunk, 0);
       }
       for (i = _i = 0, _len = tokens.length; _i < _len; i = ++_i) {
         token = tokens[i];
@@ -664,7 +665,7 @@
         }
       }
       if (interpolated) {
-        this.token(')', ')', originalOffsetInChunk + lexedLength, 0);
+        this.token(')', ')', offsetInChunk + lexedLength, 0);
       }
       return tokens;
     };
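
The file above is compiled output; the CoffeeScript source below is where the
change was actually made, including the doc comment that replaces the old TODO.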

src/lexer.coffee

@@ -178,7 +178,7 @@ exports.Lexer = class Lexer
       when '"'
         return 0 unless string = @balancedString @chunk, '"'
         if 0 < string.indexOf '#{', 1
-          @interpolateString string[1...-1], offsetInChunk: 1
+          @interpolateString string[1...-1], strOffset: 1, lexedLength: string.length
         else
           @token 'STRING', @escapeLines string, 0, string.length
       else
@@ -195,7 +195,7 @@ exports.Lexer = class Lexer
     quote = heredoc.charAt 0
     doc = @sanitizeHeredoc match[2], quote: quote, indent: null
     if quote is '"' and 0 <= doc.indexOf '#{'
-      @interpolateString doc, heredoc: yes, offsetInChunk: 3
+      @interpolateString doc, heredoc: yes, strOffset: 3, lexedLength: heredoc.length
     else
       @token 'STRING', @makeString(doc, quote, yes), 0, heredoc.length
     heredoc.length
@@ -246,7 +246,7 @@ exports.Lexer = class Lexer
     @token 'IDENTIFIER', 'RegExp', 0, 0
     @token 'CALL_START', '(', 0, 0
     tokens = []
-    for token in @interpolateString(body, regex: yes, offsetInChunk: 3)
+    for token in @interpolateString(body, regex: yes)
       [tag, value] = token
       if tag is 'TOKENS'
         tokens.push value...
@@ -491,21 +491,26 @@ exports.Lexer = class Lexer
   # If it encounters an interpolation, this method will recursively create a
   # new Lexer, tokenize the interpolated contents, and merge them into the
   # token stream.
+  #
+  #  - `str` is the start of the string contents (i.e. with the " or """
+  #    stripped off).
+  #  - `options.offsetInChunk` is the start of the interpolated string in the
+  #    current chunk, including the " or """, etc. If not provided, this is
+  #    assumed to be 0. `options.lexedLength` is the length of the
+  #    interpolated string, including both the start and end quotes. Both of
+  #    these values are ignored if `options.regex` is true.
+  #  - `options.strOffset` is the offset of `str`, relative to the start of
+  #    the current chunk.
   interpolateString: (str, options = {}) ->
-    {heredoc, regex, offsetInChunk} = options
-    # TODO: we pass in offsetInChunk, but we've already discarded the " or the
-    # """, or the /// that got us here. Those characters are not going to end
-    # up being part of any tokens.
-    originalOffsetInChunk = offsetInChunk
-    lexedLength = str.length
+    {heredoc, regex, offsetInChunk, strOffset, lexedLength} = options
+    offsetInChunk = offsetInChunk || 0
+    strOffset = strOffset || 0
+    lexedLength = lexedLength || str.length

     # Clip leading \n from heredoc
-    offsetInChunk = offsetInChunk || 0
     if heredoc and str.length > 0 and str[0] == '\n'
       str = str[1...]
-      offsetInChunk++
+      strOffset++

     # Parse the string.
     tokens = []
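
A minimal sketch of how the new defaults behave (assumed values, not part of
the commit), using a heregex-style call that supplies no offset options:

    # Single quotes keep the #{ literal; str.length is 6.
    @interpolateString 'a#{b}c', regex: yes
    # offsetInChunk -> 0     not supplied, defaults to 0
    # strOffset     -> 0     not supplied, defaults to 0
    # lexedLength   -> 6     not supplied, falls back to str.length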
@@ -519,33 +524,33 @@ exports.Lexer = class Lexer
              (expr = @balancedString str[i + 1..], '}')
         continue
       # NEOSTRING is a fake token. This will be converted to a string below.
-      tokens.push @makeToken('NEOSTRING', str[pi...i], offsetInChunk + pi) if pi < i
+      tokens.push @makeToken('NEOSTRING', str[pi...i], strOffset + pi) if pi < i
       inner = expr[1...-1]
       if inner.length
-        [line, column] = @getLineAndColumnFromChunk(offsetInChunk + i + 1)
+        [line, column] = @getLineAndColumnFromChunk(strOffset + i + 1)
         nested = new Lexer().tokenize inner, line: line, column: column, rewrite: off
         popped = nested.pop()
         popped = nested.shift() if nested[0]?[0] is 'TERMINATOR'
         if len = nested.length
           if len > 1
-            nested.unshift @makeToken '(', '(', offsetInChunk + i + 1, 0
-            nested.push @makeToken ')', ')', offsetInChunk + i + 1 + inner.length, 0
+            nested.unshift @makeToken '(', '(', strOffset + i + 1, 0
+            nested.push @makeToken ')', ')', strOffset + i + 1 + inner.length, 0
           # Push a fake 'TOKENS' token, which will get turned into real tokens below.
           tokens.push ['TOKENS', nested]
       i += expr.length
       pi = i + 1
-    tokens.push @makeToken('NEOSTRING', str[pi..], offsetInChunk + pi) if i > pi < str.length
+    tokens.push @makeToken('NEOSTRING', str[pi..], strOffset + pi) if i > pi < str.length

     # If regex, then return now and let the regex code deal with all these fake tokens
     return tokens if regex

     # If we didn't find any tokens, then just return an empty string.
-    return @token 'STRING', '""', originalOffsetInChunk, lexedLength unless tokens.length
+    return @token 'STRING', '""', offsetInChunk, lexedLength unless tokens.length

     # If the first token is not a string, add a fake empty string to the beginning.
-    tokens.unshift @makeToken('NEOSTRING', '', originalOffsetInChunk) unless tokens[0][0] is 'NEOSTRING'
+    tokens.unshift @makeToken('NEOSTRING', '', offsetInChunk) unless tokens[0][0] is 'NEOSTRING'

-    @token '(', '(', originalOffsetInChunk, 0 if interpolated = tokens.length > 1
+    @token '(', '(', offsetInChunk, 0 if interpolated = tokens.length > 1
     # Push all the tokens
     for token, i in tokens
       [tag, value] = token
@@ -569,7 +574,7 @@ exports.Lexer = class Lexer
         @tokens.push token
       else
         @error "Unexpected #{tag}"
-    @token ')', ')', originalOffsetInChunk + lexedLength, 0 if interpolated
+    @token ')', ')', offsetInChunk + lexedLength, 0 if interpolated
     tokens

   # Pairs up a closing token, ensuring that all listed pairs of tokens are
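
Net effect: when a string interpolates, the synthesized '(' is emitted at
offsetInChunk and the matching ')' at offsetInChunk + lexedLength, so the
wrapper tokens report the source range of the entire original literal, quotes
and all, while each NEOSTRING piece inside is positioned from strOffset.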