String interpolation AST (#5175)

* updated grammar

* empty chunks

* remove unused

* remove unused

* add comment
Julian Rosse 2019-03-24 22:00:44 -04:00, committed by Geoffrey Booth
parent 4549f9a4c5
commit eaeb77a527
14 changed files with 569 additions and 128 deletions
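
In short: interpolated strings now parse to a Babel-style TemplateLiteral AST node. A minimal sketch of the shape produced for '"a#{b}c"', mirroring the new tests in test/ast.coffee below (the expanded Identifier form of the tests' ID helper is an assumption):

# Sketch: the AST this change emits for '"a#{b}c"'.
expectedAst =
  type: 'TemplateLiteral'
  quote: '"'                              # CoffeeScript-specific extension
  expressions: [
    {type: 'Identifier', name: 'b'}       # assumed expansion of the ID 'b' helper
  ]
  quasis: [
    {type: 'TemplateElement', value: {raw: 'a'}, tail: no}
    {type: 'TemplateElement', value: {raw: 'c'}, tail: yes}
  ]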

lib/coffeescript/grammar.js

@@ -214,7 +214,8 @@
function() {
return new StringWithInterpolations(Block.wrap($2),
{
quote: $1.quote
quote: $1.quote,
startQuote: LOC(1)(new Literal($1.toString()))
});
})
],

lib/coffeescript/helpers.js

@@ -165,8 +165,12 @@
}
};
// Get a lookup hash for a token based on its location data.
// Multiple tokens might have the same location hash, but using exclusive
// location data distinguishes e.g. zero-length generated tokens from
// actual source tokens.
buildLocationHash = function(loc) {
return `${loc.first_line}x${loc.first_column}-${loc.last_line}x${loc.last_column}`;
return `${loc.range[0]}-${loc.range[1]}`;
};
// Build a dictionary of extra token properties organized by tokens' locations

lib/coffeescript/lexer.js

@@ -377,7 +377,11 @@
}
}
delimiter = quote.charAt(0);
this.mergeInterpolationTokens(tokens, {quote, indent}, (value) => {
this.mergeInterpolationTokens(tokens, {
quote,
indent,
endOffset: end
}, (value) => {
return this.validateUnicodeCodePointEscapes(value, {
delimiter: quote
});
@@ -450,8 +454,9 @@
// this comment to; and follow with a newline.
commentAttachments[0].newLine = true;
this.lineToken(this.chunk.slice(comment.length));
placeholderToken = this.makeToken('JS', '');
placeholderToken.generated = true;
placeholderToken = this.makeToken('JS', '', {
generated: true
});
placeholderToken.comments = commentAttachments;
this.tokens.push(placeholderToken);
this.newlineToken(0);
@@ -562,7 +567,8 @@
});
this.mergeInterpolationTokens(tokens, {
double: true,
heregex: {flags}
heregex: {flags},
endOffset: end
}, (str) => {
return this.validateUnicodeCodePointEscapes(str, {delimiter});
});
@@ -864,7 +870,9 @@
tokens,
index: end
} = this.matchWithInterpolations(INSIDE_CSX, '>', '</', CSX_INTERPOLATION));
this.mergeInterpolationTokens(tokens, {}, (value) => {
this.mergeInterpolationTokens(tokens, {
endOffset: end
}, (value) => {
return this.validateUnicodeCodePointEscapes(value, {
delimiter: '>'
});
@@ -1127,7 +1135,7 @@
// This method allows us to have strings within interpolations within strings,
// ad infinitum.
matchWithInterpolations(regex, delimiter, closingDelimiter = delimiter, interpolators = /^#\{/) {
var braceInterpolator, close, column, firstToken, index, interpolationOffset, interpolator, lastToken, line, match, nested, offset, offsetInChunk, open, ref, ref1, rest, str, strPart, tokens;
var braceInterpolator, close, column, index, interpolationOffset, interpolator, line, match, nested, offset, offsetInChunk, open, ref, ref1, rest, str, strPart, tokens;
tokens = [];
offsetInChunk = delimiter.length;
if (this.chunk.slice(0, offsetInChunk) !== delimiter) {
@@ -1172,6 +1180,8 @@
[open] = nested, [close] = slice.call(nested, -1);
open[0] = 'INTERPOLATION_START';
open[1] = '(';
open[2].first_column -= interpolationOffset;
open[2].range = [open[2].range[0] - interpolationOffset, open[2].range[1]];
close[0] = 'INTERPOLATION_END';
close[1] = ')';
close.origin = ['', 'end of interpolation', close[2]];
@@ -1206,21 +1216,6 @@
length: delimiter.length
});
}
[firstToken] = tokens, [lastToken] = slice.call(tokens, -1);
firstToken[2].first_column -= delimiter.length;
firstToken[2].range[0] -= delimiter.length;
lastToken[2].range[1] += closingDelimiter.length;
if (lastToken[1].substr(-1) === '\n') {
lastToken[2].last_line += 1;
lastToken[2].last_column = closingDelimiter.length - 1;
} else {
lastToken[2].last_column += closingDelimiter.length;
}
lastToken[2].last_column_exclusive += closingDelimiter.length;
if (lastToken[1].length === 0) {
lastToken[2].last_column -= 1;
lastToken[2].range[1] -= 1;
}
return {
tokens,
index: offsetInChunk + closingDelimiter.length
@@ -1232,11 +1227,11 @@
// of `'NEOSTRING'`s are converted using `fn` and turned into strings using
// `options` first.
mergeInterpolationTokens(tokens, options, fn) {
var $, converted, double, firstIndex, heregex, i, indent, j, k, lastToken, len, len1, locationToken, lparen, placeholderToken, quote, rparen, tag, token, tokensToPush, val, value;
({quote, indent, double, heregex} = options);
var $, converted, double, endOffset, firstIndex, heregex, i, indent, j, k, lastToken, len, len1, locationToken, lparen, placeholderToken, quote, ref, ref1, rparen, tag, token, tokensToPush, val, value;
({quote, indent, double, heregex, endOffset} = options);
if (tokens.length > 1) {
lparen = this.token('STRING_START', '(', {
length: 0,
length: (ref = quote != null ? quote.length : void 0) != null ? ref : 0,
data: {quote}
});
}
@@ -1289,6 +1284,20 @@
}
token[0] = 'STRING';
token[1] = '"' + converted + '"';
if (tokens.length === 1 && (quote != null)) {
token[2].first_column -= quote.length;
if (token[1].substr(-2, 1) === '\n') {
token[2].last_line += 1;
token[2].last_column = quote.length - 1;
} else {
token[2].last_column += quote.length;
if (token[1].length === 2) {
token[2].last_column -= 1;
}
}
token[2].last_column_exclusive += quote.length;
token[2].range = [token[2].range[0] - quote.length, token[2].range[1] + quote.length];
}
locationToken = token;
tokensToPush = [token];
}
@@ -1311,16 +1320,10 @@
}
];
lparen[2] = lparen.origin[2];
rparen = this.token('STRING_END', ')');
return rparen[2] = {
first_line: lastToken[2].last_line,
first_column: lastToken[2].last_column,
last_line: lastToken[2].last_line,
last_column: lastToken[2].last_column,
last_line_exclusive: lastToken[2].last_line_exclusive,
last_column_exclusive: lastToken[2].last_column_exclusive,
range: lastToken[2].range
};
return rparen = this.token('STRING_END', ')', {
offset: endOffset - (quote != null ? quote : '').length,
length: (ref1 = quote != null ? quote.length : void 0) != null ? ref1 : 0
});
}
}
@@ -1382,7 +1385,7 @@
// so if last_column == first_column, then we're looking at a character of length 1.
lastCharacter = length > 0 ? length - 1 : 0;
[locationData.last_line, locationData.last_column, endOffset] = this.getLineAndColumnFromChunk(offsetInChunk + lastCharacter);
[locationData.last_line_exclusive, locationData.last_column_exclusive] = this.getLineAndColumnFromChunk(offsetInChunk + lastCharacter + 1);
[locationData.last_line_exclusive, locationData.last_column_exclusive] = this.getLineAndColumnFromChunk(offsetInChunk + lastCharacter + (length > 0 ? 1 : 0));
locationData.range[1] = length > 0 ? endOffset + 1 : endOffset;
return locationData;
}
@@ -1392,13 +1395,17 @@
makeToken(tag, value, {
offset: offsetInChunk = 0,
length = value.length,
origin
origin,
generated
} = {}) {
var token;
token = [tag, value, this.makeLocationData({offsetInChunk, length})];
if (origin) {
token.origin = origin;
}
if (generated) {
token.generated = true;
}
return token;
}
@@ -1408,9 +1415,9 @@
// not specified, the length of `value` will be used.
// Returns the new token.
token(tag, value, {offset, length, origin, data} = {}) {
token(tag, value, {offset, length, origin, data, generated} = {}) {
var token;
token = this.makeToken(tag, value, {offset, length, origin});
token = this.makeToken(tag, value, {offset, length, origin, generated});
if (data) {
addTokenData(token, data);
}
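
A subtle fix above is in makeLocationData: the exclusive end position now advances past the last character only when the token has nonzero length, so zero-length generated tokens no longer claim the column after them. A quick inspection sketch, assuming a CoffeeScript build from this branch:

# The implicit CALL_START/CALL_END in `a b` are generated, zero-length
# tokens; their exclusive end should now equal their start position.
CoffeeScript = require 'coffeescript'
for [tag, value, loc] in CoffeeScript.tokens 'a b'
  console.log tag, value, loc.range, loc.last_column_exclusive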

lib/coffeescript/nodes.js

@@ -4,7 +4,7 @@
// nodes are created as the result of actions in the [grammar](grammar.html),
// but some are created by other nodes as a method of code generation. To convert
// the syntax tree into a string of JavaScript code, call `compile()` on the root.
var Access, Arr, Assign, AwaitReturn, Base, Block, BooleanLiteral, CSXAttribute, CSXAttributes, CSXElement, CSXExpressionContainer, CSXIdentifier, CSXTag, Call, Catch, Class, Code, CodeFragment, ComputedPropertyName, DefaultLiteral, Elision, ExecutableClassBody, Existence, Expansion, ExportAllDeclaration, ExportDeclaration, ExportDefaultDeclaration, ExportNamedDeclaration, ExportSpecifier, ExportSpecifierList, Extends, For, FuncDirectiveReturn, FuncGlyph, HEREGEX_OMIT, HereComment, HoistTarget, IdentifierLiteral, If, ImportClause, ImportDeclaration, ImportDefaultSpecifier, ImportNamespaceSpecifier, ImportSpecifier, ImportSpecifierList, In, Index, InfinityLiteral, Interpolation, JS_FORBIDDEN, LEADING_BLANK_LINE, LEVEL_ACCESS, LEVEL_COND, LEVEL_LIST, LEVEL_OP, LEVEL_PAREN, LEVEL_TOP, LineComment, Literal, MetaProperty, ModuleDeclaration, ModuleSpecifier, ModuleSpecifierList, NEGATE, NO, NaNLiteral, NullLiteral, NumberLiteral, Obj, ObjectProperty, Op, Param, Parens, PassthroughLiteral, PropertyName, Range, RegexLiteral, RegexWithInterpolations, Return, Root, SIMPLENUM, SIMPLE_STRING_OMIT, STRING_OMIT, Scope, Slice, Splat, StatementLiteral, StringLiteral, StringWithInterpolations, Super, SuperCall, Switch, SwitchCase, SwitchWhen, TAB, THIS, TRAILING_BLANK_LINE, TaggedTemplateCall, ThisLiteral, Throw, Try, UTILITIES, UndefinedLiteral, Value, While, YES, YieldReturn, addDataToNode, attachCommentsToNode, compact, del, ends, extend, flatten, fragmentsToText, greater, hasLineComments, indentInitial, isAstLocGreater, isFunction, isLiteralArguments, isLiteralThis, isLocationDataEndGreater, isLocationDataStartGreater, isNumber, isPlainObject, isUnassignable, jisonLocationDataToAstLocationData, lesser, locationDataToString, makeDelimitedLiteral, merge, mergeAstLocationData, mergeLocationData, moveComments, multident, replaceUnicodeCodePointEscapes, shouldCacheOrIsAssignable, some, starts, throwSyntaxError, unfoldSoak, unshiftAfterComments, utility,
var Access, Arr, Assign, AwaitReturn, Base, Block, BooleanLiteral, CSXAttribute, CSXAttributes, CSXElement, CSXExpressionContainer, CSXIdentifier, CSXTag, Call, Catch, Class, Code, CodeFragment, ComputedPropertyName, DefaultLiteral, Elision, ExecutableClassBody, Existence, Expansion, ExportAllDeclaration, ExportDeclaration, ExportDefaultDeclaration, ExportNamedDeclaration, ExportSpecifier, ExportSpecifierList, Extends, For, FuncDirectiveReturn, FuncGlyph, HEREGEX_OMIT, HereComment, HoistTarget, IdentifierLiteral, If, ImportClause, ImportDeclaration, ImportDefaultSpecifier, ImportNamespaceSpecifier, ImportSpecifier, ImportSpecifierList, In, Index, InfinityLiteral, Interpolation, JS_FORBIDDEN, LEADING_BLANK_LINE, LEVEL_ACCESS, LEVEL_COND, LEVEL_LIST, LEVEL_OP, LEVEL_PAREN, LEVEL_TOP, LineComment, Literal, MetaProperty, ModuleDeclaration, ModuleSpecifier, ModuleSpecifierList, NEGATE, NO, NaNLiteral, NullLiteral, NumberLiteral, Obj, ObjectProperty, Op, Param, Parens, PassthroughLiteral, PropertyName, Range, RegexLiteral, RegexWithInterpolations, Return, Root, SIMPLENUM, SIMPLE_STRING_OMIT, STRING_OMIT, Scope, Slice, Splat, StatementLiteral, StringLiteral, StringWithInterpolations, Super, SuperCall, Switch, SwitchCase, SwitchWhen, TAB, THIS, TRAILING_BLANK_LINE, TaggedTemplateCall, TemplateElement, ThisLiteral, Throw, Try, UTILITIES, UndefinedLiteral, Value, While, YES, YieldReturn, addDataToNode, attachCommentsToNode, compact, del, ends, extend, flatten, fragmentsToText, greater, hasLineComments, indentInitial, isAstLocGreater, isFunction, isLiteralArguments, isLiteralThis, isLocationDataEndGreater, isLocationDataStartGreater, isNumber, isPlainObject, isUnassignable, jisonLocationDataToAstLocationData, lesser, locationDataToString, makeDelimitedLiteral, merge, mergeAstLocationData, mergeLocationData, moveComments, multident, replaceUnicodeCodePointEscapes, shouldCacheOrIsAssignable, some, starts, throwSyntaxError, unfoldSoak, unshiftAfterComments, utility,
indexOf = [].indexOf,
splice = [].splice,
slice1 = [].slice;
@@ -6620,10 +6620,11 @@
//### StringWithInterpolations
exports.StringWithInterpolations = StringWithInterpolations = (function() {
class StringWithInterpolations extends Base {
constructor(body1, {quote} = {}) {
constructor(body1, {quote, startQuote} = {}) {
super();
this.body = body1;
this.quote = quote;
this.startQuote = startQuote;
}
// `unwrap` returns `this` to stop ancestor nodes reaching in to grab @body,
@@ -6637,13 +6638,8 @@
return this.body.shouldCache();
}
compileNode(o) {
var code, element, elements, expr, fragments, j, len1, salvagedComments, wrapped;
if (this.csxAttribute) {
wrapped = new Parens(new StringWithInterpolations(this.body));
wrapped.csxAttribute = true;
return wrapped.compileNode(o);
}
extractElements(o) {
var elements, expr, salvagedComments;
// Assumes that `expr` is `Block`
expr = this.body.unwrap();
elements = [];
@@ -6697,6 +6693,20 @@
}
return true;
});
return elements;
}
compileNode(o) {
var code, element, elements, fragments, j, len1, ref1, wrapped;
if (this.comments == null) {
this.comments = (ref1 = this.startQuote) != null ? ref1.comments : void 0;
}
if (this.csxAttribute) {
wrapped = new Parens(new StringWithInterpolations(this.body));
wrapped.csxAttribute = true;
return wrapped.compileNode(o);
}
elements = this.extractElements(o);
fragments = [];
if (!this.csx) {
fragments.push(this.makeCode('`'));
@@ -6722,8 +6732,8 @@
}
code = element.compileToFragments(o, LEVEL_PAREN);
if (!this.isNestedTag(element) || code.some(function(fragment) {
var ref1;
return (ref1 = fragment.comments) != null ? ref1.some(function(comment) {
var ref2;
return (ref2 = fragment.comments) != null ? ref2.some(function(comment) {
return comment.here === false;
}) : void 0;
})) {
@@ -6752,6 +6762,29 @@
return this.csx && call instanceof CSXElement;
}
astType() {
return 'TemplateLiteral';
}
astProperties(o) {
var element, elements, expressions, index, j, last, len1, quasis;
elements = this.extractElements(o);
[last] = slice1.call(elements, -1);
quasis = [];
expressions = [];
for (index = j = 0, len1 = elements.length; j < len1; index = ++j) {
element = elements[index];
if (element instanceof StringLiteral) {
quasis.push(new TemplateElement(element.originalValue, {
tail: element === last
}).withLocationDataFrom(element).ast(o));
} else {
expressions.push(element.unwrap().ast(o));
}
}
return {expressions, quasis, quote: this.quote};
}
};
StringWithInterpolations.prototype.children = ['body'];
@@ -6760,6 +6793,26 @@
}).call(this);
exports.TemplateElement = TemplateElement = class TemplateElement extends Base {
constructor(value1, {
tail: tail1
} = {}) {
super();
this.value = value1;
this.tail = tail1;
}
astProperties() {
return {
value: {
raw: this.value
},
tail: !!this.tail
};
}
};
exports.Interpolation = Interpolation = (function() {
class Interpolation extends Base {
constructor(expression1) {
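
The quasi/expression split in the astProperties above is positional: string chunks become quasis, with the final chunk flagged as the tail, and everything else lands in expressions (the lexer guarantees leading and trailing string chunks, even empty ones, so the tail is always a quasi). A standalone sketch of the same bookkeeping, with plain strings standing in for StringLiteral nodes:

splitTemplateParts = (elements) ->
  quasis = []
  expressions = []
  for element, index in elements
    if typeof element is 'string'
      quasis.push value: {raw: element}, tail: index is elements.length - 1
    else
      expressions.push element
  {quasis, expressions}

# ['a', bNode, 'c'] -> quasis for 'a' (tail: false) and 'c' (tail: true),
# expressions: [bNode]
console.log splitTemplateParts ['a', {name: 'b'}, 'c']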

lib/coffeescript/parser.js

@@ -169,7 +169,8 @@ break;
case 43:
this.$ = yy.addDataToNode(yy, _$[$0-2], $$[$0-2], _$[$0], $$[$0], true)(new yy.StringWithInterpolations(yy.Block.wrap($$[$0-1]),
{
quote: $$[$0-2].quote
quote: $$[$0-2].quote,
startQuote: yy.addDataToNode(yy, _$[$0-2], $$[$0-2], null, null, true)(new yy.Literal($$[$0-2].toString()))
}));
break;
case 44: case 107: case 154: case 173: case 195: case 228: case 242: case 246: case 297: case 343:

lib/coffeescript/rewriter.js

@@ -766,8 +766,8 @@
// location corresponding to the last “real” token under the node.
fixOutdentLocationData() {
return this.scanTokens(function(token, i, tokens) {
var prevLocationData;
if (!(token[0] === 'OUTDENT' || (token.generated && token[0] === 'CALL_END') || (token.generated && token[0] === '}'))) {
var prevLocationData, ref;
if (!(token[0] === 'OUTDENT' || (token.generated && token[0] === 'CALL_END' && !((ref = token.data) != null ? ref.closingTagNameToken : void 0)) || (token.generated && token[0] === '}'))) {
return 1;
}
prevLocationData = tokens[i - 1][2];

src/grammar.coffee

@@ -183,7 +183,7 @@ grammar =
double: $1.double
heregex: $1.heregex
)
o 'STRING_START Interpolations STRING_END', -> new StringWithInterpolations Block.wrap($2), quote: $1.quote
o 'STRING_START Interpolations STRING_END', -> new StringWithInterpolations Block.wrap($2), quote: $1.quote, startQuote: LOC(1)(new Literal $1.toString())
]
Interpolations: [
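
In the production above, $1 is the STRING_START token, so LOC(1)(new Literal $1.toString()) wraps the opening quote's text in a Literal stamped with that token's location. Schematically (a sketch of the action's inputs, not new grammar):

# For the source '"a#{b}c"':
#   $1 = the STRING_START token value (carries .quote, here '"')
#   $2 = the Interpolations body
#   $3 = the STRING_END token value
# startQuote records where the opening quote sat, so the node can later
# adopt comments attached to it (see the compileNode change in nodes.coffee).
new StringWithInterpolations Block.wrap($2),
  quote: $1.quote
  startQuote: LOC(1)(new Literal $1.toString())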

src/helpers.coffee

@@ -114,8 +114,12 @@ buildLocationData = (first, last) ->
last.range[1]
]
# Get a lookup hash for a token based on its location data.
# Multiple tokens might have the same location hash, but using exclusive
# location data distinguishes e.g. zero-length generated tokens from
# actual source tokens.
buildLocationHash = (loc) ->
"#{loc.first_line}x#{loc.first_column}-#{loc.last_line}x#{loc.last_column}"
"#{loc.range[0]}-#{loc.range[1]}"
# Build a dictionary of extra token properties organized by tokens' locations
# used as lookup hashes.
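
Why hash by range: inclusive line/column data cannot distinguish a zero-length generated token from a real one-character token starting at the same spot, while exclusive ranges can. A self-contained demonstration, with the two hash functions copied from the old and new code:

oldHash = (loc) -> "#{loc.first_line}x#{loc.first_column}-#{loc.last_line}x#{loc.last_column}"
newHash = (loc) -> "#{loc.range[0]}-#{loc.range[1]}"

# Both tokens start at line 0, column 5; only the exclusive range differs.
generated = first_line: 0, first_column: 5, last_line: 0, last_column: 5, range: [5, 5]
real      = first_line: 0, first_column: 5, last_line: 0, last_column: 5, range: [5, 6]

console.log oldHash(generated) is oldHash(real) # true  -- collision
console.log newHash(generated) is newHash(real) # false -- kept distinct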

src/lexer.coffee

@@ -301,7 +301,7 @@ exports.Lexer = class Lexer
indent = attempt if indent is null or 0 < attempt.length < indent.length
delimiter = quote.charAt(0)
@mergeInterpolationTokens tokens, {quote, indent}, (value) =>
@mergeInterpolationTokens tokens, {quote, indent, endOffset: end}, (value) =>
@validateUnicodeCodePointEscapes value, delimiter: quote
if @atCSXTag()
@@ -355,8 +355,7 @@ exports.Lexer = class Lexer
# this comment to; and follow with a newline.
commentAttachments[0].newLine = yes
@lineToken @chunk[comment.length..] # Set the indent.
placeholderToken = @makeToken 'JS', ''
placeholderToken.generated = yes
placeholderToken = @makeToken 'JS', '', generated: yes
placeholderToken.comments = commentAttachments
@tokens.push placeholderToken
@newlineToken 0
@@ -417,7 +416,7 @@ exports.Lexer = class Lexer
@token 'REGEX_START', '(', {length: 0, origin}
@token 'IDENTIFIER', 'RegExp', length: 0
@token 'CALL_START', '(', length: 0
@mergeInterpolationTokens tokens, {double: yes, heregex: {flags}}, (str) =>
@mergeInterpolationTokens tokens, {double: yes, heregex: {flags}, endOffset: end}, (str) =>
@validateUnicodeCodePointEscapes str, {delimiter}
if flags
@token ',', ',', offset: index - 1, length: 0
@@ -613,7 +612,7 @@ exports.Lexer = class Lexer
@token ',', 'JSX_COMMA', generated: yes
{tokens, index: end} =
@matchWithInterpolations INSIDE_CSX, '>', '</', CSX_INTERPOLATION
@mergeInterpolationTokens tokens, {}, (value) =>
@mergeInterpolationTokens tokens, {endOffset: end}, (value) =>
@validateUnicodeCodePointEscapes value, delimiter: '>'
match = CSX_IDENTIFIER.exec(@chunk[end...]) or CSX_FRAGMENT_IDENTIFIER.exec(@chunk[end...])
if not match or match[1] isnt "#{csxTag.name}#{(".#{property}" for property in csxTag.properties).join ''}"
@@ -821,6 +820,11 @@ exports.Lexer = class Lexer
[open, ..., close] = nested
open[0] = 'INTERPOLATION_START'
open[1] = '('
open[2].first_column -= interpolationOffset
open[2].range = [
open[2].range[0] - interpolationOffset
open[2].range[1]
]
close[0] = 'INTERPOLATION_END'
close[1] = ')'
close.origin = ['', 'end of interpolation', close[2]]
@@ -845,20 +849,6 @@ exports.Lexer = class Lexer
unless str[...closingDelimiter.length] is closingDelimiter
@error "missing #{closingDelimiter}", length: delimiter.length
[firstToken, ..., lastToken] = tokens
firstToken[2].first_column -= delimiter.length
firstToken[2].range[0] -= delimiter.length
lastToken[2].range[1] += closingDelimiter.length
if lastToken[1].substr(-1) is '\n'
lastToken[2].last_line += 1
lastToken[2].last_column = closingDelimiter.length - 1
else
lastToken[2].last_column += closingDelimiter.length
lastToken[2].last_column_exclusive += closingDelimiter.length
if lastToken[1].length is 0
lastToken[2].last_column -= 1
lastToken[2].range[1] -= 1
{tokens, index: offsetInChunk + closingDelimiter.length}
# Merge the array `tokens` of the fake token types `'TOKENS'` and `'NEOSTRING'`
@@ -866,10 +856,10 @@ exports.Lexer = class Lexer
# of `'NEOSTRING'`s are converted using `fn` and turned into strings using
# `options` first.
mergeInterpolationTokens: (tokens, options, fn) ->
{quote, indent, double, heregex} = options
{quote, indent, double, heregex, endOffset} = options
if tokens.length > 1
lparen = @token 'STRING_START', '(', length: 0, data: {quote}
lparen = @token 'STRING_START', '(', length: quote?.length ? 0, data: {quote}
firstIndex = @tokens.length
$ = tokens.length - 1
@@ -900,6 +890,19 @@ exports.Lexer = class Lexer
addTokenData token, {heregex} if heregex
token[0] = 'STRING'
token[1] = '"' + converted + '"'
if tokens.length is 1 and quote?
token[2].first_column -= quote.length
if token[1].substr(-2, 1) is '\n'
token[2].last_line += 1
token[2].last_column = quote.length - 1
else
token[2].last_column += quote.length
token[2].last_column -= 1 if token[1].length is 2
token[2].last_column_exclusive += quote.length
token[2].range = [
token[2].range[0] - quote.length
token[2].range[1] + quote.length
]
locationToken = token
tokensToPush = [token]
@tokens.push tokensToPush...
@@ -919,15 +922,7 @@ exports.Lexer = class Lexer
]
]
lparen[2] = lparen.origin[2]
rparen = @token 'STRING_END', ')'
rparen[2] =
first_line: lastToken[2].last_line
first_column: lastToken[2].last_column
last_line: lastToken[2].last_line
last_column: lastToken[2].last_column
last_line_exclusive: lastToken[2].last_line_exclusive
last_column_exclusive: lastToken[2].last_column_exclusive
range: lastToken[2].range
rparen = @token 'STRING_END', ')', offset: endOffset - (quote ? '').length, length: quote?.length ? 0
# Pairs up a closing token, ensuring that all listed pairs of tokens are
# correctly balanced throughout the course of the token stream.
@@ -982,16 +977,17 @@ exports.Lexer = class Lexer
[locationData.last_line, locationData.last_column, endOffset] =
@getLineAndColumnFromChunk offsetInChunk + lastCharacter
[locationData.last_line_exclusive, locationData.last_column_exclusive] =
@getLineAndColumnFromChunk offsetInChunk + lastCharacter + 1
@getLineAndColumnFromChunk offsetInChunk + lastCharacter + (if length > 0 then 1 else 0)
locationData.range[1] = if length > 0 then endOffset + 1 else endOffset
locationData
# Same as `token`, except this just returns the token without adding it
# to the results.
makeToken: (tag, value, {offset: offsetInChunk = 0, length = value.length, origin} = {}) ->
makeToken: (tag, value, {offset: offsetInChunk = 0, length = value.length, origin, generated} = {}) ->
token = [tag, value, @makeLocationData {offsetInChunk, length}]
token.origin = origin if origin
token.generated = yes if generated
token
# Add a token to the results.
@@ -1000,8 +996,8 @@ exports.Lexer = class Lexer
# not specified, the length of `value` will be used.
#
# Returns the new token.
token: (tag, value, {offset, length, origin, data} = {}) ->
token = @makeToken tag, value, {offset, length, origin}
token: (tag, value, {offset, length, origin, data, generated} = {}) ->
token = @makeToken tag, value, {offset, length, origin, generated}
addTokenData token, data if data
@tokens.push token
token
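
The net effect on the token stream (compare the updated expectations in test/location.coffee below): STRING_START and STRING_END now span the actual quote characters instead of being zero-width, and the inner STRING tokens begin inside the quotes. A quick inspection sketch, assuming a build from this branch:

CoffeeScript = require 'coffeescript'
for [tag, value, loc] in CoffeeScript.tokens '"a#{b}c"'
  console.log tag, JSON.stringify(value), loc.range
# STRING_START should now cover the opening quote (roughly range [0, 1]),
# and STRING_END the closing quote at the end of the string.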

src/nodes.coffee

@@ -4426,7 +4426,7 @@ exports.Parens = class Parens extends Base
#### StringWithInterpolations
exports.StringWithInterpolations = class StringWithInterpolations extends Base
constructor: (@body, {@quote} = {}) ->
constructor: (@body, {@quote, @startQuote} = {}) ->
super()
children: ['body']
@@ -4438,12 +4438,7 @@ exports.StringWithInterpolations = class StringWithInterpolations extends Base
shouldCache: -> @body.shouldCache()
compileNode: (o) ->
if @csxAttribute
wrapped = new Parens new StringWithInterpolations @body
wrapped.csxAttribute = yes
return wrapped.compileNode o
extractElements: (o) ->
# Assumes that `expr` is `Block`
expr = @body.unwrap()
@@ -4483,6 +4478,18 @@ exports.StringWithInterpolations = class StringWithInterpolations extends Base
delete node.comments
return yes
elements
compileNode: (o) ->
@comments ?= @startQuote?.comments
if @csxAttribute
wrapped = new Parens new StringWithInterpolations @body
wrapped.csxAttribute = yes
return wrapped.compileNode o
elements = @extractElements o
fragments = []
fragments.push @makeCode '`' unless @csx
for element in elements
@@ -4518,6 +4525,36 @@ exports.StringWithInterpolations = class StringWithInterpolations extends Base
call = element.unwrapAll?()
@csx and call instanceof CSXElement
astType: -> 'TemplateLiteral'
astProperties: (o) ->
elements = @extractElements o
[..., last] = elements
quasis = []
expressions = []
for element, index in elements
if element instanceof StringLiteral
quasis.push new TemplateElement(
element.originalValue
tail: element is last
).withLocationDataFrom(element).ast o
else
expressions.push element.unwrap().ast o
{expressions, quasis, @quote}
exports.TemplateElement = class TemplateElement extends Base
constructor: (@value, {@tail} = {}) ->
super()
astProperties: ->
return
value:
raw: @value
tail: !!@tail
exports.Interpolation = class Interpolation extends Base
constructor: (@expression) ->
super()
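
TemplateElement is deliberately thin: one raw text chunk plus a tail flag, mapped directly onto Babel's TemplateElement shape by astProperties. A hypothetical REPL sketch (the require path is assumed):

{TemplateElement} = require './nodes'

element = new TemplateElement 'a', tail: no
console.log element.astProperties()
# => { value: { raw: 'a' }, tail: false }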

src/rewriter.coffee

@@ -539,7 +539,7 @@ exports.Rewriter = class Rewriter
fixOutdentLocationData: ->
@scanTokens (token, i, tokens) ->
return 1 unless token[0] is 'OUTDENT' or
(token.generated and token[0] is 'CALL_END') or
(token.generated and token[0] is 'CALL_END' and not token.data?.closingTagNameToken) or
(token.generated and token[0] is '}')
prevLocationData = tokens[i - 1][2]
token[2] =
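
The rewriter now skips one more case: a generated CALL_END carrying data.closingTagNameToken (which this branch appears to set for CSX closing tags) already has meaningful location data and should keep it. The predicate, extracted here for readability (a sketch, not the rewriter's code):

needsOutdentLocationFix = (token) ->
  token[0] is 'OUTDENT' or
    (token.generated and token[0] is 'CALL_END' and not token.data?.closingTagNameToken) or
    (token.generated and token[0] is '}')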

test/ast.coffee

@@ -2215,23 +2215,108 @@ test "AST as expected for Parens node", ->
type: 'NumericLiteral'
value: 1
# test "AST as expected for StringWithInterpolations node", ->
# testExpression '"#{o}/"',
# type: 'StringWithInterpolations'
# quote: '"'
# body:
# type: 'Block'
# expressions: [
# originalValue: ''
# ,
# type: 'Interpolation'
# expression:
# type: 'Value'
# base:
# value: 'o'
# ,
# originalValue: '/'
# ]
test "AST as expected for StringWithInterpolations node", ->
testExpression '"a#{b}c"',
type: 'TemplateLiteral'
expressions: [
ID 'b'
]
quasis: [
type: 'TemplateElement'
value:
raw: 'a'
tail: no
,
type: 'TemplateElement'
value:
raw: 'c'
tail: yes
]
quote: '"'
testExpression '"""a#{b}c"""',
type: 'TemplateLiteral'
expressions: [
ID 'b'
]
quasis: [
type: 'TemplateElement'
value:
raw: 'a'
tail: no
,
type: 'TemplateElement'
value:
raw: 'c'
tail: yes
]
quote: '"""'
testExpression '"#{b}"',
type: 'TemplateLiteral'
expressions: [
ID 'b'
]
quasis: [
type: 'TemplateElement'
value:
raw: ''
tail: no
,
type: 'TemplateElement'
value:
raw: ''
tail: yes
]
quote: '"'
testExpression '''
" a
#{b}
c
"
''',
type: 'TemplateLiteral'
expressions: [
ID 'b'
]
quasis: [
type: 'TemplateElement'
value:
raw: ' a\n '
tail: no
,
type: 'TemplateElement'
value:
raw: '\n c\n'
tail: yes
]
quote: '"'
testExpression '''
"""
a
b#{
c
}d
"""
''',
type: 'TemplateLiteral'
expressions: [
ID 'c'
]
quasis: [
type: 'TemplateElement'
value:
raw: '\n a\n b'
tail: no
,
type: 'TemplateElement'
value:
raw: 'd\n'
tail: yes
]
quote: '"""'
test "AST as expected for For node", ->
testStatement 'for x, i in arr when x? then return',

test/ast-location-data.coffee

@@ -5376,3 +5376,256 @@ test "AST location data as expected for For node", ->
end:
line: 2
column: 3
test "AST location data as expected for StringWithInterpolations node", ->
testAstLocationData '"a#{b}c"',
type: 'TemplateLiteral'
expressions: [
start: 4
end: 5
range: [4, 5]
loc:
start:
line: 1
column: 4
end:
line: 1
column: 5
]
quasis: [
start: 1
end: 2
range: [1, 2]
loc:
start:
line: 1
column: 1
end:
line: 1
column: 2
,
start: 6
end: 7
range: [6, 7]
loc:
start:
line: 1
column: 6
end:
line: 1
column: 7
]
start: 0
end: 8
range: [0, 8]
loc:
start:
line: 1
column: 0
end:
line: 1
column: 8
testAstLocationData '"""a#{b}c"""',
type: 'TemplateLiteral'
expressions: [
start: 6
end: 7
range: [6, 7]
loc:
start:
line: 1
column: 6
end:
line: 1
column: 7
]
quasis: [
start: 3
end: 4
range: [3, 4]
loc:
start:
line: 1
column: 3
end:
line: 1
column: 4
,
start: 8
end: 9
range: [8, 9]
loc:
start:
line: 1
column: 8
end:
line: 1
column: 9
]
start: 0
end: 12
range: [0, 12]
loc:
start:
line: 1
column: 0
end:
line: 1
column: 12
testAstLocationData '"#{b}"',
type: 'TemplateLiteral'
expressions: [
start: 3
end: 4
range: [3, 4]
loc:
start:
line: 1
column: 3
end:
line: 1
column: 4
]
quasis: [
start: 1
end: 1
range: [1, 1]
loc:
start:
line: 1
column: 1
end:
line: 1
column: 1
,
start: 5
end: 5
range: [5, 5]
loc:
start:
line: 1
column: 5
end:
line: 1
column: 5
]
start: 0
end: 6
range: [0, 6]
loc:
start:
line: 1
column: 0
end:
line: 1
column: 6
testAstLocationData '''
" a
#{b}
c
"
''',
type: 'TemplateLiteral'
expressions: [
start: 8
end: 9
range: [8, 9]
loc:
start:
line: 2
column: 4
end:
line: 2
column: 5
]
quasis: [
start: 1
end: 6
range: [1, 6]
loc:
start:
line: 1
column: 1
end:
line: 2
column: 2
,
start: 10
end: 15
range: [10, 15]
loc:
start:
line: 2
column: 6
end:
line: 4
column: 0
]
start: 0
end: 16
range: [0, 16]
loc:
start:
line: 1
column: 0
end:
line: 4
column: 1
testAstLocationData '''
"""
a
b#{
c
}d
"""
''',
type: 'TemplateLiteral'
expressions: [
start: 20
end: 21
range: [20, 21]
loc:
start:
line: 4
column: 4
end:
line: 4
column: 5
]
quasis: [
start: 3
end: 13
range: [3, 13]
loc:
start:
line: 1
column: 3
end:
line: 3
column: 5
,
start: 25
end: 27
range: [25, 27]
loc:
start:
line: 5
column: 3
end:
line: 6
column: 0
]
start: 0
end: 30
range: [0, 30]
loc:
start:
line: 1
column: 0
end:
line: 6
column: 3

test/location.coffee

@@ -72,7 +72,7 @@ test 'Verify locations in string interpolation (in "string")', ->
[a, b, c] = getMatchingTokens '"a#{b}c"', '"a"', 'b', '"c"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 1
eq a[2].last_line, 0
eq a[2].last_column, 1
@@ -84,7 +84,7 @@ test 'Verify locations in string interpolation (in "string")', ->
eq c[2].first_line, 0
eq c[2].first_column, 6
eq c[2].last_line, 0
eq c[2].last_column, 7
eq c[2].last_column, 6
test 'Verify locations in string interpolation (in "string", multiple interpolation)', ->
[a, b, c] = getMatchingTokens '"#{a}b#{c}"', 'a', '"b"', 'c'
@@ -180,7 +180,7 @@ test 'Verify locations in string interpolation (in """string""", line breaks)',
[a, b, c] = getMatchingTokens '"""a\n#{b}\nc"""', '"a\n"', 'b', '"\nc"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 3
eq a[2].last_line, 0
eq a[2].last_column, 4
@@ -192,7 +192,7 @@ test 'Verify locations in string interpolation (in """string""", line breaks)',
eq c[2].first_line, 1
eq c[2].first_column, 4
eq c[2].last_line, 2
eq c[2].last_column, 3
eq c[2].last_column, 0
test 'Verify locations in string interpolation (in """string""", starting with a line break)', ->
[b, c] = getMatchingTokens '"""\n#{b}\nc"""', 'b', '"\nc"'
@@ -205,13 +205,13 @@ test 'Verify locations in string interpolation (in """string""", starting with a
eq c[2].first_line, 1
eq c[2].first_column, 4
eq c[2].last_line, 2
eq c[2].last_column, 3
eq c[2].last_column, 0
test 'Verify locations in string interpolation (in """string""", starting with line breaks)', ->
[a, b, c] = getMatchingTokens '"""\n\n#{b}\nc"""', '"\n\n"', 'b', '"\nc"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 3
eq a[2].last_line, 1
eq a[2].last_column, 0
@@ -223,7 +223,7 @@ test 'Verify locations in string interpolation (in """string""", starting with l
eq c[2].first_line, 2
eq c[2].first_column, 4
eq c[2].last_line, 3
eq c[2].last_column, 3
eq c[2].last_column, 0
test 'Verify locations in string interpolation (in """string""", multiple interpolation)', ->
[a, b, c] = getMatchingTokens '"""#{a}\nb\n#{c}"""', 'a', '"\nb\n"', 'c'
@@ -301,7 +301,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
[a, b, c] = getMatchingTokens '///a#{b}c///', '"a"', 'b', '"c"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 3
eq a[2].last_line, 0
eq a[2].last_column, 3
@@ -313,7 +313,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
eq c[2].first_line, 0
eq c[2].first_column, 8
eq c[2].last_line, 0
eq c[2].last_column, 11
eq c[2].last_column, 8
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks)', ->
[a, b, c] = getMatchingTokens '///#{a}\nb\n#{c}///', 'a', '"\nb\n"', 'c'
@@ -355,7 +355,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
[a, b, c] = getMatchingTokens '///a\n\n\n#{b}\n\n\nc///', '"a\n\n\n"', 'b', '"\n\n\nc"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 3
eq a[2].last_line, 2
eq a[2].last_column, 0
@@ -367,7 +367,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
eq c[2].first_line, 3
eq c[2].first_column, 4
eq c[2].last_line, 6
eq c[2].last_column, 3
eq c[2].last_column, 0
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and starting with linebreak)', ->
[a, b, c] = getMatchingTokens '///\n#{a}\nb\n#{c}///', 'a', '"\nb\n"', 'c'
@@ -409,7 +409,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
[a, b, c] = getMatchingTokens '///\n\n\na\n\n\n#{b}\n\n\nc///', '"\n\n\na\n\n\n"', 'b', '"\n\n\nc"'
eq a[2].first_line, 0
eq a[2].first_column, 0
eq a[2].first_column, 3
eq a[2].last_line, 5
eq a[2].last_column, 0
@@ -421,7 +421,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
eq c[2].first_line, 6
eq c[2].first_column, 4
eq c[2].last_line, 9
eq c[2].last_column, 3
eq c[2].last_column, 0
test "#3822: Simple string/regex start/end should include delimiters", ->
[stringToken] = CoffeeScript.tokens "'string'"