1691 lines
63 KiB
JavaScript
1691 lines
63 KiB
JavaScript
// Generated by CoffeeScript 2.0.2
|
||
(function() {
|
||
// The CoffeeScript Lexer. Uses a series of token-matching regexes to attempt
|
||
// matches against the beginning of the source code. When a match is found,
|
||
// a token is produced, we consume the match, and start again. Tokens are in the
|
||
// form:
|
||
|
||
// [tag, value, locationData]
|
||
|
||
// where locationData is {first_line, first_column, last_line, last_column}, which is a
|
||
// format that can be fed directly into [Jison](https://github.com/zaach/jison). These
|
||
// are read by jison in the `parser.lexer` function defined in coffeescript.coffee.
|
||
var BOM, BOOL, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_ALIAS_MAP, COFFEE_KEYWORDS, COMMENT, COMPARABLE_LEFT_SIDE, COMPARE, COMPOUND_ASSIGN, CSX_ATTRIBUTE, CSX_IDENTIFIER, CSX_INTERPOLATION, HERECOMMENT_ILLEGAL, HEREDOC_DOUBLE, HEREDOC_INDENT, HEREDOC_SINGLE, HEREGEX, HEREGEX_OMIT, HERE_JSTOKEN, IDENTIFIER, INDENTABLE_CLOSERS, INDEXABLE, INSIDE_CSX, INVERSES, JSTOKEN, JS_KEYWORDS, LEADING_BLANK_LINE, LINE_BREAK, LINE_CONTINUER, Lexer, MATH, MULTI_DENT, NOT_REGEX, NUMBER, OPERATOR, POSSIBLY_DIVISION, REGEX, REGEX_FLAGS, REGEX_ILLEGAL, REGEX_INVALID_ESCAPE, RELATION, RESERVED, Rewriter, SHIFT, SIMPLE_STRING_OMIT, STRICT_PROSCRIBED, STRING_DOUBLE, STRING_INVALID_ESCAPE, STRING_OMIT, STRING_SINGLE, STRING_START, TRAILING_BLANK_LINE, TRAILING_SPACES, UNARY, UNARY_MATH, UNFINISHED, UNICODE_CODE_POINT_ESCAPE, VALID_FLAGS, WHITESPACE, attachCommentsToNode, compact, count, invertLiterate, isForFrom, isUnassignable, key, locationDataToString, merge, repeat, starts, throwSyntaxError,
|
||
indexOf = [].indexOf;
|
||
|
||
({Rewriter, INVERSES} = require('./rewriter'));
|
||
|
||
// Import the helpers we need.
|
||
({count, starts, compact, repeat, invertLiterate, merge, attachCommentsToNode, locationDataToString, throwSyntaxError} = require('./helpers'));
|
||
|
||
// The Lexer Class
|
||
// ---------------
|
||
|
||
// The Lexer class reads a stream of CoffeeScript and divvies it up into tagged
|
||
// tokens. Some potential ambiguity in the grammar has been avoided by
|
||
// pushing some extra smarts into the Lexer.
|
||
exports.Lexer = Lexer = class Lexer {
|
||
// **tokenize** is the Lexer's main method. Scan by attempting to match tokens
|
||
// one at a time, using a regular expression anchored at the start of the
|
||
// remaining code, or a custom recursive token-matching method
|
||
// (for interpolations). When the next token has been recorded, we move forward
|
||
// within the code past the token, and begin again.
|
||
|
||
// Each tokenizing method is responsible for returning the number of characters
|
||
// it has consumed.
|
||
|
||
// Before returning the token stream, run it through the [Rewriter](rewriter.html).
|
||
// **tokenize** is the Lexer's main entry point. It resets all lexer state,
// cleans the source, then repeatedly tries each token matcher against the
// start of the remaining code, consuming whatever matched and advancing.
// Params: `code` — source string; `opts` — `{literate, line, column,
// untilBalanced, rewrite}`. Returns the (normally rewritten) token stream,
// or `{tokens, index}` when `opts.untilBalanced` is set.
tokenize(code, opts = {}) {
  var consumed, end, i, ref;
  this.literate = opts.literate; // Are we lexing literate CoffeeScript?
  this.indent = 0; // The current indentation level.
  this.baseIndent = 0; // The overall minimum indentation level.
  this.indebt = 0; // The over-indentation at the current level.
  this.outdebt = 0; // The under-outdentation at the current level.
  this.indents = []; // The stack of all current indentation levels.
  this.indentLiteral = ''; // The indentation.
  this.ends = []; // The stack for pairing up tokens.
  this.tokens = []; // Stream of parsed tokens in the form `['TYPE', value, location data]`.
  this.seenFor = false; // Used to recognize `FORIN`, `FOROF` and `FORFROM` tokens.
  this.seenImport = false; // Used to recognize `IMPORT FROM? AS?` tokens.
  this.seenExport = false; // Used to recognize `EXPORT FROM? AS?` tokens.
  this.importSpecifierList = false; // Used to identify when in an `IMPORT {...} FROM? ...`.
  this.exportSpecifierList = false; // Used to identify when in an `EXPORT {...} FROM? ...`.
  this.csxDepth = 0; // Used to optimize CSX checks, how deep in CSX we are.
  this.csxObjAttribute = {}; // Used to detect if CSX attributes is wrapped in {} (<div {props...} />).
  this.chunkLine = opts.line || 0; // The start line for the current @chunk.
  this.chunkColumn = opts.column || 0; // The start column of the current @chunk.
  code = this.clean(code); // The stripped, cleaned original source code.
  // At every position, run through this list of attempted matches,
  // short-circuiting if any of them succeed. Their order determines precedence:
  // `@literalToken` is the fallback catch-all.
  i = 0;
  while (this.chunk = code.slice(i)) {
    consumed = this.identifierToken() || this.commentToken() || this.whitespaceToken() || this.lineToken() || this.stringToken() || this.numberToken() || this.csxToken() || this.regexToken() || this.jsToken() || this.literalToken();
    // Update position.
    [this.chunkLine, this.chunkColumn] = this.getLineAndColumnFromChunk(consumed);
    i += consumed;
    // `untilBalanced` is used when lexing a nested fragment (e.g. an
    // interpolation): stop as soon as every opened pair has been closed
    // and report how much of the input was consumed.
    if (opts.untilBalanced && this.ends.length === 0) {
      return {
        tokens: this.tokens,
        index: i
      };
    }
  }
  this.closeIndentation();
  // Anything still on the pairing stack is an unclosed construct.
  if (end = this.ends.pop()) {
    this.error(`missing ${end.tag}`, ((ref = end.origin) != null ? ref : end)[2]);
  }
  if (opts.rewrite === false) {
    return this.tokens;
  }
  return (new Rewriter).rewrite(this.tokens);
}
|
||
|
||
// Preprocess the code to remove leading and trailing whitespace, carriage
|
||
// returns, etc. If we’re lexing literate CoffeeScript, strip external Markdown
|
||
// by removing all lines that aren’t indented by at least four spaces or a tab.
|
||
clean(code) {
|
||
if (code.charCodeAt(0) === BOM) {
|
||
code = code.slice(1);
|
||
}
|
||
code = code.replace(/\r/g, '').replace(TRAILING_SPACES, '');
|
||
if (WHITESPACE.test(code)) {
|
||
code = `\n${code}`;
|
||
this.chunkLine--;
|
||
}
|
||
if (this.literate) {
|
||
code = invertLiterate(code);
|
||
}
|
||
return code;
|
||
}
|
||
|
||
// Tokenizers
|
||
// ----------
|
||
|
||
// Matches identifying literals: variables, keywords, method names, etc.
|
||
// Check to ensure that JavaScript reserved words aren’t being used as
|
||
// identifiers. Because CoffeeScript reserves a handful of keywords that are
|
||
// allowed in JavaScript, we’re careful not to tag them as keywords when
|
||
// referenced as property names here, so you can still do `jQuery.is()` even
|
||
// though `is` means `===` otherwise.
|
||
// Matches identifying literals: variables, keywords, method names, etc.,
// and tags them according to the surrounding context (property access,
// import/export clauses, `for` loops, CSX attributes, CoffeeScript
// aliases). Returns the number of characters consumed, or 0 on no match.
identifierToken() {
  var alias, colon, colonOffset, colonToken, id, idLength, inCSXTag, input, match, poppedToken, prev, prevprev, ref, ref1, ref2, ref3, ref4, ref5, ref6, ref7, ref8, regExSuper, regex, sup, tag, tagToken;
  inCSXTag = this.atCSXTag();
  // Inside a CSX tag, attribute names follow different rules (e.g. `a-b`).
  regex = inCSXTag ? CSX_ATTRIBUTE : IDENTIFIER;
  if (!(match = regex.exec(this.chunk))) {
    return 0;
  }
  [input, id, colon] = match;
  // Preserve length of id for location data
  idLength = id.length;
  poppedToken = void 0;
  // Context-sensitive keywords: `own` after `for`, `from` after `yield`.
  if (id === 'own' && this.tag() === 'FOR') {
    this.token('OWN', id);
    return id.length;
  }
  if (id === 'from' && this.tag() === 'YIELD') {
    this.token('FROM', id);
    return id.length;
  }
  // `as` inside an import clause: retag the preceding `*` or keyword so
  // `import * as x` and `import {default as x}` parse.
  if (id === 'as' && this.seenImport) {
    if (this.value() === '*') {
      this.tokens[this.tokens.length - 1][0] = 'IMPORT_ALL';
    } else if (ref = this.value(true), indexOf.call(COFFEE_KEYWORDS, ref) >= 0) {
      prev = this.prev();
      [prev[0], prev[1]] = ['IDENTIFIER', this.value(true)];
    }
    if ((ref1 = this.tag()) === 'DEFAULT' || ref1 === 'IMPORT_ALL' || ref1 === 'IDENTIFIER') {
      this.token('AS', id);
      return id.length;
    }
  }
  // `as` inside an export clause, analogously.
  if (id === 'as' && this.seenExport) {
    if ((ref2 = this.tag()) === 'IDENTIFIER' || ref2 === 'DEFAULT') {
      this.token('AS', id);
      return id.length;
    }
    if (ref3 = this.value(true), indexOf.call(COFFEE_KEYWORDS, ref3) >= 0) {
      prev = this.prev();
      [prev[0], prev[1]] = ['IDENTIFIER', this.value(true)];
      this.token('AS', id);
      return id.length;
    }
  }
  if (id === 'default' && this.seenExport && ((ref4 = this.tag()) === 'EXPORT' || ref4 === 'AS')) {
    this.token('DEFAULT', id);
    return id.length;
  }
  // `do super` (without explicit parens) expands to `super()` tokens; the
  // `(?!\(\))` guard leaves explicit `super()` to the normal path.
  if (id === 'do' && (regExSuper = /^(\s*super)(?!\(\))/.exec(this.chunk.slice(3)))) {
    this.token('SUPER', 'super');
    this.token('CALL_START', '(');
    this.token('CALL_END', ')');
    [input, sup] = regExSuper;
    return sup.length + 3; // 3 = length of 'do'.. plus matched `super` span.
  }
  prev = this.prev();
  // After an accessor (`.`, `?.`, `::`, `?::`) or an unspaced `@`, the id
  // is a property name and must not be treated as a keyword.
  tag = colon || (prev != null) && (((ref5 = prev[0]) === '.' || ref5 === '?.' || ref5 === '::' || ref5 === '?::') || !prev.spaced && prev[0] === '@') ? 'PROPERTY' : 'IDENTIFIER';
  if (tag === 'IDENTIFIER' && (indexOf.call(JS_KEYWORDS, id) >= 0 || indexOf.call(COFFEE_KEYWORDS, id) >= 0) && !(this.exportSpecifierList && indexOf.call(COFFEE_KEYWORDS, id) >= 0)) {
    tag = id.toUpperCase();
    if (tag === 'WHEN' && (ref6 = this.tag(), indexOf.call(LINE_BREAK, ref6) >= 0)) {
      tag = 'LEADING_WHEN';
    } else if (tag === 'FOR') {
      this.seenFor = true;
    } else if (tag === 'UNLESS') {
      tag = 'IF';
    } else if (tag === 'IMPORT') {
      this.seenImport = true;
    } else if (tag === 'EXPORT') {
      this.seenExport = true;
    } else if (indexOf.call(UNARY, tag) >= 0) {
      tag = 'UNARY';
    } else if (indexOf.call(RELATION, tag) >= 0) {
      // `in`/`of` directly inside a `for` become FORIN/FOROF; otherwise
      // they are ordinary relational operators, possibly negated by a
      // preceding `!` which is folded into this token.
      if (tag !== 'INSTANCEOF' && this.seenFor) {
        tag = 'FOR' + tag;
        this.seenFor = false;
      } else {
        tag = 'RELATION';
        if (this.value() === '!') {
          poppedToken = this.tokens.pop();
          id = '!' + id;
        }
      }
    }
  } else if (tag === 'IDENTIFIER' && this.seenFor && id === 'from' && isForFrom(prev)) {
    tag = 'FORFROM';
    this.seenFor = false;
  // Throw an error on attempts to use `get` or `set` as keywords, or
  // what CoffeeScript would normally interpret as calls to functions named
  // `get` or `set`, i.e. `get({foo: function () {}})`.
  } else if (tag === 'PROPERTY' && prev) {
    if (prev.spaced && (ref7 = prev[0], indexOf.call(CALLABLE, ref7) >= 0) && /^[gs]et$/.test(prev[1]) && this.tokens[this.tokens.length - 2][0] !== '.') {
      this.error(`'${prev[1]}' cannot be used as a keyword, or as a function call without parentheses`, prev[2]);
    } else {
      prevprev = this.tokens[this.tokens.length - 2];
      if (((ref8 = prev[0]) === '@' || ref8 === 'THIS') && prevprev && prevprev.spaced && /^[gs]et$/.test(prevprev[1]) && this.tokens[this.tokens.length - 3][0] !== '.') {
        this.error(`'${prevprev[1]}' cannot be used as a keyword, or as a function call without parentheses`, prevprev[2]);
      }
    }
  }
  if (tag === 'IDENTIFIER' && indexOf.call(RESERVED, id) >= 0) {
    this.error(`reserved word '${id}'`, {
      length: id.length
    });
  }
  // Map CoffeeScript aliases (`and`, `or`, `isnt`, `yes`, …) onto their
  // canonical operator/value, remembering the alias for location data.
  if (!(tag === 'PROPERTY' || this.exportSpecifierList)) {
    if (indexOf.call(COFFEE_ALIASES, id) >= 0) {
      alias = id;
      id = COFFEE_ALIAS_MAP[id];
    }
    tag = (function() {
      switch (id) {
        case '!':
          return 'UNARY';
        case '==':
        case '!=':
          return 'COMPARE';
        case 'true':
        case 'false':
          return 'BOOL';
        case 'break':
        case 'continue':
        case 'debugger':
          return 'STATEMENT';
        case '&&':
        case '||':
          return id;
        default:
          return tag;
      }
    })();
  }
  tagToken = this.token(tag, id, 0, idLength);
  if (alias) {
    tagToken.origin = [tag, alias, tagToken[2]];
  }
  if (poppedToken) {
    // The folded-in `!` token contributed the real start position.
    [tagToken[2].first_line, tagToken[2].first_column] = [poppedToken[2].first_line, poppedToken[2].first_column];
  }
  if (colon) {
    colonOffset = input.lastIndexOf(inCSXTag ? '=' : ':');
    colonToken = this.token(':', ':', colonOffset, colon.length);
    if (inCSXTag) { // used by rewriter
      colonToken.csxColon = true;
    }
  }
  // CSX attributes are comma-separated in the generated call.
  if (inCSXTag && tag === 'IDENTIFIER' && prev[0] !== ':') {
    this.token(',', ',', 0, 0, tagToken);
  }
  return input.length;
}
|
||
|
||
// Matches numbers, including decimals, hex, and exponential notation.
|
||
// Be careful not to interfere with ranges in progress.
|
||
numberToken() {
|
||
var base, lexedLength, match, number, numberValue, tag;
|
||
if (!(match = NUMBER.exec(this.chunk))) {
|
||
return 0;
|
||
}
|
||
number = match[0];
|
||
lexedLength = number.length;
|
||
switch (false) {
|
||
case !/^0[BOX]/.test(number):
|
||
this.error(`radix prefix in '${number}' must be lowercase`, {
|
||
offset: 1
|
||
});
|
||
break;
|
||
case !/^(?!0x).*E/.test(number):
|
||
this.error(`exponential notation in '${number}' must be indicated with a lowercase 'e'`, {
|
||
offset: number.indexOf('E')
|
||
});
|
||
break;
|
||
case !/^0\d*[89]/.test(number):
|
||
this.error(`decimal literal '${number}' must not be prefixed with '0'`, {
|
||
length: lexedLength
|
||
});
|
||
break;
|
||
case !/^0\d+/.test(number):
|
||
this.error(`octal literal '${number}' must be prefixed with '0o'`, {
|
||
length: lexedLength
|
||
});
|
||
}
|
||
base = (function() {
|
||
switch (number.charAt(1)) {
|
||
case 'b':
|
||
return 2;
|
||
case 'o':
|
||
return 8;
|
||
case 'x':
|
||
return 16;
|
||
default:
|
||
return null;
|
||
}
|
||
})();
|
||
numberValue = base != null ? parseInt(number.slice(2), base) : parseFloat(number);
|
||
tag = numberValue === 2e308 ? 'INFINITY' : 'NUMBER';
|
||
this.token(tag, number, 0, lexedLength);
|
||
return lexedLength;
|
||
}
|
||
|
||
// Matches strings, including multiline strings, as well as heredocs, with or without
|
||
// interpolation.
|
||
// Matches strings, including multiline strings, as well as heredocs, with or
// without interpolation. The matched pieces are merged back into STRING
// tokens; heredocs additionally have their common indentation stripped.
// Returns the number of characters consumed, or 0 on no match.
stringToken() {
  var $, attempt, delimiter, doc, end, heredoc, i, indent, indentRegex, match, prev, quote, ref, regex, token, tokens;
  [quote] = STRING_START.exec(this.chunk) || [];
  if (!quote) {
    return 0;
  }
  // If the preceding token is `from` and this is an import or export statement,
  // properly tag the `from`.
  prev = this.prev();
  if (prev && this.value() === 'from' && (this.seenImport || this.seenExport)) {
    prev[0] = 'FROM';
  }
  // Pick the body regex matching the opening quote style.
  regex = (function() {
    switch (quote) {
      case "'":
        return STRING_SINGLE;
      case '"':
        return STRING_DOUBLE;
      case "'''":
        return HEREDOC_SINGLE;
      case '"""':
        return HEREDOC_DOUBLE;
    }
  })();
  heredoc = quote.length === 3;
  ({
    tokens,
    index: end
  } = this.matchWithInterpolations(regex, quote));
  $ = tokens.length - 1; // Index of the last piece.
  delimiter = quote.charAt(0);
  if (heredoc) {
    // Find the smallest indentation. It will be removed from all lines later.
    indent = null;
    // Join only the literal string pieces; `#{}` stands in for the
    // interpolations so indentation detection spans the whole heredoc.
    doc = ((function() {
      var j, len, results;
      results = [];
      for (i = j = 0, len = tokens.length; j < len; i = ++j) {
        token = tokens[i];
        if (token[0] === 'NEOSTRING') {
          results.push(token[1]);
        }
      }
      return results;
    })()).join('#{}');
    while (match = HEREDOC_INDENT.exec(doc)) {
      attempt = match[1];
      if (indent === null || (0 < (ref = attempt.length) && ref < indent.length)) {
        indent = attempt;
      }
    }
    if (indent) {
      indentRegex = RegExp(`\\n${indent}`, "g");
    }
    this.mergeInterpolationTokens(tokens, {delimiter}, (value, i) => {
      value = this.formatString(value, {
        delimiter: quote
      });
      if (indentRegex) {
        value = value.replace(indentRegex, '\n');
      }
      // Trim the blank line right after the opening and right before the
      // closing triple-quote.
      if (i === 0) {
        value = value.replace(LEADING_BLANK_LINE, '');
      }
      if (i === $) {
        value = value.replace(TRAILING_BLANK_LINE, '');
      }
      return value;
    });
  } else {
    this.mergeInterpolationTokens(tokens, {delimiter}, (value, i) => {
      value = this.formatString(value, {
        delimiter: quote
      });
      // In single-line-style strings a newline plus surrounding spaces
      // collapses to one space, except at the very start/end.
      value = value.replace(SIMPLE_STRING_OMIT, function(match, offset) {
        if ((i === 0 && offset === 0) || (i === $ && offset + match.length === value.length)) {
          return '';
        } else {
          return ' ';
        }
      });
      return value;
    });
  }
  if (this.atCSXTag()) {
    // NOTE(review): `this.prev` is passed uncalled here (elsewhere the
    // pattern is `this.prev()`); presumably the function object is an
    // acceptable `origin` placeholder — confirm against `token()`.
    this.token(',', ',', 0, 0, this.prev);
  }
  return end;
}
|
||
|
||
// Matches and consumes comments. The comments are taken out of the token
|
||
// stream and saved for later, to be reinserted into the output after
|
||
// everything has been parsed and the JavaScript code generated.
|
||
// Matches and consumes comments. The comments are taken out of the token
// stream and saved for later, to be reinserted into the output after
// everything has been parsed and the JavaScript code generated.
// Params: `chunk` — text to scan (defaults to the current chunk; the
// heregex path passes comment text explicitly). Returns consumed length.
commentToken(chunk = this.chunk) {
  var comment, commentAttachments, content, contents, here, i, match, matchIllegal, newLine, placeholderToken, prev;
  if (!(match = chunk.match(COMMENT))) {
    return 0;
  }
  [comment, here] = match; // `here` is the ### block body, if any.
  contents = null;
  // Does this comment follow code on the same line?
  newLine = /^\s*\n+\s*#/.test(comment);
  if (here) {
    matchIllegal = HERECOMMENT_ILLEGAL.exec(comment);
    if (matchIllegal) {
      this.error(`block comments cannot contain ${matchIllegal[0]}`, {
        offset: matchIllegal.index,
        length: matchIllegal[0].length
      });
    }
    // Parse indentation or outdentation as if this block comment didn’t exist.
    chunk = chunk.replace(`###${here}###`, '');
    // Remove leading newlines, like `Rewriter::removeLeadingNewlines`, to
    // avoid the creation of unwanted `TERMINATOR` tokens.
    chunk = chunk.replace(/^\n+/, '');
    this.lineToken(chunk);
    // Pull out the ###-style comment’s content, and format it.
    content = here;
    if (indexOf.call(content, '\n') >= 0) {
      // Strip the current indentation from every continuation line.
      content = content.replace(RegExp(`\\n${repeat(' ', this.indent)}`, "g"), '\n');
    }
    contents = [content];
  } else {
    // The `COMMENT` regex captures successive line comments as one token.
    // Remove any leading newlines before the first comment, but preserve
    // blank lines between line comments.
    content = comment.replace(/^(\n*)/, '');
    // NOTE(review): the class `[ |\t]` matches a literal `|` as well as
    // space/tab — presumably `[ \t]` was intended; confirm upstream.
    content = content.replace(/^([ |\t]*)#/gm, '');
    contents = content.split('\n');
  }
  // Build one attachment record per comment line/block.
  commentAttachments = (function() {
    var j, len, results;
    results = [];
    for (i = j = 0, len = contents.length; j < len; i = ++j) {
      content = contents[i];
      results.push({
        content: content,
        here: here != null,
        newLine: newLine || i !== 0 // Line comments after the first one start new lines, by definition.
      });
    }
    return results;
  })();
  prev = this.prev();
  if (!prev) {
    // If there’s no previous token, create a placeholder token to attach
    // this comment to; and follow with a newline.
    commentAttachments[0].newLine = true;
    this.lineToken(this.chunk.slice(comment.length));
    placeholderToken = this.makeToken('JS', '');
    placeholderToken.generated = true;
    placeholderToken.comments = commentAttachments;
    this.tokens.push(placeholderToken);
    this.newlineToken(0);
  } else {
    attachCommentsToNode(commentAttachments, prev);
  }
  return comment.length;
}
|
||
|
||
// Matches JavaScript interpolated directly into the source via backticks.
|
||
jsToken() {
|
||
var match, script;
|
||
if (!(this.chunk.charAt(0) === '`' && (match = HERE_JSTOKEN.exec(this.chunk) || JSTOKEN.exec(this.chunk)))) {
|
||
return 0;
|
||
}
|
||
// Convert escaped backticks to backticks, and escaped backslashes
|
||
// just before escaped backticks to backslashes
|
||
script = match[1].replace(/\\+(`|$)/g, function(string) {
|
||
// `string` is always a value like '\`', '\\\`', '\\\\\`', etc.
|
||
// By reducing it to its latter half, we turn '\`' to '`', '\\\`' to '\`', etc.
|
||
return string.slice(-Math.ceil(string.length / 2));
|
||
});
|
||
this.token('JS', script, 0, match[0].length);
|
||
return match[0].length;
|
||
}
|
||
|
||
// Matches regular expression literals, as well as multiline extended ones.
|
||
// Lexing regular expressions is difficult to distinguish from division, so we
|
||
// borrow some basic heuristics from JavaScript and Ruby.
|
||
// Matches regular expression literals, as well as multiline extended ones.
// Lexing regular expressions is difficult to distinguish from division, so we
// borrow some basic heuristics from JavaScript and Ruby. Plain regexes and
// single-piece heregexes become one REGEX token; interpolated heregexes
// compile to a `RegExp(...)` call. Returns consumed length, or 0.
regexToken() {
  var body, closed, comment, comments, end, flags, index, j, len, match, origin, prev, ref, ref1, regex, tokens;
  // `switch (false)` selects the first case whose regex/match succeeds.
  switch (false) {
    case !(match = REGEX_ILLEGAL.exec(this.chunk)):
      this.error(`regular expressions cannot begin with ${match[2]}`, {
        offset: match.index + match[1].length
      });
      break;
    case !(match = this.matchWithInterpolations(HEREGEX, '///')):
      ({tokens, index} = match);
      // Pull out `#`-comments embedded in the heregex body (but not `#{`
      // interpolations) and route them through `commentToken`.
      comments = this.chunk.slice(0, index).match(/\s+(#(?!{).*)/g);
      if (comments) {
        for (j = 0, len = comments.length; j < len; j++) {
          comment = comments[j];
          this.commentToken(comment);
        }
      }
      break;
    case !(match = REGEX.exec(this.chunk)):
      [regex, body, closed] = match;
      this.validateEscapes(body, {
        isRegex: true,
        offsetInChunk: 1
      });
      index = regex.length;
      prev = this.prev();
      // Division heuristics: after a spaced callable, an ambiguous or
      // unclosed `/...` is division; after NOT_REGEX tokens it always is.
      if (prev) {
        if (prev.spaced && (ref = prev[0], indexOf.call(CALLABLE, ref) >= 0)) {
          if (!closed || POSSIBLY_DIVISION.test(regex)) {
            return 0;
          }
        } else if (ref1 = prev[0], indexOf.call(NOT_REGEX, ref1) >= 0) {
          return 0;
        }
      }
      if (!closed) {
        this.error('missing / (unclosed regex)');
      }
      break;
    default:
      return 0;
  }
  [flags] = REGEX_FLAGS.exec(this.chunk.slice(index));
  end = index + flags.length;
  origin = this.makeToken('REGEX', null, 0, end);
  switch (false) {
    case !!VALID_FLAGS.test(flags):
      this.error(`invalid regular expression flags ${flags}`, {
        offset: index,
        length: flags.length
      });
      break;
    // A plain regex, or a heregex with no interpolations, collapses to a
    // single REGEX token.
    case !(regex || tokens.length === 1):
      if (body) {
        body = this.formatRegex(body, {
          flags,
          delimiter: '/'
        });
      } else {
        body = this.formatHeregex(tokens[0][1], {flags});
      }
      this.token('REGEX', `${this.makeDelimitedLiteral(body, {
        delimiter: '/'
      })}${flags}`, 0, end, origin);
      break;
    default:
      // Interpolated heregex: emit tokens for `RegExp(<pieces>, "flags")`.
      this.token('REGEX_START', '(', 0, 0, origin);
      this.token('IDENTIFIER', 'RegExp', 0, 0);
      this.token('CALL_START', '(', 0, 0);
      this.mergeInterpolationTokens(tokens, {
        delimiter: '"',
        double: true
      }, (str) => {
        return this.formatHeregex(str, {flags});
      });
      if (flags) {
        this.token(',', ',', index - 1, 0);
        this.token('STRING', '"' + flags + '"', index - 1, flags.length);
      }
      this.token(')', ')', end - 1, 0);
      this.token('REGEX_END', ')', end - 1, 0);
  }
  return end;
}
|
||
|
||
// Matches newlines, indents, and outdents, and determines which is which.
|
||
// If we can detect that the current line is continued onto the next line,
|
||
// then the newline is suppressed:
|
||
|
||
// elements
|
||
// .each( ... )
|
||
// .map( ... )
|
||
|
||
// Keeps track of the level of indentation, because a single outdent token
|
||
// can close multiple indents, so we need to know how far in we happen to be.
|
||
// Matches newlines, indents, and outdents, and determines which is which.
// If we can detect that the current line is continued onto the next line,
// then the newline is suppressed:
//
//     elements
//       .each( ... )
//       .map( ... )
//
// Keeps track of the level of indentation, because a single outdent token
// can close multiple indents, so we need to know how far in we happen to be.
// Params: `chunk` — text to scan (defaults to the current chunk; the block
// comment path passes a stripped chunk). Returns consumed length, or 0.
lineToken(chunk = this.chunk) {
  var diff, indent, match, minLiteralLength, newIndentLiteral, noNewlines, size;
  if (!(match = MULTI_DENT.exec(chunk))) {
    return 0;
  }
  indent = match[0];
  // A newline terminates these statement-level recognizers, unless we are
  // inside an import/export specifier list.
  this.seenFor = false;
  if (!this.importSpecifierList) {
    this.seenImport = false;
  }
  if (!this.exportSpecifierList) {
    this.seenExport = false;
  }
  // Indent width = characters after the last newline in the match.
  size = indent.length - 1 - indent.lastIndexOf('\n');
  noNewlines = this.unfinished();
  newIndentLiteral = size > 0 ? indent.slice(-size) : '';
  // The indent must be a repetition of a single character (all spaces or
  // all tabs).
  if (!/^(.?)\1*$/.exec(newIndentLiteral)) {
    this.error('mixed indentation', {
      offset: indent.length
    });
    return indent.length;
  }
  // And it must agree (spaces vs. tabs) with the established indent.
  minLiteralLength = Math.min(newIndentLiteral.length, this.indentLiteral.length);
  if (newIndentLiteral.slice(0, minLiteralLength) !== this.indentLiteral.slice(0, minLiteralLength)) {
    this.error('indentation mismatch', {
      offset: indent.length
    });
    return indent.length;
  }
  // Same effective level: just a newline (or a suppressed one).
  if (size - this.indebt === this.indent) {
    if (noNewlines) {
      this.suppressNewlines();
    } else {
      this.newlineToken(0);
    }
    return indent.length;
  }
  if (size > this.indent) {
    if (noNewlines) {
      // Over-indentation on a continued line is remembered as `indebt`
      // rather than emitting an INDENT.
      this.indebt = size - this.indent;
      this.suppressNewlines();
      return indent.length;
    }
    if (!this.tokens.length) {
      // Leading indentation before any token sets the base level.
      this.baseIndent = this.indent = size;
      this.indentLiteral = newIndentLiteral;
      return indent.length;
    }
    diff = size - this.indent + this.outdebt;
    this.token('INDENT', diff, indent.length - size, size);
    this.indents.push(diff);
    this.ends.push({
      tag: 'OUTDENT'
    });
    this.outdebt = this.indebt = 0;
    this.indent = size;
    this.indentLiteral = newIndentLiteral;
  } else if (size < this.baseIndent) {
    this.error('missing indentation', {
      offset: indent.length
    });
  } else {
    this.indebt = 0;
    this.outdentToken(this.indent - size, noNewlines, indent.length);
  }
  return indent.length;
}
|
||
|
||
// Record an outdent token or multiple tokens, if we happen to be moving back
|
||
// inwards past several recorded indents. Sets new @indent value.
|
||
// Record an outdent token or multiple tokens, if we happen to be moving back
// inwards past several recorded indents. Sets new @indent value.
// Params: `moveOut` — how many columns to move back out; `noNewlines` —
// suppress the trailing TERMINATOR; `outdentLength` — source length of the
// outdent for location data. Returns `this`.
outdentToken(moveOut, noNewlines, outdentLength) {
  var decreasedIndent, dent, lastIndent, ref;
  decreasedIndent = this.indent - moveOut;
  while (moveOut > 0) {
    lastIndent = this.indents[this.indents.length - 1];
    if (!lastIndent) {
      // Nothing left to close: swallow the remaining outdent.
      this.outdebt = moveOut = 0;
    } else if (this.outdebt && moveOut <= this.outdebt) {
      // The whole move is covered by previously-recorded extra outdent.
      this.outdebt -= moveOut;
      moveOut = 0;
    } else {
      dent = this.indents.pop() + this.outdebt;
      // When the outdent lands on an indentable closer (`)`,`]`, etc.),
      // close the full recorded indent rather than only `moveOut`.
      if (outdentLength && (ref = this.chunk[outdentLength], indexOf.call(INDENTABLE_CLOSERS, ref) >= 0)) {
        decreasedIndent -= dent - moveOut;
        moveOut = dent;
      }
      this.outdebt = 0;
      // pair might call outdentToken, so preserve decreasedIndent
      this.pair('OUTDENT');
      this.token('OUTDENT', moveOut, 0, outdentLength);
      moveOut -= dent;
    }
  }
  if (dent) {
    this.outdebt -= moveOut;
  }
  this.suppressSemicolons();
  if (!(this.tag() === 'TERMINATOR' || noNewlines)) {
    this.token('TERMINATOR', '\n', outdentLength, 0);
  }
  this.indent = decreasedIndent;
  this.indentLiteral = this.indentLiteral.slice(0, decreasedIndent);
  return this;
}
|
||
|
||
// Matches and consumes non-meaningful whitespace. Tag the previous token
|
||
// as being “spaced”, because there are some cases where it makes a difference.
|
||
whitespaceToken() {
|
||
var match, nline, prev;
|
||
if (!((match = WHITESPACE.exec(this.chunk)) || (nline = this.chunk.charAt(0) === '\n'))) {
|
||
return 0;
|
||
}
|
||
prev = this.prev();
|
||
if (prev) {
|
||
prev[match ? 'spaced' : 'newLine'] = true;
|
||
}
|
||
if (match) {
|
||
return match[0].length;
|
||
} else {
|
||
return 0;
|
||
}
|
||
}
|
||
|
||
// Generate a newline token. Consecutive newlines get merged together.
|
||
newlineToken(offset) {
|
||
this.suppressSemicolons();
|
||
if (this.tag() !== 'TERMINATOR') {
|
||
this.token('TERMINATOR', '\n', offset, 0);
|
||
}
|
||
return this;
|
||
}
|
||
|
||
// Use a `\` at a line-ending to suppress the newline.
|
||
// The slash is removed here once its job is done.
|
||
suppressNewlines() {
|
||
var prev;
|
||
prev = this.prev();
|
||
if (prev[1] === '\\') {
|
||
if (prev.comments && this.tokens.length > 1) {
|
||
// `@tokens.length` should be at least 2 (some code, then `\`).
|
||
// If something puts a `\` after nothing, they deserve to lose any
|
||
// comments that trail it.
|
||
attachCommentsToNode(prev.comments, this.tokens[this.tokens.length - 2]);
|
||
}
|
||
this.tokens.pop();
|
||
}
|
||
return this;
|
||
}
|
||
|
||
// CSX is like JSX but for CoffeeScript.
|
||
// CSX is like JSX but for CoffeeScript. Handles opening tags (compiled to a
// `Tag([...attributes...], body)` call shape), `{...}` attribute wrappers,
// the `>` that starts tag content, and self-closing `/>`.
// Returns the number of characters consumed, or 0 on no match.
csxToken() {
  var afterTag, colon, csxTag, end, firstChar, id, input, match, origin, prev, prevChar, ref, token, tokens;
  firstChar = this.chunk[0];
  // Check the previous token to detect if attribute is spread.
  prevChar = this.tokens.length > 0 ? this.tokens[this.tokens.length - 1][0] : '';
  if (firstChar === '<') {
    match = CSX_IDENTIFIER.exec(this.chunk.slice(1));
    // Not the right hand side of an unspaced comparison (i.e. `a<b`).
    if (!(match && (this.csxDepth > 0 || !(prev = this.prev()) || prev.spaced || (ref = prev[0], indexOf.call(COMPARABLE_LEFT_SIDE, ref) < 0)))) {
      return 0;
    }
    [input, id, colon] = match;
    origin = this.token('CSX_TAG', id, 1, id.length);
    this.token('CALL_START', '(');
    this.token('[', '['); // Opens the attributes array.
    this.ends.push({
      tag: '/>',
      origin: origin,
      name: id
    });
    this.csxDepth++;
    return id.length + 1;
  } else if (csxTag = this.atCSXTag()) {
    if (this.chunk.slice(0, 2) === '/>') {
      // Self-closing tag: close the attributes array and the call.
      this.pair('/>');
      this.token(']', ']', 0, 2);
      this.token('CALL_END', ')', 0, 2);
      this.csxDepth--;
      return 2;
    } else if (firstChar === '{') {
      // `{` after `attr:` wraps an expression value; otherwise it is a
      // spread/object attribute — remembered in `csxObjAttribute`.
      if (prevChar === ':') {
        token = this.token('(', '(');
        this.csxObjAttribute[this.csxDepth] = false;
      } else {
        token = this.token('{', '{');
        this.csxObjAttribute[this.csxDepth] = true;
      }
      this.ends.push({
        tag: '}',
        origin: token
      });
      return 1;
    } else if (firstChar === '>') {
      // Ignore terminators inside a tag.
      this.pair('/>'); // As if the current tag was self-closing.
      origin = this.token(']', ']');
      this.token(',', ',');
      // Lex the tag body (text plus interpolations) up to `</`.
      ({
        tokens,
        index: end
      } = this.matchWithInterpolations(INSIDE_CSX, '>', '</', CSX_INTERPOLATION));
      this.mergeInterpolationTokens(tokens, {
        delimiter: '"'
      }, (value, i) => {
        return this.formatString(value, {
          delimiter: '>'
        });
      });
      match = CSX_IDENTIFIER.exec(this.chunk.slice(end));
      if (!match || match[0] !== csxTag.name) {
        this.error(`expected corresponding CSX closing tag for ${csxTag.name}`, csxTag.origin[2]);
      }
      afterTag = end + csxTag.name.length;
      if (this.chunk[afterTag] !== '>') {
        this.error("missing closing > after tag name", {
          offset: afterTag,
          length: 1
        });
      }
      // +1 for the closing `>`.
      this.token('CALL_END', ')', end, csxTag.name.length + 1);
      this.csxDepth--;
      return afterTag + 1;
    } else {
      return 0;
    }
  } else if (this.atCSXTag(1)) {
    // One level out from the tag itself: close a `{...}` attribute wrapper.
    if (firstChar === '}') {
      this.pair(firstChar);
      if (this.csxObjAttribute[this.csxDepth]) {
        this.token('}', '}');
        this.csxObjAttribute[this.csxDepth] = false;
      } else {
        this.token(')', ')');
      }
      this.token(',', ',');
      return 1;
    } else {
      return 0;
    }
  } else {
    return 0;
  }
}
|
||
|
||
atCSXTag(depth = 0) {
|
||
var i, last, ref;
|
||
if (this.csxDepth === 0) {
|
||
return false;
|
||
}
|
||
i = this.ends.length - 1;
|
||
while (((ref = this.ends[i]) != null ? ref.tag : void 0) === 'OUTDENT' || depth-- > 0) { // Ignore indents.
|
||
i--;
|
||
}
|
||
last = this.ends[i];
|
||
return (last != null ? last.tag : void 0) === '/>' && last;
|
||
}
|
||
|
||
    // We treat all other single characters as a token. E.g.: `( ) , . !`
    // Multi-character operators are also literal tokens, so that Jison can assign
    // the proper order of operations. There are some symbols that we tag specially
    // here. `;` and newlines are both treated as a `TERMINATOR`, we distinguish
    // parentheses that indicate a method call from regular parentheses, and so on.
    // Returns the number of characters consumed from the chunk.
    literalToken() {
      var match, message, origin, prev, ref, ref1, ref2, ref3, ref4, skipToken, tag, token, value;
      if (match = OPERATOR.exec(this.chunk)) {
        [value] = match;
        if (CODE.test(value)) {
          // `->` / `=>`: retroactively retag the preceding parenthesized list
          // as a parameter list.
          this.tagParameters();
        }
      } else {
        value = this.chunk.charAt(0);
      }
      tag = value;
      prev = this.prev();
      if (prev && indexOf.call(['=', ...COMPOUND_ASSIGN], value) >= 0) {
        skipToken = false;
        if (value === '=' && ((ref = prev[1]) === '||' || ref === '&&') && !prev.spaced) {
          // Merge an unspaced `||` or `&&` with a following `=` into a single
          // compound-assign token (`||=` / `&&=`).
          prev[0] = 'COMPOUND_ASSIGN';
          prev[1] += '=';
          prev = this.tokens[this.tokens.length - 2];
          skipToken = true;
        }
        if (prev && prev[0] !== 'PROPERTY') {
          // Reject assignment to keywords / reserved words, e.g. `true = 1`.
          origin = (ref1 = prev.origin) != null ? ref1 : prev;
          message = isUnassignable(prev[1], origin[1]);
          if (message) {
            this.error(message, origin[2]);
          }
        }
        if (skipToken) {
          return value.length;
        }
      }
      // Track whether we are inside an `import {...}` / `export {...}`
      // specifier list, so braces there can be treated specially elsewhere.
      if (value === '{' && this.seenImport) {
        this.importSpecifierList = true;
      } else if (this.importSpecifierList && value === '}') {
        this.importSpecifierList = false;
      } else if (value === '{' && (prev != null ? prev[0] : void 0) === 'EXPORT') {
        this.exportSpecifierList = true;
      } else if (this.exportSpecifierList && value === '}') {
        this.exportSpecifierList = false;
      }
      if (value === ';') {
        // A `;` may not terminate an unfinished expression.
        if (ref2 = prev != null ? prev[0] : void 0, indexOf.call(['=', ...UNFINISHED], ref2) >= 0) {
          this.error('unexpected ;');
        }
        this.seenFor = this.seenImport = this.seenExport = false;
        tag = 'TERMINATOR';
      } else if (value === '*' && (prev != null ? prev[0] : void 0) === 'EXPORT') {
        tag = 'EXPORT_ALL';
      } else if (indexOf.call(MATH, value) >= 0) {
        tag = 'MATH';
      } else if (indexOf.call(COMPARE, value) >= 0) {
        tag = 'COMPARE';
      } else if (indexOf.call(COMPOUND_ASSIGN, value) >= 0) {
        tag = 'COMPOUND_ASSIGN';
      } else if (indexOf.call(UNARY, value) >= 0) {
        tag = 'UNARY';
      } else if (indexOf.call(UNARY_MATH, value) >= 0) {
        tag = 'UNARY_MATH';
      } else if (indexOf.call(SHIFT, value) >= 0) {
        tag = 'SHIFT';
      } else if (value === '?' && (prev != null ? prev.spaced : void 0)) {
        tag = 'BIN?';
      } else if (prev) {
        if (value === '(' && !prev.spaced && (ref3 = prev[0], indexOf.call(CALLABLE, ref3) >= 0)) {
          // An unspaced `(` after a callable value starts an invocation.
          if (prev[0] === '?') {
            prev[0] = 'FUNC_EXIST';
          }
          tag = 'CALL_START';
        } else if (value === '[' && (((ref4 = prev[0], indexOf.call(INDEXABLE, ref4) >= 0) && !prev.spaced) || (prev[0] === '::'))) { // `.prototype` can’t be a method you can call.
          tag = 'INDEX_START';
          switch (prev[0]) {
            case '?':
              prev[0] = 'INDEX_SOAK';
          }
        }
      }
      token = this.makeToken(tag, value);
      switch (value) {
        case '(':
        case '{':
        case '[':
          // Record the opener so `pair` can balance it later; `origin` is used
          // for error reporting.
          this.ends.push({
            tag: INVERSES[value],
            origin: token
          });
          break;
        case ')':
        case '}':
        case ']':
          this.pair(value);
      }
      // NOTE(review): a second, freshly-made token (not the `token` stored as
      // `origin` above) is pushed to the stream, so the two are distinct
      // objects — confirm this is intentional before unifying them.
      this.tokens.push(this.makeToken(tag, value));
      return value.length;
    }
|
||
|
||
    // Token Manipulators
    // ------------------

    // A source of ambiguity in our grammar used to be parameter lists in function
    // definitions versus argument lists in function calls. Walk backwards, tagging
    // parameters specially in order to make things easier for the parser.
    tagParameters() {
      var i, paramEndToken, stack, tok, tokens;
      if (this.tag() !== ')') {
        // The arrow is not preceded by a parenthesized list; nothing to retag.
        return this;
      }
      stack = [];
      ({tokens} = this);
      i = tokens.length;
      // Tentatively retag the closing `)` as the end of a parameter list.
      paramEndToken = tokens[--i];
      paramEndToken[0] = 'PARAM_END';
      // Scan backwards, balancing any nested parentheses, until the matching
      // opener for `paramEndToken` is found.
      while (tok = tokens[--i]) {
        switch (tok[0]) {
          case ')':
            stack.push(tok);
            break;
          case '(':
          case 'CALL_START':
            if (stack.length) {
              stack.pop();
            } else if (tok[0] === '(') {
              // A plain `(`: this really was a parameter list.
              tok[0] = 'PARAM_START';
              return this;
            } else {
              // A `CALL_START`: it was a call after all; undo the tentative
              // retagging of the closer.
              paramEndToken[0] = 'CALL_END';
              return this;
            }
        }
      }
      return this;
    }
|
||
|
||
    // Close up all remaining open blocks at the end of the file by emitting
    // an outdent for the current indentation level.
    closeIndentation() {
      return this.outdentToken(this.indent);
    }
|
||
|
||
    // Match the contents of a delimited token and expand variables and expressions
    // inside it using Ruby-like notation for substitution of arbitrary
    // expressions.

    //     "Hello #{name.capitalize()}."

    // If it encounters an interpolation, this method will recursively create a new
    // Lexer and tokenize until the `{` of `#{` is balanced with a `}`.

    // - `regex` matches the contents of a token (but not `delimiter`, and not
    //   `#{` if interpolations are desired).
    // - `delimiter` is the delimiter of the token. Examples are `'`, `"`, `'''`,
    //   `"""` and `///`.
    // - `closingDelimiter` is different from `delimiter` only in CSX.
    // - `interpolators` matches the start of an interpolation, for CSX it's both
    //   `{` and `<` (i.e. nested CSX tag).

    // Returns `null` when the chunk does not start with `delimiter`; otherwise
    // an object with the fake `'NEOSTRING'`/`'TOKENS'` tokens produced and the
    // number of characters consumed. This method allows us to have strings
    // within interpolations within strings, ad infinitum.
    matchWithInterpolations(regex, delimiter, closingDelimiter, interpolators) {
      var braceInterpolator, close, column, firstToken, index, interpolationOffset, interpolator, lastToken, line, match, nested, offsetInChunk, open, ref, rest, str, strPart, tokens;
      if (closingDelimiter == null) {
        closingDelimiter = delimiter;
      }
      if (interpolators == null) {
        interpolators = /^#\{/;
      }
      tokens = [];
      offsetInChunk = delimiter.length;
      if (this.chunk.slice(0, offsetInChunk) !== delimiter) {
        return null;
      }
      str = this.chunk.slice(offsetInChunk);
      while (true) {
        // Consume the plain-text part up to the next interpolation (or the end).
        [strPart] = regex.exec(str);
        this.validateEscapes(strPart, {
          isRegex: delimiter.charAt(0) === '/',
          offsetInChunk
        });
        // Push a fake `'NEOSTRING'` token, which will get turned into a real string later.
        tokens.push(this.makeToken('NEOSTRING', strPart, offsetInChunk));
        str = str.slice(strPart.length);
        offsetInChunk += strPart.length;
        if (!(match = interpolators.exec(str))) {
          break;
        }
        [interpolator] = match;
        // To remove the `#` in `#{`.
        interpolationOffset = interpolator.length - 1;
        [line, column] = this.getLineAndColumnFromChunk(offsetInChunk + interpolationOffset);
        rest = str.slice(interpolationOffset);
        // Tokenize the interpolated expression with a fresh nested lexer,
        // stopping once its delimiters balance out.
        ({
          tokens: nested,
          index
        } = new Lexer().tokenize(rest, {
          line: line,
          column: column,
          untilBalanced: true
        }));
        // Account for the `#` in `#{`.
        index += interpolationOffset;
        braceInterpolator = str[index - 1] === '}';
        if (braceInterpolator) {
          // Turn the leading and trailing `{` and `}` into parentheses. Unnecessary
          // parentheses will be removed later.
          open = nested[0], close = nested[nested.length - 1];
          open[0] = open[1] = '(';
          close[0] = close[1] = ')';
          close.origin = ['', 'end of interpolation', close[2]];
        }
        if (((ref = nested[1]) != null ? ref[0] : void 0) === 'TERMINATOR') {
          // Remove leading `'TERMINATOR'` (if any).
          nested.splice(1, 1);
        }
        if (!braceInterpolator) {
          // We are not using `{` and `}`, so wrap the interpolated tokens instead.
          open = this.makeToken('(', '(', offsetInChunk, 0);
          close = this.makeToken(')', ')', offsetInChunk + index, 0);
          nested = [open, ...nested, close];
        }
        // Push a fake `'TOKENS'` token, which will get turned into real tokens later.
        tokens.push(['TOKENS', nested]);
        str = str.slice(index);
        offsetInChunk += index;
      }
      if (str.slice(0, closingDelimiter.length) !== closingDelimiter) {
        this.error(`missing ${closingDelimiter}`, {
          length: delimiter.length
        });
      }
      // Stretch the first and last tokens' location data to also cover the
      // opening and closing delimiters.
      firstToken = tokens[0], lastToken = tokens[tokens.length - 1];
      firstToken[2].first_column -= delimiter.length;
      if (lastToken[1].substr(-1) === '\n') {
        lastToken[2].last_line += 1;
        lastToken[2].last_column = closingDelimiter.length - 1;
      } else {
        lastToken[2].last_column += closingDelimiter.length;
      }
      if (lastToken[1].length === 0) {
        lastToken[2].last_column -= 1;
      }
      return {
        tokens,
        index: offsetInChunk + closingDelimiter.length
      };
    }
|
||
|
||
    // Merge the array `tokens` of the fake token types `'TOKENS'` and `'NEOSTRING'`
    // (as returned by `matchWithInterpolations`) into the token stream. The value
    // of `'NEOSTRING'`s are converted using `fn` and turned into strings using
    // `options` first.
    mergeInterpolationTokens(tokens, options, fn) {
      var converted, firstEmptyStringIndex, firstIndex, i, j, k, lastToken, len, len1, locationToken, lparen, placeholderToken, plusToken, rparen, tag, token, tokensToPush, val, value;
      if (tokens.length > 1) {
        // More than one piece: wrap the whole thing in `STRING_START`…`STRING_END`.
        lparen = this.token('STRING_START', '(', 0, 0);
      }
      firstIndex = this.tokens.length;
      for (i = j = 0, len = tokens.length; j < len; i = ++j) {
        token = tokens[i];
        [tag, value] = token;
        switch (tag) {
          case 'TOKENS':
            // `value` here is the nested token array, wrapped in `(`/`)`.
            if (value.length === 2) {
              if (!(value[0].comments || value[1].comments)) {
                // Optimize out empty interpolations (an empty pair of parentheses).
                continue;
              }
              // There are comments (and nothing else) in this interpolation.
              if (this.csxDepth === 0) {
                // This is an interpolated string, not a CSX tag; and for whatever
                // reason `` `a${/*test*/}b` `` is invalid JS. So compile to
                // `` `a${/*test*/''}b` `` instead.
                placeholderToken = this.makeToken('STRING', "''");
              } else {
                placeholderToken = this.makeToken('JS', '');
              }
              // Use the same location data as the first parenthesis.
              placeholderToken[2] = value[0][2];
              // Move all comments from the parenthesis tokens onto the placeholder.
              for (k = 0, len1 = value.length; k < len1; k++) {
                val = value[k];
                if (!val.comments) {
                  continue;
                }
                if (placeholderToken.comments == null) {
                  placeholderToken.comments = [];
                }
                placeholderToken.comments.push(...val.comments);
              }
              value.splice(1, 0, placeholderToken);
            }
            // Push all the tokens in the fake `'TOKENS'` token. These already have
            // sane location data.
            locationToken = value[0];
            tokensToPush = value;
            break;
          case 'NEOSTRING':
            // Convert `'NEOSTRING'` into `'STRING'`.
            converted = fn.call(this, token[1], i);
            // Optimize out empty strings. We ensure that the tokens stream always
            // starts with a string token, though, to make sure that the result
            // really is a string.
            if (converted.length === 0) {
              if (i === 0) {
                firstEmptyStringIndex = this.tokens.length;
              } else {
                continue;
              }
            }
            // However, there is one case where we can optimize away a starting
            // empty string.
            if (i === 2 && (firstEmptyStringIndex != null)) {
              this.tokens.splice(firstEmptyStringIndex, 2); // Remove empty string and the plus.
            }
            token[0] = 'STRING';
            token[1] = this.makeDelimitedLiteral(converted, options);
            locationToken = token;
            tokensToPush = [token];
        }
        if (this.tokens.length > firstIndex) {
          // Create a 0-length "+" token that joins this piece to the previous one.
          plusToken = this.token('+', '+');
          plusToken[2] = {
            first_line: locationToken[2].first_line,
            first_column: locationToken[2].first_column,
            last_line: locationToken[2].first_line,
            last_column: locationToken[2].first_column
          };
        }
        this.tokens.push(...tokensToPush);
      }
      if (lparen) {
        // Close the `STRING_START` wrapper and give both wrapper tokens
        // location data spanning the whole interpolated string.
        lastToken = tokens[tokens.length - 1];
        lparen.origin = [
          'STRING',
          null,
          {
            first_line: lparen[2].first_line,
            first_column: lparen[2].first_column,
            last_line: lastToken[2].last_line,
            last_column: lastToken[2].last_column
          }
        ];
        lparen[2] = lparen.origin[2];
        rparen = this.token('STRING_END', ')');
        return rparen[2] = {
          first_line: lastToken[2].last_line,
          first_column: lastToken[2].last_column,
          last_line: lastToken[2].last_line,
          last_column: lastToken[2].last_column
        };
      }
    }
|
||
|
||
// Pairs up a closing token, ensuring that all listed pairs of tokens are
|
||
// correctly balanced throughout the course of the token stream.
|
||
pair(tag) {
|
||
var lastIndent, prev, ref, ref1, wanted;
|
||
ref = this.ends, prev = ref[ref.length - 1];
|
||
if (tag !== (wanted = prev != null ? prev.tag : void 0)) {
|
||
if ('OUTDENT' !== wanted) {
|
||
this.error(`unmatched ${tag}`);
|
||
}
|
||
// Auto-close `INDENT` to support syntax like this:
|
||
|
||
// el.click((event) ->
|
||
// el.hide())
|
||
|
||
ref1 = this.indents, lastIndent = ref1[ref1.length - 1];
|
||
this.outdentToken(lastIndent, true);
|
||
return this.pair(tag);
|
||
}
|
||
return this.ends.pop();
|
||
}
|
||
|
||
// Helpers
|
||
// -------
|
||
|
||
// Returns the line and column number from an offset into the current chunk.
|
||
|
||
// `offset` is a number of characters into `@chunk`.
|
||
getLineAndColumnFromChunk(offset) {
|
||
var column, lastLine, lineCount, ref, string;
|
||
if (offset === 0) {
|
||
return [this.chunkLine, this.chunkColumn];
|
||
}
|
||
if (offset >= this.chunk.length) {
|
||
string = this.chunk;
|
||
} else {
|
||
string = this.chunk.slice(0, +(offset - 1) + 1 || 9e9);
|
||
}
|
||
lineCount = count(string, '\n');
|
||
column = this.chunkColumn;
|
||
if (lineCount > 0) {
|
||
ref = string.split('\n'), lastLine = ref[ref.length - 1];
|
||
column = lastLine.length;
|
||
} else {
|
||
column += string.length;
|
||
}
|
||
return [this.chunkLine + lineCount, column];
|
||
}
|
||
|
||
// Same as `token`, except this just returns the token without adding it
|
||
// to the results.
|
||
makeToken(tag, value, offsetInChunk = 0, length = value.length) {
|
||
var lastCharacter, locationData, token;
|
||
locationData = {};
|
||
[locationData.first_line, locationData.first_column] = this.getLineAndColumnFromChunk(offsetInChunk);
|
||
// Use length - 1 for the final offset - we're supplying the last_line and the last_column,
|
||
// so if last_column == first_column, then we're looking at a character of length 1.
|
||
lastCharacter = length > 0 ? length - 1 : 0;
|
||
[locationData.last_line, locationData.last_column] = this.getLineAndColumnFromChunk(offsetInChunk + lastCharacter);
|
||
token = [tag, value, locationData];
|
||
return token;
|
||
}
|
||
|
||
// Add a token to the results.
|
||
// `offset` is the offset into the current `@chunk` where the token starts.
|
||
// `length` is the length of the token in the `@chunk`, after the offset. If
|
||
// not specified, the length of `value` will be used.
|
||
|
||
// Returns the new token.
|
||
token(tag, value, offsetInChunk, length, origin) {
|
||
var token;
|
||
token = this.makeToken(tag, value, offsetInChunk, length);
|
||
if (origin) {
|
||
token.origin = origin;
|
||
}
|
||
this.tokens.push(token);
|
||
return token;
|
||
}
|
||
|
||
// Peek at the last tag in the token stream.
|
||
tag() {
|
||
var ref, token;
|
||
ref = this.tokens, token = ref[ref.length - 1];
|
||
return token != null ? token[0] : void 0;
|
||
}
|
||
|
||
// Peek at the last value in the token stream.
|
||
value(useOrigin = false) {
|
||
var ref, ref1, token;
|
||
ref = this.tokens, token = ref[ref.length - 1];
|
||
if (useOrigin && ((token != null ? token.origin : void 0) != null)) {
|
||
return (ref1 = token.origin) != null ? ref1[1] : void 0;
|
||
} else {
|
||
return token != null ? token[1] : void 0;
|
||
}
|
||
}
|
||
|
||
    // Get the previous (most recently emitted) token in the token stream.
    prev() {
      return this.tokens[this.tokens.length - 1];
    }
|
||
|
||
// Are we in the midst of an unfinished expression?
|
||
unfinished() {
|
||
var ref;
|
||
return LINE_CONTINUER.test(this.chunk) || (ref = this.tag(), indexOf.call(UNFINISHED, ref) >= 0);
|
||
}
|
||
|
||
    // Normalize a string body: strip escaped newlines (`STRING_OMIT`), then
    // rewrite `\u{...}` escapes where necessary.
    formatString(str, options) {
      return this.replaceUnicodeCodePointEscapes(str.replace(STRING_OMIT, '$1'), options);
    }
|
||
|
||
    // Normalize a heregex (`///…///`) body: strip whitespace and comments
    // (`HEREGEX_OMIT`), then format it as a regular regex.
    formatHeregex(str, options) {
      return this.formatRegex(str.replace(HEREGEX_OMIT, '$1$2'), merge(options, {
        delimiter: '///'
      }));
    }
|
||
|
||
    // Normalize a regex body by rewriting `\u{...}` escapes where necessary.
    formatRegex(str, options) {
      return this.replaceUnicodeCodePointEscapes(str, options);
    }
|
||
|
||
unicodeCodePointToUnicodeEscapes(codePoint) {
|
||
var high, low, toUnicodeEscape;
|
||
toUnicodeEscape = function(val) {
|
||
var str;
|
||
str = val.toString(16);
|
||
return `\\u${repeat('0', 4 - str.length)}${str}`;
|
||
};
|
||
if (codePoint < 0x10000) {
|
||
return toUnicodeEscape(codePoint);
|
||
}
|
||
// surrogate pair
|
||
high = Math.floor((codePoint - 0x10000) / 0x400) + 0xD800;
|
||
low = (codePoint - 0x10000) % 0x400 + 0xDC00;
|
||
return `${toUnicodeEscape(high)}${toUnicodeEscape(low)}`;
|
||
}
|
||
|
||
    // Replace `\u{...}` with `\uxxxx[\uxxxx]` in regexes without `u` flag.
    replaceUnicodeCodePointEscapes(str, options) {
      var shouldReplace;
      // Only rewrite when flags are known and do not include `u`; with the `u`
      // flag, `\u{...}` is natively supported.
      shouldReplace = (options.flags != null) && indexOf.call(options.flags, 'u') < 0;
      return str.replace(UNICODE_CODE_POINT_ESCAPE, (match, escapedBackslash, codePointHex, offset) => {
        var codePointDecimal;
        if (escapedBackslash) {
          // `\\u{...}` — the backslash is itself escaped; leave it untouched.
          return escapedBackslash;
        }
        codePointDecimal = parseInt(codePointHex, 16);
        if (codePointDecimal > 0x10ffff) {
          this.error("unicode code point escapes greater than \\u{10ffff} are not allowed", {
            offset: offset + options.delimiter.length,
            length: codePointHex.length + 4
          });
        }
        if (!shouldReplace) {
          return match;
        }
        return this.unicodeCodePointToUnicodeEscapes(codePointDecimal);
      });
    }
|
||
|
||
    // Validates escapes in strings and regexes; reports (via `error`, which
    // throws) the first invalid escape sequence found in `str`.
    validateEscapes(str, options = {}) {
      var before, hex, invalidEscape, invalidEscapeRegex, match, message, octal, ref, unicode, unicodeCodePoint;
      invalidEscapeRegex = options.isRegex ? REGEX_INVALID_ESCAPE : STRING_INVALID_ESCAPE;
      match = invalidEscapeRegex.exec(str);
      if (!match) {
        return;
      }
      // Capture groups: the text before the escape, then exactly one of
      // octal / hex / unicode-code-point / unicode escape bodies.
      match[0], before = match[1], octal = match[2], hex = match[3], unicodeCodePoint = match[4], unicode = match[5];
      message = octal ? "octal escape sequences are not allowed" : "invalid escape sequence";
      invalidEscape = `\\${octal || hex || unicodeCodePoint || unicode}`;
      return this.error(`${message} ${invalidEscape}`, {
        // `options.offsetInChunk` locates `str` within the chunk; default 0.
        offset: ((ref = options.offsetInChunk) != null ? ref : 0) + match.index + before.length,
        length: invalidEscape.length
      });
    }
|
||
|
||
    // Constructs a string or regex by escaping certain characters, and wraps
    // the result in `options.delimiter`.
    makeDelimitedLiteral(body, options = {}) {
      var regex;
      if (body === '' && options.delimiter === '/') {
        // An empty `//` would be a comment; `(?:)` is the canonical empty regex.
        body = '(?:)';
      }
      // Alternatives, in order: escaped backslash; null character that could be
      // mistaken for an octal escape; (possibly escaped) delimiter; (possibly
      // escaped) newline variants; any other escape.
      regex = RegExp(`(\\\\\\\\)|(\\\\0(?=[1-7]))|\\\\?(${options.delimiter})|\\\\?(?:(\\n)|(\\r)|(\\u2028)|(\\u2029))|(\\\\.)`, "g");
      body = body.replace(regex, function(match, backslash, nul, delimiter, lf, cr, ls, ps, other) {
        switch (false) {
          // Ignore escaped backslashes.
          case !backslash:
            if (options.double) {
              return backslash + backslash;
            } else {
              return backslash;
            }
          case !nul:
            // Render NUL as `\x00` so a following digit can't turn it octal.
            return '\\x00';
          case !delimiter:
            return `\\${delimiter}`;
          case !lf:
            return '\\n';
          case !cr:
            return '\\r';
          case !ls:
            return '\\u2028';
          case !ps:
            return '\\u2029';
          case !other:
            if (options.double) {
              return `\\${other}`;
            } else {
              return other;
            }
        }
      });
      return `${options.delimiter}${body}${options.delimiter}`;
    }
|
||
|
||
    // Pop trailing `;` tokens off the stream, erroring when a popped `;`
    // follows a token that cannot end a statement (`=` or any token in
    // `UNFINISHED`).
    suppressSemicolons() {
      var ref, ref1, results;
      results = [];
      while (this.value() === ';') {
        this.tokens.pop();
        if (ref = (ref1 = this.prev()) != null ? ref1[0] : void 0, indexOf.call(['=', ...UNFINISHED], ref) >= 0) {
          results.push(this.error('unexpected ;'));
        } else {
          results.push(void 0);
        }
      }
      return results;
    }
|
||
|
||
    // Throws an error at either a given offset from the current chunk or at the
    // location of a token (`token[2]`).
    error(message, options = {}) {
      var first_column, first_line, location, ref, ref1;
      // If `options` already looks like location data, use it directly;
      // otherwise derive a location from `options.offset` (default 0) and
      // `options.length` (default 1).
      location = 'first_line' in options ? options : ([first_line, first_column] = this.getLineAndColumnFromChunk((ref = options.offset) != null ? ref : 0), {
        first_line,
        first_column,
        last_column: first_column + ((ref1 = options.length) != null ? ref1 : 1) - 1
      });
      return throwSyntaxError(message, location);
    }
|
||
|
||
};
|
||
|
||
// Helper functions
|
||
// ----------------
|
||
isUnassignable = function(name, displayName = name) {
|
||
switch (false) {
|
||
case indexOf.call([...JS_KEYWORDS, ...COFFEE_KEYWORDS], name) < 0:
|
||
return `keyword '${displayName}' can't be assigned`;
|
||
case indexOf.call(STRICT_PROSCRIBED, name) < 0:
|
||
return `'${displayName}' can't be assigned`;
|
||
case indexOf.call(RESERVED, name) < 0:
|
||
return `reserved word '${displayName}' can't be assigned`;
|
||
default:
|
||
return false;
|
||
}
|
||
};
|
||
|
||
exports.isUnassignable = isUnassignable;
|
||
|
||
// `from` isn’t a CoffeeScript keyword, but it behaves like one in `import` and
|
||
// `export` statements (handled above) and in the declaration line of a `for`
|
||
// loop. Try to detect when `from` is a variable identifier and when it is this
|
||
// “sometimes” keyword.
|
||
isForFrom = function(prev) {
|
||
var ref;
|
||
if (prev[0] === 'IDENTIFIER') {
|
||
// `for i from from`, `for from from iterable`
|
||
if (prev[1] === 'from') {
|
||
prev[1][0] = 'IDENTIFIER';
|
||
true;
|
||
}
|
||
// `for i from iterable`
|
||
return true;
|
||
// `for from…`
|
||
} else if (prev[0] === 'FOR') {
|
||
return false;
|
||
// `for {from}…`, `for [from]…`, `for {a, from}…`, `for {a: from}…`
|
||
} else if ((ref = prev[1]) === '{' || ref === '[' || ref === ',' || ref === ':') {
|
||
return false;
|
||
} else {
|
||
return true;
|
||
}
|
||
};
|
||
|
||
  // Constants
  // ---------

  // Keywords that CoffeeScript shares in common with JavaScript.
  JS_KEYWORDS = ['true', 'false', 'null', 'this', 'new', 'delete', 'typeof', 'in', 'instanceof', 'return', 'throw', 'break', 'continue', 'debugger', 'yield', 'await', 'if', 'else', 'switch', 'for', 'while', 'do', 'try', 'catch', 'finally', 'class', 'extends', 'super', 'import', 'export', 'default'];

  // CoffeeScript-only keywords.
  COFFEE_KEYWORDS = ['undefined', 'Infinity', 'NaN', 'then', 'unless', 'until', 'loop', 'of', 'by', 'when'];

  // CoffeeScript operator words and the JavaScript forms they alias.
  COFFEE_ALIAS_MAP = {
    and: '&&',
    or: '||',
    is: '==',
    isnt: '!=',
    not: '!',
    yes: 'true',
    no: 'false',
    on: 'true',
    off: 'false'
  };

  // The alias words themselves (the keys of `COFFEE_ALIAS_MAP`).
  COFFEE_ALIASES = (function() {
    var results;
    results = [];
    for (key in COFFEE_ALIAS_MAP) {
      results.push(key);
    }
    return results;
  })();

  // The alias words count as CoffeeScript keywords too.
  COFFEE_KEYWORDS = COFFEE_KEYWORDS.concat(COFFEE_ALIASES);

  // The list of keywords that are reserved by JavaScript, but not used, or are
  // used by CoffeeScript internally. We throw an error when these are encountered,
  // to avoid having a JavaScript error at runtime.
  RESERVED = ['case', 'function', 'var', 'void', 'with', 'const', 'let', 'enum', 'native', 'implements', 'interface', 'package', 'private', 'protected', 'public', 'static'];

  STRICT_PROSCRIBED = ['arguments', 'eval'];

  // The superset of both JavaScript keywords and reserved words, none of which may
  // be used as identifiers or properties.
  exports.JS_FORBIDDEN = JS_KEYWORDS.concat(RESERVED).concat(STRICT_PROSCRIBED);

  // The character code of the nasty Microsoft madness otherwise known as the BOM.
  BOM = 65279;

  // Token matching regexes.

  // An identifier, optionally followed by `:` (which marks a property name).
  IDENTIFIER = /^(?!\d)((?:(?!\s)[$\w\x7f-\uffff])+)([^\n\S]*:(?!:))?/;

  // Like `IDENTIFIER`, but includes `-`s and `.`s; must not start with `<`.
  CSX_IDENTIFIER = /^(?![\d<])((?:(?!\s)[\.\-$\w\x7f-\uffff])+)/;

  // Like `IDENTIFIER`, but includes `-`s; optionally followed by `=`
  // (which marks an attribute with a value).
  CSX_ATTRIBUTE = /^(?!\d)((?:(?!\s)[\-$\w\x7f-\uffff])+)([^\S]*=(?!=))?/;

  // Binary, octal, hex, or decimal (optionally exponential) number.
  NUMBER = /^0b[01]+|^0o[0-7]+|^0x[\da-f]+|^\d*\.?\d+(?:e[+-]?\d+)?/i;

  // Alternatives, in order: function arrow; compound assign / compare;
  // zero-fill right shift; doubled `-`/`+`/`:`; logic / shift / power /
  // floor division / modulo (optionally compound); soak access; range or splat.
  OPERATOR = /^(?:[-=]>|[-+*\/%<>&|^!?=]=|>>>=?|([-+:])\1|([&|<>*\/%])\2=?|\?(\.|::)|\.{2,3})/;

  WHITESPACE = /^[^\n\S]+/;

  // Herecomment (`### … ###`) or one-or-more line comments.
  COMMENT = /^\s*###([^#][\s\S]*?)(?:###[^\n\S]*|###$)|^(?:\s*#(?!##[^#]).*)+/;

  CODE = /^[-=]>/;

  MULTI_DENT = /^(?:\n[^\n\S]*)+/;

  // Embedded JavaScript: `` `…` `` and ```` ```…``` ````.
  JSTOKEN = /^`(?!``)((?:[^`\\]|\\[\s\S])*)`/;

  HERE_JSTOKEN = /^```((?:[^`\\]|\\[\s\S]|`(?!``))*)```/;

  // String-matching-regexes.
  STRING_START = /^(?:'''|"""|'|")/;

  STRING_SINGLE = /^(?:[^\\']|\\[\s\S])*/;

  STRING_DOUBLE = /^(?:[^\\"#]|\\[\s\S]|\#(?!\{))*/;

  HEREDOC_SINGLE = /^(?:[^\\']|\\[\s\S]|'(?!''))*/;

  HEREDOC_DOUBLE = /^(?:[^\\"#]|\\[\s\S]|"(?!"")|\#(?!\{))*/;

  // Similar to `HEREDOC_DOUBLE` but there is no escaping; stops at `{`
  // (start of CoffeeScript interpolation) or `<` (maybe a nested CSX tag,
  // not allowed even if bare).
  INSIDE_CSX = /^(?:[^\{<])*/;

  // CoffeeScript interpolation (`{`) or CSX opening tag (`<` not followed by `/`).
  CSX_INTERPOLATION = /^(?:\{|<(?!\/))/;

  // Consume (and preserve) an even number of backslashes; remove escaped newlines.
  STRING_OMIT = /((?:\\\\)+)|\\[^\S\n]*\n\s*/g;

  SIMPLE_STRING_OMIT = /\s*\n\s*/g;

  HEREDOC_INDENT = /\n+([^\n\S]*)(?=\S)/g;

  // Regex-matching-regexes: regex body (escapes and character classes
  // handled; newlines never allowed), then optional closing `/`.
  REGEX = /^\/(?!\/)((?:[^[\/\n\\]|\\[^\n]|\[(?:\\[^\n]|[^\]\n\\])*\])*)(\/)?/;

  REGEX_FLAGS = /^\w*/;

  // Any combination of `imguy` with no duplicates.
  VALID_FLAGS = /^(?!.*(.).*\1)[imguy]*$/;

  HEREGEX = /^(?:[^\\\/#]|\\[\s\S]|\/(?!\/\/)|\#(?!\{))*/;

  // Consume (and preserve) an even number of backslashes; preserve escaped
  // whitespace; remove unescaped whitespace and comments.
  HEREGEX_OMIT = /((?:\\\\)+)|\\(\s)|\s+(?:#.*)?/g;

  // A regex (or heregex) that would begin with `*`, i.e. look like a comment.
  REGEX_ILLEGAL = /^(\/|\/{3}\s*)(\*)/;

  POSSIBLY_DIVISION = /^\/=?\s/;

  // Other regexes.
  HERECOMMENT_ILLEGAL = /\*\//;

  LINE_CONTINUER = /^\s*(?:,|\??\.(?![.\d])|::)/;

  // Invalid escapes (after making sure the escape isn't itself escaped):
  // octal, malformed hex, malformed unicode code point, or malformed unicode.
  STRING_INVALID_ESCAPE = /((?:^|[^\\])(?:\\\\)*)\\(?:(0[0-7]|[1-7])|(x(?![\da-fA-F]{2}).{0,2})|(u\{(?![\da-fA-F]{1,}\})[^}]*\}?)|(u(?!\{|[\da-fA-F]{4}).{0,4}))/;

  // As above, for regexes (only `0`-prefixed octal escapes are invalid there).
  REGEX_INVALID_ESCAPE = /((?:^|[^\\])(?:\\\\)*)\\(?:(0[0-7])|(x(?![\da-fA-F]{2}).{0,2})|(u\{(?![\da-fA-F]{1,}\})[^}]*\}?)|(u(?!\{|[\da-fA-F]{4}).{0,4}))/;

  // Matches `\u{…}` code point escapes (making sure the escape isn't escaped).
  UNICODE_CODE_POINT_ESCAPE = /(\\\\)|\\u\{([\da-fA-F]+)\}/g;

  LEADING_BLANK_LINE = /^[^\n\S]*\n/;

  TRAILING_BLANK_LINE = /\n[^\n\S]*$/;

  TRAILING_SPACES = /\s+$/;

  // Compound assignment tokens.
  COMPOUND_ASSIGN = ['-=', '+=', '/=', '*=', '%=', '||=', '&&=', '?=', '<<=', '>>=', '>>>=', '&=', '^=', '|=', '**=', '//=', '%%='];

  // Unary tokens.
  UNARY = ['NEW', 'TYPEOF', 'DELETE', 'DO'];

  UNARY_MATH = ['!', '~'];

  // Bit-shifting tokens.
  SHIFT = ['<<', '>>', '>>>'];

  // Comparison tokens.
  COMPARE = ['==', '!=', '<', '>', '<=', '>='];

  // Mathematical tokens.
  MATH = ['*', '/', '%', '//', '%%'];

  // Relational tokens that are negatable with `not` prefix.
  RELATION = ['IN', 'OF', 'INSTANCEOF'];

  // Boolean tokens.
  BOOL = ['TRUE', 'FALSE'];

  // Tokens which could legitimately be invoked or indexed. An opening
  // parentheses or bracket following these tokens will be recorded as the start
  // of a function invocation or indexing operation.
  CALLABLE = ['IDENTIFIER', 'PROPERTY', ')', ']', '?', '@', 'THIS', 'SUPER'];

  INDEXABLE = CALLABLE.concat(['NUMBER', 'INFINITY', 'NAN', 'STRING', 'STRING_END', 'REGEX', 'REGEX_END', 'BOOL', 'NULL', 'UNDEFINED', '}', '::']);

  // Tokens which can be the left-hand side of a less-than comparison, i.e. `a<b`.
  COMPARABLE_LEFT_SIDE = ['IDENTIFIER', ')', ']', 'NUMBER'];

  // Tokens which a regular expression will never immediately follow (except spaced
  // CALLABLEs in some cases), but which a division operator can.

  // See: http://www-archive.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
  NOT_REGEX = INDEXABLE.concat(['++', '--']);

  // Tokens that, when immediately preceding a `WHEN`, indicate that the `WHEN`
  // occurs at the start of a line. We disambiguate these from trailing whens to
  // avoid an ambiguity in the grammar.
  LINE_BREAK = ['INDENT', 'OUTDENT', 'TERMINATOR'];

  // Additional indent in front of these is ignored.
  INDENTABLE_CLOSERS = [')', '}', ']'];

  // Tokens that, when appearing at the end of a line, suppress a following TERMINATOR/INDENT token.
  UNFINISHED = ['\\', '.', '?.', '?::', 'UNARY', 'MATH', 'UNARY_MATH', '+', '-', '**', 'SHIFT', 'RELATION', 'COMPARE', '&', '^', '|', '&&', '||', 'BIN?', 'EXTENDS'];
|
||
|
||
}).call(this);
|