Refactor interpolation (and string and regex) handling in lexer
- Fix #3394: Unclosed single-quoted strings (both regular ones and heredocs) used to pass through the lexer, causing a parsing error later, while double-quoted strings caused an error already in the lexing phase. Now both single and double-quoted unclosed strings error out in the lexer (which is the more logical option) with consistent error messages. This also fixes the last comment by @satyr in #3301. - Similar to the above, unclosed heregexes also used to pass through the lexer and not error until in the parsing phase, which resulted in confusing error messages. This has been fixed, too. - Fix #3348, by adding passing tests. - Fix #3529: If a string starts with an interpolation, an empty string is no longer emitted before the interpolation (unless it is needed to coerce the interpolation into a string). - Block comments cannot contain `*/`. Now the error message also shows exactly where the offending `*/` is. This improvement might seem unrelated, but I had to touch that code anyway to refactor string and regex related code, and the change was very trivial. Moreover, it's consistent with the next two points. - Regexes cannot start with `*`. Now the error message also shows exactly where the offending `*` is. (It might actually not be exactly at the start in heregexes.) It is a very minor improvement, but it was trivial to add. - Octal escapes in strings are forbidden in CoffeeScript (just like in JavaScript strict mode). However, this used to be the case only for regular strings. Now they are also forbidden in heredocs. Moreover, the errors now point at the offending octal escape. - Invalid regex flags are no longer allowed. This includes repeated modifiers and unknown ones. Moreover, invalid modifiers do not stop a heregex from being matched, which results in better error messages. - Fix #3621: `///a#{1}///` compiles to `RegExp("a" + 1)`. So does `RegExp("a#{1}")`. 
Still, those two code snippets used to generate different tokens, which is a bit weird, but more importantly causes problems for coffeelint (see clutchski/coffeelint#340). This required lots of tests in test/location.coffee to be updated. Note that some updates to those tests are unrelated to this point; some have been updated to be more consistent (I discovered this because the refactored code happened to be seemingly more correct). - Regular regex literals used to erroneously allow newlines to be escaped, causing invalid JavaScript output. This has been fixed. - Heregexes may now be completely empty (`//////`), instead of erroring out with a confusing message. - Fix #2388: Heredocs and heregexes used to be lexed simply, which meant that you couldn't nest a heredoc within a heredoc (double-quoted, that is) or a heregex inside a heregex. - Fix #2321: If you used division inside interpolation and then a slash later in the string containing that interpolation, the division slash and the latter slash were erroneously matched as a regex. This has been fixed. - Indentation inside interpolations in heredocs no longer affects how much indentation is removed from each line of the heredoc (which is more intuitive). - Whitespace is now correctly trimmed from the start and end of strings in a few edge cases. - Last but not least, the lexing of interpolated strings now seems to be more efficient. For a regular double-quoted string, we used to use a custom function to find the end of it (taking interpolations and interpolations within interpolations etc. into account). Then we used to re-find the interpolations and recursively lex their contents. In effect, the same string was processed twice, or even more in the case of deeper nesting of interpolations. Now the same string is processed just once. - Code duplication between regular strings, heredocs, regular regexes and heregexes has been reduced. - The above two points should result in more easily read code, too.
This commit is contained in:
parent
8e4fb1b937
commit
0dcff507fb
|
@ -41,7 +41,7 @@
|
|||
options.sourceMap = true;
|
||||
options.inline = true;
|
||||
_ref = CoffeeScript.compile(code, options), js = _ref.js, v3SourceMap = _ref.v3SourceMap;
|
||||
return "" + js + "\n//# sourceMappingURL=data:application/json;base64," + (btoa(unescape(encodeURIComponent(v3SourceMap)))) + "\n//# sourceURL=coffeescript";
|
||||
return js + "\n//# sourceMappingURL=data:application/json;base64," + (btoa(unescape(encodeURIComponent(v3SourceMap)))) + "\n//# sourceURL=coffeescript";
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@
|
|||
var cakefilePath, desc, name, relative, spaces, task;
|
||||
relative = path.relative || path.resolve;
|
||||
cakefilePath = path.join(relative(__originalDirname, process.cwd()), 'Cakefile');
|
||||
console.log("" + cakefilePath + " defines the following tasks:\n");
|
||||
console.log(cakefilePath + " defines the following tasks:\n");
|
||||
for (name in tasks) {
|
||||
task = tasks[name];
|
||||
spaces = 20 - name.length;
|
||||
|
|
|
@ -263,7 +263,7 @@
|
|||
if (frame.isEval()) {
|
||||
fileName = frame.getScriptNameOrSourceURL();
|
||||
if (!fileName) {
|
||||
fileLocation = "" + (frame.getEvalOrigin()) + ", ";
|
||||
fileLocation = (frame.getEvalOrigin()) + ", ";
|
||||
}
|
||||
} else {
|
||||
fileName = frame.getFileName();
|
||||
|
@ -272,7 +272,7 @@
|
|||
line = frame.getLineNumber();
|
||||
column = frame.getColumnNumber();
|
||||
source = getSourceMapping(fileName, line, column);
|
||||
fileLocation = source ? "" + fileName + ":" + source[0] + ":" + source[1] : "" + fileName + ":" + line + ":" + column;
|
||||
fileLocation = source ? fileName + ":" + source[0] + ":" + source[1] : fileName + ":" + line + ":" + column;
|
||||
}
|
||||
functionName = frame.getFunctionName();
|
||||
isConstructor = frame.isConstructor();
|
||||
|
@ -283,19 +283,19 @@
|
|||
if (functionName) {
|
||||
tp = as = '';
|
||||
if (typeName && functionName.indexOf(typeName)) {
|
||||
tp = "" + typeName + ".";
|
||||
tp = typeName + ".";
|
||||
}
|
||||
if (methodName && functionName.indexOf("." + methodName) !== functionName.length - methodName.length - 1) {
|
||||
as = " [as " + methodName + "]";
|
||||
}
|
||||
return "" + tp + functionName + as + " (" + fileLocation + ")";
|
||||
} else {
|
||||
return "" + typeName + "." + (methodName || '<anonymous>') + " (" + fileLocation + ")";
|
||||
return typeName + "." + (methodName || '<anonymous>') + " (" + fileLocation + ")";
|
||||
}
|
||||
} else if (isConstructor) {
|
||||
return "new " + (functionName || '<anonymous>') + " (" + fileLocation + ")";
|
||||
} else if (functionName) {
|
||||
return "" + functionName + " (" + fileLocation + ")";
|
||||
return functionName + " (" + fileLocation + ")";
|
||||
} else {
|
||||
return fileLocation;
|
||||
}
|
||||
|
@ -341,7 +341,7 @@
|
|||
}
|
||||
return _results;
|
||||
})();
|
||||
return "" + (err.toString()) + "\n" + (frames.join('\n')) + "\n";
|
||||
return (err.toString()) + "\n" + (frames.join('\n')) + "\n";
|
||||
};
|
||||
|
||||
}).call(this);
|
||||
|
|
|
@ -453,7 +453,7 @@
|
|||
js = ' ';
|
||||
}
|
||||
if (generatedSourceMap) {
|
||||
js = "" + js + "\n//# sourceMappingURL=" + (helpers.baseFileName(sourceMapPath, false, useWinPathSep)) + "\n";
|
||||
js = js + "\n//# sourceMappingURL=" + (helpers.baseFileName(sourceMapPath, false, useWinPathSep)) + "\n";
|
||||
}
|
||||
fs.writeFile(jsPath, js, function(err) {
|
||||
if (err) {
|
||||
|
@ -487,7 +487,7 @@
|
|||
};
|
||||
|
||||
timeLog = function(message) {
|
||||
return console.log("" + ((new Date).toLocaleTimeString()) + " - " + message);
|
||||
return console.log(((new Date).toLocaleTimeString()) + " - " + message);
|
||||
};
|
||||
|
||||
printTokens = function(tokens) {
|
||||
|
|
|
@ -150,7 +150,7 @@
|
|||
locationData = obj;
|
||||
}
|
||||
if (locationData) {
|
||||
return ("" + (locationData.first_line + 1) + ":" + (locationData.first_column + 1) + "-") + ("" + (locationData.last_line + 1) + ":" + (locationData.last_column + 1));
|
||||
return ((locationData.first_line + 1) + ":" + (locationData.first_column + 1) + "-") + ((locationData.last_line + 1) + ":" + (locationData.last_column + 1));
|
||||
} else {
|
||||
return "No location data";
|
||||
}
|
||||
|
@ -231,7 +231,7 @@
|
|||
codeLine = codeLine.slice(0, start) + colorize(codeLine.slice(start, end)) + codeLine.slice(end);
|
||||
marker = colorize(marker);
|
||||
}
|
||||
return "" + filename + ":" + (first_line + 1) + ":" + (first_column + 1) + ": error: " + this.message + "\n" + codeLine + "\n" + marker;
|
||||
return filename + ":" + (first_line + 1) + ":" + (first_column + 1) + ": error: " + this.message + "\n" + codeLine + "\n" + marker;
|
||||
};
|
||||
|
||||
exports.nameWhitespaceCharacter = function(string) {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// Generated by CoffeeScript 1.8.0
|
||||
(function() {
|
||||
var BOM, BOOL, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_ALIAS_MAP, COFFEE_KEYWORDS, COMMENT, COMPARE, COMPOUND_ASSIGN, HEREDOC, HEREDOC_ILLEGAL, HEREDOC_INDENT, HEREGEX, HEREGEX_OMIT, IDENTIFIER, INDENTABLE_CLOSERS, INDEXABLE, INVERSES, JSTOKEN, JS_FORBIDDEN, JS_KEYWORDS, LINE_BREAK, LINE_CONTINUER, LOGIC, Lexer, MATH, MULTILINER, MULTI_DENT, NOT_REGEX, NOT_SPACED_REGEX, NUMBER, OPERATOR, REGEX, RELATION, RESERVED, Rewriter, SHIFT, SIMPLESTR, STRICT_PROSCRIBED, TRAILING_SPACES, UNARY, UNARY_MATH, WHITESPACE, compact, count, invertLiterate, key, last, locationDataToString, repeat, starts, throwSyntaxError, _ref, _ref1,
|
||||
var BOM, BOOL, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_ALIAS_MAP, COFFEE_KEYWORDS, COMMENT, COMPARE, COMPOUND_ASSIGN, HERECOMMENT_ILLEGAL, HEREDOC_DOUBLE, HEREDOC_INDENT, HEREDOC_SINGLE, HEREGEX, HEREGEX_OMIT, IDENTIFIER, INDENTABLE_CLOSERS, INDEXABLE, INVERSES, JSTOKEN, JS_FORBIDDEN, JS_KEYWORDS, LEADING_BLANK_LINE, LINE_BREAK, LINE_CONTINUER, LOGIC, Lexer, MATH, MULTILINER, MULTI_DENT, NOT_REGEX, NOT_SPACED_REGEX, NUMBER, OCTAL_ESCAPE, OPERATOR, REGEX, REGEX_FLAGS, REGEX_ILLEGAL, RELATION, RESERVED, Rewriter, SHIFT, STRICT_PROSCRIBED, STRING_DOUBLE, STRING_OMIT, STRING_SINGLE, STRING_START, TRAILING_BLANK_LINE, TRAILING_SPACES, UNARY, UNARY_MATH, VALID_FLAGS, WHITESPACE, compact, count, invertLiterate, key, last, locationDataToString, repeat, starts, throwSyntaxError, _ref, _ref1,
|
||||
__indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; };
|
||||
|
||||
_ref = require('./rewriter'), Rewriter = _ref.Rewriter, INVERSES = _ref.INVERSES;
|
||||
|
@ -28,9 +28,15 @@
|
|||
code = this.clean(code);
|
||||
i = 0;
|
||||
while (this.chunk = code.slice(i)) {
|
||||
consumed = this.identifierToken() || this.commentToken() || this.whitespaceToken() || this.lineToken() || this.heredocToken() || this.stringToken() || this.numberToken() || this.regexToken() || this.jsToken() || this.literalToken();
|
||||
consumed = this.identifierToken() || this.commentToken() || this.whitespaceToken() || this.lineToken() || this.stringToken() || this.numberToken() || this.regexToken() || this.jsToken() || this.literalToken();
|
||||
_ref2 = this.getLineAndColumnFromChunk(consumed), this.chunkLine = _ref2[0], this.chunkColumn = _ref2[1];
|
||||
i += consumed;
|
||||
if (opts.untilBalanced && this.ends.length === 0) {
|
||||
return {
|
||||
tokens: this.tokens,
|
||||
index: i
|
||||
};
|
||||
}
|
||||
}
|
||||
this.closeIndentation();
|
||||
if (tag = this.ends.pop()) {
|
||||
|
@ -170,60 +176,87 @@
|
|||
};
|
||||
|
||||
Lexer.prototype.stringToken = function() {
|
||||
var inner, innerLen, numBreak, octalEsc, pos, quote, string, trimmed;
|
||||
switch (quote = this.chunk.charAt(0)) {
|
||||
case "'":
|
||||
string = (SIMPLESTR.exec(this.chunk) || [])[0];
|
||||
break;
|
||||
case '"':
|
||||
string = this.balancedString(this.chunk, '"');
|
||||
}
|
||||
if (!string) {
|
||||
var $, attempt, doc, end, heredoc, i, indent, indentRegex, match, quote, regex, start, token, tokens, _ref2, _ref3;
|
||||
quote = (STRING_START.exec(this.chunk) || [])[0];
|
||||
if (!quote) {
|
||||
return 0;
|
||||
}
|
||||
inner = string.slice(1, -1);
|
||||
trimmed = this.removeNewlines(inner);
|
||||
if (quote === '"' && 0 < string.indexOf('#{', 1)) {
|
||||
numBreak = pos = 0;
|
||||
innerLen = inner.length;
|
||||
while (inner.charAt(pos++) === '\n' && pos < innerLen) {
|
||||
numBreak++;
|
||||
regex = (function() {
|
||||
switch (quote) {
|
||||
case "'":
|
||||
return STRING_SINGLE;
|
||||
case '"':
|
||||
return STRING_DOUBLE;
|
||||
case "'''":
|
||||
return HEREDOC_SINGLE;
|
||||
case '"""':
|
||||
return HEREDOC_DOUBLE;
|
||||
}
|
||||
this.interpolateString(trimmed, {
|
||||
strOffset: 1 + numBreak,
|
||||
lexedLength: string.length
|
||||
});
|
||||
})();
|
||||
heredoc = quote.length === 3;
|
||||
start = quote.length;
|
||||
_ref2 = this.matchWithInterpolations(this.chunk.slice(start), regex, quote, start), tokens = _ref2.tokens, end = _ref2.index;
|
||||
$ = tokens.length - 1;
|
||||
if (heredoc) {
|
||||
indent = null;
|
||||
doc = ((function() {
|
||||
var _i, _len, _results;
|
||||
_results = [];
|
||||
for (i = _i = 0, _len = tokens.length; _i < _len; i = ++_i) {
|
||||
token = tokens[i];
|
||||
if (token[0] === 'NEOSTRING') {
|
||||
_results.push(token[1]);
|
||||
}
|
||||
}
|
||||
return _results;
|
||||
})()).join('#{}');
|
||||
while (match = HEREDOC_INDENT.exec(doc)) {
|
||||
attempt = match[1];
|
||||
if (indent === null || (0 < (_ref3 = attempt.length) && _ref3 < indent.length)) {
|
||||
indent = attempt;
|
||||
}
|
||||
}
|
||||
if (indent) {
|
||||
indentRegex = RegExp("^" + indent, "gm");
|
||||
}
|
||||
this.mergeInterpolationTokens(tokens, {
|
||||
quote: quote[0],
|
||||
start: start,
|
||||
end: end
|
||||
}, (function(_this) {
|
||||
return function(value, i) {
|
||||
value = _this.formatString(value);
|
||||
if (i === 0) {
|
||||
value = value.replace(LEADING_BLANK_LINE, '');
|
||||
}
|
||||
if (i === $) {
|
||||
value = value.replace(TRAILING_BLANK_LINE, '');
|
||||
}
|
||||
value = value.replace(indentRegex, '');
|
||||
value = value.replace(MULTILINER, '\\n');
|
||||
return value;
|
||||
};
|
||||
})(this));
|
||||
} else {
|
||||
this.token('STRING', quote + this.escapeLines(trimmed) + quote, 0, string.length);
|
||||
this.mergeInterpolationTokens(tokens, {
|
||||
quote: quote,
|
||||
start: start,
|
||||
end: end
|
||||
}, (function(_this) {
|
||||
return function(value, i) {
|
||||
value = _this.formatString(value);
|
||||
value = value.replace(STRING_OMIT, function(match, offset) {
|
||||
if ((i === 0 && offset === 0) || (i === $ && offset + match.length === value.length)) {
|
||||
return '';
|
||||
} else {
|
||||
return ' ';
|
||||
}
|
||||
});
|
||||
return value;
|
||||
};
|
||||
})(this));
|
||||
}
|
||||
if (octalEsc = /^(?:\\.|[^\\])*\\(?:0[0-7]|[1-7])/.test(string)) {
|
||||
this.error("octal escape sequences " + string + " are not allowed");
|
||||
}
|
||||
return string.length;
|
||||
};
|
||||
|
||||
Lexer.prototype.heredocToken = function() {
|
||||
var doc, heredoc, match, quote, strOffset;
|
||||
if (!(match = HEREDOC.exec(this.chunk))) {
|
||||
return 0;
|
||||
}
|
||||
heredoc = match[0];
|
||||
quote = heredoc.charAt(0);
|
||||
doc = this.sanitizeHeredoc(match[2], {
|
||||
quote: quote,
|
||||
indent: null
|
||||
});
|
||||
if (quote === '"' && 0 <= doc.indexOf('#{')) {
|
||||
strOffset = match[2].charAt(0) === '\n' ? 4 : 3;
|
||||
this.interpolateString(doc, {
|
||||
heredoc: true,
|
||||
strOffset: strOffset,
|
||||
lexedLength: heredoc.length
|
||||
});
|
||||
} else {
|
||||
this.token('STRING', this.makeString(doc, quote, true), 0, heredoc.length);
|
||||
}
|
||||
return heredoc.length;
|
||||
return end;
|
||||
};
|
||||
|
||||
Lexer.prototype.commentToken = function() {
|
||||
|
@ -233,10 +266,13 @@
|
|||
}
|
||||
comment = match[0], here = match[1];
|
||||
if (here) {
|
||||
this.token('HERECOMMENT', this.sanitizeHeredoc(here, {
|
||||
herecomment: true,
|
||||
indent: repeat(' ', this.indent)
|
||||
}), 0, comment.length);
|
||||
if (match = HERECOMMENT_ILLEGAL.exec(comment)) {
|
||||
this.error("block comments cannot contain " + match[0], match.index);
|
||||
}
|
||||
if (here.indexOf('\n') >= 0) {
|
||||
here = here.replace(RegExp("\\n" + (repeat(' ', this.indent)), "g"), '\n');
|
||||
}
|
||||
this.token('HERECOMMENT', here, 0, comment.length);
|
||||
}
|
||||
return comment.length;
|
||||
};
|
||||
|
@ -251,86 +287,57 @@
|
|||
};
|
||||
|
||||
Lexer.prototype.regexToken = function() {
|
||||
var flags, length, match, prev, regex, _ref2, _ref3;
|
||||
if (this.chunk.charAt(0) !== '/') {
|
||||
return 0;
|
||||
}
|
||||
if (length = this.heregexToken()) {
|
||||
return length;
|
||||
}
|
||||
prev = last(this.tokens);
|
||||
if (prev && (_ref2 = prev[0], __indexOf.call((prev.spaced ? NOT_REGEX : NOT_SPACED_REGEX), _ref2) >= 0)) {
|
||||
return 0;
|
||||
}
|
||||
if (!(match = REGEX.exec(this.chunk))) {
|
||||
return 0;
|
||||
}
|
||||
_ref3 = match, match = _ref3[0], regex = _ref3[1], flags = _ref3[2];
|
||||
if (regex === '//') {
|
||||
return 0;
|
||||
}
|
||||
if (regex.slice(0, 2) === '/*') {
|
||||
this.error('regular expressions cannot begin with `*`');
|
||||
}
|
||||
this.token('REGEX', "" + regex + flags, 0, match.length);
|
||||
return match.length;
|
||||
};
|
||||
|
||||
Lexer.prototype.heregexToken = function() {
|
||||
var body, flags, flagsOffset, heregex, match, plusToken, prev, re, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
|
||||
if (!(match = HEREGEX.exec(this.chunk))) {
|
||||
return 0;
|
||||
}
|
||||
heregex = match[0], body = match[1], flags = match[2];
|
||||
if (0 > body.indexOf('#{')) {
|
||||
re = this.escapeLines(body.replace(HEREGEX_OMIT, '$1$2').replace(/\//g, '\\/'), true);
|
||||
if (re.match(/^\*/)) {
|
||||
this.error('regular expressions cannot begin with `*`');
|
||||
}
|
||||
this.token('REGEX', "/" + (re || '(?:)') + "/" + flags, 0, heregex.length);
|
||||
return heregex.length;
|
||||
}
|
||||
this.token('IDENTIFIER', 'RegExp', 0, 0);
|
||||
this.token('CALL_START', '(', 0, 0);
|
||||
tokens = [];
|
||||
_ref2 = this.interpolateString(body, {
|
||||
regex: true,
|
||||
strOffset: 3
|
||||
});
|
||||
for (_i = 0, _len = _ref2.length; _i < _len; _i++) {
|
||||
token = _ref2[_i];
|
||||
tag = token[0], value = token[1];
|
||||
if (tag === 'TOKENS') {
|
||||
tokens.push.apply(tokens, value);
|
||||
} else if (tag === 'NEOSTRING') {
|
||||
if (!(value = value.replace(HEREGEX_OMIT, '$1$2'))) {
|
||||
continue;
|
||||
var end, flags, index, match, prev, re, regex, tokens, _ref2, _ref3;
|
||||
switch (false) {
|
||||
case !(match = REGEX_ILLEGAL.exec(this.chunk)):
|
||||
this.error("regular expressions cannot begin with " + match[2], match.index + match[1].length);
|
||||
break;
|
||||
case this.chunk.slice(0, 3) !== '///':
|
||||
_ref2 = this.matchWithInterpolations(this.chunk.slice(3), HEREGEX, '///', 3), tokens = _ref2.tokens, index = _ref2.index;
|
||||
break;
|
||||
case !(match = REGEX.exec(this.chunk)):
|
||||
regex = match[0];
|
||||
index = regex.length;
|
||||
prev = last(this.tokens);
|
||||
if (prev && (_ref3 = prev[0], __indexOf.call((prev.spaced ? NOT_REGEX : NOT_SPACED_REGEX), _ref3) >= 0)) {
|
||||
return 0;
|
||||
}
|
||||
value = value.replace(/\\/g, '\\\\');
|
||||
token[0] = 'STRING';
|
||||
token[1] = this.makeString(value, '"', true);
|
||||
tokens.push(token);
|
||||
} else {
|
||||
this.error("Unexpected " + tag);
|
||||
}
|
||||
prev = last(this.tokens);
|
||||
plusToken = ['+', '+'];
|
||||
plusToken[2] = prev[2];
|
||||
tokens.push(plusToken);
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
tokens.pop();
|
||||
if (((_ref3 = tokens[0]) != null ? _ref3[0] : void 0) !== 'STRING') {
|
||||
this.token('STRING', '""', 0, 0);
|
||||
this.token('+', '+', 0, 0);
|
||||
flags = REGEX_FLAGS.exec(this.chunk.slice(index))[0];
|
||||
end = index + flags.length;
|
||||
switch (false) {
|
||||
case !!VALID_FLAGS.test(flags):
|
||||
this.error("invalid regular expression flags " + flags, index);
|
||||
break;
|
||||
case !regex:
|
||||
this.token('REGEX', "" + regex + flags);
|
||||
break;
|
||||
case tokens.length !== 1:
|
||||
re = this.formatHeregex(tokens[0][1]).replace(/\//g, '\\/');
|
||||
this.token('REGEX', "/" + (re || '(?:)') + "/" + flags);
|
||||
break;
|
||||
default:
|
||||
this.token('IDENTIFIER', 'RegExp', 0, 0);
|
||||
this.token('CALL_START', '(', 0, 0);
|
||||
this.mergeInterpolationTokens(tokens, {
|
||||
quote: '"',
|
||||
start: 3,
|
||||
end: end
|
||||
}, (function(_this) {
|
||||
return function(value) {
|
||||
return _this.formatHeregex(value).replace(/\\/g, '\\\\');
|
||||
};
|
||||
})(this));
|
||||
if (flags) {
|
||||
this.token(',', ',', index, 0);
|
||||
this.token('STRING', '"' + flags + '"', index, flags.length);
|
||||
}
|
||||
this.token(')', ')', end, 0);
|
||||
}
|
||||
(_ref4 = this.tokens).push.apply(_ref4, tokens);
|
||||
if (flags) {
|
||||
flagsOffset = heregex.lastIndexOf(flags);
|
||||
this.token(',', ',', flagsOffset, 0);
|
||||
this.token('STRING', '"' + flags + '"', flagsOffset, flags.length);
|
||||
}
|
||||
this.token(')', ')', heregex.length - 1, 0);
|
||||
return heregex.length;
|
||||
return end;
|
||||
};
|
||||
|
||||
Lexer.prototype.lineToken = function() {
|
||||
|
@ -514,33 +521,6 @@
|
|||
return value.length;
|
||||
};
|
||||
|
||||
Lexer.prototype.sanitizeHeredoc = function(doc, options) {
|
||||
var attempt, herecomment, indent, match, _ref2;
|
||||
indent = options.indent, herecomment = options.herecomment;
|
||||
if (herecomment) {
|
||||
if (HEREDOC_ILLEGAL.test(doc)) {
|
||||
this.error("block comment cannot contain \"*/\", starting");
|
||||
}
|
||||
if (doc.indexOf('\n') < 0) {
|
||||
return doc;
|
||||
}
|
||||
} else {
|
||||
while (match = HEREDOC_INDENT.exec(doc)) {
|
||||
attempt = match[1];
|
||||
if (indent === null || (0 < (_ref2 = attempt.length) && _ref2 < indent.length)) {
|
||||
indent = attempt;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (indent) {
|
||||
doc = doc.replace(RegExp("\\n" + indent, "g"), '\n');
|
||||
}
|
||||
if (!herecomment) {
|
||||
doc = doc.replace(/^\n/, '');
|
||||
}
|
||||
return doc;
|
||||
};
|
||||
|
||||
Lexer.prototype.tagParameters = function() {
|
||||
var i, stack, tok, tokens;
|
||||
if (this.tag() !== ')') {
|
||||
|
@ -574,113 +554,84 @@
|
|||
return this.outdentToken(this.indent);
|
||||
};
|
||||
|
||||
Lexer.prototype.balancedString = function(str, end) {
|
||||
var continueCount, i, letter, match, prev, stack, _i, _ref2;
|
||||
continueCount = 0;
|
||||
stack = [end];
|
||||
for (i = _i = 1, _ref2 = str.length; 1 <= _ref2 ? _i < _ref2 : _i > _ref2; i = 1 <= _ref2 ? ++_i : --_i) {
|
||||
if (continueCount) {
|
||||
--continueCount;
|
||||
continue;
|
||||
Lexer.prototype.matchWithInterpolations = function(str, regex, end, offsetInChunk) {
|
||||
var column, index, line, nested, strPart, tokens, _ref2, _ref3, _ref4;
|
||||
tokens = [];
|
||||
while (true) {
|
||||
strPart = regex.exec(str)[0];
|
||||
tokens.push(this.makeToken('NEOSTRING', strPart, offsetInChunk));
|
||||
str = str.slice(strPart.length);
|
||||
offsetInChunk += strPart.length;
|
||||
if (str.slice(0, 2) !== '#{') {
|
||||
break;
|
||||
}
|
||||
switch (letter = str.charAt(i)) {
|
||||
case '\\':
|
||||
++continueCount;
|
||||
continue;
|
||||
case end:
|
||||
stack.pop();
|
||||
if (!stack.length) {
|
||||
return str.slice(0, +i + 1 || 9e9);
|
||||
}
|
||||
end = stack[stack.length - 1];
|
||||
continue;
|
||||
_ref2 = this.getLineAndColumnFromChunk(offsetInChunk + 1), line = _ref2[0], column = _ref2[1];
|
||||
_ref3 = new Lexer().tokenize(str.slice(1), {
|
||||
line: line,
|
||||
column: column,
|
||||
untilBalanced: true
|
||||
}), nested = _ref3.tokens, index = _ref3.index;
|
||||
index += 1;
|
||||
nested.shift();
|
||||
nested.pop();
|
||||
if (((_ref4 = nested[0]) != null ? _ref4[0] : void 0) === 'TERMINATOR') {
|
||||
nested.shift();
|
||||
}
|
||||
if (end === '}' && (letter === '"' || letter === "'")) {
|
||||
stack.push(end = letter);
|
||||
} else if (end === '}' && letter === '/' && (match = HEREGEX.exec(str.slice(i)) || REGEX.exec(str.slice(i)))) {
|
||||
continueCount += match[0].length - 1;
|
||||
} else if (end === '}' && letter === '{') {
|
||||
stack.push(end = '}');
|
||||
} else if (end === '"' && prev === '#' && letter === '{') {
|
||||
stack.push(end = '}');
|
||||
if (nested.length > 1) {
|
||||
nested.unshift(this.makeToken('(', '(', offsetInChunk + 1, 0));
|
||||
nested.push(this.makeToken(')', ')', offsetInChunk + 1 + index, 0));
|
||||
}
|
||||
prev = letter;
|
||||
tokens.push(['TOKENS', nested]);
|
||||
str = str.slice(index);
|
||||
offsetInChunk += index;
|
||||
}
|
||||
return this.error("missing " + (stack.pop()) + ", starting");
|
||||
if (str.slice(0, end.length) !== end) {
|
||||
this.error("missing " + end);
|
||||
}
|
||||
return {
|
||||
tokens: tokens,
|
||||
index: offsetInChunk + end.length
|
||||
};
|
||||
};
|
||||
|
||||
Lexer.prototype.interpolateString = function(str, options) {
|
||||
var column, errorToken, expr, heredoc, i, inner, interpolated, len, letter, lexedLength, line, locationToken, nested, offsetInChunk, pi, plusToken, popped, regex, rparen, strOffset, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
|
||||
if (options == null) {
|
||||
options = {};
|
||||
}
|
||||
heredoc = options.heredoc, regex = options.regex, offsetInChunk = options.offsetInChunk, strOffset = options.strOffset, lexedLength = options.lexedLength;
|
||||
offsetInChunk || (offsetInChunk = 0);
|
||||
strOffset || (strOffset = 0);
|
||||
lexedLength || (lexedLength = str.length);
|
||||
tokens = [];
|
||||
pi = 0;
|
||||
i = -1;
|
||||
while (letter = str.charAt(i += 1)) {
|
||||
if (letter === '\\') {
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
if (!(letter === '#' && str.charAt(i + 1) === '{' && (expr = this.balancedString(str.slice(i + 1), '}')))) {
|
||||
continue;
|
||||
}
|
||||
if (pi < i) {
|
||||
tokens.push(this.makeToken('NEOSTRING', str.slice(pi, i), strOffset + pi));
|
||||
}
|
||||
if (!errorToken) {
|
||||
errorToken = this.makeToken('', 'string interpolation', offsetInChunk + i + 1, 2);
|
||||
}
|
||||
inner = expr.slice(1, -1);
|
||||
if (inner.length) {
|
||||
_ref2 = this.getLineAndColumnFromChunk(strOffset + i + 2), line = _ref2[0], column = _ref2[1];
|
||||
nested = new Lexer().tokenize(inner, {
|
||||
line: line,
|
||||
column: column,
|
||||
rewrite: false
|
||||
});
|
||||
popped = nested.pop();
|
||||
if (((_ref3 = nested[0]) != null ? _ref3[0] : void 0) === 'TERMINATOR') {
|
||||
popped = nested.shift();
|
||||
}
|
||||
if (len = nested.length) {
|
||||
if (len > 1) {
|
||||
nested.unshift(this.makeToken('(', '(', strOffset + i + 1, 0));
|
||||
nested.push(this.makeToken(')', ')', strOffset + i + 1 + inner.length, 0));
|
||||
}
|
||||
tokens.push(['TOKENS', nested]);
|
||||
}
|
||||
}
|
||||
i += expr.length;
|
||||
pi = i + 1;
|
||||
}
|
||||
if ((i > pi && pi < str.length)) {
|
||||
tokens.push(this.makeToken('NEOSTRING', str.slice(pi), strOffset + pi));
|
||||
}
|
||||
if (regex) {
|
||||
return tokens;
|
||||
}
|
||||
if (!tokens.length) {
|
||||
return this.token('STRING', '""', offsetInChunk, lexedLength);
|
||||
}
|
||||
if (tokens[0][0] !== 'NEOSTRING') {
|
||||
tokens.unshift(this.makeToken('NEOSTRING', '', offsetInChunk));
|
||||
}
|
||||
Lexer.prototype.mergeInterpolationTokens = function(tokens, _arg, fn) {
|
||||
var converted, end, errorToken, firstEmptyStringIndex, firstIndex, i, interpolated, locationToken, plusToken, quote, rparen, start, tag, token, tokensToPush, value, _i, _len, _ref2;
|
||||
quote = _arg.quote, start = _arg.start, end = _arg.end;
|
||||
if (interpolated = tokens.length > 1) {
|
||||
this.token('(', '(', offsetInChunk, 0, errorToken);
|
||||
errorToken = this.makeToken('', 'interpolation', start + tokens[0][1].length, 2);
|
||||
this.token('(', '(', 0, 0, errorToken);
|
||||
}
|
||||
firstIndex = this.tokens.length;
|
||||
for (i = _i = 0, _len = tokens.length; _i < _len; i = ++_i) {
|
||||
token = tokens[i];
|
||||
tag = token[0], value = token[1];
|
||||
if (i) {
|
||||
if (i) {
|
||||
plusToken = this.token('+', '+');
|
||||
}
|
||||
locationToken = tag === 'TOKENS' ? value[0] : token;
|
||||
switch (tag) {
|
||||
case 'TOKENS':
|
||||
if (value.length === 0) {
|
||||
continue;
|
||||
}
|
||||
locationToken = value[0];
|
||||
tokensToPush = value;
|
||||
break;
|
||||
case 'NEOSTRING':
|
||||
converted = fn(token[1], i);
|
||||
if (converted.length === 0) {
|
||||
if (i === 0) {
|
||||
firstEmptyStringIndex = this.tokens.length;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (i === 2 && (firstEmptyStringIndex != null)) {
|
||||
this.tokens.splice(firstEmptyStringIndex, 2);
|
||||
}
|
||||
token[0] = 'STRING';
|
||||
token[1] = this.makeString(converted, quote);
|
||||
locationToken = token;
|
||||
tokensToPush = [token];
|
||||
}
|
||||
if (this.tokens.length > firstIndex) {
|
||||
plusToken = this.token('+', '+');
|
||||
plusToken[2] = {
|
||||
first_line: locationToken[2].first_line,
|
||||
first_column: locationToken[2].first_column,
|
||||
|
@ -688,22 +639,12 @@
|
|||
last_column: locationToken[2].first_column
|
||||
};
|
||||
}
|
||||
if (tag === 'TOKENS') {
|
||||
(_ref4 = this.tokens).push.apply(_ref4, value);
|
||||
} else if (tag === 'NEOSTRING') {
|
||||
token[0] = 'STRING';
|
||||
token[1] = this.makeString(value, '"', heredoc);
|
||||
this.tokens.push(token);
|
||||
} else {
|
||||
this.error("Unexpected " + tag);
|
||||
}
|
||||
(_ref2 = this.tokens).push.apply(_ref2, tokensToPush);
|
||||
}
|
||||
if (interpolated) {
|
||||
rparen = this.makeToken(')', ')', offsetInChunk + lexedLength, 0);
|
||||
rparen.stringEnd = true;
|
||||
this.tokens.push(rparen);
|
||||
rparen = this.token(')', ')', end, 0);
|
||||
return rparen.stringEnd = true;
|
||||
}
|
||||
return tokens;
|
||||
};
|
||||
|
||||
Lexer.prototype.pair = function(tag) {
|
||||
|
@ -780,26 +721,22 @@
|
|||
return LINE_CONTINUER.test(this.chunk) || ((_ref2 = this.tag()) === '\\' || _ref2 === '.' || _ref2 === '?.' || _ref2 === '?::' || _ref2 === 'UNARY' || _ref2 === 'MATH' || _ref2 === 'UNARY_MATH' || _ref2 === '+' || _ref2 === '-' || _ref2 === 'YIELD' || _ref2 === '**' || _ref2 === 'SHIFT' || _ref2 === 'RELATION' || _ref2 === 'COMPARE' || _ref2 === 'LOGIC' || _ref2 === 'THROW' || _ref2 === 'EXTENDS');
|
||||
};
|
||||
|
||||
Lexer.prototype.removeNewlines = function(str) {
|
||||
return str.replace(/^\s*\n\s*/, '').replace(/([^\\]|\\\\)\s*\n\s*$/, '$1');
|
||||
};
|
||||
|
||||
Lexer.prototype.escapeLines = function(str, heredoc) {
|
||||
str = str.replace(/\\[^\S\n]*(\n|\\)\s*/g, function(escaped, character) {
|
||||
Lexer.prototype.formatString = function(str) {
|
||||
return str.replace(/\\[^\S\n]*(\n|\\)\s*/g, function(escaped, character) {
|
||||
if (character === '\n') {
|
||||
return '';
|
||||
} else {
|
||||
return escaped;
|
||||
}
|
||||
});
|
||||
if (heredoc) {
|
||||
return str.replace(MULTILINER, '\\n');
|
||||
} else {
|
||||
return str.replace(/\s*\n\s*/g, ' ');
|
||||
}
|
||||
};
|
||||
|
||||
Lexer.prototype.makeString = function(body, quote, heredoc) {
|
||||
Lexer.prototype.formatHeregex = function(str) {
|
||||
return str.replace(HEREGEX_OMIT, '$1$2').replace(MULTILINER, '\\n');
|
||||
};
|
||||
|
||||
Lexer.prototype.makeString = function(body, quote) {
|
||||
var match;
|
||||
if (!body) {
|
||||
return quote + quote;
|
||||
}
|
||||
|
@ -811,7 +748,10 @@
|
|||
}
|
||||
});
|
||||
body = body.replace(RegExp("" + quote, "g"), '\\$&');
|
||||
return quote + this.escapeLines(body, heredoc) + quote;
|
||||
if (match = OCTAL_ESCAPE.exec(body)) {
|
||||
this.error("octal escape sequences are not allowed " + match[2], match.index + match[1].length + 1);
|
||||
}
|
||||
return quote + body + quote;
|
||||
};
|
||||
|
||||
Lexer.prototype.error = function(message, offset) {
|
||||
|
@ -873,8 +813,6 @@
|
|||
|
||||
NUMBER = /^0b[01]+|^0o[0-7]+|^0x[\da-f]+|^\d*\.?\d+(?:e[+-]?\d+)?/i;
|
||||
|
||||
HEREDOC = /^("""|''')((?:\\[\s\S]|[^\\])*?)(?:\n[^\n\S]*)?\1/;
|
||||
|
||||
OPERATOR = /^(?:[-=]>|[-+*\/%<>&|^!?=]=|>>>=?|([-+:])\1|([&|<>*\/%])\2=?|\?(\.|::)|\.{2,3})/;
|
||||
|
||||
WHITESPACE = /^[^\n\S]+/;
|
||||
|
@ -885,24 +823,46 @@
|
|||
|
||||
MULTI_DENT = /^(?:\n[^\n\S]*)+/;
|
||||
|
||||
SIMPLESTR = /^'[^\\']*(?:\\[\s\S][^\\']*)*'/;
|
||||
|
||||
JSTOKEN = /^`[^\\`]*(?:\\.[^\\`]*)*`/;
|
||||
|
||||
REGEX = /^(\/(?![\s=])[^[\/\n\\]*(?:(?:\\[\s\S]|\[[^\]\n\\]*(?:\\[\s\S][^\]\n\\]*)*])[^[\/\n\\]*)*\/)([imgy]{0,4})(?!\w)/;
|
||||
STRING_START = /^(?:'''|"""|'|")/;
|
||||
|
||||
HEREGEX = /^\/{3}((?:\\?[\s\S])+?)\/{3}([imgy]{0,4})(?!\w)/;
|
||||
STRING_SINGLE = /^(?:[^\\']|\\[\s\S])*/;
|
||||
|
||||
STRING_DOUBLE = /^(?:[^\\"#]|\\[\s\S]|\#(?!\{))*/;
|
||||
|
||||
HEREDOC_SINGLE = /^(?:[^\\']|\\[\s\S]|'(?!''))*/;
|
||||
|
||||
HEREDOC_DOUBLE = /^(?:[^\\"#]|\\[\s\S]|"(?!"")|\#(?!\{))*/;
|
||||
|
||||
STRING_OMIT = /\s*\n\s*/g;
|
||||
|
||||
HEREDOC_INDENT = /\n+([^\n\S]*)(?=\S)/g;
|
||||
|
||||
REGEX = /^\/(?![\s=])(?:[^[\/\n\\]|\\.|\[(?:\\.|[^\]\n\\])*])+\//;
|
||||
|
||||
REGEX_FLAGS = /^\w*/;
|
||||
|
||||
VALID_FLAGS = /^(?!.*(.).*\1)[imgy]*$/;
|
||||
|
||||
HEREGEX = /^(?:[^\\\/#]|\\[\s\S]|\/(?!\/\/)|\#(?!\{))*/;
|
||||
|
||||
HEREGEX_OMIT = /((?:\\\\)+)|\\(\s|\/)|\s+(?:#.*)?/g;
|
||||
|
||||
REGEX_ILLEGAL = /^(\/|\/{3}\s*)(\*)/;
|
||||
|
||||
MULTILINER = /\n/g;
|
||||
|
||||
HEREDOC_INDENT = /\n+([^\n\S]*)/g;
|
||||
|
||||
HEREDOC_ILLEGAL = /\*\//;
|
||||
HERECOMMENT_ILLEGAL = /\*\//;
|
||||
|
||||
LINE_CONTINUER = /^\s*(?:,|\??\.(?![.\d])|::)/;
|
||||
|
||||
OCTAL_ESCAPE = /^((?:\\.|[^\\])*)(\\(?:0[0-7]|[1-7]))/;
|
||||
|
||||
LEADING_BLANK_LINE = /^[^\n\S]*\n/;
|
||||
|
||||
TRAILING_BLANK_LINE = /\n[^\n\S]*$/;
|
||||
|
||||
TRAILING_SPACES = /\s+$/;
|
||||
|
||||
COMPOUND_ASSIGN = ['-=', '+=', '/=', '*=', '%=', '||=', '&&=', '?=', '<<=', '>>=', '>>>=', '&=', '^=', '|=', '**=', '//=', '%%='];
|
||||
|
|
|
@ -136,7 +136,7 @@
|
|||
var me;
|
||||
me = this.unwrapAll();
|
||||
if (res) {
|
||||
return new Call(new Literal("" + res + ".push"), [me]);
|
||||
return new Call(new Literal(res + ".push"), [me]);
|
||||
} else {
|
||||
return new Return(me);
|
||||
}
|
||||
|
@ -485,7 +485,7 @@
|
|||
if (i) {
|
||||
fragments.push(this.makeCode('\n'));
|
||||
}
|
||||
fragments.push(this.makeCode("" + this.tab + "var "));
|
||||
fragments.push(this.makeCode(this.tab + "var "));
|
||||
if (declars) {
|
||||
fragments.push(this.makeCode(scope.declaredVariables().join(', ')));
|
||||
}
|
||||
|
@ -910,7 +910,7 @@
|
|||
accesses.push(new Access(new Literal(method.name)));
|
||||
return (new Value(new Literal(method.klass), accesses)).compile(o);
|
||||
} else if (method != null ? method.ctor : void 0) {
|
||||
return "" + method.name + ".__super__.constructor";
|
||||
return method.name + ".__super__.constructor";
|
||||
} else {
|
||||
return this.error('cannot call super outside of an instance method.');
|
||||
}
|
||||
|
@ -1012,7 +1012,7 @@
|
|||
Call.prototype.compileSplat = function(o, splatArgs) {
|
||||
var answer, base, fun, idt, name, ref;
|
||||
if (this.isSuper) {
|
||||
return [].concat(this.makeCode("" + (this.superReference(o)) + ".apply(" + (this.superThis(o)) + ", "), splatArgs, this.makeCode(")"));
|
||||
return [].concat(this.makeCode((this.superReference(o)) + ".apply(" + (this.superThis(o)) + ", "), splatArgs, this.makeCode(")"));
|
||||
}
|
||||
if (this.isNew) {
|
||||
idt = this.tab + TAB;
|
||||
|
@ -1151,23 +1151,23 @@
|
|||
idx = del(o, 'index');
|
||||
idxName = del(o, 'name');
|
||||
namedIndex = idxName && idxName !== idx;
|
||||
varPart = "" + idx + " = " + this.fromC;
|
||||
varPart = idx + " = " + this.fromC;
|
||||
if (this.toC !== this.toVar) {
|
||||
varPart += ", " + this.toC;
|
||||
}
|
||||
if (this.step !== this.stepVar) {
|
||||
varPart += ", " + this.step;
|
||||
}
|
||||
_ref2 = ["" + idx + " <" + this.equals, "" + idx + " >" + this.equals], lt = _ref2[0], gt = _ref2[1];
|
||||
condPart = this.stepNum ? parseNum(this.stepNum[0]) > 0 ? "" + lt + " " + this.toVar : "" + gt + " " + this.toVar : known ? ((_ref3 = [parseNum(this.fromNum[0]), parseNum(this.toNum[0])], from = _ref3[0], to = _ref3[1], _ref3), from <= to ? "" + lt + " " + to : "" + gt + " " + to) : (cond = this.stepVar ? "" + this.stepVar + " > 0" : "" + this.fromVar + " <= " + this.toVar, "" + cond + " ? " + lt + " " + this.toVar + " : " + gt + " " + this.toVar);
|
||||
stepPart = this.stepVar ? "" + idx + " += " + this.stepVar : known ? namedIndex ? from <= to ? "++" + idx : "--" + idx : from <= to ? "" + idx + "++" : "" + idx + "--" : namedIndex ? "" + cond + " ? ++" + idx + " : --" + idx : "" + cond + " ? " + idx + "++ : " + idx + "--";
|
||||
_ref2 = [idx + " <" + this.equals, idx + " >" + this.equals], lt = _ref2[0], gt = _ref2[1];
|
||||
condPart = this.stepNum ? parseNum(this.stepNum[0]) > 0 ? lt + " " + this.toVar : gt + " " + this.toVar : known ? ((_ref3 = [parseNum(this.fromNum[0]), parseNum(this.toNum[0])], from = _ref3[0], to = _ref3[1], _ref3), from <= to ? lt + " " + to : gt + " " + to) : (cond = this.stepVar ? this.stepVar + " > 0" : this.fromVar + " <= " + this.toVar, cond + " ? " + lt + " " + this.toVar + " : " + gt + " " + this.toVar);
|
||||
stepPart = this.stepVar ? idx + " += " + this.stepVar : known ? namedIndex ? from <= to ? "++" + idx : "--" + idx : from <= to ? idx + "++" : idx + "--" : namedIndex ? cond + " ? ++" + idx + " : --" + idx : cond + " ? " + idx + "++ : " + idx + "--";
|
||||
if (namedIndex) {
|
||||
varPart = "" + idxName + " = " + varPart;
|
||||
varPart = idxName + " = " + varPart;
|
||||
}
|
||||
if (namedIndex) {
|
||||
stepPart = "" + idxName + " = " + stepPart;
|
||||
stepPart = idxName + " = " + stepPart;
|
||||
}
|
||||
return [this.makeCode("" + varPart + "; " + condPart + "; " + stepPart)];
|
||||
return [this.makeCode(varPart + "; " + condPart + "; " + stepPart)];
|
||||
};
|
||||
|
||||
Range.prototype.compileArray = function(o) {
|
||||
|
@ -1191,8 +1191,8 @@
|
|||
o.index = i;
|
||||
body = fragmentsToText(this.compileNode(o));
|
||||
} else {
|
||||
vars = ("" + i + " = " + this.fromC) + (this.toC !== this.toVar ? ", " + this.toC : '');
|
||||
cond = "" + this.fromVar + " <= " + this.toVar;
|
||||
vars = (i + " = " + this.fromC) + (this.toC !== this.toVar ? ", " + this.toC : '');
|
||||
cond = this.fromVar + " <= " + this.toVar;
|
||||
body = "var " + vars + "; " + cond + " ? " + i + " <" + this.equals + " " + this.toVar + " : " + i + " >" + this.equals + " " + this.toVar + "; " + cond + " ? " + i + "++ : " + i + "--";
|
||||
}
|
||||
post = "{ " + result + ".push(" + i + "); }\n" + idt + "return " + result + ";\n" + o.indent;
|
||||
|
@ -1289,7 +1289,7 @@
|
|||
}
|
||||
}
|
||||
answer.unshift(this.makeCode("{" + (props.length && '\n')));
|
||||
answer.push(this.makeCode("" + (props.length && '\n' + this.tab) + "}"));
|
||||
answer.push(this.makeCode((props.length && '\n' + this.tab) + "}"));
|
||||
if (this.front) {
|
||||
return this.wrapInBraces(answer);
|
||||
} else {
|
||||
|
@ -1423,7 +1423,7 @@
|
|||
for (_i = 0, _len = _ref2.length; _i < _len; _i++) {
|
||||
bvar = _ref2[_i];
|
||||
lhs = (new Value(new Literal("this"), [new Access(bvar)])).compile(o);
|
||||
this.ctor.body.unshift(new Literal("" + lhs + " = " + (utility('bind')) + "(" + lhs + ", this)"));
|
||||
this.ctor.body.unshift(new Literal(lhs + " = " + (utility('bind')) + "(" + lhs + ", this)"));
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -1510,9 +1510,9 @@
|
|||
if (!this.ctor) {
|
||||
this.ctor = new Code;
|
||||
if (this.externalCtor) {
|
||||
this.ctor.body.push(new Literal("" + this.externalCtor + ".apply(this, arguments)"));
|
||||
this.ctor.body.push(new Literal(this.externalCtor + ".apply(this, arguments)"));
|
||||
} else if (this.parent) {
|
||||
this.ctor.body.push(new Literal("" + name + ".__super__.constructor.apply(this, arguments)"));
|
||||
this.ctor.body.push(new Literal(name + ".__super__.constructor.apply(this, arguments)"));
|
||||
}
|
||||
this.ctor.body.makeReturn();
|
||||
this.body.expressions.unshift(this.ctor);
|
||||
|
@ -1677,7 +1677,7 @@
|
|||
assigns = [];
|
||||
expandedIdx = false;
|
||||
if (!IDENTIFIER.test(vvarText) || this.variable.assigns(vvarText)) {
|
||||
assigns.push([this.makeCode("" + (ref = o.scope.freeVariable('ref')) + " = ")].concat(__slice.call(vvar)));
|
||||
assigns.push([this.makeCode((ref = o.scope.freeVariable('ref')) + " = ")].concat(__slice.call(vvar)));
|
||||
vvar = [this.makeCode(ref)];
|
||||
vvarText = ref;
|
||||
}
|
||||
|
@ -1698,7 +1698,7 @@
|
|||
if (!expandedIdx && obj instanceof Splat) {
|
||||
name = obj.name.unwrap().value;
|
||||
obj = obj.unwrap();
|
||||
val = "" + olen + " <= " + vvarText + ".length ? " + (utility('slice')) + ".call(" + vvarText + ", " + i;
|
||||
val = olen + " <= " + vvarText + ".length ? " + (utility('slice')) + ".call(" + vvarText + ", " + i;
|
||||
if (rest = olen - i - 1) {
|
||||
ivar = o.scope.freeVariable('i');
|
||||
val += ", " + ivar + " = " + vvarText + ".length - " + rest + ") : (" + ivar + " = " + i + ", [])";
|
||||
|
@ -1706,15 +1706,15 @@
|
|||
val += ") : []";
|
||||
}
|
||||
val = new Literal(val);
|
||||
expandedIdx = "" + ivar + "++";
|
||||
expandedIdx = ivar + "++";
|
||||
} else if (!expandedIdx && obj instanceof Expansion) {
|
||||
if (rest = olen - i - 1) {
|
||||
if (rest === 1) {
|
||||
expandedIdx = "" + vvarText + ".length - 1";
|
||||
expandedIdx = vvarText + ".length - 1";
|
||||
} else {
|
||||
ivar = o.scope.freeVariable('i');
|
||||
val = new Literal("" + ivar + " = " + vvarText + ".length - " + rest);
|
||||
expandedIdx = "" + ivar + "++";
|
||||
val = new Literal(ivar + " = " + vvarText + ".length - " + rest);
|
||||
expandedIdx = ivar + "++";
|
||||
assigns.push(val.compileToFragments(o, LEVEL_LIST));
|
||||
}
|
||||
}
|
||||
|
@ -2121,13 +2121,13 @@
|
|||
if (apply) {
|
||||
return fragments;
|
||||
}
|
||||
return [].concat(node.makeCode("" + (utility('slice')) + ".call("), fragments, node.makeCode(")"));
|
||||
return [].concat(node.makeCode((utility('slice')) + ".call("), fragments, node.makeCode(")"));
|
||||
}
|
||||
args = list.slice(index);
|
||||
for (i = _i = 0, _len = args.length; _i < _len; i = ++_i) {
|
||||
node = args[i];
|
||||
compiledNode = node.compileToFragments(o, LEVEL_LIST);
|
||||
args[i] = node instanceof Splat ? [].concat(node.makeCode("" + (utility('slice')) + ".call("), compiledNode, node.makeCode(")")) : [].concat(node.makeCode("["), compiledNode, node.makeCode("]"));
|
||||
args[i] = node instanceof Splat ? [].concat(node.makeCode((utility('slice')) + ".call("), compiledNode, node.makeCode(")")) : [].concat(node.makeCode("["), compiledNode, node.makeCode("]"));
|
||||
}
|
||||
if (index === 0) {
|
||||
node = list[0];
|
||||
|
@ -2620,7 +2620,7 @@
|
|||
tryPart = this.attempt.compileToFragments(o, LEVEL_TOP);
|
||||
catchPart = this.recovery ? (placeholder = new Literal('_error'), this.errorVariable ? this.recovery.unshift(new Assign(this.errorVariable, placeholder)) : void 0, [].concat(this.makeCode(" catch ("), placeholder.compileToFragments(o), this.makeCode(") {\n"), this.recovery.compileToFragments(o, LEVEL_TOP), this.makeCode("\n" + this.tab + "}"))) : !(this.ensure || this.recovery) ? [this.makeCode(' catch (_error) {}')] : [];
|
||||
ensurePart = this.ensure ? [].concat(this.makeCode(" finally {\n"), this.ensure.compileToFragments(o, LEVEL_TOP), this.makeCode("\n" + this.tab + "}")) : [];
|
||||
return [].concat(this.makeCode("" + this.tab + "try {\n"), tryPart, this.makeCode("\n" + this.tab + "}"), catchPart, ensurePart);
|
||||
return [].concat(this.makeCode(this.tab + "try {\n"), tryPart, this.makeCode("\n" + this.tab + "}"), catchPart, ensurePart);
|
||||
};
|
||||
|
||||
return Try;
|
||||
|
@ -2669,7 +2669,7 @@
|
|||
_ref2 = this.negated ? ['===', '||'] : ['!==', '&&'], cmp = _ref2[0], cnj = _ref2[1];
|
||||
code = "typeof " + code + " " + cmp + " \"undefined\" " + cnj + " " + code + " " + cmp + " null";
|
||||
} else {
|
||||
code = "" + code + " " + (this.negated ? '==' : '!=') + " null";
|
||||
code = code + " " + (this.negated ? '==' : '!=') + " null";
|
||||
}
|
||||
return [this.makeCode(o.level <= LEVEL_COND ? code : "(" + code + ")")];
|
||||
};
|
||||
|
@ -2770,7 +2770,7 @@
|
|||
}
|
||||
ivar = (this.object && index) || scope.freeVariable('i');
|
||||
kvar = (this.range && name) || index || ivar;
|
||||
kvarAssign = kvar !== ivar ? "" + kvar + " = " : "";
|
||||
kvarAssign = kvar !== ivar ? kvar + " = " : "";
|
||||
if (this.step && !this.range) {
|
||||
_ref3 = this.cacheToCodeFragments(this.step.cache(o, LEVEL_LIST)), step = _ref3[0], stepVar = _ref3[1];
|
||||
stepNum = stepVar.match(NUMBER);
|
||||
|
@ -2795,7 +2795,7 @@
|
|||
svar = ref;
|
||||
}
|
||||
if (name && !this.pattern) {
|
||||
namePart = "" + name + " = " + svar + "[" + kvar + "]";
|
||||
namePart = name + " = " + svar + "[" + kvar + "]";
|
||||
}
|
||||
if (!this.object) {
|
||||
if (step !== stepVar) {
|
||||
|
@ -2806,8 +2806,8 @@
|
|||
}
|
||||
declare = "" + kvarAssign + ivar + " = 0, " + lvar + " = " + svar + ".length";
|
||||
declareDown = "" + kvarAssign + ivar + " = " + svar + ".length - 1";
|
||||
compare = "" + ivar + " < " + lvar;
|
||||
compareDown = "" + ivar + " >= 0";
|
||||
compare = ivar + " < " + lvar;
|
||||
compareDown = ivar + " >= 0";
|
||||
if (this.step) {
|
||||
if (stepNum) {
|
||||
if (down) {
|
||||
|
@ -2815,14 +2815,14 @@
|
|||
declare = declareDown;
|
||||
}
|
||||
} else {
|
||||
compare = "" + stepVar + " > 0 ? " + compare + " : " + compareDown;
|
||||
compare = stepVar + " > 0 ? " + compare + " : " + compareDown;
|
||||
declare = "(" + stepVar + " > 0 ? (" + declare + ") : " + declareDown + ")";
|
||||
}
|
||||
increment = "" + ivar + " += " + stepVar;
|
||||
increment = ivar + " += " + stepVar;
|
||||
} else {
|
||||
increment = "" + (kvar !== ivar ? "++" + ivar : "" + ivar + "++");
|
||||
increment = "" + (kvar !== ivar ? "++" + ivar : ivar + "++");
|
||||
}
|
||||
forPartFragments = [this.makeCode("" + declare + "; " + compare + "; " + kvarAssign + increment)];
|
||||
forPartFragments = [this.makeCode(declare + "; " + compare + "; " + kvarAssign + increment)];
|
||||
}
|
||||
}
|
||||
if (this.returns) {
|
||||
|
@ -2840,14 +2840,14 @@
|
|||
}
|
||||
}
|
||||
if (this.pattern) {
|
||||
body.expressions.unshift(new Assign(this.name, new Literal("" + svar + "[" + kvar + "]")));
|
||||
body.expressions.unshift(new Assign(this.name, new Literal(svar + "[" + kvar + "]")));
|
||||
}
|
||||
defPartFragments = [].concat(this.makeCode(defPart), this.pluckDirectCall(o, body));
|
||||
if (namePart) {
|
||||
varPart = "\n" + idt1 + namePart + ";";
|
||||
}
|
||||
if (this.object) {
|
||||
forPartFragments = [this.makeCode("" + kvar + " in " + svar)];
|
||||
forPartFragments = [this.makeCode(kvar + " in " + svar)];
|
||||
if (this.own) {
|
||||
guardPart = "\n" + idt1 + "if (!" + (utility('hasProp')) + ".call(" + svar + ", " + kvar + ")) continue;";
|
||||
}
|
||||
|
@ -2858,7 +2858,7 @@
|
|||
if (bodyFragments && (bodyFragments.length > 0)) {
|
||||
bodyFragments = [].concat(this.makeCode("\n"), bodyFragments, this.makeCode("\n"));
|
||||
}
|
||||
return [].concat(defPartFragments, this.makeCode("" + (resultPart || '') + this.tab + "for ("), forPartFragments, this.makeCode(") {" + guardPart + varPart), bodyFragments, this.makeCode("" + this.tab + "}" + (returnResult || '')));
|
||||
return [].concat(defPartFragments, this.makeCode("" + (resultPart || '') + this.tab + "for ("), forPartFragments, this.makeCode(") {" + guardPart + varPart), bodyFragments, this.makeCode(this.tab + "}" + (returnResult || '')));
|
||||
};
|
||||
|
||||
For.prototype.pluckDirectCall = function(o, body) {
|
||||
|
|
|
@ -62,7 +62,7 @@
|
|||
var letPart, lines, rule, spaces, _i, _len, _ref;
|
||||
lines = [];
|
||||
if (this.banner) {
|
||||
lines.unshift("" + this.banner + "\n");
|
||||
lines.unshift(this.banner + "\n");
|
||||
}
|
||||
_ref = this.rules;
|
||||
for (_i = 0, _len = _ref.length; _i < _len; _i++) {
|
||||
|
|
|
@ -58,7 +58,7 @@
|
|||
rli.removeListener('line', nodeLineListener);
|
||||
rli.on('line', function(cmd) {
|
||||
if (multiline.enabled) {
|
||||
multiline.buffer += "" + cmd + "\n";
|
||||
multiline.buffer += cmd + "\n";
|
||||
rli.setPrompt(multiline.prompt);
|
||||
rli.prompt(true);
|
||||
} else {
|
||||
|
@ -118,7 +118,7 @@
|
|||
fd = fs.openSync(filename, 'a');
|
||||
repl.rli.addListener('line', function(code) {
|
||||
if (code && code.length && code !== '.history' && lastLine !== code) {
|
||||
fs.write(fd, "" + code + "\n");
|
||||
fs.write(fd, code + "\n");
|
||||
return lastLine = code;
|
||||
}
|
||||
});
|
||||
|
@ -128,7 +128,7 @@
|
|||
return repl.commands[getCommandId(repl, 'history')] = {
|
||||
help: 'Show command history',
|
||||
action: function() {
|
||||
repl.outputStream.write("" + (repl.rli.history.slice(0).reverse().join('\n')) + "\n");
|
||||
repl.outputStream.write((repl.rli.history.slice(0).reverse().join('\n')) + "\n");
|
||||
return repl.displayPrompt();
|
||||
}
|
||||
};
|
||||
|
|
|
@ -133,7 +133,7 @@
|
|||
for (_i = 0, _len = _ref1.length; _i < _len; _i++) {
|
||||
v = _ref1[_i];
|
||||
if (v.type.assigned) {
|
||||
_results.push("" + v.name + " = " + v.type.value);
|
||||
_results.push(v.name + " = " + v.type.value);
|
||||
}
|
||||
}
|
||||
return _results;
|
||||
|
|
486
src/lexer.coffee
486
src/lexer.coffee
|
@ -32,8 +32,7 @@ exports.Lexer = class Lexer
|
|||
# Each tokenizing method is responsible for returning the number of characters
|
||||
# it has consumed.
|
||||
#
|
||||
# Before returning the token stream, run it through the [Rewriter](rewriter.html)
|
||||
# unless explicitly asked not to.
|
||||
# Before returning the token stream, run it through the [Rewriter](rewriter.html).
|
||||
tokenize: (code, opts = {}) ->
|
||||
@literate = opts.literate # Are we lexing literate CoffeeScript?
|
||||
@indent = 0 # The current indentation level.
|
||||
|
@ -60,7 +59,6 @@ exports.Lexer = class Lexer
|
|||
@commentToken() or
|
||||
@whitespaceToken() or
|
||||
@lineToken() or
|
||||
@heredocToken() or
|
||||
@stringToken() or
|
||||
@numberToken() or
|
||||
@regexToken() or
|
||||
|
@ -72,7 +70,10 @@ exports.Lexer = class Lexer
|
|||
|
||||
i += consumed
|
||||
|
||||
return {@tokens, index: i} if opts.untilBalanced and @ends.length is 0
|
||||
|
||||
@closeIndentation()
|
||||
# TODO: Make this error point to the opener!
|
||||
@error "missing #{tag}" if tag = @ends.pop()
|
||||
return @tokens if opts.rewrite is off
|
||||
(new Rewriter).rewrite @tokens
|
||||
|
@ -186,49 +187,60 @@ exports.Lexer = class Lexer
|
|||
@token 'NUMBER', number, 0, lexedLength
|
||||
lexedLength
|
||||
|
||||
# Matches strings, including multi-line strings. Ensures that quotation marks
|
||||
# are balanced within the string's contents, and within nested interpolations.
|
||||
# Matches strings, including multi-line strings, as well as heredocs, with or without
|
||||
# interpolation.
|
||||
stringToken: ->
|
||||
switch quote = @chunk.charAt 0
|
||||
when "'" then [string] = SIMPLESTR.exec(@chunk) || []
|
||||
when '"' then string = @balancedString @chunk, '"'
|
||||
return 0 unless string
|
||||
inner = string[1...-1]
|
||||
trimmed = @removeNewlines inner
|
||||
if quote is '"' and 0 < string.indexOf '#{', 1
|
||||
numBreak = pos = 0
|
||||
innerLen = inner.length
|
||||
numBreak++ while inner.charAt(pos++) is '\n' and pos < innerLen
|
||||
@interpolateString trimmed, strOffset: 1 + numBreak, lexedLength: string.length
|
||||
else
|
||||
@token 'STRING', quote + @escapeLines(trimmed) + quote, 0, string.length
|
||||
if octalEsc = /^(?:\\.|[^\\])*\\(?:0[0-7]|[1-7])/.test string
|
||||
@error "octal escape sequences #{string} are not allowed"
|
||||
string.length
|
||||
[quote] = STRING_START.exec(@chunk) || []
|
||||
return 0 unless quote
|
||||
regex = switch quote
|
||||
when "'" then STRING_SINGLE
|
||||
when '"' then STRING_DOUBLE
|
||||
when "'''" then HEREDOC_SINGLE
|
||||
when '"""' then HEREDOC_DOUBLE
|
||||
heredoc = quote.length is 3
|
||||
|
||||
# Matches heredocs, adjusting indentation to the correct level, as heredocs
|
||||
# preserve whitespace, but ignore indentation to the left.
|
||||
heredocToken: ->
|
||||
return 0 unless match = HEREDOC.exec @chunk
|
||||
heredoc = match[0]
|
||||
quote = heredoc.charAt 0
|
||||
doc = @sanitizeHeredoc match[2], quote: quote, indent: null
|
||||
if quote is '"' and 0 <= doc.indexOf '#{'
|
||||
strOffset = if match[2].charAt(0) is '\n' then 4 else 3
|
||||
@interpolateString doc, heredoc: yes, strOffset: strOffset, lexedLength: heredoc.length
|
||||
start = quote.length
|
||||
{tokens, index: end} = @matchWithInterpolations @chunk[start..], regex, quote, start
|
||||
$ = tokens.length - 1
|
||||
|
||||
if heredoc
|
||||
# Find the smallest indentation. It will be removed from all lines later.
|
||||
indent = null
|
||||
doc = (token[1] for token, i in tokens when token[0] is 'NEOSTRING').join '#{}'
|
||||
while match = HEREDOC_INDENT.exec doc
|
||||
attempt = match[1]
|
||||
indent = attempt if indent is null or 0 < attempt.length < indent.length
|
||||
indentRegex = /// ^#{indent} ///gm if indent
|
||||
@mergeInterpolationTokens tokens, {quote: quote[0], start, end}, (value, i) =>
|
||||
value = @formatString value
|
||||
value = value.replace LEADING_BLANK_LINE, '' if i is 0
|
||||
value = value.replace TRAILING_BLANK_LINE, '' if i is $
|
||||
value = value.replace indentRegex, ''
|
||||
value = value.replace MULTILINER, '\\n'
|
||||
value
|
||||
else
|
||||
@token 'STRING', @makeString(doc, quote, yes), 0, heredoc.length
|
||||
heredoc.length
|
||||
@mergeInterpolationTokens tokens, {quote, start, end}, (value, i) =>
|
||||
value = @formatString value
|
||||
value = value.replace STRING_OMIT, (match, offset) ->
|
||||
if (i is 0 and offset is 0) or
|
||||
(i is $ and offset + match.length is value.length)
|
||||
''
|
||||
else
|
||||
' '
|
||||
value
|
||||
|
||||
end
|
||||
|
||||
# Matches and consumes comments.
|
||||
commentToken: ->
|
||||
return 0 unless match = @chunk.match COMMENT
|
||||
[comment, here] = match
|
||||
if here
|
||||
@token 'HERECOMMENT',
|
||||
(@sanitizeHeredoc here,
|
||||
herecomment: true, indent: repeat ' ', @indent),
|
||||
0, comment.length
|
||||
if match = HERECOMMENT_ILLEGAL.exec comment
|
||||
@error "block comments cannot contain #{match[0]}", match.index
|
||||
if here.indexOf('\n') >= 0
|
||||
here = here.replace /// \n #{repeat ' ', @indent} ///g, '\n'
|
||||
@token 'HERECOMMENT', here, 0, comment.length
|
||||
comment.length
|
||||
|
||||
# Matches JavaScript interpolated directly into the source via backticks.
|
||||
|
@ -237,70 +249,44 @@ exports.Lexer = class Lexer
|
|||
@token 'JS', (script = match[0])[1...-1], 0, script.length
|
||||
script.length
|
||||
|
||||
# Matches regular expression literals. Lexing regular expressions is difficult
|
||||
# to distinguish from division, so we borrow some basic heuristics from
|
||||
# JavaScript and Ruby.
|
||||
# Matches regular expression literals, as well as multiline extended ones.
|
||||
# Lexing regular expressions is difficult to distinguish from division, so we
|
||||
# borrow some basic heuristics from JavaScript and Ruby.
|
||||
regexToken: ->
|
||||
return 0 if @chunk.charAt(0) isnt '/'
|
||||
return length if length = @heregexToken()
|
||||
|
||||
prev = last @tokens
|
||||
return 0 if prev and (prev[0] in (if prev.spaced then NOT_REGEX else NOT_SPACED_REGEX))
|
||||
return 0 unless match = REGEX.exec @chunk
|
||||
[match, regex, flags] = match
|
||||
# Avoid conflicts with floor division operator.
|
||||
return 0 if regex is '//'
|
||||
if regex[..1] is '/*' then @error 'regular expressions cannot begin with `*`'
|
||||
@token 'REGEX', "#{regex}#{flags}", 0, match.length
|
||||
match.length
|
||||
|
||||
# Matches multiline extended regular expressions.
|
||||
heregexToken: ->
|
||||
return 0 unless match = HEREGEX.exec @chunk
|
||||
[heregex, body, flags] = match
|
||||
if 0 > body.indexOf '#{'
|
||||
re = @escapeLines body.replace(HEREGEX_OMIT, '$1$2').replace(/\//g, '\\/'), yes
|
||||
if re.match /^\*/ then @error 'regular expressions cannot begin with `*`'
|
||||
@token 'REGEX', "/#{ re or '(?:)' }/#{flags}", 0, heregex.length
|
||||
return heregex.length
|
||||
@token 'IDENTIFIER', 'RegExp', 0, 0
|
||||
@token 'CALL_START', '(', 0, 0
|
||||
tokens = []
|
||||
for token in @interpolateString(body, regex: yes, strOffset: 3)
|
||||
[tag, value] = token
|
||||
if tag is 'TOKENS'
|
||||
tokens.push value...
|
||||
else if tag is 'NEOSTRING'
|
||||
continue unless value = value.replace HEREGEX_OMIT, '$1$2'
|
||||
# Convert NEOSTRING into STRING
|
||||
value = value.replace /\\/g, '\\\\'
|
||||
token[0] = 'STRING'
|
||||
token[1] = @makeString(value, '"', yes)
|
||||
tokens.push token
|
||||
switch
|
||||
when match = REGEX_ILLEGAL.exec @chunk
|
||||
@error "regular expressions cannot begin with #{match[2]}", match.index + match[1].length
|
||||
when @chunk[...3] is '///'
|
||||
{tokens, index} = @matchWithInterpolations @chunk[3..], HEREGEX, '///', 3
|
||||
when match = REGEX.exec @chunk
|
||||
[regex] = match
|
||||
index = regex.length
|
||||
prev = last @tokens
|
||||
return 0 if prev and (prev[0] in (if prev.spaced then NOT_REGEX else NOT_SPACED_REGEX))
|
||||
else
|
||||
@error "Unexpected #{tag}"
|
||||
return 0
|
||||
|
||||
prev = last @tokens
|
||||
plusToken = ['+', '+']
|
||||
plusToken[2] = prev[2] # Copy location data
|
||||
tokens.push plusToken
|
||||
[flags] = REGEX_FLAGS.exec @chunk[index..]
|
||||
end = index + flags.length
|
||||
switch
|
||||
when not VALID_FLAGS.test flags
|
||||
@error "invalid regular expression flags #{flags}", index
|
||||
when regex
|
||||
@token 'REGEX', "#{regex}#{flags}"
|
||||
when tokens.length is 1
|
||||
re = @formatHeregex(tokens[0][1]).replace(/\//g, '\\/')
|
||||
@token 'REGEX', "/#{ re or '(?:)' }/#{flags}"
|
||||
else
|
||||
@token 'IDENTIFIER', 'RegExp', 0, 0
|
||||
@token 'CALL_START', '(', 0, 0
|
||||
@mergeInterpolationTokens tokens, {quote: '"', start: 3, end}, (value) =>
|
||||
@formatHeregex(value).replace(/\\/g, '\\\\')
|
||||
if flags
|
||||
@token ',', ',', index, 0
|
||||
@token 'STRING', '"' + flags + '"', index, flags.length
|
||||
@token ')', ')', end, 0
|
||||
|
||||
# Remove the extra "+"
|
||||
tokens.pop()
|
||||
|
||||
unless tokens[0]?[0] is 'STRING'
|
||||
@token 'STRING', '""', 0, 0
|
||||
@token '+', '+', 0, 0
|
||||
@tokens.push tokens...
|
||||
|
||||
if flags
|
||||
# Find the flags in the heregex
|
||||
flagsOffset = heregex.lastIndexOf flags
|
||||
@token ',', ',', flagsOffset, 0
|
||||
@token 'STRING', '"' + flags + '"', flagsOffset, flags.length
|
||||
|
||||
@token ')', ')', heregex.length-1, 0
|
||||
heregex.length
|
||||
end
|
||||
|
||||
# Matches newlines, indents, and outdents, and determines which is which.
|
||||
# If we can detect that the current line is continued onto the the next line,
|
||||
|
@ -442,22 +428,6 @@ exports.Lexer = class Lexer
|
|||
# Token Manipulators
|
||||
# ------------------
|
||||
|
||||
# Sanitize a heredoc or herecomment by
|
||||
# erasing all external indentation on the left-hand side.
|
||||
sanitizeHeredoc: (doc, options) ->
|
||||
{indent, herecomment} = options
|
||||
if herecomment
|
||||
if HEREDOC_ILLEGAL.test doc
|
||||
@error "block comment cannot contain \"*/\", starting"
|
||||
return doc if doc.indexOf('\n') < 0
|
||||
else
|
||||
while match = HEREDOC_INDENT.exec doc
|
||||
attempt = match[1]
|
||||
indent = attempt if indent is null or 0 < attempt.length < indent.length
|
||||
doc = doc.replace /// \n #{indent} ///g, '\n' if indent
|
||||
doc = doc.replace /^\n/, '' unless herecomment
|
||||
doc
|
||||
|
||||
# A source of ambiguity in our grammar used to be parameter lists in function
|
||||
# definitions versus argument lists in function calls. Walk backwards, tagging
|
||||
# parameters specially in order to make things easier for the parser.
|
||||
|
@ -483,133 +453,121 @@ exports.Lexer = class Lexer
|
|||
closeIndentation: ->
|
||||
@outdentToken @indent
|
||||
|
||||
# Matches a balanced group such as a single or double-quoted string. Pass in
|
||||
# a series of delimiters, all of which must be nested correctly within the
|
||||
# contents of the string. This method allows us to have strings within
|
||||
# interpolations within strings, ad infinitum.
|
||||
balancedString: (str, end) ->
|
||||
continueCount = 0
|
||||
stack = [end]
|
||||
for i in [1...str.length]
|
||||
if continueCount
|
||||
--continueCount
|
||||
continue
|
||||
switch letter = str.charAt i
|
||||
when '\\'
|
||||
++continueCount
|
||||
continue
|
||||
when end
|
||||
stack.pop()
|
||||
unless stack.length
|
||||
return str[0..i]
|
||||
end = stack[stack.length - 1]
|
||||
continue
|
||||
if end is '}' and letter in ['"', "'"]
|
||||
stack.push end = letter
|
||||
else if end is '}' and letter is '/' and match = (HEREGEX.exec(str[i..]) or REGEX.exec(str[i..]))
|
||||
continueCount += match[0].length - 1
|
||||
else if end is '}' and letter is '{'
|
||||
stack.push end = '}'
|
||||
else if end is '"' and prev is '#' and letter is '{'
|
||||
stack.push end = '}'
|
||||
prev = letter
|
||||
@error "missing #{ stack.pop() }, starting"
|
||||
|
||||
# Expand variables and expressions inside double-quoted strings using
|
||||
# Ruby-like notation for substitution of arbitrary expressions.
|
||||
# Match the contents of a delimited token and expand variables and expressions
|
||||
# inside it using Ruby-like notation for substitution of arbitrary
|
||||
# expressions.
|
||||
#
|
||||
# "Hello #{name.capitalize()}."
|
||||
#
|
||||
# If it encounters an interpolation, this method will recursively create a
|
||||
# new Lexer, tokenize the interpolated contents, and merge them into the
|
||||
# token stream.
|
||||
# If it encounters an interpolation, this method will recursively create a new
|
||||
# Lexer and tokenize until the `{` of `#{` is balanced with a `}`.
|
||||
#
|
||||
# - `str` is the start of the string contents (IE with the " or """ stripped
|
||||
# off.)
|
||||
# - `options.offsetInChunk` is the start of the interpolated string in the
|
||||
# current chunk, including the " or """, etc... If not provided, this is
|
||||
# assumed to be 0. `options.lexedLength` is the length of the
|
||||
# interpolated string, including both the start and end quotes. Both of these
|
||||
# values are ignored if `options.regex` is true.
|
||||
# - `options.strOffset` is the offset of str, relative to the start of the
|
||||
# current chunk.
|
||||
interpolateString: (str, options = {}) ->
|
||||
{heredoc, regex, offsetInChunk, strOffset, lexedLength} = options
|
||||
offsetInChunk ||= 0
|
||||
strOffset ||= 0
|
||||
lexedLength ||= str.length
|
||||
|
||||
# Parse the string.
|
||||
# - `str` is the start of the token contents (with the starting delimiter
|
||||
# stripped off.)
|
||||
# - `regex` matches the contents of a token (but not `end`, and not `#{` if
|
||||
# interpolations are desired).
|
||||
# - `end` is the terminator of the token.
|
||||
# - `offsetInChunk` is the start of the interpolated string in the current
|
||||
# chunk, including the starting delimiter.
|
||||
#
|
||||
# Examples of delimiters are `'`, `"`, `'''`, `"""` and `///`.
|
||||
#
|
||||
# This method allows us to have strings within interpolations within strings,
|
||||
# ad infinitum.
|
||||
matchWithInterpolations: (str, regex, end, offsetInChunk) ->
|
||||
tokens = []
|
||||
pi = 0
|
||||
i = -1
|
||||
while letter = str.charAt i += 1
|
||||
if letter is '\\'
|
||||
i += 1
|
||||
continue
|
||||
unless letter is '#' and str.charAt(i+1) is '{' and
|
||||
(expr = @balancedString str[i + 1..], '}')
|
||||
continue
|
||||
# NEOSTRING is a fake token. This will be converted to a string below.
|
||||
tokens.push @makeToken('NEOSTRING', str[pi...i], strOffset + pi) if pi < i
|
||||
unless errorToken
|
||||
errorToken = @makeToken '', 'string interpolation', offsetInChunk + i + 1, 2
|
||||
inner = expr[1...-1]
|
||||
if inner.length
|
||||
[line, column] = @getLineAndColumnFromChunk(strOffset + i + 2)
|
||||
nested = new Lexer().tokenize inner, line: line, column: column, rewrite: off
|
||||
popped = nested.pop()
|
||||
popped = nested.shift() if nested[0]?[0] is 'TERMINATOR'
|
||||
if len = nested.length
|
||||
if len > 1
|
||||
nested.unshift @makeToken '(', '(', strOffset + i + 1, 0
|
||||
nested.push @makeToken ')', ')', strOffset + i + 1 + inner.length, 0
|
||||
# Push a fake 'TOKENS' token, which will get turned into real tokens below.
|
||||
tokens.push ['TOKENS', nested]
|
||||
i += expr.length
|
||||
pi = i + 1
|
||||
tokens.push @makeToken('NEOSTRING', str[pi..], strOffset + pi) if i > pi < str.length
|
||||
loop
|
||||
[strPart] = regex.exec str
|
||||
|
||||
# If regex, then return now and let the regex code deal with all these fake tokens
|
||||
return tokens if regex
|
||||
# Push a fake 'NEOSTRING' token, which will get turned into a real string later.
|
||||
tokens.push @makeToken 'NEOSTRING', strPart, offsetInChunk
|
||||
|
||||
# If we didn't find any tokens, then just return an empty string.
|
||||
return @token 'STRING', '""', offsetInChunk, lexedLength unless tokens.length
|
||||
str = str[strPart.length..]
|
||||
offsetInChunk += strPart.length
|
||||
|
||||
# If the first token is not a string, add a fake empty string to the beginning.
|
||||
tokens.unshift @makeToken('NEOSTRING', '', offsetInChunk) unless tokens[0][0] is 'NEOSTRING'
|
||||
break unless str[...2] is '#{'
|
||||
|
||||
# The `1`s are to remove the `#` in `#{`.
|
||||
[line, column] = @getLineAndColumnFromChunk offsetInChunk + 1
|
||||
{tokens: nested, index} =
|
||||
new Lexer().tokenize str[1..], line: line, column: column, untilBalanced: on
|
||||
# Skip the trailing `}`.
|
||||
index += 1
|
||||
|
||||
# Remove leading and trailing `{` and `}`.
|
||||
nested.shift()
|
||||
nested.pop()
|
||||
|
||||
# Remove leading 'TERMINATOR' (if any).
|
||||
nested.shift() if nested[0]?[0] is 'TERMINATOR'
|
||||
|
||||
if nested.length > 1
|
||||
nested.unshift @makeToken '(', '(', offsetInChunk + 1, 0
|
||||
nested.push @makeToken ')', ')', offsetInChunk + 1 + index, 0
|
||||
# Push a fake 'TOKENS' token, which will get turned into real tokens later.
|
||||
tokens.push ['TOKENS', nested]
|
||||
|
||||
str = str[index..]
|
||||
offsetInChunk += index
|
||||
|
||||
unless str[...end.length] is end
|
||||
@error "missing #{end}"
|
||||
|
||||
{tokens, index: offsetInChunk + end.length}
|
||||
|
||||
# Merge the array `tokens` of the fake token types 'TOKENS' and 'NEOSTRING'
|
||||
# (as returned by `matchWithInterpolations`) into the token stream. The value
|
||||
# of 'NEOSTRING's are converted using `fn` and turned into strings using
|
||||
# `quote` first. The tokens are wrapped in parentheses if needed, using
|
||||
# `start` and `end` for their location data.
|
||||
mergeInterpolationTokens: (tokens, {quote, start, end}, fn) ->
|
||||
if interpolated = tokens.length > 1
|
||||
@token '(', '(', offsetInChunk, 0, errorToken
|
||||
errorToken = @makeToken '', 'interpolation', start + tokens[0][1].length, 2
|
||||
@token '(', '(', 0, 0, errorToken
|
||||
|
||||
# Push all the tokens
|
||||
firstIndex = @tokens.length
|
||||
for token, i in tokens
|
||||
[tag, value] = token
|
||||
if i
|
||||
switch tag
|
||||
when 'TOKENS'
|
||||
# Optimize out empty interpolations.
|
||||
continue if value.length is 0
|
||||
# Push all the tokens in the fake 'TOKENS' token. These already have
|
||||
# sane location data.
|
||||
locationToken = value[0]
|
||||
tokensToPush = value
|
||||
when 'NEOSTRING'
|
||||
# Convert 'NEOSTRING' into 'STRING'.
|
||||
converted = fn token[1], i
|
||||
# Optimize out empty strings. We ensure that the tokens stream always
|
||||
# starts with a string token, though, to make sure that the result
|
||||
# really is a string.
|
||||
if converted.length is 0
|
||||
if i is 0
|
||||
firstEmptyStringIndex = @tokens.length
|
||||
else
|
||||
continue
|
||||
# However, there is one case where we can optimize away a starting
|
||||
# empty string.
|
||||
if i is 2 and firstEmptyStringIndex?
|
||||
@tokens.splice firstEmptyStringIndex, 2 # Remove empty string and the plus.
|
||||
token[0] = 'STRING'
|
||||
token[1] = @makeString converted, quote
|
||||
locationToken = token
|
||||
tokensToPush = [token]
|
||||
if @tokens.length > firstIndex
|
||||
# Create a 0-length "+" token.
|
||||
plusToken = @token '+', '+' if i
|
||||
locationToken = if tag == 'TOKENS' then value[0] else token
|
||||
plusToken = @token '+', '+'
|
||||
plusToken[2] =
|
||||
first_line: locationToken[2].first_line
|
||||
first_line: locationToken[2].first_line
|
||||
first_column: locationToken[2].first_column
|
||||
last_line: locationToken[2].first_line
|
||||
last_column: locationToken[2].first_column
|
||||
if tag is 'TOKENS'
|
||||
# Push all the tokens in the fake 'TOKENS' token. These already have
|
||||
# sane location data.
|
||||
@tokens.push value...
|
||||
else if tag is 'NEOSTRING'
|
||||
# Convert NEOSTRING into STRING
|
||||
token[0] = 'STRING'
|
||||
token[1] = @makeString value, '"', heredoc
|
||||
@tokens.push token
|
||||
else
|
||||
@error "Unexpected #{tag}"
|
||||
last_line: locationToken[2].first_line
|
||||
last_column: locationToken[2].first_column
|
||||
@tokens.push tokensToPush...
|
||||
|
||||
if interpolated
|
||||
rparen = @makeToken ')', ')', offsetInChunk + lexedLength, 0
|
||||
rparen = @token ')', ')', end, 0
|
||||
rparen.stringEnd = true
|
||||
@tokens.push rparen
|
||||
tokens
|
||||
|
||||
# Pairs up a closing token, ensuring that all listed pairs of tokens are
|
||||
# correctly balanced throughout the course of the token stream.
|
||||
|
@ -694,29 +652,24 @@ exports.Lexer = class Lexer
|
|||
@tag() in ['\\', '.', '?.', '?::', 'UNARY', 'MATH', 'UNARY_MATH', '+', '-', 'YIELD',
|
||||
'**', 'SHIFT', 'RELATION', 'COMPARE', 'LOGIC', 'THROW', 'EXTENDS']
|
||||
|
||||
# Remove newlines from beginning and (non escaped) from end of string literals.
|
||||
removeNewlines: (str) ->
|
||||
str.replace(/^\s*\n\s*/, '')
|
||||
.replace(/([^\\]|\\\\)\s*\n\s*$/, '$1')
|
||||
|
||||
# Converts newlines for string literals.
|
||||
escapeLines: (str, heredoc) ->
|
||||
# Ignore escaped backslashes and remove escaped newlines
|
||||
str = str.replace /\\[^\S\n]*(\n|\\)\s*/g, (escaped, character) ->
|
||||
formatString: (str) ->
|
||||
# Ignore escaped backslashes and remove escaped newlines.
|
||||
str.replace /\\[^\S\n]*(\n|\\)\s*/g, (escaped, character) ->
|
||||
if character is '\n' then '' else escaped
|
||||
if heredoc
|
||||
str.replace MULTILINER, '\\n'
|
||||
else
|
||||
str.replace /\s*\n\s*/g, ' '
|
||||
|
||||
# Constructs a string token by escaping quotes and newlines.
|
||||
makeString: (body, quote, heredoc) ->
|
||||
formatHeregex: (str) ->
|
||||
str.replace(HEREGEX_OMIT, '$1$2').replace(MULTILINER, '\\n')
|
||||
|
||||
# Constructs a string token by escaping quotes.
|
||||
makeString: (body, quote) ->
|
||||
return quote + quote unless body
|
||||
# Ignore escaped backslashes and unescape quotes
|
||||
# Ignore escaped backslashes and unescape quotes.
|
||||
body = body.replace /// \\( #{quote} | \\ ) ///g, (match, contents) ->
|
||||
if contents is quote then contents else match
|
||||
body = body.replace /// #{quote} ///g, '\\$&'
|
||||
quote + @escapeLines(body, heredoc) + quote
|
||||
if match = OCTAL_ESCAPE.exec body
|
||||
@error "octal escape sequences are not allowed #{match[2]}", match.index + match[1].length + 1
|
||||
quote + body + quote
|
||||
|
||||
# Throws a compiler error on the current position.
|
||||
error: (message, offset = 0) ->
|
||||
|
@ -789,8 +742,6 @@ NUMBER = ///
|
|||
^ \d*\.?\d+ (?:e[+-]?\d+)? # decimal
|
||||
///i
|
||||
|
||||
HEREDOC = /// ^ ("""|''') ((?: \\[\s\S] | [^\\] )*?) (?:\n[^\n\S]*)? \1 ///
|
||||
|
||||
OPERATOR = /// ^ (
|
||||
?: [-=]> # function
|
||||
| [-+*/%<>&|^!?=]= # compound assign / compare
|
||||
|
@ -809,26 +760,34 @@ CODE = /^[-=]>/
|
|||
|
||||
MULTI_DENT = /^(?:\n[^\n\S]*)+/
|
||||
|
||||
SIMPLESTR = /^'[^\\']*(?:\\[\s\S][^\\']*)*'/
|
||||
|
||||
JSTOKEN = /^`[^\\`]*(?:\\.[^\\`]*)*`/
|
||||
|
||||
# String-matching-regexes.
|
||||
STRING_START = /^(?:'''|"""|'|")/
|
||||
|
||||
STRING_SINGLE = /// ^(?: [^\\'] | \\[\s\S] )* ///
|
||||
STRING_DOUBLE = /// ^(?: [^\\"#] | \\[\s\S] | \#(?!\{) )* ///
|
||||
HEREDOC_SINGLE = /// ^(?: [^\\'] | \\[\s\S] | '(?!'') )* ///
|
||||
HEREDOC_DOUBLE = /// ^(?: [^\\"#] | \\[\s\S] | "(?!"") | \#(?!\{) )* ///
|
||||
|
||||
STRING_OMIT = /\s*\n\s*/g
|
||||
HEREDOC_INDENT = /\n+([^\n\S]*)(?=\S)/g
|
||||
|
||||
# Regex-matching-regexes.
|
||||
REGEX = /// ^
|
||||
(/ (?! [\s=] ) # disallow leading whitespace or equals signs
|
||||
[^ [ / \n \\ ]* # every other thing
|
||||
(?:
|
||||
(?: \\[\s\S] # anything escaped
|
||||
| \[ # character class
|
||||
[^ \] \n \\ ]*
|
||||
(?: \\[\s\S] [^ \] \n \\ ]* )*
|
||||
]
|
||||
) [^ [ / \n \\ ]*
|
||||
)*
|
||||
/) ([imgy]{0,4}) (?!\w)
|
||||
/ (?! [\s=] ) ( # disallow leading whitespace or equals sign
|
||||
?: [^ [ / \n \\ ] # every other thing
|
||||
| \\. # anything (but newlines) escaped
|
||||
| \[ # character class
|
||||
(?: \\. | [^ \] \n \\ ] )*
|
||||
]
|
||||
)+ /
|
||||
///
|
||||
|
||||
HEREGEX = /// ^ /{3} ((?:\\?[\s\S])+?) /{3} ([imgy]{0,4}) (?!\w) ///
|
||||
REGEX_FLAGS = /^\w*/
|
||||
VALID_FLAGS = /^(?!.*(.).*\1)[imgy]*$/
|
||||
|
||||
HEREGEX = /// ^(?: [^\\/#] | \\[\s\S] | /(?!//) | \#(?!\{) )* ///
|
||||
|
||||
HEREGEX_OMIT = ///
|
||||
((?:\\\\)+) # consume (and preserve) an even number of backslashes
|
||||
|
@ -836,16 +795,21 @@ HEREGEX_OMIT = ///
|
|||
| \s+(?:#.*)? # remove whitespace and comments
|
||||
///g
|
||||
|
||||
# Token cleaning regexes.
|
||||
MULTILINER = /\n/g
|
||||
REGEX_ILLEGAL = /// ^ ( / | /{3}\s*) (\*) ///
|
||||
|
||||
HEREDOC_INDENT = /\n+([^\n\S]*)/g
|
||||
# Other regexes.
|
||||
MULTILINER = /\n/g
|
||||
|
||||
HEREDOC_ILLEGAL = /\*\//
|
||||
HERECOMMENT_ILLEGAL = /\*\//
|
||||
|
||||
LINE_CONTINUER = /// ^ \s* (?: , | \??\.(?![.\d]) | :: ) ///
|
||||
LINE_CONTINUER = /// ^ \s* (?: , | \??\.(?![.\d]) | :: ) ///
|
||||
|
||||
TRAILING_SPACES = /\s+$/
|
||||
OCTAL_ESCAPE = /// ^ ((?: \\. | [^\\] )*) (\\ (?: 0[0-7] | [1-7] )) ///
|
||||
|
||||
LEADING_BLANK_LINE = /^[^\n\S]*\n/
|
||||
TRAILING_BLANK_LINE = /\n[^\n\S]*$/
|
||||
|
||||
TRAILING_SPACES = /\s+$/
|
||||
|
||||
# Compound assignment tokens.
|
||||
COMPOUND_ASSIGN = [
|
||||
|
|
|
@ -89,7 +89,7 @@ if require?
|
|||
test "#1096: unexpected generated tokens", ->
|
||||
# Unexpected interpolation
|
||||
assertErrorFormat '{"#{key}": val}', '''
|
||||
[stdin]:1:3: error: unexpected string interpolation
|
||||
[stdin]:1:3: error: unexpected interpolation
|
||||
{"#{key}": val}
|
||||
^^
|
||||
'''
|
||||
|
@ -139,3 +139,224 @@ test "explicit indentation errors", ->
|
|||
c
|
||||
^^
|
||||
'''
|
||||
|
||||
test "unclosed strings", ->
|
||||
assertErrorFormat '''
|
||||
'
|
||||
''', '''
|
||||
[stdin]:1:1: error: missing '
|
||||
'
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"
|
||||
''', '''
|
||||
[stdin]:1:1: error: missing "
|
||||
"
|
||||
^
|
||||
'''
|
||||
assertErrorFormat """
|
||||
'''
|
||||
""", """
|
||||
[stdin]:1:1: error: missing '''
|
||||
'''
|
||||
^
|
||||
"""
|
||||
assertErrorFormat '''
|
||||
"""
|
||||
''', '''
|
||||
[stdin]:1:1: error: missing """
|
||||
"""
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"#{"
|
||||
''', '''
|
||||
[stdin]:1:4: error: missing "
|
||||
"#{"
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"""#{"
|
||||
''', '''
|
||||
[stdin]:1:6: error: missing "
|
||||
"""#{"
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"#{"""
|
||||
''', '''
|
||||
[stdin]:1:4: error: missing """
|
||||
"#{"""
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"""#{"""
|
||||
''', '''
|
||||
[stdin]:1:6: error: missing """
|
||||
"""#{"""
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
///#{"""
|
||||
''', '''
|
||||
[stdin]:1:6: error: missing """
|
||||
///#{"""
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"a
|
||||
#{foo """
|
||||
bar
|
||||
#{ +'12 }
|
||||
baz
|
||||
"""} b"
|
||||
''', '''
|
||||
[stdin]:4:11: error: missing '
|
||||
#{ +'12 }
|
||||
^
|
||||
'''
|
||||
# https://github.com/jashkenas/coffeescript/issues/3301#issuecomment-31735168
|
||||
assertErrorFormat '''
|
||||
# Note the double escaping; this would be `"""a\"""` real code.
|
||||
"""a\\"""
|
||||
''', '''
|
||||
[stdin]:2:1: error: missing """
|
||||
"""a\\"""
|
||||
^
|
||||
'''
|
||||
|
||||
test "unclosed heregexes", ->
|
||||
assertErrorFormat '''
|
||||
///
|
||||
''', '''
|
||||
[stdin]:1:1: error: missing ///
|
||||
///
|
||||
^
|
||||
'''
|
||||
# https://github.com/jashkenas/coffeescript/issues/3301#issuecomment-31735168
|
||||
assertErrorFormat '''
|
||||
# Note the double escaping; this would be `///a\///` real code.
|
||||
///a\\///
|
||||
''', '''
|
||||
[stdin]:2:1: error: missing ///
|
||||
///a\\///
|
||||
^
|
||||
'''
|
||||
|
||||
test "unexpected token after string", ->
|
||||
# Parsing error.
|
||||
assertErrorFormat '''
|
||||
'foo'bar
|
||||
''', '''
|
||||
[stdin]:1:6: error: unexpected bar
|
||||
'foo'bar
|
||||
^^^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"foo"bar
|
||||
''', '''
|
||||
[stdin]:1:6: error: unexpected bar
|
||||
"foo"bar
|
||||
^^^
|
||||
'''
|
||||
# Lexing error.
|
||||
assertErrorFormat '''
|
||||
'foo'bar'
|
||||
''', '''
|
||||
[stdin]:1:9: error: missing '
|
||||
'foo'bar'
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
"foo"bar"
|
||||
''', '''
|
||||
[stdin]:1:9: error: missing "
|
||||
"foo"bar"
|
||||
^
|
||||
'''
|
||||
|
||||
test "#3348: Location data is wrong in interpolations with leading whitespace", ->
|
||||
assertErrorFormat '''
|
||||
"#{ {"#{key}": val} }"
|
||||
''', '''
|
||||
[stdin]:1:7: error: unexpected interpolation
|
||||
"#{ {"#{key}": val} }"
|
||||
^^
|
||||
'''
|
||||
|
||||
test "octal escapes", ->
|
||||
assertErrorFormat '''
|
||||
"a\\0\\tb\\\\\\07c"
|
||||
''', '''
|
||||
[stdin]:1:10: error: octal escape sequences are not allowed \\07
|
||||
"a\\0\\tb\\\\\\07c"
|
||||
\ \ \ \ ^
|
||||
'''
|
||||
|
||||
test "illegal herecomment", ->
|
||||
assertErrorFormat '''
|
||||
###
|
||||
Regex: /a*/g
|
||||
###
|
||||
''', '''
|
||||
[stdin]:2:12: error: block comments cannot contain */
|
||||
Regex: /a*/g
|
||||
^
|
||||
'''
|
||||
|
||||
test "#1724: regular expressions beginning with *", ->
|
||||
assertErrorFormat '''
|
||||
/* foo/
|
||||
''', '''
|
||||
[stdin]:1:2: error: regular expressions cannot begin with *
|
||||
/* foo/
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
///
|
||||
* foo
|
||||
///
|
||||
''', '''
|
||||
[stdin]:2:3: error: regular expressions cannot begin with *
|
||||
* foo
|
||||
^
|
||||
'''
|
||||
|
||||
test "invalid regex flags", ->
|
||||
assertErrorFormat '''
|
||||
/a/ii
|
||||
''', '''
|
||||
[stdin]:1:4: error: invalid regular expression flags ii
|
||||
/a/ii
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
/a/G
|
||||
''', '''
|
||||
[stdin]:1:4: error: invalid regular expression flags G
|
||||
/a/G
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
/a/gimi
|
||||
''', '''
|
||||
[stdin]:1:4: error: invalid regular expression flags gimi
|
||||
/a/gimi
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
/a/g_
|
||||
''', '''
|
||||
[stdin]:1:4: error: invalid regular expression flags g_
|
||||
/a/g_
|
||||
^
|
||||
'''
|
||||
assertErrorFormat '''
|
||||
///a///ii
|
||||
''', '''
|
||||
[stdin]:1:8: error: invalid regular expression flags ii
|
||||
///a///ii
|
||||
^
|
||||
'''
|
||||
doesNotThrow -> CoffeeScript.compile '/a/ymgi'
|
||||
|
|
|
@ -34,6 +34,17 @@ eq "#{6/2}
|
|||
eq "#{/// "'/'"/" ///}", '/"\'\\/\'"\\/"/' # heregex, stuffed with spicy characters
|
||||
eq "#{/\\'/}", "/\\\\'/"
|
||||
|
||||
# Issue #2321: Regex/division conflict in interpolation
|
||||
eq "#{4/2}/", '2/'
|
||||
curWidth = 4
|
||||
eq "<i style='left:#{ curWidth/2 }%;'></i>", "<i style='left:2%;'></i>"
|
||||
throws -> CoffeeScript.compile '''
|
||||
"<i style='left:#{ curWidth /2 }%;'></i>"'''
|
||||
# valid regex--^^^^^^^^^^^ ^--unclosed string
|
||||
eq "<i style='left:#{ curWidth/2 }%;'></i>", "<i style='left:2%;'></i>"
|
||||
eq "<i style='left:#{ curWidth/ 2 }%;'></i>", "<i style='left:2%;'></i>"
|
||||
eq "<i style='left:#{ curWidth / 2 }%;'></i>", "<i style='left:2%;'></i>"
|
||||
|
||||
hello = 'Hello'
|
||||
world = 'World'
|
||||
ok '#{hello} #{world}!' is '#{hello} #{world}!'
|
||||
|
@ -42,6 +53,10 @@ ok "[#{hello}#{world}]" is '[HelloWorld]'
|
|||
ok "#{hello}##{world}" is 'Hello#World'
|
||||
ok "Hello #{ 1 + 2 } World" is 'Hello 3 World'
|
||||
ok "#{hello} #{ 1 + 2 } #{world}" is "Hello 3 World"
|
||||
ok 1 + "#{2}px" is '12px'
|
||||
ok isNaN "a#{2}" * 2
|
||||
ok "#{2}" is '2'
|
||||
ok "#{2}#{2}" is '22'
|
||||
|
||||
[s, t, r, i, n, g] = ['s', 't', 'r', 'i', 'n', 'g']
|
||||
ok "#{s}#{t}#{r}#{i}#{n}#{g}" is 'string'
|
||||
|
|
|
@ -83,8 +83,8 @@ test 'Verify locations in string interpolation (in "string")', ->
|
|||
test 'Verify locations in string interpolation (in "string", multiple interpolation)', ->
|
||||
tokens = CoffeeScript.tokens '"#{a}b#{c}"'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
|
@ -104,8 +104,8 @@ test 'Verify locations in string interpolation (in "string", multiple interpolat
|
|||
test 'Verify locations in string interpolation (in "string", multiple interpolation and line breaks)', ->
|
||||
tokens = CoffeeScript.tokens '"#{a}\nb\n#{c}"'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
|
@ -125,8 +125,8 @@ test 'Verify locations in string interpolation (in "string", multiple interpolat
|
|||
test 'Verify locations in string interpolation (in "string", multiple interpolation and starting with line breaks)', ->
|
||||
tokens = CoffeeScript.tokens '"\n#{a}\nb\n#{c}"'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 1
|
||||
eq a[2].first_column, 2
|
||||
|
@ -146,8 +146,8 @@ test 'Verify locations in string interpolation (in "string", multiple interpolat
|
|||
test 'Verify locations in string interpolation (in "string", multiple interpolation and starting with line breaks)', ->
|
||||
tokens = CoffeeScript.tokens '"\n\n#{a}\n\nb\n\n#{c}"'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 2
|
||||
eq a[2].first_column, 2
|
||||
|
@ -167,8 +167,8 @@ test 'Verify locations in string interpolation (in "string", multiple interpolat
|
|||
test 'Verify locations in string interpolation (in "string", multiple interpolation and starting with line breaks)', ->
|
||||
tokens = CoffeeScript.tokens '"\n\n\n#{a}\n\n\nb\n\n\n#{c}"'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 3
|
||||
eq a[2].first_column, 2
|
||||
|
@ -209,13 +209,8 @@ test 'Verify locations in string interpolation (in """string""", line breaks)',
|
|||
test 'Verify locations in string interpolation (in """string""", starting with a line break)', ->
|
||||
tokens = CoffeeScript.tokens '"""\n#{b}\nc"""'
|
||||
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 0
|
||||
eq a[2].last_line, 0
|
||||
eq a[2].last_column, 0
|
||||
eq tokens.length, 6
|
||||
[{}, b, {}, c] = tokens
|
||||
|
||||
eq b[2].first_line, 1
|
||||
eq b[2].first_column, 2
|
||||
|
@ -233,8 +228,8 @@ test 'Verify locations in string interpolation (in """string""", starting with l
|
|||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 1
|
||||
eq a[2].first_column, 0
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
eq a[2].last_line, 1
|
||||
eq a[2].last_column, 0
|
||||
|
||||
|
@ -251,8 +246,8 @@ test 'Verify locations in string interpolation (in """string""", starting with l
|
|||
test 'Verify locations in string interpolation (in """string""", multiple interpolation)', ->
|
||||
tokens = CoffeeScript.tokens '"""#{a}\nb\n#{c}"""'
|
||||
|
||||
eq tokens.length, 10
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 8
|
||||
[{}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 5
|
||||
|
@ -315,7 +310,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
tokens = CoffeeScript.tokens '///#{a}b#{c}///'
|
||||
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 5
|
||||
|
@ -335,8 +330,8 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation)', ->
|
||||
tokens = CoffeeScript.tokens '///a#{b}c///'
|
||||
|
||||
eq tokens.length, 9
|
||||
[{}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
|
@ -357,7 +352,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
tokens = CoffeeScript.tokens '///#{a}\nb\n#{c}///'
|
||||
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 5
|
||||
|
@ -378,7 +373,7 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
tokens = CoffeeScript.tokens '///#{a}\n\n\nb\n\n\n#{c}///'
|
||||
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 5
|
||||
|
@ -398,8 +393,8 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks)', ->
|
||||
tokens = CoffeeScript.tokens '///a\n\n\n#{b}\n\n\nc///'
|
||||
|
||||
eq tokens.length, 9
|
||||
[{}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
|
@ -416,11 +411,11 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
eq c[2].last_line, 6
|
||||
eq c[2].last_column, 0
|
||||
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and stating with linebreak)', ->
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and starting with linebreak)', ->
|
||||
tokens = CoffeeScript.tokens '///\n#{a}\nb\n#{c}///'
|
||||
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 1
|
||||
eq a[2].first_column, 2
|
||||
|
@ -437,11 +432,11 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
eq c[2].last_line, 3
|
||||
eq c[2].last_column, 2
|
||||
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and stating with linebreak)', ->
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and starting with linebreak)', ->
|
||||
tokens = CoffeeScript.tokens '///\n\n\n#{a}\n\n\nb\n\n\n#{c}///'
|
||||
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 3
|
||||
eq a[2].first_column, 2
|
||||
|
@ -458,11 +453,11 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
eq c[2].last_line, 9
|
||||
eq c[2].last_column, 2
|
||||
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and stating with linebreak)', ->
|
||||
test 'Verify locations in heregex interpolation (in ///regex///, multiple interpolation and line breaks and starting with linebreak)', ->
|
||||
tokens = CoffeeScript.tokens '///\n\n\na\n\n\n#{b}\n\n\nc///'
|
||||
|
||||
eq tokens.length, 9
|
||||
[{}, {}, a, {}, b, {}, c] = tokens
|
||||
eq tokens.length, 11
|
||||
[{}, {}, {}, a, {}, b, {}, c] = tokens
|
||||
|
||||
eq a[2].first_line, 0
|
||||
eq a[2].first_column, 3
|
||||
|
@ -479,6 +474,19 @@ test 'Verify locations in heregex interpolation (in ///regex///, multiple interp
|
|||
eq c[2].last_line, 9
|
||||
eq c[2].last_column, 0
|
||||
|
||||
test "#3621: Multiline regex and manual `Regex` call with interpolation should
|
||||
result in the same tokens", ->
|
||||
tokensA = CoffeeScript.tokens 'RegExp(".*#{a}[0-9]")'
|
||||
tokensB = CoffeeScript.tokens '///.*#{a}[0-9]///'
|
||||
eq tokensA.length, tokensB.length
|
||||
for i in [0...tokensA.length] by 1
|
||||
tokenA = tokensA[i]
|
||||
tokenB = tokensB[i]
|
||||
eq tokenA[0], tokenB[0]
|
||||
eq tokenA[1], tokenB[1]
|
||||
eq tokenA.origin?[1], tokenB.origin?[1]
|
||||
eq tokenA.stringEnd, tokenB.stringEnd
|
||||
|
||||
test "Verify all tokens get a location", ->
|
||||
doesNotThrow ->
|
||||
tokens = CoffeeScript.tokens testScript
|
||||
|
|
|
@ -38,8 +38,8 @@ test "#764: regular expressions should be indexable", ->
|
|||
test "#584: slashes are allowed unescaped in character classes", ->
|
||||
ok /^a\/[/]b$/.test 'a//b'
|
||||
|
||||
test "#1724: regular expressions beginning with `*`", ->
|
||||
throws -> CoffeeScript.compile '/*/'
|
||||
test "does not allow to escape newlines", ->
|
||||
throws -> CoffeeScript.compile '/a\\\nb/'
|
||||
|
||||
|
||||
# Heregexe(n|s)
|
||||
|
@ -52,6 +52,14 @@ test "a heregex will ignore whitespace and comments", ->
|
|||
|
||||
test "an empty heregex will compile to an empty, non-capturing group", ->
|
||||
eq /(?:)/ + '', /// /// + ''
|
||||
eq /(?:)/ + '', ////// + ''
|
||||
|
||||
test "heregex starting with slashes", ->
|
||||
ok /////a/\////.test ' //a// '
|
||||
|
||||
test '#2388: `///` in heregex interpolations', ->
|
||||
ok ///a#{///b///}c///.test ' /a/b/c/ '
|
||||
ws = ' \t'
|
||||
scan = (regex) -> regex.exec('\t foo')[0]
|
||||
eq '/\t /', /// #{scan /// [#{ws}]* ///} /// + ''
|
||||
|
||||
test "#1724: regular expressions beginning with `*`", ->
|
||||
throws -> CoffeeScript.compile '/// * ///'
|
||||
|
|
|
@ -55,6 +55,17 @@ test "octal escape sequences prohibited", ->
|
|||
strictOk "`'\\1'`"
|
||||
eq "\\" + "1", `"\\1"`
|
||||
|
||||
# Also test other string types.
|
||||
strict "'\\\\\\1'"
|
||||
eq "\x008", '\08'
|
||||
eq "\\\\" + "1", '\\\\1'
|
||||
strict "'''\\\\\\1'''"
|
||||
eq "\x008", '''\08'''
|
||||
eq "\\\\" + "1", '''\\\\1'''
|
||||
strict '"""\\\\\\1"""'
|
||||
eq "\x008", """\08"""
|
||||
eq "\\\\" + "1", """\\\\1"""
|
||||
|
||||
test "duplicate formal parameters are prohibited", ->
|
||||
nonce = {}
|
||||
# a Param can be an Identifier, ThisProperty( @-param ), Array, or Object
|
||||
|
|
|
@ -114,6 +114,22 @@ test "#3229, multiline strings", ->
|
|||
eq 'first line\
|
||||
\ backslash at BOL', 'first line\ backslash at BOL'
|
||||
|
||||
# Backslashes at end of strings.
|
||||
eq 'first line \ ', 'first line '
|
||||
eq 'first line
|
||||
second line \
|
||||
', 'first line second line '
|
||||
eq 'first line
|
||||
second line
|
||||
\
|
||||
', 'first line second line'
|
||||
eq 'first line
|
||||
second line
|
||||
|
||||
\
|
||||
|
||||
', 'first line second line'
|
||||
|
||||
# Edge case.
|
||||
eq 'lone
|
||||
|
||||
|
@ -164,12 +180,6 @@ test "#3249, escape newlines in heredocs with backslashes", ->
|
|||
|
||||
""", '\n1 2\n'
|
||||
|
||||
# TODO: uncomment when #2388 is fixed
|
||||
# eq """a heredoc #{
|
||||
# "inside \
|
||||
# interpolation"
|
||||
# }""", "a heredoc inside interpolation"
|
||||
|
||||
# Handle escaped backslashes correctly.
|
||||
eq '''
|
||||
escaped backslash at EOL\\
|
||||
|
@ -185,6 +195,25 @@ test "#3249, escape newlines in heredocs with backslashes", ->
|
|||
eq """first line\
|
||||
\ backslash at BOL""", 'first line\ backslash at BOL'
|
||||
|
||||
# Backslashes at end of strings.
|
||||
eq '''first line \ ''', 'first line '
|
||||
eq '''
|
||||
first line
|
||||
second line \
|
||||
''', 'first line\nsecond line '
|
||||
eq '''
|
||||
first line
|
||||
second line
|
||||
\
|
||||
''', 'first line\nsecond line'
|
||||
eq '''
|
||||
first line
|
||||
second line
|
||||
|
||||
\
|
||||
|
||||
''', 'first line\nsecond line\n'
|
||||
|
||||
# Edge cases.
|
||||
eq '''lone
|
||||
|
||||
|
@ -196,6 +225,28 @@ test "#3249, escape newlines in heredocs with backslashes", ->
|
|||
eq '''\
|
||||
''', ''
|
||||
|
||||
test '#2388: `"""` in heredoc interpolations', ->
  # Heredocs may now be nested inside heredoc interpolations (#2388).
  eq """a heredoc #{
    "inside \
    interpolation"
  }""", "a heredoc inside interpolation"
  eq """a#{"""b"""}c""", 'abc'
  # Even a completely empty nested heredoc is allowed.
  eq """#{""""""}""", ''
|
||||
|
||||
test "trailing whitespace", ->
  # The trailing `|` markers protect significant trailing whitespace from
  # editors/tools that strip it; they are removed before evaluation.
  testTrailing = (str, expected) ->
    eq CoffeeScript.eval(str.replace /\|$/gm, ''), expected
  # Double-quoted strings collapse the whitespace-only lines away.
  testTrailing '''" |
   |
  a |
   |
  "''', 'a'
  # Heredocs preserve the trailing whitespace on each line.
  testTrailing """''' |
   |
  a |
   |
  '''""", ' \na \n '
|
||||
|
||||
#647
|
||||
eq "''Hello, World\\''", '''
|
||||
'\'Hello, World\\\''
|
||||
|
@ -259,6 +310,12 @@ ok a is 'more"than"one"quote'
|
|||
# Delimiter characters appearing inside the string they delimit.
a = '''here's an apostrophe'''
ok a is "here's an apostrophe"

# Two consecutive delimiter characters just inside a heredoc's fences.
a = """""surrounded by two quotes"\""""
ok a is '""surrounded by two quotes""'

a = '''''surrounded by two apostrophes'\''''
ok a is "''surrounded by two apostrophes''"
|
||||
|
||||
# The indentation detector ignores blank lines without trailing whitespace
|
||||
a = """
|
||||
one
|
||||
|
@ -272,6 +329,14 @@ eq ''' line 0
|
|||
to the indent level
|
||||
''', ' line 0\nshould not be relevant\n to the indent level'
|
||||
|
||||
# Indentation inside an interpolation must not affect how much indentation
# is stripped from the surrounding heredoc's lines.
eq """
  interpolation #{
    "contents"
  }
  should not be relevant
   to the indent level
  """, 'interpolation contents\nshould not be relevant\n to the indent level'
|
||||
|
||||
# Whitespace just inside single-line heredoc fences is preserved, and an
# escaped closing quote does not end the string.
eq ''' '\\\' ''', " '\\' "
eq """ "\\\" """, ' "\\" '
|
||||
|
||||
|
|
Loading…
Reference in New Issue