Added a small part to the rewriter to allow a better two-function call.
This commit is contained in:
parent
adbcd320b2
commit
3b264c9572
68
lib/lexer.js
68
lib/lexer.js
|
@@ -1,5 +1,5 @@
|
|||
(function(){
|
||||
var ACCESSORS, ASSIGNMENT, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, CONVERSIONS, HALF_ASSIGNMENTS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, Lexer, MULTILINER, MULTI_DENT, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_ESCAPE, REGEX_FLAGS, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, _a, balanced_string, compact, count, helpers, include, starts;
|
||||
var ACCESSORS, ASSIGNMENT, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, CONVERSIONS, HALF_ASSIGNMENTS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, Lexer, MULTILINER, MULTI_DENT, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_ESCAPE, REGEX_FLAGS, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, _a, _b, _c, balanced_string, compact, count, helpers, include, starts;
|
||||
var __slice = Array.prototype.slice;
|
||||
// The CoffeeScript Lexer. Uses a series of token-matching regexes to attempt
|
||||
// matches against the beginning of the source code. When a match is found,
|
||||
|
@@ -9,20 +9,22 @@
|
|||
// Which is a format that can be fed directly into [Jison](http://github.com/zaach/jison).
|
||||
// Set up the Lexer for both Node.js and the browser, depending on where we are.
|
||||
if ((typeof process !== "undefined" && process !== null)) {
|
||||
Rewriter = require('./rewriter').Rewriter;
|
||||
helpers = require('./helpers').helpers;
|
||||
_a = require('./rewriter');
|
||||
Rewriter = _a.Rewriter;
|
||||
_b = require('./helpers');
|
||||
helpers = _b.helpers;
|
||||
} else {
|
||||
this.exports = this;
|
||||
Rewriter = this.Rewriter;
|
||||
helpers = this.helpers;
|
||||
}
|
||||
// Import the helpers we need.
|
||||
_a = helpers;
|
||||
include = _a.include;
|
||||
count = _a.count;
|
||||
starts = _a.starts;
|
||||
compact = _a.compact;
|
||||
balanced_string = _a.balanced_string;
|
||||
_c = helpers;
|
||||
include = _c.include;
|
||||
count = _c.count;
|
||||
starts = _c.starts;
|
||||
compact = _c.compact;
|
||||
balanced_string = _c.balanced_string;
|
||||
// The Lexer Class
|
||||
// ---------------
|
||||
// The Lexer class reads a stream of CoffeeScript and divvys it up into tagged
|
||||
|
@@ -107,10 +109,10 @@
|
|||
// Language extensions get the highest priority, first chance to tag tokens
|
||||
// as something else.
|
||||
Lexer.prototype.extension_token = function extension_token() {
|
||||
var _b, _c, _d, extension;
|
||||
_c = Lexer.extensions;
|
||||
for (_b = 0, _d = _c.length; _b < _d; _b++) {
|
||||
extension = _c[_b];
|
||||
var _d, _e, _f, extension;
|
||||
_e = Lexer.extensions;
|
||||
for (_d = 0, _f = _e.length; _d < _f; _d++) {
|
||||
extension = _e[_d];
|
||||
if (extension.call(this)) {
|
||||
return true;
|
||||
}
|
||||
|
@@ -420,7 +422,7 @@
|
|||
// definitions versus argument lists in function calls. Walk backwards, tagging
|
||||
// parameters specially in order to make things easier for the parser.
|
||||
Lexer.prototype.tag_parameters = function tag_parameters() {
|
||||
var _b, i, tok;
|
||||
var _d, i, tok;
|
||||
if (this.tag() !== ')') {
|
||||
return null;
|
||||
}
|
||||
|
@@ -431,11 +433,11 @@
|
|||
if (!tok) {
|
||||
return null;
|
||||
}
|
||||
if ((_b = tok[0]) === 'IDENTIFIER') {
|
||||
if ((_d = tok[0]) === 'IDENTIFIER') {
|
||||
tok[0] = 'PARAM';
|
||||
} else if (_b === ')') {
|
||||
} else if (_d === ')') {
|
||||
tok[0] = 'PARAM_END';
|
||||
} else if (_b === '(' || _b === 'CALL_START') {
|
||||
} else if (_d === '(' || _d === 'CALL_START') {
|
||||
tok[0] = 'PARAM_START';
|
||||
return tok[0];
|
||||
}
|
||||
|
@@ -465,23 +467,23 @@
|
|||
// new Lexer, tokenize the interpolated contents, and merge them into the
|
||||
// token stream.
|
||||
Lexer.prototype.interpolate_string = function interpolate_string(str, escape_quotes) {
|
||||
var _b, _c, _d, _e, _f, _g, _h, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
|
||||
var _d, _e, _f, _g, _h, _i, _j, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
|
||||
if (str.length < 3 || !starts(str, '"')) {
|
||||
return this.token('STRING', str);
|
||||
} else {
|
||||
lexer = new Lexer();
|
||||
tokens = [];
|
||||
quote = str.substring(0, 1);
|
||||
_b = [1, 1];
|
||||
i = _b[0];
|
||||
pi = _b[1];
|
||||
_d = [1, 1];
|
||||
i = _d[0];
|
||||
pi = _d[1];
|
||||
while (i < str.length - 1) {
|
||||
if (starts(str, '\\', i)) {
|
||||
i += 1;
|
||||
} else if ((match = str.substring(i).match(INTERPOLATION))) {
|
||||
_c = match;
|
||||
group = _c[0];
|
||||
interp = _c[1];
|
||||
_e = match;
|
||||
group = _e[0];
|
||||
interp = _e[1];
|
||||
if (starts(interp, '@')) {
|
||||
interp = ("this." + (interp.substring(1)));
|
||||
}
|
||||
|
@@ -500,9 +502,9 @@
|
|||
nested = lexer.tokenize(("(" + inner + ")"), {
|
||||
line: this.line
|
||||
});
|
||||
_d = nested;
|
||||
for (idx = 0, _e = _d.length; idx < _e; idx++) {
|
||||
tok = _d[idx];
|
||||
_f = nested;
|
||||
for (idx = 0, _g = _f.length; idx < _g; idx++) {
|
||||
tok = _f[idx];
|
||||
tok[0] === 'CALL_END' ? (tok[0] = ')') : null;
|
||||
}
|
||||
nested.pop();
|
||||
|
@@ -525,12 +527,12 @@
|
|||
if (interpolated) {
|
||||
this.token('(', '(');
|
||||
}
|
||||
_f = tokens;
|
||||
for (i = 0, _g = _f.length; i < _g; i++) {
|
||||
token = _f[i];
|
||||
_h = token;
|
||||
tag = _h[0];
|
||||
value = _h[1];
|
||||
_h = tokens;
|
||||
for (i = 0, _i = _h.length; i < _i; i++) {
|
||||
token = _h[i];
|
||||
_j = token;
|
||||
tag = _j[0];
|
||||
value = _j[1];
|
||||
if (tag === 'TOKENS') {
|
||||
this.tokens = this.tokens.concat(value);
|
||||
} else if (tag === 'STRING' && escape_quotes) {
|
||||
|
|
125
lib/rewriter.js
125
lib/rewriter.js
|
@@ -1,5 +1,5 @@
|
|||
(function(){
|
||||
var BALANCED_PAIRS, EXPRESSION_CLOSE, EXPRESSION_END, EXPRESSION_START, IMPLICIT_BLOCK, IMPLICIT_CALL, IMPLICIT_END, IMPLICIT_FUNC, INVERSES, Rewriter, SINGLE_CLOSERS, SINGLE_LINERS, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, helpers, include, pair;
|
||||
var BALANCED_PAIRS, EXPRESSION_CLOSE, EXPRESSION_END, EXPRESSION_START, IMPLICIT_BLOCK, IMPLICIT_CALL, IMPLICIT_END, IMPLICIT_FUNC, INVERSES, Rewriter, SINGLE_CLOSERS, SINGLE_LINERS, _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, helpers, include, pair;
|
||||
var __slice = Array.prototype.slice, __bind = function(func, obj, args) {
|
||||
return function() {
|
||||
return func.apply(obj || {}, args ? args.concat(__slice.call(arguments, 0)) : arguments);
|
||||
|
@@ -13,14 +13,15 @@
|
|||
// parentheses, balance incorrect nestings, and generally clean things up.
|
||||
// Set up exported variables for both Node.js and the browser.
|
||||
if ((typeof process !== "undefined" && process !== null)) {
|
||||
helpers = require('./helpers').helpers;
|
||||
_a = require('./helpers');
|
||||
helpers = _a.helpers;
|
||||
} else {
|
||||
this.exports = this;
|
||||
helpers = this.helpers;
|
||||
}
|
||||
// Import the helpers we need.
|
||||
_a = helpers;
|
||||
include = _a.include;
|
||||
_b = helpers;
|
||||
include = _b.include;
|
||||
// The **Rewriter** class is used by the [Lexer](lexer.html), directly against
|
||||
// its internal array of tokens.
|
||||
exports.Rewriter = (function() {
|
||||
|
@@ -63,13 +64,13 @@
|
|||
// correctly indented, or appear on a line of their own.
|
||||
Rewriter.prototype.adjust_comments = function adjust_comments() {
|
||||
return this.scan_tokens(__bind(function(prev, token, post, i) {
|
||||
var _b, after, before;
|
||||
var _c, after, before;
|
||||
if (!(token[0] === 'COMMENT')) {
|
||||
return 1;
|
||||
}
|
||||
_b = [this.tokens[i - 2], this.tokens[i + 2]];
|
||||
before = _b[0];
|
||||
after = _b[1];
|
||||
_c = [this.tokens[i - 2], this.tokens[i + 2]];
|
||||
before = _c[0];
|
||||
after = _c[1];
|
||||
if (after && after[0] === 'INDENT') {
|
||||
this.tokens.splice(i + 2, 1);
|
||||
before && before[0] === 'OUTDENT' && post && (prev[0] === post[0]) && (post[0] === 'TERMINATOR') ? this.tokens.splice(i - 2, 1) : this.tokens.splice(i, 0, after);
|
||||
|
@@ -85,12 +86,12 @@
|
|||
// Leading newlines would introduce an ambiguity in the grammar, so we
|
||||
// dispatch them here.
|
||||
Rewriter.prototype.remove_leading_newlines = function remove_leading_newlines() {
|
||||
var _b;
|
||||
_b = [];
|
||||
var _c;
|
||||
_c = [];
|
||||
while (this.tokens[0] && this.tokens[0][0] === 'TERMINATOR') {
|
||||
_b.push(this.tokens.shift());
|
||||
_c.push(this.tokens.shift());
|
||||
}
|
||||
return _b;
|
||||
return _c;
|
||||
};
|
||||
// Some blocks occur in the middle of expressions -- when we're expecting
|
||||
// this, remove their trailing newlines.
|
||||
|
@@ -111,23 +112,23 @@
|
|||
parens = [0];
|
||||
brackets = [0];
|
||||
return this.scan_tokens(__bind(function(prev, token, post, i) {
|
||||
var _b;
|
||||
if ((_b = token[0]) === 'CALL_START') {
|
||||
var _c;
|
||||
if ((_c = token[0]) === 'CALL_START') {
|
||||
parens.push(0);
|
||||
} else if (_b === 'INDEX_START') {
|
||||
} else if (_c === 'INDEX_START') {
|
||||
brackets.push(0);
|
||||
} else if (_b === '(') {
|
||||
} else if (_c === '(') {
|
||||
parens[parens.length - 1] += 1;
|
||||
} else if (_b === '[') {
|
||||
} else if (_c === '[') {
|
||||
brackets[brackets.length - 1] += 1;
|
||||
} else if (_b === ')') {
|
||||
} else if (_c === ')') {
|
||||
if (parens[parens.length - 1] === 0) {
|
||||
parens.pop();
|
||||
token[0] = 'CALL_END';
|
||||
} else {
|
||||
parens[parens.length - 1] -= 1;
|
||||
}
|
||||
} else if (_b === ']') {
|
||||
} else if (_c === ']') {
|
||||
if (brackets[brackets.length - 1] === 0) {
|
||||
brackets.pop();
|
||||
token[0] = 'INDEX_END';
|
||||
|
@@ -145,9 +146,9 @@
|
|||
var close_calls, stack;
|
||||
stack = [0];
|
||||
close_calls = __bind(function(i) {
|
||||
var _b, _c, _d, size, tmp;
|
||||
_c = 0; _d = stack[stack.length - 1];
|
||||
for (_b = 0, tmp = _c; (_c <= _d ? tmp < _d : tmp > _d); (_c <= _d ? tmp += 1 : tmp -= 1), _b++) {
|
||||
var _c, _d, _e, size, tmp;
|
||||
_d = 0; _e = stack[stack.length - 1];
|
||||
for (_c = 0, tmp = _d; (_d <= _e ? tmp < _e : tmp > _e); (_d <= _e ? tmp += 1 : tmp -= 1), _c++) {
|
||||
this.tokens.splice(i, 0, ['CALL_END', ')', this.tokens[i][2]]);
|
||||
}
|
||||
size = stack[stack.length - 1] + 1;
|
||||
|
@@ -155,7 +156,7 @@
|
|||
return size;
|
||||
}, this);
|
||||
return this.scan_tokens(__bind(function(prev, token, post, i) {
|
||||
var open, size, tag;
|
||||
var j, nx, open, size, tag;
|
||||
tag = token[0];
|
||||
if (tag === 'OUTDENT') {
|
||||
stack[stack.length - 2] += stack.pop();
|
||||
|
@@ -179,11 +180,21 @@
|
|||
return 1;
|
||||
}
|
||||
if (open && !token.generated && (!post || include(IMPLICIT_END, tag))) {
|
||||
size = close_calls(i);
|
||||
if (tag !== 'OUTDENT' && include(EXPRESSION_END, tag)) {
|
||||
stack.pop();
|
||||
j = 1;
|
||||
while ((typeof (nx = this.tokens[i + j]) !== "undefined" && (nx = this.tokens[i + j]) !== null) && include(IMPLICIT_END, nx[0])) {
|
||||
j++;
|
||||
}
|
||||
if ((typeof nx !== "undefined" && nx !== null) && nx[0] === ',') {
|
||||
if (tag === 'TERMINATOR') {
|
||||
this.tokens.splice(i, 1);
|
||||
}
|
||||
} else {
|
||||
size = close_calls(i);
|
||||
if (tag !== 'OUTDENT' && include(EXPRESSION_END, tag)) {
|
||||
stack.pop();
|
||||
}
|
||||
return size;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
if (tag !== 'OUTDENT' && include(EXPRESSION_END, tag)) {
|
||||
stack[stack.length - 2] += stack.pop();
|
||||
|
@@ -236,17 +247,17 @@
|
|||
// Ensure that all listed pairs of tokens are correctly balanced throughout
|
||||
// the course of the token stream.
|
||||
Rewriter.prototype.ensure_balance = function ensure_balance(pairs) {
|
||||
var _b, _c, key, levels, line, open, open_line, unclosed, value;
|
||||
var _c, _d, key, levels, line, open, open_line, unclosed, value;
|
||||
levels = {};
|
||||
open_line = {};
|
||||
this.scan_tokens(__bind(function(prev, token, post, i) {
|
||||
var _b, _c, _d, _e, close, open, pair;
|
||||
_c = pairs;
|
||||
for (_b = 0, _d = _c.length; _b < _d; _b++) {
|
||||
pair = _c[_b];
|
||||
_e = pair;
|
||||
open = _e[0];
|
||||
close = _e[1];
|
||||
var _c, _d, _e, _f, close, open, pair;
|
||||
_d = pairs;
|
||||
for (_c = 0, _e = _d.length; _c < _e; _c++) {
|
||||
pair = _d[_c];
|
||||
_f = pair;
|
||||
open = _f[0];
|
||||
close = _f[1];
|
||||
levels[open] = levels[open] || 0;
|
||||
if (token[0] === open) {
|
||||
if (levels[open] === 0) {
|
||||
|
@@ -264,12 +275,12 @@
|
|||
return 1;
|
||||
}, this));
|
||||
unclosed = (function() {
|
||||
_b = []; _c = levels;
|
||||
for (key in _c) { if (__hasProp.call(_c, key)) {
|
||||
value = _c[key];
|
||||
value > 0 ? _b.push(key) : null;
|
||||
_c = []; _d = levels;
|
||||
for (key in _d) { if (__hasProp.call(_d, key)) {
|
||||
value = _d[key];
|
||||
value > 0 ? _c.push(key) : null;
|
||||
}}
|
||||
return _b;
|
||||
return _c;
|
||||
})();
|
||||
if (unclosed.length) {
|
||||
open = unclosed[0];
|
||||
|
@@ -291,12 +302,12 @@
|
|||
// 4. Be careful not to alter array or parentheses delimiters with overzealous
|
||||
// rewriting.
|
||||
Rewriter.prototype.rewrite_closing_parens = function rewrite_closing_parens() {
|
||||
var _b, debt, key, stack, val;
|
||||
var _c, debt, key, stack, val;
|
||||
stack = [];
|
||||
debt = {};
|
||||
_b = INVERSES;
|
||||
for (key in _b) { if (__hasProp.call(_b, key)) {
|
||||
val = _b[key];
|
||||
_c = INVERSES;
|
||||
for (key in _c) { if (__hasProp.call(_c, key)) {
|
||||
val = _c[key];
|
||||
(debt[key] = 0);
|
||||
}}
|
||||
return this.scan_tokens(__bind(function(prev, token, post, i) {
|
||||
|
@@ -342,29 +353,29 @@
|
|||
// The inverse mappings of `BALANCED_PAIRS` we're trying to fix up, so we can
|
||||
// look things up from either end.
|
||||
INVERSES = {};
|
||||
_c = BALANCED_PAIRS;
|
||||
for (_b = 0, _d = _c.length; _b < _d; _b++) {
|
||||
pair = _c[_b];
|
||||
_d = BALANCED_PAIRS;
|
||||
for (_c = 0, _e = _d.length; _c < _e; _c++) {
|
||||
pair = _d[_c];
|
||||
INVERSES[pair[0]] = pair[1];
|
||||
INVERSES[pair[1]] = pair[0];
|
||||
}
|
||||
// The tokens that signal the start of a balanced pair.
|
||||
EXPRESSION_START = (function() {
|
||||
_e = []; _g = BALANCED_PAIRS;
|
||||
for (_f = 0, _h = _g.length; _f < _h; _f++) {
|
||||
pair = _g[_f];
|
||||
_e.push(pair[0]);
|
||||
_f = []; _h = BALANCED_PAIRS;
|
||||
for (_g = 0, _i = _h.length; _g < _i; _g++) {
|
||||
pair = _h[_g];
|
||||
_f.push(pair[0]);
|
||||
}
|
||||
return _e;
|
||||
return _f;
|
||||
})();
|
||||
// The tokens that signal the end of a balanced pair.
|
||||
EXPRESSION_END = (function() {
|
||||
_i = []; _k = BALANCED_PAIRS;
|
||||
for (_j = 0, _l = _k.length; _j < _l; _j++) {
|
||||
pair = _k[_j];
|
||||
_i.push(pair[1]);
|
||||
_j = []; _l = BALANCED_PAIRS;
|
||||
for (_k = 0, _m = _l.length; _k < _m; _k++) {
|
||||
pair = _l[_k];
|
||||
_j.push(pair[1]);
|
||||
}
|
||||
return _i;
|
||||
return _j;
|
||||
})();
|
||||
// Tokens that indicate the close of a clause of an expression.
|
||||
EXPRESSION_CLOSE = ['CATCH', 'WHEN', 'ELSE', 'FINALLY'].concat(EXPRESSION_END);
|
||||
|
|
|
@@ -9,8 +9,8 @@
|
|||
|
||||
# Set up the Lexer for both Node.js and the browser, depending on where we are.
|
||||
if process?
|
||||
Rewriter: require('./rewriter').Rewriter
|
||||
helpers: require('./helpers').helpers
|
||||
{Rewriter}: require('./rewriter')
|
||||
{helpers}: require('./helpers')
|
||||
else
|
||||
this.exports: this
|
||||
Rewriter: this.Rewriter
|
||||
|
|
|
@@ -7,7 +7,7 @@
|
|||
|
||||
# Set up exported variables for both Node.js and the browser.
|
||||
if process?
|
||||
helpers: require('./helpers').helpers
|
||||
{helpers}: require('./helpers')
|
||||
else
|
||||
this.exports: this
|
||||
helpers: this.helpers
|
||||
|
@@ -135,9 +135,13 @@ exports.Rewriter: class Rewriter
|
|||
stack.push 0
|
||||
return 1
|
||||
if open and !token.generated and (!post or include(IMPLICIT_END, tag))
|
||||
size: close_calls(i)
|
||||
stack.pop() if tag isnt 'OUTDENT' and include EXPRESSION_END, tag
|
||||
return size
|
||||
j: 1; j++ while (nx: @tokens[i + j])? and include(IMPLICIT_END, nx[0])
|
||||
if nx? and nx[0] is ','
|
||||
@tokens.splice(i, 1) if tag is 'TERMINATOR'
|
||||
else
|
||||
size: close_calls(i)
|
||||
stack.pop() if tag isnt 'OUTDENT' and include EXPRESSION_END, tag
|
||||
return size
|
||||
if tag isnt 'OUTDENT' and include EXPRESSION_END, tag
|
||||
stack[stack.length - 2]: + stack.pop()
|
||||
return 1
|
||||
|
|
Loading…
Reference in New Issue