Mirror of https://github.com/jashkenas/coffeescript.git, synced 2022-11-09 12:23:24 -05:00
Fixed lingering bugs with the CoffeeScript compiler running live in Internet Explorer. Implemented helpers.index_of and removed named functions. Ticket #366
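The commit works around two Internet Explorer quirks in the compiler's generated JavaScript: JScript's broken handling of named function expressions, and the missing Array.prototype.indexOf on arrays in IE 8 and earlier. A minimal sketch of both issues follows; the names are illustrative and not taken from the diff below.

// 1. In JScript (IE < 9) a named function expression leaks its name into the
//    enclosing scope and creates a second function object, which is why the
//    compiler now emits `task: function(...)` instead of `task: function task(...)`.
var greet = function greet_inner() { return 'hi'; };
// In IE < 9, `greet_inner` is (incorrectly) also visible here as a variable.

// 2. Arrays in IE < 9 have no native indexOf, so membership checks need a
//    cross-browser fallback; this mirrors the idea behind the new helpers.index_of.
function index_of(array, item, from) {
  if (array.indexOf) return array.indexOf(item, from);
  for (var i = from || 0; i < array.length; i++) {
    if (array[i] === item) return i;
  }
  return -1;
}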
Parent: f84eb9ed47
Commit: dfb3a13246
16 changed files with 319 additions and 298 deletions
File diff suppressed because one or more lines are too long
lib/cake.js (12 changed lines)

@@ -22,7 +22,7 @@
 helpers.extend(global, {
 // Define a Cake task with a short name, an optional sentence description,
 // and the function to run as the action itself.
-task: function task(name, description, action) {
+task: function(name, description, action) {
 var _a;
 if (!(action)) {
 _a = [description, action];
@@ -39,11 +39,11 @@
 // Define an option that the Cakefile accepts. The parsed options hash,
 // containing all of the command-line options passed, will be made available
 // as the first argument to the action.
-option: function option(letter, flag, description) {
+option: function(letter, flag, description) {
 return switches.push([letter, flag, description]);
 },
 // Invoke another task in the current Cakefile.
-invoke: function invoke(name) {
+invoke: function(name) {
 if (!(tasks[name])) {
 no_such_task(name);
 }
@@ -53,7 +53,7 @@
 // Run `cake`. Executes all of the tasks you pass, in order. Note that Node's
 // asynchrony may cause tasks to execute in a different order than you'd expect.
 // If no tasks are passed, print the help screen.
-exports.run = function run() {
+exports.run = function() {
 return path.exists('Cakefile', function(exists) {
 var _a, _b, _c, _d, arg, args;
 if (!(exists)) {
@@ -77,7 +77,7 @@
 });
 };
 // Display the list of Cake tasks in a format similar to `rake -T`
-print_tasks = function print_tasks() {
+print_tasks = function() {
 var _a, _b, _c, _d, desc, i, name, spaces, task;
 puts('');
 _a = tasks;
@@ -99,7 +99,7 @@
 }
 };
 // Print an error and exit when attempting to all an undefined task.
-no_such_task = function no_such_task(task) {
+no_such_task = function(task) {
 puts(("No such task: \"" + task + "\""));
 return process.exit(1);
 };

@@ -28,7 +28,7 @@
 lexer = new Lexer();
 // Compile a string of CoffeeScript code to JavaScript, using the Coffee/Jison
 // compiler.
-exports.compile = (compile = function compile(code, options) {
+exports.compile = (compile = function(code, options) {
 options = options || {};
 try {
 return (parser.parse(lexer.tokenize(code))).compile(options);
@@ -40,13 +40,13 @@
 }
 });
 // Tokenize a string of CoffeeScript code, and return the array of tokens.
-exports.tokens = function tokens(code) {
+exports.tokens = function(code) {
 return lexer.tokenize(code);
 };
 // Tokenize and parse a string of CoffeeScript code, and return the AST. You can
 // then compile it by calling `.compile()` on the root, or traverse it by using
 // `.traverse()` with a callback.
-exports.nodes = function nodes(code) {
+exports.nodes = function(code) {
 return parser.parse(lexer.tokenize(code));
 };
 // Compile and execute a string of CoffeeScript (on the server), correctly
@@ -61,14 +61,14 @@
 // the **Lexer** (as a peer of any of the lexer's tokenizing methods), and
 // push a token on to the stack that contains a **Node** as the value (as a
 // peer of the nodes in [nodes.coffee](nodes.html)).
-exports.extend = function extend(func) {
+exports.extend = function(func) {
 return Lexer.extensions.push(func);
 };
 // The real Lexer produces a generic stream of tokens. This object provides a
 // thin wrapper around it, compatible with the Jison API. We can then pass it
 // directly as a "Jison lexer".
 parser.lexer = {
-lex: function lex() {
+lex: function() {
 var token;
 token = this.tokens[this.pos] || [""];
 this.pos += 1;
@@ -76,15 +76,15 @@
 this.yytext = token[1];
 return token[0];
 },
-setInput: function setInput(tokens) {
+setInput: function(tokens) {
 this.tokens = tokens;
 this.pos = 0;
 return this.pos;
 },
-upcomingInput: function upcomingInput() {
+upcomingInput: function() {
 return "";
 },
-showPosition: function showPosition() {
+showPosition: function() {
 return this.pos;
 }
 };
@@ -93,7 +93,7 @@
 // on page load. Unfortunately, the text contents of remote scripts cannot be
 // accessed from the browser, so only inline script tags will work.
 if ((typeof document !== "undefined" && document !== null) && document.getElementsByTagName) {
-process_scripts = function process_scripts() {
+process_scripts = function() {
 var _a, _b, _c, _d, tag;
 _a = []; _c = document.getElementsByTagName('script');
 for (_b = 0, _d = _c.length; _b < _d; _b++) {

@@ -24,7 +24,7 @@
 // Run `coffee` by parsing passed options and determining what action to take.
 // Many flags cause us to divert before compiling anything. Flags passed after
 // `--` will be passed verbatim to your script as arguments in `process.argv`
-exports.run = function run() {
+exports.run = function() {
 var flags, separator;
 parse_options();
 if (options.help) {
@@ -57,14 +57,14 @@
 // Asynchronously read in each CoffeeScript in a list of source files and
 // compile them. If a directory is passed, recursively compile all source
 // files in it and all subdirectories.
-compile_scripts = function compile_scripts() {
+compile_scripts = function() {
 var _b, _c, _d, _e, base, compile, source;
 _b = []; _d = sources;
 for (_c = 0, _e = _d.length; _c < _e; _c++) {
 source = _d[_c];
 _b.push((function() {
 base = source;
-compile = function compile(source) {
+compile = function(source) {
 return path.exists(source, function(exists) {
 if (!(exists)) {
 throw new Error(("File not found: " + source));
@@ -99,7 +99,7 @@
 // Compile a single source script, containing the given code, according to the
 // requested options. If evaluating the script directly sets `__filename`,
 // `__dirname` and `module.filename` to be correct relative to the script's path.
-compile_script = function compile_script(source, code, base) {
+compile_script = function(source, code, base) {
 var code_opts, js, o;
 o = options;
 code_opts = compile_options(source);
@@ -130,7 +130,7 @@
 };
 // Attach the appropriate listeners to compile scripts incoming over **stdin**,
 // and write them back to **stdout**.
-compile_stdio = function compile_stdio() {
+compile_stdio = function() {
 var code, stdin;
 code = '';
 stdin = process.openStdin();
@@ -146,7 +146,7 @@
 // Watch a source CoffeeScript file using `fs.watchFile`, recompiling it every
 // time the file is updated. May be used in combination with other options,
 // such as `--lint` or `--print`.
-watch = function watch(source, base) {
+watch = function(source, base) {
 return fs.watchFile(source, {
 persistent: true,
 interval: 500
@@ -165,14 +165,14 @@
 // Write out a JavaScript source file with the compiled code. By default, files
 // are written out in `cwd` as `.js` files with the same name, but the output
 // directory can be customized with `--output`.
-write_js = function write_js(source, js, base) {
+write_js = function(source, js, base) {
 var base_dir, compile, dir, filename, js_path, src_dir;
 filename = path.basename(source, path.extname(source)) + '.js';
 src_dir = path.dirname(source);
 base_dir = src_dir.substring(base.length);
 dir = options.output ? path.join(options.output, base_dir) : src_dir;
 js_path = path.join(dir, filename);
-compile = function compile() {
+compile = function() {
 return fs.writeFile(js_path, js);
 };
 return path.exists(dir, function(exists) {
@@ -185,9 +185,9 @@
 };
 // Pipe compiled JS through JSLint (requires a working `jsl` command), printing
 // any errors or warnings that arise.
-lint = function lint(js) {
+lint = function(js) {
 var jsl, print_it;
-print_it = function print_it(buffer) {
+print_it = function(buffer) {
 return print(buffer.toString());
 };
 jsl = spawn('jsl', ['-nologo', '-stdin']);
@@ -197,7 +197,7 @@
 return jsl.stdin.end();
 };
 // Pretty-print a stream of tokens.
-print_tokens = function print_tokens(tokens) {
+print_tokens = function(tokens) {
 var _b, _c, _d, _e, _f, strings, tag, token, value;
 strings = (function() {
 _b = []; _d = tokens;
@@ -216,7 +216,7 @@
 };
 // Use the [OptionParser module](optparse.html) to extract all options from
 // `process.argv` that are specified in `SWITCHES`.
-parse_options = function parse_options() {
+parse_options = function() {
 var o;
 option_parser = new optparse.OptionParser(SWITCHES, BANNER);
 o = (options = option_parser.parse(process.argv));
@@ -226,7 +226,7 @@
 return sources;
 };
 // The compile-time options to pass to the CoffeeScript compiler.
-compile_options = function compile_options(source) {
+compile_options = function(source) {
 var o;
 o = {
 source: source
@@ -235,12 +235,12 @@
 return o;
 };
 // Print the `--help` usage message and exit.
-usage = function usage() {
+usage = function() {
 puts(option_parser.help());
 return process.exit(0);
 };
 // Print the `--version` message and exit.
-version = function version() {
+version = function() {
 puts(("CoffeeScript version " + CoffeeScript.VERSION));
 return process.exit(0);
 };

@@ -28,7 +28,7 @@
 // we pass the pattern-defining string, the action to run, and extra options,
 // optionally. If no action is specified, we simply pass the value of the
 // previous nonterminal.
-o = function o(pattern_string, action, options) {
+o = function(pattern_string, action, options) {
 var match;
 if (!(action)) {
 return [pattern_string, '$$ = $1;', options];

@@ -1,5 +1,5 @@
 (function(){
-var balanced_string, compact, count, del, extend, flatten, helpers, include, merge, starts;
+var balanced_string, compact, count, del, extend, flatten, helpers, include, index_of, merge, starts;
 var __hasProp = Object.prototype.hasOwnProperty;
 // This file contains the common helper functions that we'd like to share among
 // the **Lexer**, **Rewriter**, and the **Nodes**. Merge objects, flatten
@@ -9,16 +9,31 @@
 this.exports = this;
 }
 helpers = (exports.helpers = {});
+// Cross-browser indexOf, so that IE can join the party.
+helpers.index_of = (index_of = function(array, item, from) {
+var _a, _b, index, other;
+if (array.indexOf) {
+return array.indexOf(item, from);
+}
+_a = array;
+for (index = 0, _b = _a.length; index < _b; index++) {
+other = _a[index];
+if (other === item && (!from || (from <= index))) {
+return index;
+}
+}
+return -1;
+});
 // Does a list include a value?
-helpers.include = (include = function include(list, value) {
-return list.indexOf(value) >= 0;
+helpers.include = (include = function(list, value) {
+return index_of(list, value) >= 0;
 });
 // Peek at the beginning of a given string to see if it matches a sequence.
-helpers.starts = (starts = function starts(string, literal, start) {
+helpers.starts = (starts = function(string, literal, start) {
 return string.substring(start, (start || 0) + literal.length) === literal;
 });
 // Trim out all falsy values from an array.
-helpers.compact = (compact = function compact(array) {
+helpers.compact = (compact = function(array) {
 var _a, _b, _c, _d, item;
 _a = []; _c = array;
 for (_b = 0, _d = _c.length; _b < _d; _b++) {
@@ -28,20 +43,20 @@
 return _a;
 });
 // Count the number of occurences of a character in a string.
-helpers.count = (count = function count(string, letter) {
+helpers.count = (count = function(string, letter) {
 var num, pos;
 num = 0;
-pos = string.indexOf(letter);
+pos = index_of(string, letter);
 while (pos !== -1) {
 num += 1;
-pos = string.indexOf(letter, pos + 1);
+pos = index_of(string, letter, pos + 1);
 }
 return num;
 });
 // Merge objects, returning a fresh copy with attributes from both sides.
 // Used every time `BaseNode#compile` is called, to allow properties in the
 // options hash to propagate down the tree without polluting other branches.
-helpers.merge = (merge = function merge(options, overrides) {
+helpers.merge = (merge = function(options, overrides) {
 var _a, _b, fresh, key, val;
 fresh = {};
 _a = options;
@@ -60,7 +75,7 @@
 });
 // Extend a source object with the properties of another object (shallow copy).
 // We use this to simulate Node's deprecated `process.mixin`
-helpers.extend = (extend = function extend(object, properties) {
+helpers.extend = (extend = function(object, properties) {
 var _a, _b, key, val;
 _a = []; _b = properties;
 for (key in _b) { if (__hasProp.call(_b, key)) {
@@ -71,7 +86,7 @@
 });
 // Return a completely flattened version of an array. Handy for getting a
 // list of `children` from the nodes.
-helpers.flatten = (flatten = function flatten(array) {
+helpers.flatten = (flatten = function(array) {
 var _a, _b, _c, item, memo;
 memo = [];
 _b = array;
@@ -83,7 +98,7 @@
 });
 // Delete a key from an object, returning the value. Useful when a node is
 // looking for a particular method in an options hash.
-helpers.del = (del = function del(obj, key) {
+helpers.del = (del = function(obj, key) {
 var val;
 val = obj[key];
 delete obj[key];
@@ -93,7 +108,7 @@
 // a series of delimiters, all of which must be nested correctly within the
 // contents of the string. This method allows us to have strings within
 // interpolations within strings, ad infinitum.
-helpers.balanced_string = (balanced_string = function balanced_string(str, delimited, options) {
+helpers.balanced_string = (balanced_string = function(str, delimited, options) {
 var _a, _b, _c, _d, close, i, levels, open, pair, slash;
 options = options || {};
 slash = delimited[0][0] === '/';

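For reference, the rebuilt helpers can be exercised directly from Node; a minimal usage sketch, assuming the compiled lib/helpers.js is required relative to the project root:

var helpers = require('./lib/helpers').helpers;
console.log(helpers.index_of(['a', 'b', 'c'], 'b'));  // 1
console.log(helpers.index_of(['a', 'b', 'c'], 'z'));  // -1
console.log(helpers.include(['a', 'b', 'c'], 'c'));   // true
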
lib/lexer.js (64 changed lines)

@@ -31,7 +31,7 @@
 // tokens. Some potential ambiguity in the grammar has been avoided by
 // pushing some extra smarts into the Lexer.
 exports.Lexer = (function() {
-Lexer = function Lexer() { };
+Lexer = function() { };
 // **tokenize** is the Lexer's main method. Scan by attempting to match tokens
 // one at a time, using a regular expression anchored at the start of the
 // remaining code, or a custom recursive token-matching method
@@ -42,7 +42,7 @@
 // of source.
 // Before returning the token stream, run it through the [Rewriter](rewriter.html)
 // unless explicitly asked not to.
-Lexer.prototype.tokenize = function tokenize(code, options) {
+Lexer.prototype.tokenize = function(code, options) {
 var o;
 code = code.replace(/(\r|\s+$)/g, '');
 o = options || {};
@@ -71,7 +71,7 @@
 // At every position, run through this list of attempted matches,
 // short-circuiting if any of them succeed. Their order determines precedence:
 // `@literal_token` is the fallback catch-all.
-Lexer.prototype.extract_next_token = function extract_next_token() {
+Lexer.prototype.extract_next_token = function() {
 if (this.extension_token()) {
 return null;
 }
@@ -108,7 +108,7 @@
 // ----------
 // Language extensions get the highest priority, first chance to tag tokens
 // as something else.
-Lexer.prototype.extension_token = function extension_token() {
+Lexer.prototype.extension_token = function() {
 var _d, _e, _f, extension;
 _e = Lexer.extensions;
 for (_d = 0, _f = _e.length; _d < _f; _d++) {
@@ -125,7 +125,7 @@
 // allowed in JavaScript, we're careful not to tag them as keywords when
 // referenced as property names here, so you can still do `jQuery.is()` even
 // though `is` means `===` otherwise.
-Lexer.prototype.identifier_token = function identifier_token() {
+Lexer.prototype.identifier_token = function() {
 var accessed, id, tag;
 if (!(id = this.match(IDENTIFIER, 1))) {
 return false;
@@ -155,7 +155,7 @@
 return true;
 };
 // Matches numbers, including decimals, hex, and exponential notation.
-Lexer.prototype.number_token = function number_token() {
+Lexer.prototype.number_token = function() {
 var number;
 if (!(number = this.match(NUMBER, 1))) {
 return false;
@@ -166,7 +166,7 @@
 };
 // Matches strings, including multi-line strings. Ensures that quotation marks
 // are balanced within the string's contents, and within nested interpolations.
-Lexer.prototype.string_token = function string_token() {
+Lexer.prototype.string_token = function() {
 var string;
 if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
 return false;
@@ -181,7 +181,7 @@
 };
 // Matches heredocs, adjusting indentation to the correct level, as heredocs
 // preserve whitespace, but ignore indentation to the left.
-Lexer.prototype.heredoc_token = function heredoc_token() {
+Lexer.prototype.heredoc_token = function() {
 var doc, match, quote;
 if (!(match = this.chunk.match(HEREDOC))) {
 return false;
@@ -197,7 +197,7 @@
 };
 // Matches and conumes comments. We pass through comments into JavaScript,
 // so they're treated as real tokens, like any other part of the language.
-Lexer.prototype.comment_token = function comment_token() {
+Lexer.prototype.comment_token = function() {
 var comment, i, lines, match;
 if (!(match = this.chunk.match(COMMENT))) {
 return false;
@@ -222,7 +222,7 @@
 return true;
 };
 // Matches JavaScript interpolated directly into the source via backticks.
-Lexer.prototype.js_token = function js_token() {
+Lexer.prototype.js_token = function() {
 var script;
 if (!(starts(this.chunk, '`'))) {
 return false;
@@ -238,7 +238,7 @@
 // to distinguish from division, so we borrow some basic heuristics from
 // JavaScript and Ruby, borrow slash balancing from `@balanced_token`, and
 // borrow interpolation from `@interpolate_string`.
-Lexer.prototype.regex_token = function regex_token() {
+Lexer.prototype.regex_token = function() {
 var end, flags, regex, str;
 if (!(this.chunk.match(REGEX_START))) {
 return false;
@@ -271,7 +271,7 @@
 };
 // Matches a token in which which the passed delimiter pairs must be correctly
 // balanced (ie. strings, JS literals).
-Lexer.prototype.balanced_token = function balanced_token() {
+Lexer.prototype.balanced_token = function() {
 var delimited;
 var _d = arguments.length, _e = _d >= 1;
 delimited = __slice.call(arguments, 0, _d - 0);
@@ -285,7 +285,7 @@
 // .map( ... )
 // Keeps track of the level of indentation, because a single outdent token
 // can close multiple indents, so we need to know how far in we happen to be.
-Lexer.prototype.line_token = function line_token() {
+Lexer.prototype.line_token = function() {
 var diff, indent, next_character, no_newlines, prev, size;
 if (!(indent = this.match(MULTI_DENT, 1))) {
 return false;
@@ -316,7 +316,7 @@
 };
 // Record an outdent token or multiple tokens, if we happen to be moving back
 // inwards past several recorded indents.
-Lexer.prototype.outdent_token = function outdent_token(move_out, no_newlines) {
+Lexer.prototype.outdent_token = function(move_out, no_newlines) {
 var last_indent;
 while (move_out > 0 && this.indents.length) {
 last_indent = this.indents.pop();
@@ -330,7 +330,7 @@
 };
 // Matches and consumes non-meaningful whitespace. Tag the previous token
 // as being "spaced", because there are some cases where it makes a difference.
-Lexer.prototype.whitespace_token = function whitespace_token() {
+Lexer.prototype.whitespace_token = function() {
 var prev, space;
 if (!(space = this.match(WHITESPACE, 1))) {
 return false;
@@ -343,7 +343,7 @@
 return true;
 };
 // Generate a newline token. Consecutive newlines get merged together.
-Lexer.prototype.newline_token = function newline_token(newlines) {
+Lexer.prototype.newline_token = function(newlines) {
 if (!(this.tag() === 'TERMINATOR')) {
 this.token('TERMINATOR', "\n");
 }
@@ -351,7 +351,7 @@
 };
 // Use a `\` at a line-ending to suppress the newline.
 // The slash is removed here once its job is done.
-Lexer.prototype.suppress_newlines = function suppress_newlines() {
+Lexer.prototype.suppress_newlines = function() {
 if (this.value() === "\\") {
 this.tokens.pop();
 }
@@ -362,7 +362,7 @@
 // the proper order of operations. There are some symbols that we tag specially
 // here. `;` and newlines are both treated as a `TERMINATOR`, we distinguish
 // parentheses that indicate a method call from regular parentheses, and so on.
-Lexer.prototype.literal_token = function literal_token() {
+Lexer.prototype.literal_token = function() {
 var match, prev_spaced, space, tag, value;
 match = this.chunk.match(OPERATOR);
 value = match && match[1];
@@ -406,7 +406,7 @@
 // ------------------
 // As we consume a new `IDENTIFIER`, look at the previous token to determine
 // if it's a special kind of accessor.
-Lexer.prototype.name_access_type = function name_access_type() {
+Lexer.prototype.name_access_type = function() {
 if (this.value() === '::') {
 this.tag(1, 'PROTOTYPE_ACCESS');
 }
@@ -421,7 +421,7 @@
 };
 // Sanitize a heredoc or herecomment by escaping internal double quotes and
 // erasing all external indentation on the left-hand side.
-Lexer.prototype.sanitize_heredoc = function sanitize_heredoc(doc, options) {
+Lexer.prototype.sanitize_heredoc = function(doc, options) {
 var attempt, indent, match;
 while (match = HEREDOC_INDENT.exec(doc)) {
 attempt = match[2] || match[3];
@@ -436,7 +436,7 @@
 return doc.replace(MULTILINER, "\\n").replace(new RegExp(options.quote, 'g'), '\\"');
 };
 // Tag a half assignment.
-Lexer.prototype.tag_half_assignment = function tag_half_assignment(tag) {
+Lexer.prototype.tag_half_assignment = function(tag) {
 var last;
 last = this.tokens.pop();
 this.tokens.push([("" + tag + "="), ("" + tag + "="), last[2]]);
@@ -445,7 +445,7 @@
 // A source of ambiguity in our grammar used to be parameter lists in function
 // definitions versus argument lists in function calls. Walk backwards, tagging
 // parameters specially in order to make things easier for the parser.
-Lexer.prototype.tag_parameters = function tag_parameters() {
+Lexer.prototype.tag_parameters = function() {
 var _d, i, tok;
 if (this.tag() !== ')') {
 return null;
@@ -469,17 +469,17 @@
 return true;
 };
 // Close up all remaining open blocks at the end of the file.
-Lexer.prototype.close_indentation = function close_indentation() {
+Lexer.prototype.close_indentation = function() {
 return this.outdent_token(this.indent);
 };
 // The error for when you try to use a forbidden word in JavaScript as
 // an identifier.
-Lexer.prototype.identifier_error = function identifier_error(word) {
+Lexer.prototype.identifier_error = function(word) {
 throw new Error(("SyntaxError: Reserved word \"" + word + "\" on line " + (this.line + 1)));
 };
 // The error for when you try to assign to a reserved word in JavaScript,
 // like "function" or "default".
-Lexer.prototype.assignment_error = function assignment_error() {
+Lexer.prototype.assignment_error = function() {
 throw new Error(("SyntaxError: Reserved word \"" + (this.value()) + "\" on line " + (this.line + 1) + " can't be assigned"));
 };
 // Expand variables and expressions inside double-quoted strings using
@@ -490,7 +490,7 @@
 // If it encounters an interpolation, this method will recursively create a
 // new Lexer, tokenize the interpolated contents, and merge them into the
 // token stream.
-Lexer.prototype.interpolate_string = function interpolate_string(str, escape_quotes) {
+Lexer.prototype.interpolate_string = function(str, escape_quotes) {
 var _d, _e, _f, _g, _h, _i, _j, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
 if (str.length < 3 || !starts(str, '"')) {
 return this.token('STRING', str);
@@ -578,11 +578,11 @@
 // Helpers
 // -------
 // Add a token to the results, taking note of the line number.
-Lexer.prototype.token = function token(tag, value) {
+Lexer.prototype.token = function(tag, value) {
 return this.tokens.push([tag, value, this.line]);
 };
 // Peek at a tag in the current token stream.
-Lexer.prototype.tag = function tag(index, new_tag) {
+Lexer.prototype.tag = function(index, new_tag) {
 var tok;
 if (!(tok = this.prev(index))) {
 return null;
@@ -594,7 +594,7 @@
 return tok[0];
 };
 // Peek at a value in the current token stream.
-Lexer.prototype.value = function value(index, val) {
+Lexer.prototype.value = function(index, val) {
 var tok;
 if (!(tok = this.prev(index))) {
 return null;
@@ -606,12 +606,12 @@
 return tok[1];
 };
 // Peek at a previous token, entire.
-Lexer.prototype.prev = function prev(index) {
+Lexer.prototype.prev = function(index) {
 return this.tokens[this.tokens.length - (index || 1)];
 };
 // Attempt to match a string against the current chunk, returning the indexed
 // match if successful, and `false` otherwise.
-Lexer.prototype.match = function match(regex, index) {
+Lexer.prototype.match = function(regex, index) {
 var m;
 if (!(m = this.chunk.match(regex))) {
 return false;
@@ -623,7 +623,7 @@
 }
 };
 // Are we in the midst of an unfinished expression?
-Lexer.prototype.unfinished = function unfinished() {
+Lexer.prototype.unfinished = function() {
 var prev;
 prev = this.prev(2);
 return this.value() && this.value().match && this.value().match(NO_NEWLINE) && prev && (prev[0] !== '.') && !this.value().match(CODE);

lib/nodes.js (324 changed lines)
File diff suppressed because it is too large

@@ -5,7 +5,7 @@
 // parser: new OptionParser switches, help_banner
 // options: parser.parse process.argv
 exports.OptionParser = (function() {
-OptionParser = function OptionParser(rules, banner) {
+OptionParser = function(rules, banner) {
 this.banner = banner;
 this.rules = build_rules(rules);
 return this;
@@ -18,7 +18,7 @@
 // containing the remaning non-option arguments. This is a simpler API than
 // many option parsers that allow you to attach callback actions for every
 // flag. Instead, you're responsible for interpreting the options object.
-OptionParser.prototype.parse = function parse(args) {
+OptionParser.prototype.parse = function(args) {
 var _a, _b, _c, arg, is_option, matched_rule, options, rule;
 options = {
 arguments: []
@@ -47,7 +47,7 @@
 };
 // Return the help text for this **OptionParser**, listing and describing all
 // of the valid options, for `--help` and such.
-OptionParser.prototype.help = function help() {
+OptionParser.prototype.help = function() {
 var _a, _b, _c, _d, _e, _f, i, let_part, lines, rule, spaces;
 lines = ['Available options:'];
 if (this.banner) {
@@ -80,7 +80,7 @@
 OPTIONAL = /\[(.+)\]/;
 // Build and return the list of option rules. If the optional *short-flag* is
 // unspecified, leave it out by padding with `null`.
-build_rules = function build_rules(rules) {
+build_rules = function(rules) {
 var _a, _b, _c, _d, tuple;
 _a = []; _c = rules;
 for (_b = 0, _d = _c.length; _b < _d; _b++) {
@@ -96,7 +96,7 @@
 };
 // Build a rule from a `-o` short flag, a `--output [DIR]` long flag, and the
 // description of what the option does.
-build_rule = function build_rule(short_flag, long_flag, description) {
+build_rule = function(short_flag, long_flag, description) {
 var match;
 match = long_flag.match(OPTIONAL);
 long_flag = long_flag.match(LONG_FLAG)[1];
@@ -110,7 +110,7 @@
 };
 // Normalize arguments by expanding merged flags into multiple flags. This allows
 // you to have `-wl` be the same as `--watch --lint`.
-normalize_arguments = function normalize_arguments(args) {
+normalize_arguments = function(args) {
 var _a, _b, _c, _d, _e, _f, arg, l, match, result;
 args = args.slice(0);
 result = [];

@@ -11,14 +11,14 @@
 prompt = 'coffee> ';
 // Quick alias for quitting the REPL.
 helpers.extend(global, {
-quit: function quit() {
+quit: function() {
 return process.exit(0);
 }
 });
 // The main REPL function. **run** is called every time a line of code is entered.
 // Attempt to evaluate the command. If there's an exception, print it out instead
 // of exiting.
-run = function run(buffer) {
+run = function(buffer) {
 var val;
 try {
 val = CoffeeScript.run(buffer.toString(), {

@@ -4,7 +4,7 @@
 return function() {
 return func.apply(obj || {}, args ? args.concat(__slice.call(arguments, 0)) : arguments);
 };
-}, __hasProp = Object.prototype.hasOwnProperty;
+}, __hasProp = Object.prototype.hasOwnProperty;
 // The CoffeeScript language has a good deal of optional syntax, implicit syntax,
 // and shorthand syntax. This can greatly complicate a grammar and bloat
 // the resulting parse table. Instead of making the parser handle it all, we take
@@ -25,13 +25,13 @@
 // The **Rewriter** class is used by the [Lexer](lexer.html), directly against
 // its internal array of tokens.
 exports.Rewriter = (function() {
-Rewriter = function Rewriter() { };
+Rewriter = function() { };
 // Rewrite the token stream in multiple passes, one logical filter at
 // a time. This could certainly be changed into a single pass through the
 // stream, with a big ol' efficient switch, but it's much nicer to work with
 // like this. The order of these passes matters -- indentation must be
 // corrected before implicit parentheses can be wrapped around blocks of code.
-Rewriter.prototype.rewrite = function rewrite(tokens) {
+Rewriter.prototype.rewrite = function(tokens) {
 this.tokens = tokens;
 this.adjust_comments();
 this.remove_leading_newlines();
@@ -48,7 +48,7 @@
 // forwards (or backwards) in the stream, to make sure we don't miss anything
 // as tokens are inserted and removed, and the stream changes length under
 // our feet.
-Rewriter.prototype.scan_tokens = function scan_tokens(block) {
+Rewriter.prototype.scan_tokens = function(block) {
 var i, move;
 i = 0;
 while (true) {
@@ -62,7 +62,7 @@
 };
 // Massage newlines and indentations so that comments don't have to be
 // correctly indented, or appear on a line of their own.
-Rewriter.prototype.adjust_comments = function adjust_comments() {
+Rewriter.prototype.adjust_comments = function() {
 return this.scan_tokens(__bind(function(prev, token, post, i) {
 var _c, after, before;
 if (!(token[0] === 'COMMENT')) {
@@ -85,7 +85,7 @@
 };
 // Leading newlines would introduce an ambiguity in the grammar, so we
 // dispatch them here.
-Rewriter.prototype.remove_leading_newlines = function remove_leading_newlines() {
+Rewriter.prototype.remove_leading_newlines = function() {
 var _c;
 _c = [];
 while (this.tokens[0] && this.tokens[0][0] === 'TERMINATOR') {
@@ -95,7 +95,7 @@
 };
 // Some blocks occur in the middle of expressions -- when we're expecting
 // this, remove their trailing newlines.
-Rewriter.prototype.remove_mid_expression_newlines = function remove_mid_expression_newlines() {
+Rewriter.prototype.remove_mid_expression_newlines = function() {
 return this.scan_tokens(__bind(function(prev, token, post, i) {
 if (!(post && include(EXPRESSION_CLOSE, post[0]) && token[0] === 'TERMINATOR')) {
 return 1;
@@ -107,7 +107,7 @@
 // The lexer has tagged the opening parenthesis of a method call, and the
 // opening bracket of an indexing operation. Match them with their paired
 // close.
-Rewriter.prototype.close_open_calls_and_indexes = function close_open_calls_and_indexes() {
+Rewriter.prototype.close_open_calls_and_indexes = function() {
 var brackets, parens;
 parens = [0];
 brackets = [0];
@@ -142,7 +142,7 @@
 // Methods may be optionally called without parentheses, for simple cases.
 // Insert the implicit parentheses here, so that the parser doesn't have to
 // deal with them.
-Rewriter.prototype.add_implicit_parentheses = function add_implicit_parentheses() {
+Rewriter.prototype.add_implicit_parentheses = function() {
 var close_calls, stack;
 stack = [0];
 close_calls = __bind(function(i) {
@@ -207,7 +207,7 @@
 // expressions that lack ending delimiters. The **Rewriter** adds the implicit
 // blocks, so it doesn't need to. ')' can close a single-line block,
 // but we need to make sure it's balanced.
-Rewriter.prototype.add_implicit_indentation = function add_implicit_indentation() {
+Rewriter.prototype.add_implicit_indentation = function() {
 return this.scan_tokens(__bind(function(prev, token, post, i) {
 var idx, indent, insertion, outdent, parens, pre, starter, tok;
 if (!(include(SINGLE_LINERS, token[0]) && post[0] !== 'INDENT' && !(token[0] === 'ELSE' && post[0] === 'IF'))) {
@@ -246,7 +246,7 @@
 };
 // Ensure that all listed pairs of tokens are correctly balanced throughout
 // the course of the token stream.
-Rewriter.prototype.ensure_balance = function ensure_balance(pairs) {
+Rewriter.prototype.ensure_balance = function(pairs) {
 var _c, _d, key, levels, line, open, open_line, unclosed, value;
 levels = {};
 open_line = {};
@@ -301,7 +301,7 @@
 // up balanced in the end.
 // 4. Be careful not to alter array or parentheses delimiters with overzealous
 // rewriting.
-Rewriter.prototype.rewrite_closing_parens = function rewrite_closing_parens() {
+Rewriter.prototype.rewrite_closing_parens = function() {
 var _c, debt, key, stack, val;
 stack = [];
 debt = {};

lib/scope.js (26 changed lines)

@@ -12,7 +12,7 @@
 this.exports = this;
 }
 exports.Scope = (function() {
-Scope = function Scope(parent, expressions, method) {
+Scope = function(parent, expressions, method) {
 var _a;
 _a = [parent, expressions, method];
 this.parent = _a[0];
@@ -35,7 +35,7 @@
 // it wraps.
 // Look up a variable name in lexical scope, and declare it if it does not
 // already exist.
-Scope.prototype.find = function find(name) {
+Scope.prototype.find = function(name) {
 if (this.check(name)) {
 return true;
 }
@@ -43,7 +43,7 @@
 return false;
 };
 // Test variables and return true the first time fn(v, k) returns true
-Scope.prototype.any = function any(fn) {
+Scope.prototype.any = function(fn) {
 var _a, k, v;
 _a = this.variables;
 for (v in _a) { if (__hasProp.call(_a, v)) {
@@ -56,12 +56,12 @@
 };
 // Reserve a variable name as originating from a function parameter for this
 // scope. No `var` required for internal references.
-Scope.prototype.parameter = function parameter(name) {
+Scope.prototype.parameter = function(name) {
 this.variables[name] = 'param';
 return this.variables[name];
 };
 // Just check to see if a variable has already been declared, without reserving.
-Scope.prototype.check = function check(name) {
+Scope.prototype.check = function(name) {
 if (this.variables[name]) {
 return true;
 }
@@ -69,7 +69,7 @@
 };
 // If we need to store an intermediate result, find an available name for a
 // compiler-generated variable. `_a`, `_b`, and so on...
-Scope.prototype.free_variable = function free_variable() {
+Scope.prototype.free_variable = function() {
 var ordinal;
 while (this.check(this.temp_var)) {
 ordinal = 1 + parseInt(this.temp_var.substr(1), 36);
@@ -80,7 +80,7 @@
 };
 // Ensure that an assignment is made at the top of this scope
 // (or at the top-level scope, if requested).
-Scope.prototype.assign = function assign(name, value) {
+Scope.prototype.assign = function(name, value) {
 this.variables[name] = {
 value: value,
 assigned: true
@@ -89,20 +89,20 @@
 };
 // Does this scope reference any variables that need to be declared in the
 // given function body?
-Scope.prototype.has_declarations = function has_declarations(body) {
+Scope.prototype.has_declarations = function(body) {
 return body === this.expressions && this.any(function(k, val) {
 return val === 'var';
 });
 };
 // Does this scope reference any assignments that need to be declared at the
 // top of the given function body?
-Scope.prototype.has_assignments = function has_assignments(body) {
+Scope.prototype.has_assignments = function(body) {
 return body === this.expressions && this.any(function(k, val) {
 return val.assigned;
 });
 };
 // Return the list of variables first declared in this scope.
-Scope.prototype.declared_variables = function declared_variables() {
+Scope.prototype.declared_variables = function() {
 var _a, _b, key, val;
 return (function() {
 _a = []; _b = this.variables;
@@ -115,7 +115,7 @@
 };
 // Return the list of assignments that are supposed to be made at the top
 // of this scope.
-Scope.prototype.assigned_variables = function assigned_variables() {
+Scope.prototype.assigned_variables = function() {
 var _a, _b, key, val;
 _a = []; _b = this.variables;
 for (key in _b) { if (__hasProp.call(_b, key)) {
@@ -125,11 +125,11 @@
 return _a;
 };
 // Compile the JavaScript for all of the variable declarations in this scope.
-Scope.prototype.compiled_declarations = function compiled_declarations() {
+Scope.prototype.compiled_declarations = function() {
 return this.declared_variables().join(', ');
 };
 // Compile the JavaScript for all of the variable assignments in this scope.
-Scope.prototype.compiled_assignments = function compiled_assignments() {
+Scope.prototype.compiled_assignments = function() {
 return this.assigned_variables().join(', ');
 };
 return Scope;

@@ -6,9 +6,17 @@
 this.exports: this unless process?
 helpers: exports.helpers: {}

+# Cross-browser indexOf, so that IE can join the party.
+helpers.index_of: index_of: (array, item, from) ->
+return array.indexOf item, from if array.indexOf
+for other, index in array
+if other is item and (not from or (from <= index))
+return index
+-1
+
 # Does a list include a value?
 helpers.include: include: (list, value) ->
-list.indexOf(value) >= 0
+index_of(list, value) >= 0

 # Peek at the beginning of a given string to see if it matches a sequence.
 helpers.starts: starts: (string, literal, start) ->
@@ -20,10 +28,10 @@ helpers.compact: compact: (array) -> item for item in array when item
 # Count the number of occurences of a character in a string.
 helpers.count: count: (string, letter) ->
 num: 0
-pos: string.indexOf(letter)
+pos: index_of string, letter
 while pos isnt -1
 num: + 1
-pos: string.indexOf(letter, pos + 1)
+pos: index_of string, letter, pos + 1
 num

 # Merge objects, returning a fresh copy with attributes from both sides.

@@ -14,7 +14,7 @@ else
 Scope: this.Scope

 # Import the helpers we plan to use.
-{compact, flatten, merge, del}: helpers
+{compact, flatten, merge, del, index_of}: helpers

 # Helper function that marks a node as a JavaScript *statement*, or as a
 # *pure_statement*. Statements must be wrapped in a closure when used as an
@@ -627,7 +627,7 @@ exports.ArrayNode: class ArrayNode extends BaseNode
 else
 objects.push "$code, "
 objects: objects.join('')
-if objects.indexOf('\n') >= 0
+if index_of(objects, '\n') >= 0
 "[\n${@idt(1)}$objects\n$@tab]"
 else
 "[$objects]"
@@ -767,7 +767,7 @@ exports.AssignNode: class AssignNode extends BaseNode
 access_class: if is_string or @variable.is_array() then IndexNode else AccessorNode
 if obj instanceof SplatNode and not splat
 val: literal(obj.compile_value(o, val_var,
-(oindex: @variable.base.objects.indexOf(obj)),
+(oindex: index_of(@variable.base.objects, obj)),
 (olength: @variable.base.objects.length) - oindex - 1))
 splat: true
 else
@@ -835,8 +835,7 @@ exports.CodeNode: class CodeNode extends BaseNode
 @body.make_return()
 (o.scope.parameter(param)) for param in params
 code: if @body.expressions.length then "\n${ @body.compile_with_declarations(o) }\n" else ''
-name_part: if @name then ' ' + @name else ''
-func: "function${ if @bound then '' else name_part }(${ params.join(', ') }) {$code${@idt(if @bound then 1 else 0)}}"
+func: "function(${ params.join(', ') }) {$code${@idt(if @bound then 1 else 0)}}"
 func: "($func)" if top and not @bound
 return func unless @bound
 utility 'slice'
@@ -993,12 +992,12 @@ exports.OpNode: class OpNode extends BaseNode
 not @second

 is_chainable: ->
-@CHAINABLE.indexOf(@operator) >= 0
+index_of(@CHAINABLE, @operator) >= 0

 compile_node: (o) ->
 o.operation: true
 return @compile_chain(o) if @is_chainable() and @first.unwrap() instanceof OpNode and @first.unwrap().is_chainable()
-return @compile_assignment(o) if @ASSIGNMENT.indexOf(@operator) >= 0
+return @compile_assignment(o) if index_of(@ASSIGNMENT, @operator) >= 0
 return @compile_unary(o) if @is_unary()
 return @compile_existence(o) if @operator is '?'
 [@first.compile(o), @operator, @second.compile(o)].join ' '
@@ -1032,7 +1031,7 @@ exports.OpNode: class OpNode extends BaseNode

 # Compile a unary **OpNode**.
 compile_unary: (o) ->
-space: if @PREFIX_OPERATORS.indexOf(@operator) >= 0 then ' ' else ''
+space: if index_of(@PREFIX_OPERATORS, @operator) >= 0 then ' ' else ''
 parts: [@operator, space, @first.compile(o)]
 parts: parts.reverse() if @flip
 parts.join('')

@@ -23,7 +23,7 @@ ok x is 100

 # This-assignment.
 tester: ->
-@example: -> puts 'example function'
+@example: -> 'example function'
 this

-ok tester().example.name is 'example'
+ok tester().example() is 'example function'

@@ -6,7 +6,6 @@ ok x is 1
 ok typeof(y.x) is 'function'
 ok y.x instanceof Function
 ok y.x() is 3
-ok y.x.name is 'x'


 # The empty function should not cause a syntax error.