1
0
Fork 0
mirror of https://github.com/jashkenas/coffeescript.git synced 2022-11-09 12:23:24 -05:00

First draft of switching the CoffeeScript Compiler over to camelCase. Pour one on the ground for underscores...

This commit is contained in:
Jeremy Ashkenas 2010-06-12 19:05:13 -04:00
parent 1948b0c7c7
commit e14f4c5db1
57 changed files with 1243 additions and 1243 deletions

View file

@ -20,8 +20,8 @@ task 'install', 'install CoffeeScript into /usr/local (or --prefix)', (options)
"cp -rf bin lib LICENSE README package.json src vendor $lib"
"ln -sf $lib/bin/coffee $base/bin/coffee"
"ln -sf $lib/bin/cake $base/bin/cake"
"mkdir -p ~/.node_libraries"
"ln -sf $lib/lib ~/.node_libraries/coffee-script"
"mkdir -p ~/.nodeLibraries"
"ln -sf $lib/lib ~/.nodeLibraries/coffee-script"
].join(' && '), (err, stdout, stderr) ->
if err then print stderr
)
@ -44,8 +44,8 @@ task 'build:parser', 'rebuild the Jison parser (run build first)', ->
require.paths.unshift 'vendor/jison/lib'
parser: require('./lib/grammar').parser
js: parser.generate()
parser_path: 'lib/parser.js'
fs.writeFile parser_path, js
parserPath: 'lib/parser.js'
fs.writeFile parserPath, js
task 'build:ultraviolet', 'build and install the Ultraviolet syntax highlighter', ->
@ -80,20 +80,20 @@ task 'loc', 'count the lines of source code in CoffeeScript', ->
task 'test', 'run the CoffeeScript language test suite', ->
helpers.extend global, require 'assert'
passed_tests: failed_tests: 0
start_time: new Date()
original_ok: ok
passedTests: failedTests: 0
startTime: new Date()
originalOk: ok
helpers.extend global, {
ok: (args...) -> passed_tests += 1; original_ok(args...)
ok: (args...) -> passedTests += 1; originalOk(args...)
CoffeeScript: CoffeeScript
}
red: '\033[0;31m'
green: '\033[0;32m'
reset: '\033[0m'
process.addListener 'exit', ->
time: ((new Date() - start_time) / 1000).toFixed(2)
message: "passed $passed_tests tests in $time seconds$reset"
puts(if failed_tests then "${red}failed $failed_tests and $message" else "$green$message")
time: ((new Date() - startTime) / 1000).toFixed(2)
message: "passed $passedTests tests in $time seconds$reset"
puts(if failedTests then "${red}failed $failedTests and $message" else "$green$message")
fs.readdir 'test', (err, files) ->
files.forEach (file) ->
return unless file.match(/\.coffee$/i)
@ -102,6 +102,6 @@ task 'test', 'run the CoffeeScript language test suite', ->
try
CoffeeScript.run code.toString(), {source: source}
catch err
failed_tests += 1
failedTests += 1
puts "${red}failed:${reset} $source"
puts err.stack

View file

@ -1,9 +1,9 @@
launch() if ignition is on
volume: 10 if band isnt spinal_tap
volume: 10 if band isnt spinalTap
let_the_wild_rumpus_begin() unless answer is no
letTheWildRumpusBegin() unless answer is no
if car.speed < speed_limit then accelerate()
if car.speed < speedLimit then accelerate()
print "My name is " + @name

View file

@ -1,3 +1,3 @@
task 'test', 'run each of the unit tests', ->
for test in test_files
for test in testFiles
fs.readFile test, (err, code) -> eval coffee.compile code

View file

@ -1,9 +1,9 @@
mood: greatly_improved if singing
mood: greatlyImproved if singing
if happy and knows_it
claps_hands()
cha_cha_cha()
if happy and knowsIt
clapsHands()
chaChaCha()
date: if friday then sue else jill
expensive: or do_the_math()
expensive: or doTheMath()

View file

@ -1,8 +1,8 @@
grade: (student) ->
if student.excellent_work
if student.excellentWork
"A+"
else if student.okay_stuff
if student.tried_hard then "B" else "B-"
else if student.okayStuff
if student.triedHard then "B" else "B-"
else
"C"

View file

@ -2,5 +2,5 @@ Account: (customer, cart) ->
@customer: customer
@cart: cart
$('.shopping_cart').bind 'click', (event) =>
$('.shoppingCart').bind 'click', (event) =>
@customer.purchase @cart

View file

@ -1,5 +1,5 @@
weather_report: (location) ->
weatherReport: (location) ->
# Make an Ajax request to fetch the weather...
[location, 72, "Mostly Sunny"]
[city, temp, forecast]: weather_report "Berkeley, CA"
[city, temp, forecast]: weatherReport "Berkeley, CA"

View file

@ -1,4 +1,4 @@
years_old: {max: 10, ida: 9, tim: 11}
yearsOld: {max: 10, ida: 9, tim: 11}
ages: for child, age of years_old
ages: for child, age of yearsOld
child + " is " + age

View file

@ -1,9 +1,9 @@
# Assignment:
number: 42
opposite_day: true
oppositeDay: true
# Conditions:
number: -42 if opposite_day
number: -42 if oppositeDay
# Functions:
square: (x) -> x * x
@ -26,4 +26,4 @@ race: (winner, runners...) ->
alert "I knew it!" if elvis?
# Array comprehensions:
cubed_list: math.cube num for num in list
cubedList: math.cube num for num in list

View file

@ -1,4 +1,4 @@
bait: 1000
and_switch: 0
andSwitch: 0
[bait, and_switch]: [and_switch, bait]
[bait, andSwitch]: [andSwitch, bait]

View file

@ -1,6 +1,6 @@
countdown: num for num in [10..1]
egg_delivery: ->
eggDelivery: ->
for i in [0...eggs.length] by 12
dozen_eggs: eggs[i...i+12]
deliver new egg_carton(dozen)
dozenEggs: eggs[i...i+12]
deliver new eggCarton(dozen)

View file

@ -1,5 +1,5 @@
num: 1
change_numbers: ->
new_num: -1
changeNumbers: ->
newNum: -1
num: 10
new_num: change_numbers()
newNum: changeNumbers()

View file

@ -1,6 +1,6 @@
numbers: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
three_to_six: numbers[3..6]
threeToSix: numbers[3..6]
numbers_copy: numbers[0...numbers.length]
numbersCopy: numbers[0...numbers.length]

View file

@ -1 +1 @@
lottery.draw_winner()?.address?.zipcode
lottery.drawWinner()?.address?.zipcode

View file

@ -1,9 +1,9 @@
gold: silver: the_field: "unknown"
gold: silver: theField: "unknown"
award_medals: (first, second, rest...) ->
awardMedals: (first, second, rest...) ->
gold: first
silver: second
the_field: rest
theField: rest
contenders: [
"Michael Phelps"
@ -18,8 +18,8 @@ contenders: [
"Usain Bolt"
]
award_medals contenders...
awardMedals contenders...
alert "Gold: " + gold
alert "Silver: " + silver
alert "The Field: " + the_field
alert "The Field: " + theField

View file

@ -1,4 +1,4 @@
moby_dick: "Call me Ishmael. Some years ago --
mobyDick: "Call me Ishmael. Some years ago --
never mind how long precisely -- having little
or no money in my purse, and nothing particular
to interest me on shore, I thought I would sail

View file

@ -1,10 +1,10 @@
switch day
when "Mon" then go_to_work()
when "Tue" then go_to_the_park()
when "Thu" then go_ice_fishing()
when "Mon" then goToWork()
when "Tue" then goToThePark()
when "Thu" then goIceFishing()
when "Fri", "Sat"
if day is bingo_day
go_to_bingo()
go_dancing()
when "Sun" then go_to_church()
else go_to_work()
if day is bingoDay
goToBingo()
goDancing()
when "Sun" then goToChurch()
else goToWork()

View file

@ -1,7 +1,7 @@
try
all_hell_breaks_loose()
cats_and_dogs_living_together()
allHellBreaksLoose()
catsAndDogsLivingTogether()
catch error
print error
finally
clean_up()
cleanUp()

View file

@ -1,5 +1,5 @@
# Econ 101
if this.studying_economics
if this.studyingEconomics
buy() while supply > demand
sell() until supply > demand

View file

@ -1,5 +1,5 @@
(function(){
var CoffeeScript, fs, helpers, no_such_task, oparse, options, optparse, path, print_tasks, switches, tasks;
var CoffeeScript, fs, helpers, missingTask, oparse, options, optparse, path, printTasks, switches, tasks;
var __hasProp = Object.prototype.hasOwnProperty;
// `cake` is a simplified version of [Make](http://www.gnu.org/software/make/)
// ([Rake](http://rake.rubyforge.org/), [Jake](http://github.com/280north/jake))
@ -45,7 +45,7 @@
// Invoke another task in the current Cakefile.
invoke: function(name) {
if (!(tasks[name])) {
no_such_task(name);
missingTask(name);
}
return tasks[name].action(options);
}
@ -65,7 +65,7 @@
});
oparse = new optparse.OptionParser(switches);
if (!(args.length)) {
return print_tasks();
return printTasks();
}
options = oparse.parse(args);
_a = []; _c = options.arguments;
@ -77,7 +77,7 @@
});
};
// Display the list of Cake tasks in a format similar to `rake -T`
print_tasks = function() {
printTasks = function() {
var _a, _b, desc, i, name, spaces, task;
puts('');
_a = tasks;
@ -99,7 +99,7 @@
}
};
// Print an error and exit when attempting to all an undefined task.
no_such_task = function(task) {
missingTask = function(task) {
puts(("No such task: \"" + task + "\""));
return process.exit(1);
};

View file

@ -1,5 +1,5 @@
(function(){
var Lexer, compile, helpers, lexer, parser, path, process_scripts;
var Lexer, compile, helpers, lexer, parser, path, processScripts;
// CoffeeScript can be used both on the server, as a command-line compiler based
// on Node.js/V8, or to run CoffeeScripts directly in the browser. This module
// contains the main entry functions for tokenzing, parsing, and compiling source
@ -90,7 +90,7 @@
// on page load. Unfortunately, the text contents of remote scripts cannot be
// accessed from the browser, so only inline script tags will work.
if ((typeof document !== "undefined" && document !== null) && document.getElementsByTagName) {
process_scripts = function() {
processScripts = function() {
var _a, _b, _c, _d, tag;
_a = []; _c = document.getElementsByTagName('script');
for (_b = 0, _d = _c.length; _b < _d; _b++) {
@ -100,9 +100,9 @@
return _a;
};
if (window.addEventListener) {
window.addEventListener('load', process_scripts, false);
window.addEventListener('load', processScripts, false);
} else if (window.attachEvent) {
window.attachEvent('onload', process_scripts);
window.attachEvent('onload', processScripts);
}
}
})();

View file

@ -1,5 +1,5 @@
(function(){
var BANNER, CoffeeScript, SWITCHES, _a, compile_options, compile_script, compile_scripts, compile_stdio, exec, fs, lint, option_parser, options, optparse, parse_options, path, print_tokens, sources, spawn, usage, version, watch, write_js;
var BANNER, CoffeeScript, SWITCHES, _a, compileOptions, compileScript, compileScripts, compileStdio, exec, fs, lint, optionParser, options, optparse, parseOptions, path, printTokens, sources, spawn, usage, version, watch, writeJs;
// The `coffee` utility. Handles command-line compilation of CoffeeScript
// into various forms: saved into `.js` files or printed to stdout, piped to
// [JSLint](http://javascriptlint.com/) or recompiled every time the source is
@ -20,13 +20,13 @@
// Top-level objects shared by all the functions.
options = {};
sources = [];
option_parser = null;
optionParser = null;
// Run `coffee` by parsing passed options and determining what action to take.
// Many flags cause us to divert before compiling anything. Flags passed after
// `--` will be passed verbatim to your script as arguments in `process.argv`
exports.run = function() {
var flags, separator;
parse_options();
parseOptions();
if (options.help) {
return usage();
}
@ -37,10 +37,10 @@
return require('./repl');
}
if (options.stdio) {
return compile_stdio();
return compileStdio();
}
if (options.eval) {
return compile_script('console', sources[0]);
return compileScript('console', sources[0]);
}
if (!(sources.length)) {
return require('./repl');
@ -52,19 +52,19 @@
sources = sources.slice(0, separator);
}
process.ARGV = (process.argv = flags);
return compile_scripts();
return compileScripts();
};
// Asynchronously read in each CoffeeScript in a list of source files and
// compile them. If a directory is passed, recursively compile all
// '.coffee' extension source files in it and all subdirectories.
compile_scripts = function() {
compileScripts = function() {
var _b, _c, _d, _e, base, compile, source;
_b = []; _d = sources;
for (_c = 0, _e = _d.length; _c < _e; _c++) {
source = _d[_c];
_b.push((function() {
base = source;
compile = function(source, top_level) {
compile = function(source, topLevel) {
return path.exists(source, function(exists) {
if (!(exists)) {
throw new Error(("File not found: " + source));
@ -80,9 +80,9 @@
}
return _f;
});
} else if (top_level || path.extname(source) === '.coffee') {
} else if (topLevel || path.extname(source) === '.coffee') {
fs.readFile(source, function(err, code) {
return compile_script(source, code.toString(), base);
return compileScript(source, code.toString(), base);
});
if (options.watch) {
return watch(source, base);
@ -99,23 +99,23 @@
// Compile a single source script, containing the given code, according to the
// requested options. If evaluating the script directly sets `__filename`,
// `__dirname` and `module.filename` to be correct relative to the script's path.
compile_script = function(source, code, base) {
var code_opts, js, o;
compileScript = function(source, code, base) {
var codeOpts, js, o;
o = options;
code_opts = compile_options(source);
codeOpts = compileOptions(source);
try {
if (o.tokens) {
return print_tokens(CoffeeScript.tokens(code));
return printTokens(CoffeeScript.tokens(code));
} else if (o.nodes) {
return puts(CoffeeScript.nodes(code).toString());
} else if (o.run) {
return CoffeeScript.run(code, code_opts);
return CoffeeScript.run(code, codeOpts);
} else {
js = CoffeeScript.compile(code, code_opts);
js = CoffeeScript.compile(code, codeOpts);
if (o.print) {
return print(js);
} else if (o.compile) {
return write_js(source, js, base);
return writeJs(source, js, base);
} else if (o.lint) {
return lint(js);
}
@ -129,7 +129,7 @@
};
// Attach the appropriate listeners to compile scripts incoming over **stdin**,
// and write them back to **stdout**.
compile_stdio = function() {
compileStdio = function() {
var code, stdin;
code = '';
stdin = process.openStdin();
@ -139,7 +139,7 @@
}
});
return stdin.addListener('end', function() {
return compile_script('stdio', code);
return compileScript('stdio', code);
});
};
// Watch a source CoffeeScript file using `fs.watchFile`, recompiling it every
@ -157,22 +157,22 @@
puts(("Compiled " + source));
}
return fs.readFile(source, function(err, code) {
return compile_script(source, code.toString(), base);
return compileScript(source, code.toString(), base);
});
});
};
// Write out a JavaScript source file with the compiled code. By default, files
// are written out in `cwd` as `.js` files with the same name, but the output
// directory can be customized with `--output`.
write_js = function(source, js, base) {
var base_dir, compile, dir, filename, js_path, src_dir;
writeJs = function(source, js, base) {
var baseDir, compile, dir, filename, jsPath, srcDir;
filename = path.basename(source, path.extname(source)) + '.js';
src_dir = path.dirname(source);
base_dir = src_dir.substring(base.length);
dir = options.output ? path.join(options.output, base_dir) : src_dir;
js_path = path.join(dir, filename);
srcDir = path.dirname(source);
baseDir = srcDir.substring(base.length);
dir = options.output ? path.join(options.output, baseDir) : srcDir;
jsPath = path.join(dir, filename);
compile = function() {
return fs.writeFile(js_path, js);
return fs.writeFile(jsPath, js);
};
return path.exists(dir, function(exists) {
if (exists) {
@ -185,18 +185,18 @@
// Pipe compiled JS through JSLint (requires a working `jsl` command), printing
// any errors or warnings that arise.
lint = function(js) {
var jsl, print_it;
print_it = function(buffer) {
var jsl, printIt;
printIt = function(buffer) {
return print(buffer.toString());
};
jsl = spawn('jsl', ['-nologo', '-stdin']);
jsl.stdout.addListener('data', print_it);
jsl.stderr.addListener('data', print_it);
jsl.stdout.addListener('data', printIt);
jsl.stderr.addListener('data', printIt);
jsl.stdin.write(js);
return jsl.stdin.end();
};
// Pretty-print a stream of tokens.
print_tokens = function(tokens) {
printTokens = function(tokens) {
var _b, _c, _d, _e, _f, strings, tag, token, value;
strings = (function() {
_b = []; _d = tokens;
@ -215,17 +215,17 @@
};
// Use the [OptionParser module](optparse.html) to extract all options from
// `process.argv` that are specified in `SWITCHES`.
parse_options = function() {
parseOptions = function() {
var o;
option_parser = new optparse.OptionParser(SWITCHES, BANNER);
o = (options = option_parser.parse(process.argv));
optionParser = new optparse.OptionParser(SWITCHES, BANNER);
o = (options = optionParser.parse(process.argv));
options.run = !(o.compile || o.print || o.lint);
options.print = !!(o.print || (o.eval || o.stdio && o.compile));
sources = options.arguments.slice(2, options.arguments.length);
return sources;
};
// The compile-time options to pass to the CoffeeScript compiler.
compile_options = function(source) {
compileOptions = function(source) {
var o;
o = {
source: source
@ -235,7 +235,7 @@
};
// Print the `--help` usage message and exit.
usage = function() {
puts(option_parser.help());
puts(optionParser.help());
return process.exit(0);
};
// Print the `--version` message and exit.

View file

@ -28,13 +28,13 @@
// we pass the pattern-defining string, the action to run, and extra options,
// optionally. If no action is specified, we simply pass the value of the
// previous nonterminal.
o = function(pattern_string, action, options) {
o = function(patternString, action, options) {
var match;
if (!(action)) {
return [pattern_string, '$$ = $1;', options];
return [patternString, '$$ = $1;', options];
}
action = (match = (action + '').match(unwrap)) ? match[1] : ("(" + action + "())");
return [pattern_string, ("$$ = " + action + ";"), options];
return [patternString, ("$$ = " + action + ";"), options];
};
// Grammatical Rules
// -----------------
@ -265,7 +265,7 @@
o("INDEX_START Expression INDEX_END", function() {
return new IndexNode($2);
}), o("INDEX_SOAK Index", function() {
$2.soak_node = true;
$2.soakNode = true;
return $2;
}), o("INDEX_PROTO Index", function() {
$2.proto = true;
@ -328,7 +328,7 @@
// and calling `super()`
Call: [
o("Invocation"), o("NEW Invocation", function() {
return $2.new_instance();
return $2.newInstance();
}), o("Super")
],
// Extending an object by setting its prototype chain to reference a parent
@ -479,20 +479,20 @@
// or postfix, with a single expression. There is no do..while.
While: [
o("WhileSource Block", function() {
return $1.add_body($2);
return $1.addBody($2);
}), o("Statement WhileSource", function() {
return $2.add_body(Expressions.wrap([$1]));
return $2.addBody(Expressions.wrap([$1]));
}), o("Expression WhileSource", function() {
return $2.add_body(Expressions.wrap([$1]));
return $2.addBody(Expressions.wrap([$1]));
}), o("Loop", function() {
return $1;
})
],
Loop: [
o("LOOP Block", function() {
return new WhileNode(new LiteralNode('true')).add_body($2);
return new WhileNode(new LiteralNode('true')).addBody($2);
}), o("LOOP Expression", function() {
return new WhileNode(new LiteralNode('true')).add_body(Expressions.wrap([$2]));
return new WhileNode(new LiteralNode('true')).addBody(Expressions.wrap([$2]));
})
],
// Array, object, and range comprehensions, at the most generic level.
@ -573,20 +573,20 @@
// switch/case/default by compiling into an if-else chain.
Switch: [
o("SWITCH Expression INDENT Whens OUTDENT", function() {
return $4.switches_over($2);
return $4.switchesOver($2);
}), o("SWITCH Expression INDENT Whens ELSE Block OUTDENT", function() {
return $4.switches_over($2).add_else($6, true);
return $4.switchesOver($2).addElse($6, true);
}), o("SWITCH INDENT Whens OUTDENT", function() {
return $3;
}), o("SWITCH INDENT Whens ELSE Block OUTDENT", function() {
return $3.add_else($5, true);
return $3.addElse($5, true);
})
],
// The inner list of whens is left recursive. At code-generation time, the
// IfNode will rewrite them into a proper chain.
Whens: [
o("When"), o("Whens When", function() {
return $1.add_else($2);
return $1.addElse($2);
})
],
// An individual **When** clause, with action.
@ -615,13 +615,13 @@
invert: true
});
}), o("IfStart ELSE IF Expression Block", function() {
return $1.add_else((new IfNode($4, $5)).force_statement());
return $1.addElse((new IfNode($4, $5)).forceStatement());
})
],
// An **IfStart** can optionally be followed by an else block.
IfBlock: [
o("IfStart"), o("IfStart ELSE Block", function() {
return $1.add_else($3);
return $1.addElse($3);
})
],
// The full complement of *if* expressions, including postfix one-liner

View file

@ -1,5 +1,5 @@
(function(){
var balanced_string, compact, count, del, extend, flatten, helpers, include, index_of, merge, starts;
var balancedString, compact, count, del, extend, flatten, helpers, include, indexOf, merge, starts;
var __hasProp = Object.prototype.hasOwnProperty;
// This file contains the common helper functions that we'd like to share among
// the **Lexer**, **Rewriter**, and the **Nodes**. Merge objects, flatten
@ -10,7 +10,7 @@
}
helpers = (exports.helpers = {});
// Cross-browser indexOf, so that IE can join the party.
helpers.index_of = (index_of = function(array, item, from) {
helpers.indexOf = (indexOf = function(array, item, from) {
var _a, _b, index, other;
if (array.indexOf) {
return array.indexOf(item, from);
@ -26,7 +26,7 @@
});
// Does a list include a value?
helpers.include = (include = function(list, value) {
return index_of(list, value) >= 0;
return indexOf(list, value) >= 0;
});
// Peek at the beginning of a given string to see if it matches a sequence.
helpers.starts = (starts = function(string, literal, start) {
@ -46,10 +46,10 @@
helpers.count = (count = function(string, letter) {
var num, pos;
num = 0;
pos = index_of(string, letter);
pos = indexOf(string, letter);
while (pos !== -1) {
num += 1;
pos = index_of(string, letter, pos + 1);
pos = indexOf(string, letter, pos + 1);
}
return num;
});
@ -108,7 +108,7 @@
// a series of delimiters, all of which must be nested correctly within the
// contents of the string. This method allows us to have strings within
// interpolations within strings, ad infinitum.
helpers.balanced_string = (balanced_string = function(str, delimited, options) {
helpers.balancedString = (balancedString = function(str, delimited, options) {
var _a, _b, _c, _d, close, i, levels, open, pair, slash;
options = options || {};
slash = delimited[0][0] === '/';

View file

@ -1,11 +1,11 @@
(function(){
var ASSIGNED, ASSIGNMENT, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, CONVERSIONS, HALF_ASSIGNMENTS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, Lexer, MULTILINER, MULTI_DENT, NEXT_CHARACTER, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_END, REGEX_ESCAPE, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, _a, _b, _c, balanced_string, compact, count, helpers, include, starts;
var ASSIGNED, ASSIGNMENT, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, CONVERSIONS, HALF_ASSIGNMENTS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, Lexer, MULTILINER, MULTI_DENT, NEXT_CHARACTER, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_END, REGEX_ESCAPE, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, _a, _b, _c, balancedString, compact, count, helpers, include, starts;
var __slice = Array.prototype.slice;
// The CoffeeScript Lexer. Uses a series of token-matching regexes to attempt
// matches against the beginning of the source code. When a match is found,
// a token is produced, we consume the match, and start again. Tokens are in the
// form:
// [tag, value, line_number]
// [tag, value, lineNumber]
// Which is a format that can be fed directly into [Jison](http://github.com/zaach/jison).
// Set up the Lexer for both Node.js and the browser, depending on where we are.
if ((typeof process !== "undefined" && process !== null)) {
@ -24,7 +24,7 @@
count = _c.count;
starts = _c.starts;
compact = _c.compact;
balanced_string = _c.balanced_string;
balancedString = _c.balancedString;
// The Lexer Class
// ---------------
// The Lexer class reads a stream of CoffeeScript and divvys it up into tagged
@ -62,9 +62,9 @@
// Stream of parsed tokens in the form ['TYPE', value, line]
while (this.i < this.code.length) {
this.chunk = this.code.slice(this.i);
this.extract_next_token();
this.extractNextToken();
}
this.close_indentation();
this.closeIndentation();
if (o.rewrite === false) {
return this.tokens;
}
@ -72,45 +72,45 @@
};
// At every position, run through this list of attempted matches,
// short-circuiting if any of them succeed. Their order determines precedence:
// `@literal_token` is the fallback catch-all.
Lexer.prototype.extract_next_token = function() {
if (this.extension_token()) {
// `@literalToken` is the fallback catch-all.
Lexer.prototype.extractNextToken = function() {
if (this.extensionToken()) {
return null;
}
if (this.identifier_token()) {
if (this.identifierToken()) {
return null;
}
if (this.number_token()) {
if (this.numberToken()) {
return null;
}
if (this.heredoc_token()) {
if (this.heredocToken()) {
return null;
}
if (this.regex_token()) {
if (this.regexToken()) {
return null;
}
if (this.comment_token()) {
if (this.commentToken()) {
return null;
}
if (this.line_token()) {
if (this.lineToken()) {
return null;
}
if (this.whitespace_token()) {
if (this.whitespaceToken()) {
return null;
}
if (this.js_token()) {
if (this.jsToken()) {
return null;
}
if (this.string_token()) {
if (this.stringToken()) {
return null;
}
return this.literal_token();
return this.literalToken();
};
// Tokenizers
// ----------
// Language extensions get the highest priority, first chance to tag tokens
// as something else.
Lexer.prototype.extension_token = function() {
Lexer.prototype.extensionToken = function() {
var _d, _e, _f, extension;
_e = Lexer.extensions;
for (_d = 0, _f = _e.length; _d < _f; _d++) {
@ -127,29 +127,29 @@
// allowed in JavaScript, we're careful not to tag them as keywords when
// referenced as property names here, so you can still do `jQuery.is()` even
// though `is` means `===` otherwise.
Lexer.prototype.identifier_token = function() {
var forced_identifier, id, tag;
Lexer.prototype.identifierToken = function() {
var forcedIdentifier, id, tag;
if (!(id = this.match(IDENTIFIER, 1))) {
return false;
}
forced_identifier = this.tag_accessor() || this.match(ASSIGNED, 1);
forcedIdentifier = this.tagAccessor() || this.match(ASSIGNED, 1);
tag = 'IDENTIFIER';
if (include(JS_KEYWORDS, id) || (!forced_identifier && include(COFFEE_KEYWORDS, id))) {
if (include(JS_KEYWORDS, id) || (!forcedIdentifier && include(COFFEE_KEYWORDS, id))) {
tag = id.toUpperCase();
}
if (include(RESERVED, id)) {
this.identifier_error(id);
this.identifierError(id);
}
if (tag === 'WHEN' && include(LINE_BREAK, this.tag())) {
tag = 'LEADING_WHEN';
}
this.i += id.length;
if (!(forced_identifier)) {
if (!(forcedIdentifier)) {
if (include(COFFEE_ALIASES, id)) {
tag = (id = CONVERSIONS[id]);
}
if (this.prev() && this.prev()[0] === 'ASSIGN' && include(HALF_ASSIGNMENTS, tag)) {
return this.tag_half_assignment(tag);
return this.tagHalfAssignment(tag);
}
}
this.token(tag, id);
@ -157,7 +157,7 @@
};
// Matches numbers, including decimals, hex, and exponential notation.
// Be careful not to interfere with ranges-in-progress.
Lexer.prototype.number_token = function() {
Lexer.prototype.numberToken = function() {
var number;
if (!(number = this.match(NUMBER, 1))) {
return false;
@ -171,44 +171,44 @@
};
// Matches strings, including multi-line strings. Ensures that quotation marks
// are balanced within the string's contents, and within nested interpolations.
Lexer.prototype.string_token = function() {
Lexer.prototype.stringToken = function() {
var string;
if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
return false;
}
if (!(string = this.balanced_token(['"', '"'], ['${', '}']) || this.balanced_token(["'", "'"]))) {
if (!(string = this.balancedToken(['"', '"'], ['${', '}']) || this.balancedToken(["'", "'"]))) {
return false;
}
this.interpolate_string(string.replace(STRING_NEWLINES, " \\\n"));
this.interpolateString(string.replace(STRING_NEWLINES, " \\\n"));
this.line += count(string, "\n");
this.i += string.length;
return true;
};
// Matches heredocs, adjusting indentation to the correct level, as heredocs
// preserve whitespace, but ignore indentation to the left.
Lexer.prototype.heredoc_token = function() {
Lexer.prototype.heredocToken = function() {
var doc, match, quote;
if (!(match = this.chunk.match(HEREDOC))) {
return false;
}
quote = match[1].substr(0, 1);
doc = this.sanitize_heredoc(match[2] || match[4], {
doc = this.sanitizeHeredoc(match[2] || match[4], {
quote: quote
});
this.interpolate_string(("" + quote + doc + quote));
this.interpolateString(("" + quote + doc + quote));
this.line += count(match[1], "\n");
this.i += match[1].length;
return true;
};
// Matches and conumes comments. We pass through comments into JavaScript,
// so they're treated as real tokens, like any other part of the language.
Lexer.prototype.comment_token = function() {
Lexer.prototype.commentToken = function() {
var comment, i, lines, match;
if (!(match = this.chunk.match(COMMENT))) {
return false;
}
if (match[3]) {
comment = this.sanitize_heredoc(match[3], {
comment = this.sanitizeHeredoc(match[3], {
herecomment: true
});
this.token('HERECOMMENT', comment.split(MULTILINER));
@ -228,12 +228,12 @@
return true;
};
// Matches JavaScript interpolated directly into the source via backticks.
Lexer.prototype.js_token = function() {
Lexer.prototype.jsToken = function() {
var script;
if (!(starts(this.chunk, '`'))) {
return false;
}
if (!(script = this.balanced_token(['`', '`']))) {
if (!(script = this.balancedToken(['`', '`']))) {
return false;
}
this.token('JS', script.replace(JS_CLEANER, ''));
@ -242,9 +242,9 @@
};
// Matches regular expression literals. Lexing regular expressions is difficult
// to distinguish from division, so we borrow some basic heuristics from
// JavaScript and Ruby, borrow slash balancing from `@balanced_token`, and
// borrow interpolation from `@interpolate_string`.
Lexer.prototype.regex_token = function() {
// JavaScript and Ruby, borrow slash balancing from `@balancedToken`, and
// borrow interpolation from `@interpolateString`.
Lexer.prototype.regexToken = function() {
var end, flags, regex, str;
if (!(this.chunk.match(REGEX_START))) {
return false;
@ -252,7 +252,7 @@
if (include(NOT_REGEX, this.tag())) {
return false;
}
if (!(regex = this.balanced_token(['/', '/']))) {
if (!(regex = this.balancedToken(['/', '/']))) {
return false;
}
if (!(end = this.chunk.substr(regex.length).match(REGEX_END))) {
@ -267,7 +267,7 @@
return '\\' + escaped;
});
this.tokens = this.tokens.concat([['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]);
this.interpolate_string(("\"" + str + "\""), true);
this.interpolateString(("\"" + str + "\""), true);
this.tokens = this.tokens.concat([[',', ','], ['STRING', ("\"" + flags + "\"")], [')', ')'], [')', ')']]);
} else {
this.token('REGEX', regex);
@ -277,11 +277,11 @@
};
// Matches a token in which which the passed delimiter pairs must be correctly
// balanced (ie. strings, JS literals).
Lexer.prototype.balanced_token = function() {
Lexer.prototype.balancedToken = function() {
var delimited;
var _d = arguments.length, _e = _d >= 1;
delimited = __slice.call(arguments, 0, _d - 0);
return balanced_string(this.chunk, delimited);
return balancedString(this.chunk, delimited);
};
// Matches newlines, indents, and outdents, and determines which is which.
// If we can detect that the current line is continued onto the the next line,
@ -291,8 +291,8 @@
// .map( ... )
// Keeps track of the level of indentation, because a single outdent token
// can close multiple indents, so we need to know how far in we happen to be.
Lexer.prototype.line_token = function() {
var diff, indent, next_character, no_newlines, prev, size;
Lexer.prototype.lineToken = function() {
var diff, indent, nextCharacter, noNewlines, prev, size;
if (!(indent = this.match(MULTI_DENT, 1))) {
return false;
}
@ -300,50 +300,50 @@
this.i += indent.length;
prev = this.prev(2);
size = indent.match(LAST_DENTS).reverse()[0].match(LAST_DENT)[1].length;
next_character = this.chunk.match(NEXT_CHARACTER)[1];
no_newlines = next_character === '.' || next_character === ',' || this.unfinished();
nextCharacter = this.chunk.match(NEXT_CHARACTER)[1];
noNewlines = nextCharacter === '.' || nextCharacter === ',' || this.unfinished();
if (size === this.indent) {
if (no_newlines) {
return this.suppress_newlines();
if (noNewlines) {
return this.suppressNewlines();
}
return this.newline_token(indent);
return this.newlineToken(indent);
} else if (size > this.indent) {
if (no_newlines) {
return this.suppress_newlines();
if (noNewlines) {
return this.suppressNewlines();
}
diff = size - this.indent;
this.token('INDENT', diff);
this.indents.push(diff);
} else {
this.outdent_token(this.indent - size, no_newlines);
this.outdentToken(this.indent - size, noNewlines);
}
this.indent = size;
return true;
};
// Record an outdent token or multiple tokens, if we happen to be moving back
// inwards past several recorded indents.
Lexer.prototype.outdent_token = function(move_out, no_newlines) {
var last_indent;
if (move_out > -this.outdebt) {
while (move_out > 0 && this.indents.length) {
last_indent = this.indents.pop();
this.token('OUTDENT', last_indent);
move_out -= last_indent;
Lexer.prototype.outdentToken = function(moveOut, noNewlines) {
var lastIndent;
if (moveOut > -this.outdebt) {
while (moveOut > 0 && this.indents.length) {
lastIndent = this.indents.pop();
this.token('OUTDENT', lastIndent);
moveOut -= lastIndent;
}
} else {
this.outdebt += move_out;
this.outdebt += moveOut;
}
if (!(no_newlines)) {
this.outdebt = move_out;
if (!(noNewlines)) {
this.outdebt = moveOut;
}
if (!(this.tag() === 'TERMINATOR' || no_newlines)) {
if (!(this.tag() === 'TERMINATOR' || noNewlines)) {
this.token('TERMINATOR', "\n");
}
return true;
};
// Matches and consumes non-meaningful whitespace. Tag the previous token
// as being "spaced", because there are some cases where it makes a difference.
Lexer.prototype.whitespace_token = function() {
Lexer.prototype.whitespaceToken = function() {
var prev, space;
if (!(space = this.match(WHITESPACE, 1))) {
return false;
@ -356,7 +356,7 @@
return true;
};
// Generate a newline token. Consecutive newlines get merged together.
Lexer.prototype.newline_token = function(newlines) {
Lexer.prototype.newlineToken = function(newlines) {
if (!(this.tag() === 'TERMINATOR')) {
this.token('TERMINATOR', "\n");
}
@ -364,7 +364,7 @@
};
// Use a `\` at a line-ending to suppress the newline.
// The slash is removed here once its job is done.
Lexer.prototype.suppress_newlines = function() {
Lexer.prototype.suppressNewlines = function() {
if (this.value() === "\\") {
this.tokens.pop();
}
@ -375,25 +375,25 @@
// the proper order of operations. There are some symbols that we tag specially
// here. `;` and newlines are both treated as a `TERMINATOR`, we distinguish
// parentheses that indicate a method call from regular parentheses, and so on.
Lexer.prototype.literal_token = function() {
var match, prev_spaced, space, tag, value;
Lexer.prototype.literalToken = function() {
var match, prevSpaced, space, tag, value;
match = this.chunk.match(OPERATOR);
value = match && match[1];
space = match && match[2];
if (value && value.match(CODE)) {
this.tag_parameters();
this.tagParameters();
}
value = value || this.chunk.substr(0, 1);
prev_spaced = this.prev() && this.prev().spaced;
prevSpaced = this.prev() && this.prev().spaced;
tag = value;
if (value.match(ASSIGNMENT)) {
tag = 'ASSIGN';
if (include(JS_FORBIDDEN, this.value)) {
this.assignment_error();
this.assignmentError();
}
} else if (value === ';') {
tag = 'TERMINATOR';
} else if (include(CALLABLE, this.tag()) && !prev_spaced) {
} else if (include(CALLABLE, this.tag()) && !prevSpaced) {
if (value === '(') {
tag = 'CALL_START';
} else if (value === '[') {
@ -407,8 +407,8 @@
}
}
this.i += value.length;
if (space && prev_spaced && this.prev()[0] === 'ASSIGN' && include(HALF_ASSIGNMENTS, tag)) {
return this.tag_half_assignment(tag);
if (space && prevSpaced && this.prev()[0] === 'ASSIGN' && include(HALF_ASSIGNMENTS, tag)) {
return this.tagHalfAssignment(tag);
}
this.token(tag, value);
return true;
@ -418,7 +418,7 @@
// As we consume a new `IDENTIFIER`, look at the previous token to determine
// if it's a special kind of accessor. Return `true` if any type of accessor
// is the previous token.
Lexer.prototype.tag_accessor = function() {
Lexer.prototype.tagAccessor = function() {
var prev;
if ((!(prev = this.prev())) || (prev && prev.spaced)) {
return false;
@ -438,7 +438,7 @@
};
// Sanitize a heredoc or herecomment by escaping internal double quotes and
// erasing all external indentation on the left-hand side.
Lexer.prototype.sanitize_heredoc = function(doc, options) {
Lexer.prototype.sanitizeHeredoc = function(doc, options) {
var _d, attempt, indent, match;
while (match = HEREDOC_INDENT.exec(doc)) {
attempt = (typeof (_d = match[2]) !== "undefined" && _d !== null) ? match[2] : match[3];
@ -453,7 +453,7 @@
return doc.replace(MULTILINER, "\\n").replace(new RegExp(options.quote, 'g'), ("\\" + options.quote));
};
// Tag a half assignment.
Lexer.prototype.tag_half_assignment = function(tag) {
Lexer.prototype.tagHalfAssignment = function(tag) {
var last;
last = this.tokens.pop();
this.tokens.push([("" + tag + "="), ("" + tag + "="), last[2]]);
@ -462,7 +462,7 @@
// A source of ambiguity in our grammar used to be parameter lists in function
// definitions versus argument lists in function calls. Walk backwards, tagging
// parameters specially in order to make things easier for the parser.
Lexer.prototype.tag_parameters = function() {
Lexer.prototype.tagParameters = function() {
var _d, i, tok;
if (this.tag() !== ')') {
return null;
@ -486,17 +486,17 @@
return true;
};
// Close up all remaining open blocks at the end of the file.
Lexer.prototype.close_indentation = function() {
return this.outdent_token(this.indent);
Lexer.prototype.closeIndentation = function() {
return this.outdentToken(this.indent);
};
// The error for when you try to use a forbidden word in JavaScript as
// an identifier.
Lexer.prototype.identifier_error = function(word) {
Lexer.prototype.identifierError = function(word) {
throw new Error(("SyntaxError: Reserved word \"" + word + "\" on line " + (this.line + 1)));
};
// The error for when you try to assign to a reserved word in JavaScript,
// like "function" or "default".
Lexer.prototype.assignment_error = function() {
Lexer.prototype.assignmentError = function() {
throw new Error(("SyntaxError: Reserved word \"" + (this.value()) + "\" on line " + (this.line + 1) + " can't be assigned"));
};
// Expand variables and expressions inside double-quoted strings using
@ -507,7 +507,7 @@
// If it encounters an interpolation, this method will recursively create a
// new Lexer, tokenize the interpolated contents, and merge them into the
// token stream.
Lexer.prototype.interpolate_string = function(str, escape_quotes) {
Lexer.prototype.interpolateString = function(str, escapeQuotes) {
var _d, _e, _f, _g, _h, _i, _j, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
if (str.length < 3 || !starts(str, '"')) {
return this.token('STRING', str);
@ -534,7 +534,7 @@
tokens.push(['IDENTIFIER', interp]);
i += group.length - 1;
pi = i + 1;
} else if ((expr = balanced_string(str.substring(i), [['${', '}']]))) {
} else if ((expr = balancedString(str.substring(i), [['${', '}']]))) {
if (pi < i) {
tokens.push(['STRING', ("" + quote + (str.substring(pi, i)) + quote)]);
}
@ -576,7 +576,7 @@
value = _j[1];
if (tag === 'TOKENS') {
this.tokens = this.tokens.concat(value);
} else if (tag === 'STRING' && escape_quotes) {
} else if (tag === 'STRING' && escapeQuotes) {
escaped = value.substring(1, value.length - 1).replace(/"/g, '\\"');
this.token(tag, ("\"" + escaped + "\""));
} else {
@ -599,13 +599,13 @@
return this.tokens.push([tag, value, this.line]);
};
// Peek at a tag in the current token stream.
Lexer.prototype.tag = function(index, new_tag) {
Lexer.prototype.tag = function(index, newTag) {
var tok;
if (!(tok = this.prev(index))) {
return null;
}
if ((typeof new_tag !== "undefined" && new_tag !== null)) {
tok[0] = new_tag;
if ((typeof newTag !== "undefined" && newTag !== null)) {
tok[0] = newTag;
return tok[0];
}
return tok[0];

File diff suppressed because it is too large Load diff

View file

@ -1,13 +1,13 @@
(function(){
var LONG_FLAG, MULTI_FLAG, OPTIONAL, OptionParser, SHORT_FLAG, build_rule, build_rules, normalize_arguments;
var LONG_FLAG, MULTI_FLAG, OPTIONAL, OptionParser, SHORT_FLAG, buildRule, buildRules, normalizeArguments;
// A simple **OptionParser** class to parse option flags from the command-line.
// Use it like so:
// parser: new OptionParser switches, help_banner
// parser: new OptionParser switches, helpBanner
// options: parser.parse process.argv
exports.OptionParser = (function() {
OptionParser = function(rules, banner) {
this.banner = banner;
this.rules = build_rules(rules);
this.rules = buildRules(rules);
return this;
};
// Initialize with a list of valid options, in the form:
@ -19,27 +19,27 @@
// many option parsers that allow you to attach callback actions for every
// flag. Instead, you're responsible for interpreting the options object.
OptionParser.prototype.parse = function(args) {
var _a, _b, _c, arg, is_option, matched_rule, options, rule;
var _a, _b, _c, arg, isOption, matchedRule, options, rule;
options = {
arguments: []
};
args = normalize_arguments(args);
args = normalizeArguments(args);
while ((arg = args.shift())) {
is_option = !!(arg.match(LONG_FLAG) || arg.match(SHORT_FLAG));
matched_rule = false;
isOption = !!(arg.match(LONG_FLAG) || arg.match(SHORT_FLAG));
matchedRule = false;
_b = this.rules;
for (_a = 0, _c = _b.length; _a < _c; _a++) {
rule = _b[_a];
if (rule.short_flag === arg || rule.long_flag === arg) {
options[rule.name] = rule.has_argument ? args.shift() : true;
matched_rule = true;
if (rule.shortFlag === arg || rule.longFlag === arg) {
options[rule.name] = rule.hasArgument ? args.shift() : true;
matchedRule = true;
break;
}
}
if (is_option && !matched_rule) {
if (isOption && !matchedRule) {
throw new Error(("unrecognized option: " + arg));
}
if (!(is_option)) {
if (!(isOption)) {
options.arguments.push(arg);
}
}
@ -48,7 +48,7 @@
// Return the help text for this **OptionParser**, listing and describing all
// of the valid options, for `--help` and such.
OptionParser.prototype.help = function() {
var _a, _b, _c, _d, i, let_part, lines, rule, spaces;
var _a, _b, _c, _d, i, letPart, lines, rule, spaces;
lines = ['Available options:'];
if (this.banner) {
lines.unshift(("" + this.banner + "\n"));
@ -56,7 +56,7 @@
_b = this.rules;
for (_a = 0, _c = _b.length; _a < _c; _a++) {
rule = _b[_a];
spaces = 15 - rule.long_flag.length;
spaces = 15 - rule.longFlag.length;
spaces = spaces > 0 ? (function() {
_d = [];
for (i = 0; i <= spaces; i += 1) {
@ -64,8 +64,8 @@
}
return _d;
})().join('') : '';
let_part = rule.short_flag ? rule.short_flag + ', ' : ' ';
lines.push((" " + let_part + rule.long_flag + spaces + rule.description));
letPart = rule.shortFlag ? rule.shortFlag + ', ' : ' ';
lines.push((" " + letPart + rule.longFlag + spaces + rule.description));
}
return "\n" + (lines.join('\n')) + "\n";
};
@ -80,7 +80,7 @@
OPTIONAL = /\[(.+)\]/;
// Build and return the list of option rules. If the optional *short-flag* is
// unspecified, leave it out by padding with `null`.
build_rules = function(rules) {
buildRules = function(rules) {
var _a, _b, _c, _d, tuple;
_a = []; _c = rules;
for (_b = 0, _d = _c.length; _b < _d; _b++) {
@ -89,28 +89,28 @@
if (tuple.length < 3) {
tuple.unshift(null);
}
return build_rule.apply(this, tuple);
return buildRule.apply(this, tuple);
})());
}
return _a;
};
// Build a rule from a `-o` short flag, a `--output [DIR]` long flag, and the
// description of what the option does.
build_rule = function(short_flag, long_flag, description) {
buildRule = function(shortFlag, longFlag, description) {
var match;
match = long_flag.match(OPTIONAL);
long_flag = long_flag.match(LONG_FLAG)[1];
match = longFlag.match(OPTIONAL);
longFlag = longFlag.match(LONG_FLAG)[1];
return {
name: long_flag.substr(2),
short_flag: short_flag,
long_flag: long_flag,
name: longFlag.substr(2),
shortFlag: shortFlag,
longFlag: longFlag,
description: description,
has_argument: !!(match && match[1])
hasArgument: !!(match && match[1])
};
};
// Normalize arguments by expanding merged flags into multiple flags. This allows
// you to have `-wl` be the same as `--watch --lint`.
normalize_arguments = function(args) {
normalizeArguments = function(args) {
var _a, _b, _c, _d, _e, _f, arg, l, match, result;
args = args.slice(0);
result = [];

View file

@ -184,7 +184,7 @@ break;
case 87:this.$ = new IndexNode($$[$0-3+2-1]);
break;
case 88:this.$ = (function () {
$$[$0-2+2-1].soak_node = true;
$$[$0-2+2-1].soakNode = true;
return $$[$0-2+2-1];
}());
break;
@ -225,7 +225,7 @@ case 104:this.$ = $$[$0-3+1-1].concat($$[$0-3+3-1]);
break;
case 105:this.$ = $$[$0-1+1-1];
break;
case 106:this.$ = $$[$0-2+2-1].new_instance();
case 106:this.$ = $$[$0-2+2-1].newInstance();
break;
case 107:this.$ = $$[$0-1+1-1];
break;
@ -304,17 +304,17 @@ case 138:this.$ = new WhileNode($$[$0-4+2-1], {
guard: $$[$0-4+4-1]
});
break;
case 139:this.$ = $$[$0-2+1-1].add_body($$[$0-2+2-1]);
case 139:this.$ = $$[$0-2+1-1].addBody($$[$0-2+2-1]);
break;
case 140:this.$ = $$[$0-2+2-1].add_body(Expressions.wrap([$$[$0-2+1-1]]));
case 140:this.$ = $$[$0-2+2-1].addBody(Expressions.wrap([$$[$0-2+1-1]]));
break;
case 141:this.$ = $$[$0-2+2-1].add_body(Expressions.wrap([$$[$0-2+1-1]]));
case 141:this.$ = $$[$0-2+2-1].addBody(Expressions.wrap([$$[$0-2+1-1]]));
break;
case 142:this.$ = $$[$0-1+1-1];
break;
case 143:this.$ = new WhileNode(new LiteralNode('true')).add_body($$[$0-2+2-1]);
case 143:this.$ = new WhileNode(new LiteralNode('true')).addBody($$[$0-2+2-1]);
break;
case 144:this.$ = new WhileNode(new LiteralNode('true')).add_body(Expressions.wrap([$$[$0-2+2-1]]));
case 144:this.$ = new WhileNode(new LiteralNode('true')).addBody(Expressions.wrap([$$[$0-2+2-1]]));
break;
case 145:this.$ = new ForNode($$[$0-4+1-1], $$[$0-4+4-1], $$[$0-4+3-1][0], $$[$0-4+3-1][1]);
break;
@ -369,17 +369,17 @@ case 159:this.$ = {
guard: $$[$0-6+6-1]
};
break;
case 160:this.$ = $$[$0-5+4-1].switches_over($$[$0-5+2-1]);
case 160:this.$ = $$[$0-5+4-1].switchesOver($$[$0-5+2-1]);
break;
case 161:this.$ = $$[$0-7+4-1].switches_over($$[$0-7+2-1]).add_else($$[$0-7+6-1], true);
case 161:this.$ = $$[$0-7+4-1].switchesOver($$[$0-7+2-1]).addElse($$[$0-7+6-1], true);
break;
case 162:this.$ = $$[$0-4+3-1];
break;
case 163:this.$ = $$[$0-6+3-1].add_else($$[$0-6+5-1], true);
case 163:this.$ = $$[$0-6+3-1].addElse($$[$0-6+5-1], true);
break;
case 164:this.$ = $$[$0-1+1-1];
break;
case 165:this.$ = $$[$0-2+1-1].add_else($$[$0-2+2-1]);
case 165:this.$ = $$[$0-2+1-1].addElse($$[$0-2+2-1]);
break;
case 166:this.$ = new IfNode($$[$0-3+2-1], $$[$0-3+3-1], {
statement: true
@ -400,11 +400,11 @@ case 170:this.$ = new IfNode($$[$0-3+2-1], $$[$0-3+3-1], {
invert: true
});
break;
case 171:this.$ = $$[$0-5+1-1].add_else((new IfNode($$[$0-5+4-1], $$[$0-5+5-1])).force_statement());
case 171:this.$ = $$[$0-5+1-1].addElse((new IfNode($$[$0-5+4-1], $$[$0-5+5-1])).forceStatement());
break;
case 172:this.$ = $$[$0-1+1-1];
break;
case 173:this.$ = $$[$0-3+1-1].add_else($$[$0-3+3-1]);
case 173:this.$ = $$[$0-3+1-1].addElse($$[$0-3+3-1]);
break;
case 174:this.$ = $$[$0-1+1-1];
break;

View file

@ -23,7 +23,7 @@
var val;
try {
val = CoffeeScript.run(buffer.toString(), {
no_wrap: true,
noWrap: true,
globals: true,
source: 'repl'
});

View file

@ -29,14 +29,14 @@
// corrected before implicit parentheses can be wrapped around blocks of code.
Rewriter.prototype.rewrite = function(tokens) {
this.tokens = tokens;
this.adjust_comments();
this.remove_leading_newlines();
this.remove_mid_expression_newlines();
this.close_open_calls_and_indexes();
this.add_implicit_indentation();
this.add_implicit_parentheses();
this.ensure_balance(BALANCED_PAIRS);
this.rewrite_closing_parens();
this.adjustComments();
this.removeLeadingNewlines();
this.removeMidExpressionNewlines();
this.closeOpenCallsAndIndexes();
this.addImplicitIndentation();
this.addImplicitParentheses();
this.ensureBalance(BALANCED_PAIRS);
this.rewriteClosingParens();
return this.tokens;
};
// Rewrite the token stream, looking one token ahead and behind.
@ -44,7 +44,7 @@
// forwards (or backwards) in the stream, to make sure we don't miss anything
// as tokens are inserted and removed, and the stream changes length under
// our feet.
Rewriter.prototype.scan_tokens = function(block) {
Rewriter.prototype.scanTokens = function(block) {
var i, move;
i = 0;
while (true) {
@ -58,8 +58,8 @@
};
// Massage newlines and indentations so that comments don't have to be
// correctly indented, or appear on a line of their own.
Rewriter.prototype.adjust_comments = function() {
return this.scan_tokens((function(__this) {
Rewriter.prototype.adjustComments = function() {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var _c, after, before;
if (!(include(COMMENTS, token[0]))) {
@ -86,7 +86,7 @@
};
// Leading newlines would introduce an ambiguity in the grammar, so we
// dispatch them here.
Rewriter.prototype.remove_leading_newlines = function() {
Rewriter.prototype.removeLeadingNewlines = function() {
var _c;
_c = [];
while (this.tokens[0] && this.tokens[0][0] === 'TERMINATOR') {
@ -96,8 +96,8 @@
};
// Some blocks occur in the middle of expressions -- when we're expecting
// this, remove their trailing newlines.
Rewriter.prototype.remove_mid_expression_newlines = function() {
return this.scan_tokens((function(__this) {
Rewriter.prototype.removeMidExpressionNewlines = function() {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
if (!(post && include(EXPRESSION_CLOSE, post[0]) && token[0] === 'TERMINATOR')) {
return 1;
@ -113,11 +113,11 @@
// The lexer has tagged the opening parenthesis of a method call, and the
// opening bracket of an indexing operation. Match them with their paired
// close.
Rewriter.prototype.close_open_calls_and_indexes = function() {
Rewriter.prototype.closeOpenCallsAndIndexes = function() {
var brackets, parens;
parens = [0];
brackets = [0];
return this.scan_tokens((function(__this) {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var _c;
if ((_c = token[0]) === 'CALL_START') {
@ -153,10 +153,10 @@
// Methods may be optionally called without parentheses, for simple cases.
// Insert the implicit parentheses here, so that the parser doesn't have to
// deal with them.
Rewriter.prototype.add_implicit_parentheses = function() {
var close_calls, stack;
Rewriter.prototype.addImplicitParentheses = function() {
var closeCalls, stack;
stack = [0];
close_calls = (function(__this) {
closeCalls = (function(__this) {
var __func = function(i) {
var _c, size, tmp;
(_c = stack[stack.length - 1]);
@ -172,7 +172,7 @@
return __func.apply(__this, arguments);
});
})(this);
return this.scan_tokens((function(__this) {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var _c, j, nx, open, size, tag;
tag = token[0];
@ -190,7 +190,7 @@
}
if (include(EXPRESSION_START, tag)) {
if (tag === 'INDENT' && !token.generated && open && !(prev && include(IMPLICIT_BLOCK, prev[0]))) {
size = close_calls(i);
size = closeCalls(i);
stack.push(0);
return size;
}
@ -207,7 +207,7 @@
this.tokens.splice(i, 1);
}
} else {
size = close_calls(i);
size = closeCalls(i);
if (tag !== 'OUTDENT' && include(EXPRESSION_END, tag)) {
stack.pop();
}
@ -229,8 +229,8 @@
// expressions that lack ending delimiters. The **Rewriter** adds the implicit
// blocks, so it doesn't need to. ')' can close a single-line block,
// but we need to make sure it's balanced.
Rewriter.prototype.add_implicit_indentation = function() {
return this.scan_tokens((function(__this) {
Rewriter.prototype.addImplicitIndentation = function() {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var idx, indent, insertion, outdent, parens, pre, starter, tok;
if (!(include(SINGLE_LINERS, token[0]) && post[0] !== 'INDENT' && !(token[0] === 'ELSE' && post[0] === 'IF'))) {
@ -273,11 +273,11 @@
};
// Ensure that all listed pairs of tokens are correctly balanced throughout
// the course of the token stream.
Rewriter.prototype.ensure_balance = function(pairs) {
var _c, _d, key, levels, line, open, open_line, unclosed, value;
Rewriter.prototype.ensureBalance = function(pairs) {
var _c, _d, key, levels, line, open, openLine, unclosed, value;
levels = {};
open_line = {};
this.scan_tokens((function(__this) {
openLine = {};
this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var _c, _d, _e, _f, close, open, pair;
_d = pairs;
@ -289,7 +289,7 @@
levels[open] = levels[open] || 0;
if (token[0] === open) {
if (levels[open] === 0) {
open_line[open] = token[2];
openLine[open] = token[2];
}
levels[open] += 1;
}
@ -316,7 +316,7 @@
})();
if (unclosed.length) {
open = unclosed[0];
line = open_line[open] + 1;
line = openLine[open] + 1;
throw new Error(("unclosed " + open + " on line " + line));
}
};
@ -333,7 +333,7 @@
// up balanced in the end.
// 4. Be careful not to alter array or parentheses delimiters with overzealous
// rewriting.
Rewriter.prototype.rewrite_closing_parens = function() {
Rewriter.prototype.rewriteClosingParens = function() {
var _c, debt, key, stack, val;
stack = [];
debt = {};
@ -342,7 +342,7 @@
val = _c[key];
(debt[key] = 0);
}}
return this.scan_tokens((function(__this) {
return this.scanTokens((function(__this) {
var __func = function(prev, token, post, i) {
var inv, match, mtag, oppos, tag;
tag = token[0];

View file

@ -20,10 +20,10 @@
this.method = _a[2];
this.variables = {};
if (this.parent) {
this.temp_var = this.parent.temp_var;
this.tempVar = this.parent.tempVar;
} else {
Scope.root = this;
this.temp_var = '_a';
this.tempVar = '_a';
}
return this;
};
@ -69,14 +69,14 @@
};
// If we need to store an intermediate result, find an available name for a
// compiler-generated variable. `_a`, `_b`, and so on...
Scope.prototype.free_variable = function() {
Scope.prototype.freeVariable = function() {
var ordinal;
while (this.check(this.temp_var)) {
ordinal = 1 + parseInt(this.temp_var.substr(1), 36);
this.temp_var = '_' + ordinal.toString(36).replace(/\d/g, 'a');
while (this.check(this.tempVar)) {
ordinal = 1 + parseInt(this.tempVar.substr(1), 36);
this.tempVar = '_' + ordinal.toString(36).replace(/\d/g, 'a');
}
this.variables[this.temp_var] = 'var';
return this.temp_var;
this.variables[this.tempVar] = 'var';
return this.tempVar;
};
// Ensure that an assignment is made at the top of this scope
// (or at the top-level scope, if requested).
@ -89,20 +89,20 @@
};
// Does this scope reference any variables that need to be declared in the
// given function body?
Scope.prototype.has_declarations = function(body) {
Scope.prototype.hasDeclarations = function(body) {
return body === this.expressions && this.any(function(k, val) {
return val === 'var';
});
};
// Does this scope reference any assignments that need to be declared at the
// top of the given function body?
Scope.prototype.has_assignments = function(body) {
Scope.prototype.hasAssignments = function(body) {
return body === this.expressions && this.any(function(k, val) {
return val.assigned;
});
};
// Return the list of variables first declared in this scope.
Scope.prototype.declared_variables = function() {
Scope.prototype.declaredVariables = function() {
var _a, _b, key, val;
return (function() {
_a = []; _b = this.variables;
@ -115,7 +115,7 @@
};
// Return the list of assignments that are supposed to be made at the top
// of this scope.
Scope.prototype.assigned_variables = function() {
Scope.prototype.assignedVariables = function() {
var _a, _b, key, val;
_a = []; _b = this.variables;
for (key in _b) { if (__hasProp.call(_b, key)) {
@ -125,12 +125,12 @@
return _a;
};
// Compile the JavaScript for all of the variable declarations in this scope.
Scope.prototype.compiled_declarations = function() {
return this.declared_variables().join(', ');
Scope.prototype.compiledDeclarations = function() {
return this.declaredVariables().join(', ');
};
// Compile the JavaScript for all of the variable assignments in this scope.
Scope.prototype.compiled_assignments = function() {
return this.assigned_variables().join(', ');
Scope.prototype.compiledAssignments = function() {
return this.assignedVariables().join(', ');
};
return Scope;
}).call(this);

View file

@ -36,7 +36,7 @@ helpers.extend global, {
# Invoke another task in the current Cakefile.
invoke: (name) ->
no_such_task name unless tasks[name]
missingTask name unless tasks[name]
tasks[name].action options
}
@ -50,12 +50,12 @@ exports.run: ->
args: process.argv[2...process.argv.length]
CoffeeScript.run fs.readFileSync('Cakefile').toString(), {source: 'Cakefile'}
oparse: new optparse.OptionParser switches
return print_tasks() unless args.length
return printTasks() unless args.length
options: oparse.parse(args)
invoke arg for arg in options.arguments
# Display the list of Cake tasks in a format similar to `rake -T`
print_tasks: ->
printTasks: ->
puts ''
for name, task of tasks
spaces: 20 - name.length
@ -65,6 +65,6 @@ print_tasks: ->
puts oparse.help() if switches.length
# Print an error and exit when attempting to all an undefined task.
no_such_task: (task) ->
missingTask: (task) ->
puts "No such task: \"$task\""
process.exit 1

View file

@ -83,10 +83,10 @@ parser.lexer: {
# on page load. Unfortunately, the text contents of remote scripts cannot be
# accessed from the browser, so only inline script tags will work.
if document? and document.getElementsByTagName
process_scripts: ->
processScripts: ->
for tag in document.getElementsByTagName('script') when tag.type is 'text/coffeescript'
eval exports.compile tag.innerHTML
if window.addEventListener
window.addEventListener 'load', process_scripts, false
window.addEventListener 'load', processScripts, false
else if window.attachEvent
window.attachEvent 'onload', process_scripts
window.attachEvent 'onload', processScripts

View file

@ -39,18 +39,18 @@ SWITCHES: [
# Top-level objects shared by all the functions.
options: {}
sources: []
option_parser: null
optionParser: null
# Run `coffee` by parsing passed options and determining what action to take.
# Many flags cause us to divert before compiling anything. Flags passed after
# `--` will be passed verbatim to your script as arguments in `process.argv`
exports.run: ->
parse_options()
parseOptions()
return usage() if options.help
return version() if options.version
return require './repl' if options.interactive
return compile_stdio() if options.stdio
return compile_script 'console', sources[0] if options.eval
return compileStdio() if options.stdio
return compileScript 'console', sources[0] if options.eval
return require './repl' unless sources.length
separator: sources.indexOf '--'
flags: []
@ -58,15 +58,15 @@ exports.run: ->
flags: sources[(separator + 1)...sources.length]
sources: sources[0...separator]
process.ARGV: process.argv: flags
compile_scripts()
compileScripts()
# Asynchronously read in each CoffeeScript in a list of source files and
# compile them. If a directory is passed, recursively compile all
# '.coffee' extension source files in it and all subdirectories.
compile_scripts: ->
compileScripts: ->
for source in sources
base: source
compile: (source, top_level) ->
compile: (source, topLevel) ->
path.exists source, (exists) ->
throw new Error "File not found: $source" unless exists
fs.stat source, (err, stats) ->
@ -74,25 +74,25 @@ compile_scripts: ->
fs.readdir source, (err, files) ->
for file in files
compile path.join(source, file)
else if top_level or path.extname(source) is '.coffee'
fs.readFile source, (err, code) -> compile_script(source, code.toString(), base)
else if topLevel or path.extname(source) is '.coffee'
fs.readFile source, (err, code) -> compileScript(source, code.toString(), base)
watch source, base if options.watch
compile source, true
# Compile a single source script, containing the given code, according to the
# requested options. If evaluating the script directly sets `__filename`,
# `__dirname` and `module.filename` to be correct relative to the script's path.
compile_script: (source, code, base) ->
compileScript: (source, code, base) ->
o: options
code_opts: compile_options source
codeOpts: compileOptions source
try
if o.tokens then print_tokens CoffeeScript.tokens code
if o.tokens then printTokens CoffeeScript.tokens code
else if o.nodes then puts CoffeeScript.nodes(code).toString()
else if o.run then CoffeeScript.run code, code_opts
else if o.run then CoffeeScript.run code, codeOpts
else
js: CoffeeScript.compile code, code_opts
js: CoffeeScript.compile code, codeOpts
if o.print then print js
else if o.compile then write_js source, js, base
else if o.compile then writeJs source, js, base
else if o.lint then lint js
catch err
error(err.stack) and process.exit 1 unless o.watch
@ -100,13 +100,13 @@ compile_script: (source, code, base) ->
# Attach the appropriate listeners to compile scripts incoming over **stdin**,
# and write them back to **stdout**.
compile_stdio: ->
compileStdio: ->
code: ''
stdin: process.openStdin()
stdin.addListener 'data', (buffer) ->
code: + buffer.toString() if buffer
stdin.addListener 'end', ->
compile_script 'stdio', code
compileScript 'stdio', code
# Watch a source CoffeeScript file using `fs.watchFile`, recompiling it every
# time the file is updated. May be used in combination with other options,
@ -115,33 +115,33 @@ watch: (source, base) ->
fs.watchFile source, {persistent: true, interval: 500}, (curr, prev) ->
return if curr.mtime.getTime() is prev.mtime.getTime()
puts "Compiled $source" if options.compile
fs.readFile source, (err, code) -> compile_script(source, code.toString(), base)
fs.readFile source, (err, code) -> compileScript(source, code.toString(), base)
# Write out a JavaScript source file with the compiled code. By default, files
# are written out in `cwd` as `.js` files with the same name, but the output
# directory can be customized with `--output`.
write_js: (source, js, base) ->
writeJs: (source, js, base) ->
filename: path.basename(source, path.extname(source)) + '.js'
src_dir: path.dirname source
base_dir: src_dir.substring base.length
dir: if options.output then path.join options.output, base_dir else src_dir
js_path: path.join dir, filename
compile: -> fs.writeFile js_path, js
srcDir: path.dirname source
baseDir: srcDir.substring base.length
dir: if options.output then path.join options.output, baseDir else srcDir
jsPath: path.join dir, filename
compile: -> fs.writeFile jsPath, js
path.exists dir, (exists) ->
if exists then compile() else exec "mkdir -p $dir", compile
# Pipe compiled JS through JSLint (requires a working `jsl` command), printing
# any errors or warnings that arise.
lint: (js) ->
print_it: (buffer) -> print buffer.toString()
printIt: (buffer) -> print buffer.toString()
jsl: spawn 'jsl', ['-nologo', '-stdin']
jsl.stdout.addListener 'data', print_it
jsl.stderr.addListener 'data', print_it
jsl.stdout.addListener 'data', printIt
jsl.stderr.addListener 'data', printIt
jsl.stdin.write js
jsl.stdin.end()
# Pretty-print a stream of tokens.
print_tokens: (tokens) ->
printTokens: (tokens) ->
strings: for token in tokens
[tag, value]: [token[0], token[1].toString().replace(/\n/, '\\n')]
"[$tag $value]"
@ -149,22 +149,22 @@ print_tokens: (tokens) ->
# Use the [OptionParser module](optparse.html) to extract all options from
# `process.argv` that are specified in `SWITCHES`.
parse_options: ->
option_parser: new optparse.OptionParser SWITCHES, BANNER
o: options: option_parser.parse(process.argv)
parseOptions: ->
optionParser: new optparse.OptionParser SWITCHES, BANNER
o: options: optionParser.parse(process.argv)
options.run: not (o.compile or o.print or o.lint)
options.print: !! (o.print or (o.eval or o.stdio and o.compile))
sources: options.arguments[2...options.arguments.length]
# The compile-time options to pass to the CoffeeScript compiler.
compile_options: (source) ->
compileOptions: (source) ->
o: {source: source}
o['no_wrap']: options['no-wrap']
o
# Print the `--help` usage message and exit.
usage: ->
puts option_parser.help()
puts optionParser.help()
process.exit 0
# Print the `--version` message and exit.

View file

@ -30,10 +30,10 @@ unwrap: /function\s*\(\)\s*\{\s*return\s*([\s\S]*);\s*\}/
# we pass the pattern-defining string, the action to run, and extra options,
# optionally. If no action is specified, we simply pass the value of the
# previous nonterminal.
o: (pattern_string, action, options) ->
return [pattern_string, '$$ = $1;', options] unless action
o: (patternString, action, options) ->
return [patternString, '$$ = $1;', options] unless action
action: if match: (action + '').match(unwrap) then match[1] else "($action())"
[pattern_string, "$$ = $action;", options]
[patternString, "$$ = $action;", options]
# Grammatical Rules
# -----------------
@ -252,7 +252,7 @@ grammar: {
# Indexing into an object or array using bracket notation.
Index: [
o "INDEX_START Expression INDEX_END", -> new IndexNode $2
o "INDEX_SOAK Index", -> $2.soak_node: yes; $2
o "INDEX_SOAK Index", -> $2.soakNode: yes; $2
o "INDEX_PROTO Index", -> $2.proto: yes; $2
]
@ -297,7 +297,7 @@ grammar: {
# and calling `super()`
Call: [
o "Invocation"
o "NEW Invocation", -> $2.new_instance()
o "NEW Invocation", -> $2.newInstance()
o "Super"
]
@ -413,15 +413,15 @@ grammar: {
# The while loop can either be normal, with a block of expressions to execute,
# or postfix, with a single expression. There is no do..while.
While: [
o "WhileSource Block", -> $1.add_body $2
o "Statement WhileSource", -> $2.add_body Expressions.wrap [$1]
o "Expression WhileSource", -> $2.add_body Expressions.wrap [$1]
o "WhileSource Block", -> $1.addBody $2
o "Statement WhileSource", -> $2.addBody Expressions.wrap [$1]
o "Expression WhileSource", -> $2.addBody Expressions.wrap [$1]
o "Loop", -> $1
]
Loop: [
o "LOOP Block", -> new WhileNode(new LiteralNode 'true').add_body $2
o "LOOP Expression", -> new WhileNode(new LiteralNode 'true').add_body Expressions.wrap [$2]
o "LOOP Block", -> new WhileNode(new LiteralNode 'true').addBody $2
o "LOOP Expression", -> new WhileNode(new LiteralNode 'true').addBody Expressions.wrap [$2]
]
# Array, object, and range comprehensions, at the most generic level.
@ -465,17 +465,17 @@ grammar: {
# The CoffeeScript switch/when/else block replaces the JavaScript
# switch/case/default by compiling into an if-else chain.
Switch: [
o "SWITCH Expression INDENT Whens OUTDENT", -> $4.switches_over $2
o "SWITCH Expression INDENT Whens ELSE Block OUTDENT", -> $4.switches_over($2).add_else $6, true
o "SWITCH Expression INDENT Whens OUTDENT", -> $4.switchesOver $2
o "SWITCH Expression INDENT Whens ELSE Block OUTDENT", -> $4.switchesOver($2).addElse $6, true
o "SWITCH INDENT Whens OUTDENT", -> $3
o "SWITCH INDENT Whens ELSE Block OUTDENT", -> $3.add_else $5, true
o "SWITCH INDENT Whens ELSE Block OUTDENT", -> $3.addElse $5, true
]
# The inner list of whens is left recursive. At code-generation time, the
# IfNode will rewrite them into a proper chain.
Whens: [
o "When"
o "Whens When", -> $1.add_else $2
o "Whens When", -> $1.addElse $2
]
# An individual **When** clause, with action.
@ -491,13 +491,13 @@ grammar: {
IfStart: [
o "IF Expression Block", -> new IfNode $2, $3
o "UNLESS Expression Block", -> new IfNode $2, $3, {invert: true}
o "IfStart ELSE IF Expression Block", -> $1.add_else (new IfNode($4, $5)).force_statement()
o "IfStart ELSE IF Expression Block", -> $1.addElse (new IfNode($4, $5)).forceStatement()
]
# An **IfStart** can optionally be followed by an else block.
IfBlock: [
o "IfStart"
o "IfStart ELSE Block", -> $1.add_else $3
o "IfStart ELSE Block", -> $1.addElse $3
]
# The full complement of *if* expressions, including postfix one-liner

View file

@ -7,7 +7,7 @@ this.exports: this unless process?
helpers: exports.helpers: {}
# Cross-browser indexOf, so that IE can join the party.
helpers.index_of: index_of: (array, item, from) ->
helpers.indexOf: indexOf: (array, item, from) ->
return array.indexOf item, from if array.indexOf
for other, index in array
if other is item and (not from or (from <= index))
@ -16,7 +16,7 @@ helpers.index_of: index_of: (array, item, from) ->
# Does a list include a value?
helpers.include: include: (list, value) ->
index_of(list, value) >= 0
indexOf(list, value) >= 0
# Peek at the beginning of a given string to see if it matches a sequence.
helpers.starts: starts: (string, literal, start) ->
@ -28,10 +28,10 @@ helpers.compact: compact: (array) -> item for item in array when item
# Count the number of occurrences of a character in a string.
helpers.count: count: (string, letter) ->
num: 0
pos: index_of string, letter
pos: indexOf string, letter
while pos isnt -1
num: + 1
pos: index_of string, letter, pos + 1
pos: indexOf string, letter, pos + 1
num
# Merge objects, returning a fresh copy with attributes from both sides.
@ -67,7 +67,7 @@ helpers.del: del: (obj, key) ->
# a series of delimiters, all of which must be nested correctly within the
# contents of the string. This method allows us to have strings within
# interpolations within strings, ad infinitum.
helpers.balanced_string: balanced_string: (str, delimited, options) ->
helpers.balancedString: balancedString: (str, delimited, options) ->
options: or {}
slash: delimited[0][0] is '/'
levels: []

View file

@ -3,7 +3,7 @@
# a token is produced, we consume the match, and start again. Tokens are in the
# form:
#
# [tag, value, line_number]
# [tag, value, lineNumber]
#
# Which is a format that can be fed directly into [Jison](http://github.com/zaach/jison).
@ -17,7 +17,7 @@ else
helpers: this.helpers
# Import the helpers we need.
{include, count, starts, compact, balanced_string}: helpers
{include, count, starts, compact, balancedString}: helpers
# The Lexer Class
# ---------------
@ -51,33 +51,33 @@ exports.Lexer: class Lexer
@tokens : [] # Stream of parsed tokens in the form ['TYPE', value, line]
while @i < @code.length
@chunk: @code.slice @i
@extract_next_token()
@close_indentation()
@extractNextToken()
@closeIndentation()
return @tokens if o.rewrite is off
(new Rewriter()).rewrite @tokens
# At every position, run through this list of attempted matches,
# short-circuiting if any of them succeed. Their order determines precedence:
# `@literal_token` is the fallback catch-all.
extract_next_token: ->
return if @extension_token()
return if @identifier_token()
return if @number_token()
return if @heredoc_token()
return if @regex_token()
return if @comment_token()
return if @line_token()
return if @whitespace_token()
return if @js_token()
return if @string_token()
return @literal_token()
# `@literalToken` is the fallback catch-all.
extractNextToken: ->
return if @extensionToken()
return if @identifierToken()
return if @numberToken()
return if @heredocToken()
return if @regexToken()
return if @commentToken()
return if @lineToken()
return if @whitespaceToken()
return if @jsToken()
return if @stringToken()
return @literalToken()
# Tokenizers
# ----------
# Language extensions get the highest priority, first chance to tag tokens
# as something else.
extension_token: ->
extensionToken: ->
for extension in Lexer.extensions
return true if extension.call this
false
@ -88,23 +88,23 @@ exports.Lexer: class Lexer
# allowed in JavaScript, we're careful not to tag them as keywords when
# referenced as property names here, so you can still do `jQuery.is()` even
# though `is` means `===` otherwise.
identifier_token: ->
identifierToken: ->
return false unless id: @match IDENTIFIER, 1
forced_identifier: @tag_accessor() or @match ASSIGNED, 1
forcedIdentifier: @tagAccessor() or @match ASSIGNED, 1
tag: 'IDENTIFIER'
tag: id.toUpperCase() if include(JS_KEYWORDS, id) or (not forced_identifier and include(COFFEE_KEYWORDS, id))
@identifier_error id if include RESERVED, id
tag: id.toUpperCase() if include(JS_KEYWORDS, id) or (not forcedIdentifier and include(COFFEE_KEYWORDS, id))
@identifierError id if include RESERVED, id
tag: 'LEADING_WHEN' if tag is 'WHEN' and include LINE_BREAK, @tag()
@i: + id.length
unless forced_identifier
unless forcedIdentifier
tag: id: CONVERSIONS[id] if include COFFEE_ALIASES, id
return @tag_half_assignment tag if @prev() and @prev()[0] is 'ASSIGN' and include HALF_ASSIGNMENTS, tag
return @tagHalfAssignment tag if @prev() and @prev()[0] is 'ASSIGN' and include HALF_ASSIGNMENTS, tag
@token tag, id
true
# Matches numbers, including decimals, hex, and exponential notation.
# Be careful not to interfere with ranges-in-progress.
number_token: ->
numberToken: ->
return false unless number: @match NUMBER, 1
return false if @tag() is '.' and starts number, '.'
@i: + number.length
@ -113,33 +113,33 @@ exports.Lexer: class Lexer
# Matches strings, including multi-line strings. Ensures that quotation marks
# are balanced within the string's contents, and within nested interpolations.
string_token: ->
stringToken: ->
return false unless starts(@chunk, '"') or starts(@chunk, "'")
return false unless string:
@balanced_token(['"', '"'], ['${', '}']) or
@balanced_token ["'", "'"]
@interpolate_string string.replace STRING_NEWLINES, " \\\n"
@balancedToken(['"', '"'], ['${', '}']) or
@balancedToken ["'", "'"]
@interpolateString string.replace STRING_NEWLINES, " \\\n"
@line: + count string, "\n"
@i: + string.length
true
# Matches heredocs, adjusting indentation to the correct level, as heredocs
# preserve whitespace, but ignore indentation to the left.
heredoc_token: ->
heredocToken: ->
return false unless match: @chunk.match(HEREDOC)
quote: match[1].substr 0, 1
doc: @sanitize_heredoc match[2] or match[4], {quote}
@interpolate_string "$quote$doc$quote"
doc: @sanitizeHeredoc match[2] or match[4], {quote}
@interpolateString "$quote$doc$quote"
@line: + count match[1], "\n"
@i: + match[1].length
true
# Matches and consumes comments. We pass through comments into JavaScript,
# so they're treated as real tokens, like any other part of the language.
comment_token: ->
commentToken: ->
return false unless match: @chunk.match(COMMENT)
if match[3]
comment: @sanitize_heredoc match[3], {herecomment: true}
comment: @sanitizeHeredoc match[3], {herecomment: true}
@token 'HERECOMMENT', comment.split MULTILINER
@token 'TERMINATOR', '\n'
else
@ -153,28 +153,28 @@ exports.Lexer: class Lexer
true
# Matches JavaScript interpolated directly into the source via backticks.
js_token: ->
jsToken: ->
return false unless starts @chunk, '`'
return false unless script: @balanced_token ['`', '`']
return false unless script: @balancedToken ['`', '`']
@token 'JS', script.replace JS_CLEANER, ''
@i: + script.length
true
# Matches regular expression literals. Lexing regular expressions is difficult
# to distinguish from division, so we borrow some basic heuristics from
# JavaScript and Ruby, borrow slash balancing from `@balanced_token`, and
# borrow interpolation from `@interpolate_string`.
regex_token: ->
# JavaScript and Ruby, borrow slash balancing from `@balancedToken`, and
# borrow interpolation from `@interpolateString`.
regexToken: ->
return false unless @chunk.match REGEX_START
return false if include NOT_REGEX, @tag()
return false unless regex: @balanced_token ['/', '/']
return false unless regex: @balancedToken ['/', '/']
return false unless end: @chunk.substr(regex.length).match REGEX_END
regex: + flags: end[2] if end[2]
if regex.match REGEX_INTERPOLATION
str: regex.substring(1).split('/')[0]
str: str.replace REGEX_ESCAPE, (escaped) -> '\\' + escaped
@tokens: @tokens.concat [['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]
@interpolate_string "\"$str\"", yes
@interpolateString "\"$str\"", yes
@tokens: @tokens.concat [[',', ','], ['STRING', "\"$flags\""], [')', ')'], [')', ')']]
else
@token 'REGEX', regex
@ -183,8 +183,8 @@ exports.Lexer: class Lexer
# Matches a token in which the passed delimiter pairs must be correctly
# balanced (ie. strings, JS literals).
balanced_token: (delimited...) ->
balanced_string @chunk, delimited
balancedToken: (delimited...) ->
balancedString @chunk, delimited
# Matches newlines, indents, and outdents, and determines which is which.
# If we can detect that the current line is continued onto the next line,
@ -196,44 +196,44 @@ exports.Lexer: class Lexer
#
# Keeps track of the level of indentation, because a single outdent token
# can close multiple indents, so we need to know how far in we happen to be.
line_token: ->
lineToken: ->
return false unless indent: @match MULTI_DENT, 1
@line: + count indent, "\n"
@i : + indent.length
prev: @prev(2)
size: indent.match(LAST_DENTS).reverse()[0].match(LAST_DENT)[1].length
next_character: @chunk.match(NEXT_CHARACTER)[1]
no_newlines: next_character is '.' or next_character is ',' or @unfinished()
nextCharacter: @chunk.match(NEXT_CHARACTER)[1]
noNewlines: nextCharacter is '.' or nextCharacter is ',' or @unfinished()
if size is @indent
return @suppress_newlines() if no_newlines
return @newline_token indent
return @suppressNewlines() if noNewlines
return @newlineToken indent
else if size > @indent
return @suppress_newlines() if no_newlines
return @suppressNewlines() if noNewlines
diff: size - @indent
@token 'INDENT', diff
@indents.push diff
else
@outdent_token @indent - size, no_newlines
@outdentToken @indent - size, noNewlines
@indent: size
true
# Record an outdent token or multiple tokens, if we happen to be moving back
# inwards past several recorded indents.
outdent_token: (move_out, no_newlines) ->
if move_out > -@outdebt
while move_out > 0 and @indents.length
last_indent: @indents.pop()
@token 'OUTDENT', last_indent
move_out: - last_indent
outdentToken: (moveOut, noNewlines) ->
if moveOut > -@outdebt
while moveOut > 0 and @indents.length
lastIndent: @indents.pop()
@token 'OUTDENT', lastIndent
moveOut: - lastIndent
else
@outdebt: + move_out
@outdebt: move_out unless no_newlines
@token 'TERMINATOR', "\n" unless @tag() is 'TERMINATOR' or no_newlines
@outdebt: + moveOut
@outdebt: moveOut unless noNewlines
@token 'TERMINATOR', "\n" unless @tag() is 'TERMINATOR' or noNewlines
true
# Matches and consumes non-meaningful whitespace. Tag the previous token
# as being "spaced", because there are some cases where it makes a difference.
whitespace_token: ->
whitespaceToken: ->
return false unless space: @match WHITESPACE, 1
prev: @prev()
prev.spaced: true if prev
@ -241,13 +241,13 @@ exports.Lexer: class Lexer
true
# Generate a newline token. Consecutive newlines get merged together.
newline_token: (newlines) ->
newlineToken: (newlines) ->
@token 'TERMINATOR', "\n" unless @tag() is 'TERMINATOR'
true
# Use a `\` at a line-ending to suppress the newline.
# The slash is removed here once its job is done.
suppress_newlines: ->
suppressNewlines: ->
@tokens.pop() if @value() is "\\"
true
@ -256,20 +256,20 @@ exports.Lexer: class Lexer
# the proper order of operations. There are some symbols that we tag specially
# here. `;` and newlines are both treated as a `TERMINATOR`, we distinguish
# parentheses that indicate a method call from regular parentheses, and so on.
literal_token: ->
literalToken: ->
match: @chunk.match OPERATOR
value: match and match[1]
space: match and match[2]
@tag_parameters() if value and value.match CODE
@tagParameters() if value and value.match CODE
value: or @chunk.substr 0, 1
prev_spaced: @prev() and @prev().spaced
prevSpaced: @prev() and @prev().spaced
tag: value
if value.match ASSIGNMENT
tag: 'ASSIGN'
@assignment_error() if include JS_FORBIDDEN, @value
@assignmentError() if include JS_FORBIDDEN, @value
else if value is ';'
tag: 'TERMINATOR'
else if include(CALLABLE, @tag()) and not prev_spaced
else if include(CALLABLE, @tag()) and not prevSpaced
if value is '('
tag: 'CALL_START'
else if value is '['
@ -277,7 +277,7 @@ exports.Lexer: class Lexer
@tag 1, 'INDEX_SOAK' if @tag() is '?'
@tag 1, 'INDEX_PROTO' if @tag() is '::'
@i: + value.length
return @tag_half_assignment tag if space and prev_spaced and @prev()[0] is 'ASSIGN' and include HALF_ASSIGNMENTS, tag
return @tagHalfAssignment tag if space and prevSpaced and @prev()[0] is 'ASSIGN' and include HALF_ASSIGNMENTS, tag
@token tag, value
true
@ -287,7 +287,7 @@ exports.Lexer: class Lexer
# As we consume a new `IDENTIFIER`, look at the previous token to determine
# if it's a special kind of accessor. Return `true` if any type of accessor
# is the previous token.
tag_accessor: ->
tagAccessor: ->
return false if (not prev: @prev()) or (prev and prev.spaced)
if prev[1] is '::'
@tag 1, 'PROTOTYPE_ACCESS'
@ -302,7 +302,7 @@ exports.Lexer: class Lexer
# Sanitize a heredoc or herecomment by escaping internal double quotes and
# erasing all external indentation on the left-hand side.
sanitize_heredoc: (doc, options) ->
sanitizeHeredoc: (doc, options) ->
while match: HEREDOC_INDENT.exec doc
attempt: if match[2]? then match[2] else match[3]
indent: attempt if not indent or attempt.length < indent.length
@ -312,7 +312,7 @@ exports.Lexer: class Lexer
.replace(new RegExp(options.quote, 'g'), "\\$options.quote")
# Tag a half assignment.
tag_half_assignment: (tag) ->
tagHalfAssignment: (tag) ->
last: @tokens.pop()
@tokens.push ["$tag=", "$tag=", last[2]]
true
@ -320,7 +320,7 @@ exports.Lexer: class Lexer
# A source of ambiguity in our grammar used to be parameter lists in function
# definitions versus argument lists in function calls. Walk backwards, tagging
# parameters specially in order to make things easier for the parser.
tag_parameters: ->
tagParameters: ->
return if @tag() isnt ')'
i: 0
loop
@ -334,17 +334,17 @@ exports.Lexer: class Lexer
true
# Close up all remaining open blocks at the end of the file.
close_indentation: ->
@outdent_token @indent
closeIndentation: ->
@outdentToken @indent
# The error for when you try to use a forbidden word in JavaScript as
# an identifier.
identifier_error: (word) ->
identifierError: (word) ->
throw new Error "SyntaxError: Reserved word \"$word\" on line ${@line + 1}"
# The error for when you try to assign to a reserved word in JavaScript,
# like "function" or "default".
assignment_error: ->
assignmentError: ->
throw new Error "SyntaxError: Reserved word \"${@value()}\" on line ${@line + 1} can't be assigned"
# Expand variables and expressions inside double-quoted strings using
@ -357,7 +357,7 @@ exports.Lexer: class Lexer
# If it encounters an interpolation, this method will recursively create a
# new Lexer, tokenize the interpolated contents, and merge them into the
# token stream.
interpolate_string: (str, escape_quotes) ->
interpolateString: (str, escapeQuotes) ->
if str.length < 3 or not starts str, '"'
@token 'STRING', str
else
@ -375,7 +375,7 @@ exports.Lexer: class Lexer
tokens.push ['IDENTIFIER', interp]
i: + group.length - 1
pi: i + 1
else if (expr: balanced_string str.substring(i), [['${', '}']])
else if (expr: balancedString str.substring(i), [['${', '}']])
tokens.push ['STRING', "$quote${ str.substring(pi, i) }$quote"] if pi < i
inner: expr.substring(2, expr.length - 1)
if inner.length
@ -396,7 +396,7 @@ exports.Lexer: class Lexer
[tag, value]: token
if tag is 'TOKENS'
@tokens: @tokens.concat value
else if tag is 'STRING' and escape_quotes
else if tag is 'STRING' and escapeQuotes
escaped: value.substring(1, value.length - 1).replace(/"/g, '\\"')
@token tag, "\"$escaped\""
else
@ -413,9 +413,9 @@ exports.Lexer: class Lexer
@tokens.push [tag, value, @line]
# Peek at a tag in the current token stream.
tag: (index, new_tag) ->
tag: (index, newTag) ->
return unless tok: @prev index
return tok[0]: new_tag if new_tag?
return tok[0]: newTag if newTag?
tok[0]
# Peek at a value in the current token stream.

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
# A simple **OptionParser** class to parse option flags from the command-line.
# Use it like so:
#
# parser: new OptionParser switches, help_banner
# parser: new OptionParser switches, helpBanner
# options: parser.parse process.argv
exports.OptionParser: class OptionParser
@ -12,7 +12,7 @@ exports.OptionParser: class OptionParser
# Along with an optional banner for the usage help.
constructor: (rules, banner) ->
@banner: banner
@rules: build_rules(rules)
@rules: buildRules(rules)
# Parse the list of arguments, populating an `options` object with all of the
# specified options, and returning it. `options.arguments` will be an array
@ -21,17 +21,17 @@ exports.OptionParser: class OptionParser
# flag. Instead, you're responsible for interpreting the options object.
parse: (args) ->
options: {arguments: []}
args: normalize_arguments args
args: normalizeArguments args
while (arg: args.shift())
is_option: !!(arg.match(LONG_FLAG) or arg.match(SHORT_FLAG))
matched_rule: no
isOption: !!(arg.match(LONG_FLAG) or arg.match(SHORT_FLAG))
matchedRule: no
for rule in @rules
if rule.short_flag is arg or rule.long_flag is arg
options[rule.name]: if rule.has_argument then args.shift() else true
matched_rule: yes
if rule.shortFlag is arg or rule.longFlag is arg
options[rule.name]: if rule.hasArgument then args.shift() else true
matchedRule: yes
break
throw new Error "unrecognized option: $arg" if is_option and not matched_rule
options.arguments.push arg unless is_option
throw new Error "unrecognized option: $arg" if isOption and not matchedRule
options.arguments.push arg unless isOption
options
# Return the help text for this **OptionParser**, listing and describing all
@ -40,10 +40,10 @@ exports.OptionParser: class OptionParser
lines: ['Available options:']
lines.unshift "$@banner\n" if @banner
for rule in @rules
spaces: 15 - rule.long_flag.length
spaces: 15 - rule.longFlag.length
spaces: if spaces > 0 then (' ' for i in [0..spaces]).join('') else ''
let_part: if rule.short_flag then rule.short_flag + ', ' else ' '
lines.push " $let_part$rule.long_flag$spaces$rule.description"
letPart: if rule.shortFlag then rule.shortFlag + ', ' else ' '
lines.push " $letPart$rule.longFlag$spaces$rule.description"
"\n${ lines.join('\n') }\n"
# Helpers
@ -57,27 +57,27 @@ OPTIONAL: /\[(.+)\]/
# Build and return the list of option rules. If the optional *short-flag* is
# unspecified, leave it out by padding with `null`.
build_rules: (rules) ->
buildRules: (rules) ->
for tuple in rules
tuple.unshift null if tuple.length < 3
build_rule tuple...
buildRule tuple...
# Build a rule from a `-o` short flag, a `--output [DIR]` long flag, and the
# description of what the option does.
build_rule: (short_flag, long_flag, description) ->
match: long_flag.match(OPTIONAL)
long_flag: long_flag.match(LONG_FLAG)[1]
buildRule: (shortFlag, longFlag, description) ->
match: longFlag.match(OPTIONAL)
longFlag: longFlag.match(LONG_FLAG)[1]
{
name: long_flag.substr 2
short_flag: short_flag
long_flag: long_flag
name: longFlag.substr 2
shortFlag: shortFlag
longFlag: longFlag
description: description
has_argument: !!(match and match[1])
hasArgument: !!(match and match[1])
}
# Normalize arguments by expanding merged flags into multiple flags. This allows
# you to have `-wl` be the same as `--watch --lint`.
normalize_arguments: (args) ->
normalizeArguments: (args) ->
args: args.slice 0
result: []
for arg in args

View file

@ -22,7 +22,7 @@ helpers.extend global, {
# of exiting.
run: (buffer) ->
try
val: CoffeeScript.run buffer.toString(), {no_wrap: true, globals: true, source: 'repl'}
val: CoffeeScript.run buffer.toString(), {noWrap: true, globals: true, source: 'repl'}
puts inspect val if val isnt undefined
catch err
puts err.stack or err.toString()

View file

@ -26,14 +26,14 @@ exports.Rewriter: class Rewriter
# corrected before implicit parentheses can be wrapped around blocks of code.
rewrite: (tokens) ->
@tokens: tokens
@adjust_comments()
@remove_leading_newlines()
@remove_mid_expression_newlines()
@close_open_calls_and_indexes()
@add_implicit_indentation()
@add_implicit_parentheses()
@ensure_balance BALANCED_PAIRS
@rewrite_closing_parens()
@adjustComments()
@removeLeadingNewlines()
@removeMidExpressionNewlines()
@closeOpenCallsAndIndexes()
@addImplicitIndentation()
@addImplicitParentheses()
@ensureBalance BALANCED_PAIRS
@rewriteClosingParens()
@tokens
# Rewrite the token stream, looking one token ahead and behind.
@ -41,7 +41,7 @@ exports.Rewriter: class Rewriter
# forwards (or backwards) in the stream, to make sure we don't miss anything
# as tokens are inserted and removed, and the stream changes length under
# our feet.
scan_tokens: (block) ->
scanTokens: (block) ->
i: 0
loop
break unless @tokens[i]
@ -51,8 +51,8 @@ exports.Rewriter: class Rewriter
# Massage newlines and indentations so that comments don't have to be
# correctly indented, or appear on a line of their own.
adjust_comments: ->
@scan_tokens (prev, token, post, i) =>
adjustComments: ->
@scanTokens (prev, token, post, i) =>
return 1 unless include COMMENTS, token[0]
[before, after]: [@tokens[i - 2], @tokens[i + 2]]
if after and after[0] is 'INDENT'
@ -74,13 +74,13 @@ exports.Rewriter: class Rewriter
# Leading newlines would introduce an ambiguity in the grammar, so we
# dispatch them here.
remove_leading_newlines: ->
removeLeadingNewlines: ->
@tokens.shift() while @tokens[0] and @tokens[0][0] is 'TERMINATOR'
# Some blocks occur in the middle of expressions -- when we're expecting
# this, remove their trailing newlines.
remove_mid_expression_newlines: ->
@scan_tokens (prev, token, post, i) =>
removeMidExpressionNewlines: ->
@scanTokens (prev, token, post, i) =>
return 1 unless post and include(EXPRESSION_CLOSE, post[0]) and token[0] is 'TERMINATOR'
@tokens.splice i, 1
return 0
@ -88,10 +88,10 @@ exports.Rewriter: class Rewriter
# The lexer has tagged the opening parenthesis of a method call, and the
# opening bracket of an indexing operation. Match them with their paired
# close.
close_open_calls_and_indexes: ->
closeOpenCallsAndIndexes: ->
parens: [0]
brackets: [0]
@scan_tokens (prev, token, post, i) =>
@scanTokens (prev, token, post, i) =>
switch token[0]
when 'CALL_START' then parens.push 0
when 'INDEX_START' then brackets.push 0
@ -114,15 +114,15 @@ exports.Rewriter: class Rewriter
# Methods may be optionally called without parentheses, for simple cases.
# Insert the implicit parentheses here, so that the parser doesn't have to
# deal with them.
add_implicit_parentheses: ->
addImplicitParentheses: ->
stack: [0]
close_calls: (i) =>
closeCalls: (i) =>
for tmp in [0...stack[stack.length - 1]]
@tokens.splice(i, 0, ['CALL_END', ')', @tokens[i][2]])
size: stack[stack.length - 1] + 1
stack[stack.length - 1]: 0
size
@scan_tokens (prev, token, post, i) =>
@scanTokens (prev, token, post, i) =>
tag: token[0]
stack[stack.length - 2]: + stack.pop() if tag is 'OUTDENT'
open: stack[stack.length - 1] > 0
@ -133,7 +133,7 @@ exports.Rewriter: class Rewriter
return 2
if include(EXPRESSION_START, tag)
if tag is 'INDENT' and !token.generated and open and not (prev and include(IMPLICIT_BLOCK, prev[0]))
size: close_calls(i)
size: closeCalls(i)
stack.push 0
return size
stack.push 0
@ -143,7 +143,7 @@ exports.Rewriter: class Rewriter
if nx? and nx[0] is ','
@tokens.splice(i, 1) if tag is 'TERMINATOR'
else
size: close_calls(i)
size: closeCalls(i)
stack.pop() if tag isnt 'OUTDENT' and include EXPRESSION_END, tag
return size
if tag isnt 'OUTDENT' and include EXPRESSION_END, tag
@ -155,8 +155,8 @@ exports.Rewriter: class Rewriter
# expressions that lack ending delimiters. The **Rewriter** adds the implicit
# blocks, so it doesn't need to. ')' can close a single-line block,
# but we need to make sure it's balanced.
add_implicit_indentation: ->
@scan_tokens (prev, token, post, i) =>
addImplicitIndentation: ->
@scanTokens (prev, token, post, i) =>
return 1 unless include(SINGLE_LINERS, token[0]) and
post[0] isnt 'INDENT' and
not (token[0] is 'ELSE' and post[0] is 'IF')
@ -187,15 +187,15 @@ exports.Rewriter: class Rewriter
# Ensure that all listed pairs of tokens are correctly balanced throughout
# the course of the token stream.
ensure_balance: (pairs) ->
ensureBalance: (pairs) ->
levels: {}
open_line: {}
@scan_tokens (prev, token, post, i) =>
openLine: {}
@scanTokens (prev, token, post, i) =>
for pair in pairs
[open, close]: pair
levels[open]: or 0
if token[0] is open
open_line[open]: token[2] if levels[open] == 0
openLine[open]: token[2] if levels[open] == 0
levels[open]: + 1
levels[open]: - 1 if token[0] is close
throw new Error("too many ${token[1]} on line ${token[2] + 1}") if levels[open] < 0
@ -203,7 +203,7 @@ exports.Rewriter: class Rewriter
unclosed: key for key, value of levels when value > 0
if unclosed.length
open: unclosed[0]
line: open_line[open] + 1
line: openLine[open] + 1
throw new Error "unclosed $open on line $line"
# We'd like to support syntax like this:
@ -222,11 +222,11 @@ exports.Rewriter: class Rewriter
# up balanced in the end.
# 4. Be careful not to alter array or parentheses delimiters with overzealous
# rewriting.
rewrite_closing_parens: ->
rewriteClosingParens: ->
stack: []
debt: {}
(debt[key]: 0) for key, val of INVERSES
@scan_tokens (prev, token, post, i) =>
@scanTokens (prev, token, post, i) =>
tag: token[0]
inv: INVERSES[token[0]]
if include EXPRESSION_START, tag

View file

@ -21,10 +21,10 @@ exports.Scope: class Scope
[@parent, @expressions, @method]: [parent, expressions, method]
@variables: {}
if @parent
@temp_var: @parent.temp_var
@tempVar: @parent.tempVar
else
Scope.root: this
@temp_var: '_a'
@tempVar: '_a'
# Look up a variable name in lexical scope, and declare it if it does not
# already exist.
@ -51,12 +51,12 @@ exports.Scope: class Scope
# If we need to store an intermediate result, find an available name for a
# compiler-generated variable. `_a`, `_b`, and so on...
free_variable: ->
while @check @temp_var
ordinal: 1 + parseInt @temp_var.substr(1), 36
@temp_var: '_' + ordinal.toString(36).replace(/\d/g, 'a')
@variables[@temp_var]: 'var'
@temp_var
freeVariable: ->
while @check @tempVar
ordinal: 1 + parseInt @tempVar.substr(1), 36
@tempVar: '_' + ordinal.toString(36).replace(/\d/g, 'a')
@variables[@tempVar]: 'var'
@tempVar
# Ensure that an assignment is made at the top of this scope
# (or at the top-level scope, if requested).
@ -65,27 +65,27 @@ exports.Scope: class Scope
# Does this scope reference any variables that need to be declared in the
# given function body?
has_declarations: (body) ->
hasDeclarations: (body) ->
body is @expressions and @any (k, val) -> val is 'var'
# Does this scope reference any assignments that need to be declared at the
# top of the given function body?
has_assignments: (body) ->
hasAssignments: (body) ->
body is @expressions and @any (k, val) -> val.assigned
# Return the list of variables first declared in this scope.
declared_variables: ->
declaredVariables: ->
(key for key, val of @variables when val is 'var').sort()
# Return the list of assignments that are supposed to be made at the top
# of this scope.
assigned_variables: ->
assignedVariables: ->
"$key = $val.value" for key, val of @variables when val.assigned
# Compile the JavaScript for all of the variable declarations in this scope.
compiled_declarations: ->
@declared_variables().join ', '
compiledDeclarations: ->
@declaredVariables().join ', '
# Compile the JavaScript for all of the variable assignments in this scope.
compiled_assignments: ->
@assigned_variables().join ', '
compiledAssignments: ->
@assignedVariables().join ', '

View file

@ -17,9 +17,9 @@ ok(area(
) is 100)
sum_of_args: ->
sumOfArgs: ->
sum: 0
sum: + val for val in arguments
sum
ok sum_of_args(1, 2, 3, 4, 5) is 15
ok sumOfArgs(1, 2, 3, 4, 5) is 15

View file

@ -10,13 +10,13 @@ ok result is true and result2 is true
# Can assign a conditional statement.
get_x: -> 10
getX: -> 10
if x: get_x() then 100
if x: getX() then 100
ok x is 10
x: if get_x() then 100
x: if getX() then 100
ok x is 100

View file

@ -1,8 +1,8 @@
# Test with break at the top level.
array: [1,2,3]
call_with_lambda: (l) -> null
callWithLambda: (l) -> null
for i in array
result: call_with_lambda(->)
result: callWithLambda(->)
if i == 2
puts "i = 2"
else
@ -12,10 +12,10 @@ ok result is null
# Test with break *not* at the top level.
some_func: (input) ->
takes_lambda: (l) -> null
someFunc: (input) ->
takesLambda: (l) -> null
for i in [1,2]
result: takes_lambda(->)
result: takesLambda(->)
if input == 1
return 1
else
@ -23,6 +23,6 @@ some_func: (input) ->
return 2
ok some_func(1) is 1
ok some_func(2) is 2
ok someFunc(1) is 1
ok someFunc(2) is 2

View file

@ -1,8 +1,8 @@
# Basic chained function calls.
identity_wrap: (x) ->
identityWrap: (x) ->
-> x
result: identity_wrap(identity_wrap(true))()()
result: identityWrap(identityWrap(true))()()
ok result

View file

@ -102,11 +102,11 @@ ok (new SubClass()).prop is 'top-super-sub'
# '@' referring to the current instance, and not being coerced into a call.
class ClassName
am_i: ->
amI: ->
@ instanceof ClassName
obj: new ClassName()
ok obj.am_i()
ok obj.amI()
# super() calls in constructors of classes that are defined as object properties.

View file

@ -13,7 +13,7 @@ switch 'string'
when false then something()
# comment
when null
something_else()
somethingElse()
->
code()

View file

@ -2,7 +2,7 @@
CoffeeScript: require('./../lib/coffee-script')
Lexer: require('./../lib/lexer')
js: CoffeeScript.compile("one\r\ntwo", {no_wrap: on})
js: CoffeeScript.compile("one\r\ntwo", {noWrap: on})
ok js is "one;\ntwo;"
@ -17,7 +17,7 @@ class SplitNode extends BaseNode
constructor: (variable) ->
@variable: variable
compile_node: (o) ->
compileNode: (o) ->
"'${@variable}'.split('')"
# Extend CoffeeScript with our lexing function that matches --wordgoeshere--
@ -29,7 +29,7 @@ CoffeeScript.extend ->
true
# Compile with the extension.
js: CoffeeScript.compile 'return --tobesplit--', {no_wrap: on}
js: CoffeeScript.compile 'return --tobesplit--', {noWrap: on}
ok js is "return 'tobesplit'.split('');"
@ -42,7 +42,7 @@ class WordArrayNode extends BaseNode
constructor: (words) ->
@words: words
compile_node: (o) ->
compileNode: (o) ->
strings = ("\"$word\"" for word in @words).join ', '
"[$strings]"
@ -52,7 +52,7 @@ CoffeeScript.extend ->
@token 'EXTENSION', new WordArrayNode(words[1].split(/\s+/))
true
js: CoffeeScript.compile 'puts %w{one two three}', {no_wrap: on}
js: CoffeeScript.compile 'puts %w{one two three}', {noWrap: on}
ok js is 'puts(["one", "two", "three"]);'
@ -65,7 +65,7 @@ CoffeeScript.extend ->
@token 'PROPERTY_ACCESS', '.'
@token 'IDENTIFIER', 'push'
js: CoffeeScript.compile 'a << b', {no_wrap: on}
js: CoffeeScript.compile 'a << b', {noWrap: on}
ok js is 'a.push(b);'

View file

@ -80,17 +80,17 @@ ok(num % 2 is 0 for num in array by 2)
# Nested comprehensions.
multi_liner:
multiLiner:
for x in [3..5]
for y in [3..5]
[x, y]
single_liner:
singleLiner:
[x, y] for y in [3..5] for x in [3..5]
ok multi_liner.length is single_liner.length
ok 5 is multi_liner[2][2][1]
ok 5 is single_liner[2][2][1]
ok multiLiner.length is singleLiner.length
ok 5 is multiLiner[2][2][1]
ok 5 is singleLiner[2][2][1]
# Comprehensions within parentheses.

View file

@ -1,8 +1,8 @@
ok(if my_special_variable? then false else true)
ok(if mySpecialVariable? then false else true)
my_special_variable: false
mySpecialVariable: false
ok(if my_special_variable? then true else false)
ok(if mySpecialVariable? then true else false)
# Existential assignment.
@ -23,11 +23,11 @@ ok z is null and x is "EX"
# Only evaluate once.
counter: 0
get_next_node: ->
getNextNode: ->
throw "up" if counter
counter++
ok(if get_next_node()? then true else false)
ok(if getNextNode()? then true else false)
# Existence chains, soaking up undefined properties:

View file

@ -1,4 +1,4 @@
# Ensure that we don't wrap Nodes that are "pure_statement" in a closure.
# Ensure that we don't wrap Nodes that are "pureStatement" in a closure.
items: [1, 2, 3, "bacon", 4, 5]
for item in items

View file

@ -57,8 +57,8 @@ ok 100 > 1 if 1 > 0
ok true unless false
ok true for i in [1..3]
ok_func: (f) -> ok(f())
ok_func -> true
okFunc: (f) -> ok(f())
okFunc -> true
# Optional parens can be used in a nested fashion.
call: (func) -> func()

View file

@ -40,18 +40,18 @@ reg: /\\/
ok reg(str) and str is '\\'
trailing_comma: [1, 2, 3,]
ok (trailing_comma[0] is 1) and (trailing_comma[2] is 3) and (trailing_comma.length is 3)
trailingComma: [1, 2, 3,]
ok (trailingComma[0] is 1) and (trailingComma[2] is 3) and (trailingComma.length is 3)
trailing_comma: [
trailingComma: [
1, 2, 3,
4, 5, 6
7, 8, 9,
]
(sum: (sum or 0) + n) for n in trailing_comma
(sum: (sum or 0) + n) for n in trailingComma
trailing_comma: {k1: "v1", k2: 4, k3: (-> true),}
ok trailing_comma.k3() and (trailing_comma.k2 is 4) and (trailing_comma.k1 is "v1")
trailingComma: {k1: "v1", k2: 4, k3: (-> true),}
ok trailingComma.k3() and (trailingComma.k2 is 4) and (trailingComma.k1 is "v1")
multiline: {a: 15,
b: 26}

View file

@ -1,4 +1,4 @@
# This file is imported by `test_importing.coffee`
# This file is imported by `testImporting.coffee`
local: "from over there"

View file

@ -6,13 +6,13 @@ result: func 1, 2, 3, 4, 5
ok result is "3 4 5"
gold: silver: bronze: the_field: last: null
gold: silver: bronze: theField: last: null
medalists: (first, second, third, rest..., unlucky) ->
gold: first
silver: second
bronze: third
the_field: rest.concat([last])
theField: rest.concat([last])
last: unlucky
contenders: [
@ -34,7 +34,7 @@ ok gold is "Mighty Mouse"
ok silver is "Michael Phelps"
ok bronze is "Liu Xiang"
ok last is "Usain Bolt"
ok the_field.length is 8
ok theField.length is 8
contenders.reverse()
medalists contenders[0...2]..., "Mighty Mouse", contenders[2...contenders.length]...
@ -43,7 +43,7 @@ ok gold is "Usain Bolt"
ok silver is "Asafa Powell"
ok bronze is "Mighty Mouse"
ok last is "Michael Phelps"
ok the_field.length is 8
ok theField.length is 8
medalists contenders..., 'Tim', 'Moe', 'Jim'
ok last is 'Jim'

View file

@ -46,7 +46,7 @@ ok result
result: false
switch "word"
when "one thing"
do_something()
doSomething()
else
result: true unless false
@ -55,9 +55,9 @@ ok result
result: false
switch "word"
when "one thing"
do_something()
doSomething()
when "other thing"
do_something()
doSomething()
else
result: true unless false