jashkenas--coffeescript/lib/lexer.js

(function(){
var ASSIGNED, ASSIGNMENT, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMMENT_CLEANER, CONVERSIONS, HALF_ASSIGNMENTS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, INTERPOLATION, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, Lexer, MULTILINER, MULTI_DENT, NEXT_CHARACTER, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_END, REGEX_ESCAPE, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, STRING_NEWLINES, WHITESPACE, _a, _b, _c, balancedString, compact, count, helpers, include, starts;
var __slice = Array.prototype.slice;
// The CoffeeScript Lexer. Uses a series of token-matching regexes to attempt
// matches against the beginning of the source code. When a match is found,
// a token is produced, we consume the match, and start again. Tokens are in the
// form:
// [tag, value, lineNumber]
// Which is a format that can be fed directly into [Jison](http://github.com/zaach/jison).
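// For example, a line of CoffeeScript such as `sum = 1 + 2` yields a stream
// roughly of the form (a sketch; the Rewriter pass may adjust it further):
//     [['IDENTIFIER', 'sum', 0], ['ASSIGN', '=', 0], ['NUMBER', '1', 0],
//      ['+', '+', 0], ['NUMBER', '2', 0], ['TERMINATOR', '\n', 0]]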
// Set up the Lexer for both Node.js and the browser, depending on where we are.
if ((typeof process !== "undefined" && process !== null)) {
_a = require('./rewriter');
Rewriter = _a.Rewriter;
_b = require('./helpers');
helpers = _b.helpers;
} else {
this.exports = this;
Rewriter = this.Rewriter;
helpers = this.helpers;
}
// Import the helpers we need.
_c = helpers;
include = _c.include;
count = _c.count;
starts = _c.starts;
compact = _c.compact;
balancedString = _c.balancedString;
// The Lexer Class
// ---------------
// The Lexer class reads a stream of CoffeeScript and divvies it up into tagged
// tokens. Some potential ambiguity in the grammar has been avoided by
// pushing some extra smarts into the Lexer.
exports.Lexer = (function() {
Lexer = function() { };
// **tokenize** is the Lexer's main method. Scan by attempting to match tokens
// one at a time, using a regular expression anchored at the start of the
// remaining code, or a custom recursive token-matching method
// (for interpolations). When the next token has been recorded, we move forward
// within the code past the token, and begin again.
// Each tokenizing method is responsible for incrementing `@i` by the number of
// characters it has consumed. `@i` can be thought of as our finger on the page
// of source.
// Before returning the token stream, run it through the [Rewriter](rewriter.html)
// unless explicitly asked not to.
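// A usage sketch (assuming the compiled module is loaded from Node.js), which
// inspects the raw token stream by skipping the Rewriter:
//     var Lexer  = require('./lexer').Lexer;
//     var tokens = (new Lexer()).tokenize(code, {rewrite: false});
// Passing `line` in the options starts the line counter somewhere other than 0.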
Lexer.prototype.tokenize = function(code, options) {
var o;
code = code.replace(/(\r|\s+$)/g, '');
o = options || {};
this.code = code;
// The remainder of the source code.
this.i = 0;
// Current character position we're parsing.
this.line = o.line || 0;
// The current line.
this.indent = 0;
// The current indentation level.
this.outdebt = 0;
// The under-outdentation of the last outdent.
this.indents = [];
// The stack of all current indentation levels.
this.tokens = [];
// Stream of parsed tokens in the form ['TYPE', value, line]
while (this.i < this.code.length) {
this.chunk = this.code.slice(this.i);
this.extractNextToken();
}
this.closeIndentation();
if (o.rewrite === false) {
return this.tokens;
}
return (new Rewriter()).rewrite(this.tokens);
};
// At every position, run through this list of attempted matches,
// short-circuiting if any of them succeed. Their order determines precedence:
// `@literalToken` is the fallback catch-all.
Lexer.prototype.extractNextToken = function() {
if (this.extensionToken()) {
return null;
}
if (this.identifierToken()) {
return null;
}
if (this.numberToken()) {
return null;
}
if (this.heredocToken()) {
return null;
}
if (this.regexToken()) {
return null;
}
if (this.commentToken()) {
return null;
}
if (this.lineToken()) {
return null;
}
if (this.whitespaceToken()) {
return null;
}
if (this.jsToken()) {
return null;
}
if (this.stringToken()) {
return null;
}
return this.literalToken();
};
// Tokenizers
// ----------
// Language extensions get the highest priority, first chance to tag tokens
// as something else.
Lexer.prototype.extensionToken = function() {
var _d, _e, _f, extension;
_e = Lexer.extensions;
for (_d = 0, _f = _e.length; _d < _f; _d++) {
extension = _e[_d];
if (extension.call(this)) {
return true;
}
}
return false;
};
// Matches identifying literals: variables, keywords, method names, etc.
// Check to ensure that JavaScript reserved words aren't being used as
// identifiers. Because CoffeeScript reserves a handful of keywords that are
// allowed in JavaScript, we're careful not to tag them as keywords when
// referenced as property names here, so you can still do `jQuery.is()` even
// though `is` means `===` otherwise.
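// For example, `jQuery.is()` should lex to something like (a sketch, before
// the Rewriter runs):
//     [['IDENTIFIER', 'jQuery'], ['PROPERTY_ACCESS', '.'], ['IDENTIFIER', 'is'],
//      ['CALL_START', '('], [')', ')']]
// rather than converting `is` into the `==` operator.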
Lexer.prototype.identifierToken = function() {
var close_index, forcedIdentifier, id, tag;
if (!(id = this.match(IDENTIFIER, 1))) {
return false;
}
this.i += id.length;
forcedIdentifier = this.tagAccessor() || this.match(ASSIGNED, 1);
tag = 'IDENTIFIER';
if (include(JS_KEYWORDS, id) || (!forcedIdentifier && include(COFFEE_KEYWORDS, id))) {
tag = id.toUpperCase();
}
if (tag === 'WHEN' && include(LINE_BREAK, this.tag())) {
tag = 'LEADING_WHEN';
}
if (include(JS_FORBIDDEN, id)) {
if (forcedIdentifier) {
tag = 'STRING';
id = ("'" + id + "'");
if (forcedIdentifier === 'accessor') {
close_index = true;
if (this.tag() !== '@') {
this.tokens.pop();
}
this.token('INDEX_START', '[');
}
} else if (include(RESERVED, id)) {
this.identifierError(id);
}
}
if (!(forcedIdentifier)) {
if (include(COFFEE_ALIASES, id)) {
tag = (id = CONVERSIONS[id]);
}
if (this.prev() && this.prev()[0] === 'ASSIGN' && include(HALF_ASSIGNMENTS, tag)) {
return this.tagHalfAssignment(tag);
}
}
this.token(tag, id);
if (close_index) {
this.token(']', ']');
}
return true;
};
// Matches numbers, including decimals, hex, and exponential notation.
// Be careful not to interfere with ranges-in-progress.
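// For example, `0xff`, `3.14`, and `1e10` each lex as a single NUMBER token,
// while in a range like `1..10` the guard below keeps `.10` from being read
// as a decimal, so both dots survive for the parser.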
Lexer.prototype.numberToken = function() {
var number;
if (!(number = this.match(NUMBER, 1))) {
return false;
}
if (this.tag() === '.' && starts(number, '.')) {
return false;
}
this.i += number.length;
this.token('NUMBER', number);
return true;
};
// Matches strings, including multi-line strings. Ensures that quotation marks
// are balanced within the string's contents, and within nested interpolations.
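// For example, a string like `"she said ${"hi there"}"` is intended to be
// consumed as one balanced token: the inner quotes don't terminate the string
// because the `${ }` pair is tracked alongside the `" "` pair (a sketch of the
// intent; see `balancedString` in helpers).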
Lexer.prototype.stringToken = function() {
var string;
if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
return false;
}
if (!(string = this.balancedToken(['"', '"'], ['${', '}']) || this.balancedToken(["'", "'"]))) {
return false;
}
this.interpolateString(string.replace(STRING_NEWLINES, " \\\n"));
this.line += count(string, "\n");
this.i += string.length;
return true;
};
// Matches heredocs, adjusting indentation to the correct level, as heredocs
// preserve whitespace, but ignore indentation to the left.
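// For example (a sketch):
//     html: '''
//           <div>
//             Hello.
//           </div>
//           '''
// keeps the inner newlines but strips the shared leading indentation, so the
// `<div>` ends up flush left in the resulting string.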
Lexer.prototype.heredocToken = function() {
var doc, match, quote;
if (!(match = this.chunk.match(HEREDOC))) {
return false;
}
quote = match[1].substr(0, 1);
doc = this.sanitizeHeredoc(match[2] || match[4], {
quote: quote
});
this.interpolateString(("" + quote + doc + quote));
this.line += count(match[1], "\n");
this.i += match[1].length;
return true;
};
// Matches and consumes comments. We pass through comments into JavaScript,
// so they're treated as real tokens, like any other part of the language.
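// For example, `# a note` becomes a COMMENT token holding the cleaned-up
// line(s), while a block of the form:
//     ###
//     Licensed under MIT.
//     ###
// becomes a single HERECOMMENT token (a sketch; see the COMMENT regex below).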
Lexer.prototype.commentToken = function() {
var comment, i, lines, match;
if (!(match = this.chunk.match(COMMENT))) {
return false;
}
if (match[3]) {
comment = this.sanitizeHeredoc(match[3], {
herecomment: true
});
this.token('HERECOMMENT', comment.split(MULTILINER));
this.token('TERMINATOR', '\n');
} else {
lines = compact(match[1].replace(COMMENT_CLEANER, '').split(MULTILINER));
i = this.tokens.length - 1;
if (this.unfinished()) {
while (this.tokens[i] && !include(LINE_BREAK, this.tokens[i][0])) {
i -= 1;
}
}
this.tokens.splice(i + 1, 0, ['COMMENT', lines, this.line], ['TERMINATOR', '\n', this.line]);
}
this.line += count(match[1], "\n");
this.i += match[1].length;
return true;
};
// Matches JavaScript interpolated directly into the source via backticks.
Lexer.prototype.jsToken = function() {
var script;
if (!(starts(this.chunk, '`'))) {
return false;
}
if (!(script = this.balancedToken(['`', '`']))) {
return false;
}
this.token('JS', script.replace(JS_CLEANER, ''));
this.i += script.length;
return true;
};
// Matches regular expression literals. Lexing regular expressions is difficult
// to distinguish from division, so we borrow some basic heuristics from
// JavaScript and Ruby, borrow slash balancing from `@balancedToken`, and
// borrow interpolation from `@interpolateString`.
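// For example, after a NUMBER token a slash is always division, so `10 /2`
// divides rather than starting a regex, while `filter /\d+/g` lexes the
// slashes as a single REGEX token (a sketch of the heuristic; see NOT_REGEX
// and REGEX_START below).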
Lexer.prototype.regexToken = function() {
var end, flags, regex, str;
if (!(this.chunk.match(REGEX_START))) {
return false;
}
if (include(NOT_REGEX, this.tag())) {
return false;
}
if (!(regex = this.balancedToken(['/', '/']))) {
return false;
}
if (!(end = this.chunk.substr(regex.length).match(REGEX_END))) {
return false;
}
if (end[2]) {
regex += (flags = end[2]);
}
if (regex.match(REGEX_INTERPOLATION)) {
str = regex.substring(1).split('/')[0];
str = str.replace(REGEX_ESCAPE, function(escaped) {
return '\\' + escaped;
});
this.tokens = this.tokens.concat([['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]);
this.interpolateString(("\"" + str + "\""), true);
this.tokens = this.tokens.concat([[',', ','], ['STRING', ("\"" + flags + "\"")], [')', ')'], [')', ')']]);
} else {
this.token('REGEX', regex);
}
this.i += regex.length;
return true;
};
// Matches a token in which the passed delimiter pairs must be correctly
// balanced (i.e. strings, JS literals).
Lexer.prototype.balancedToken = function() {
var delimited;
var _d = arguments.length, _e = _d >= 1;
delimited = __slice.call(arguments, 0, _d - 0);
return balancedString(this.chunk, delimited);
};
// Matches newlines, indents, and outdents, and determines which is which.
// If we can detect that the current line is continued onto the next line,
// then the newline is suppressed:
// elements
// .each( ... )
// .map( ... )
// Keeps track of the level of indentation, because a single outdent token
// can close multiple indents, so we need to know how far in we happen to be.
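// For example, dedenting from the body of an `if` nested inside a `while`
// straight back to the left margin pops two levels off `@indents` and emits
// two OUTDENT tokens in a row (a sketch of the bookkeeping below).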
Lexer.prototype.lineToken = function() {
var diff, indent, nextCharacter, noNewlines, prev, size;
if (!(indent = this.match(MULTI_DENT, 1))) {
return false;
}
this.line += count(indent, "\n");
this.i += indent.length;
prev = this.prev(2);
size = indent.match(LAST_DENTS).reverse()[0].match(LAST_DENT)[1].length;
nextCharacter = this.chunk.match(NEXT_CHARACTER)[1];
noNewlines = nextCharacter === '.' || nextCharacter === ',' || this.unfinished();
if (size === this.indent) {
if (noNewlines) {
return this.suppressNewlines();
}
return this.newlineToken(indent);
} else if (size > this.indent) {
if (noNewlines) {
return this.suppressNewlines();
}
diff = size - this.indent;
this.token('INDENT', diff);
this.indents.push(diff);
} else {
this.outdentToken(this.indent - size, noNewlines);
}
this.indent = size;
return true;
};
// Record an outdent token or multiple tokens, if we happen to be moving back
// inwards past several recorded indents.
Lexer.prototype.outdentToken = function(moveOut, noNewlines) {
var lastIndent;
if (moveOut > -this.outdebt) {
while (moveOut > 0 && this.indents.length) {
lastIndent = this.indents.pop();
this.token('OUTDENT', lastIndent);
moveOut -= lastIndent;
}
} else {
this.outdebt += moveOut;
}
if (!(noNewlines)) {
this.outdebt = moveOut;
}
if (!(this.tag() === 'TERMINATOR' || noNewlines)) {
this.token('TERMINATOR', "\n");
}
return true;
};
// Matches and consumes non-meaningful whitespace. Tag the previous token
// as being "spaced", because there are some cases where it makes a difference.
Lexer.prototype.whitespaceToken = function() {
var prev, space;
if (!(space = this.match(WHITESPACE, 1))) {
return false;
}
prev = this.prev();
if (prev) {
prev.spaced = true;
}
this.i += space.length;
return true;
};
// Generate a newline token. Consecutive newlines get merged together.
Lexer.prototype.newlineToken = function(newlines) {
if (!(this.tag() === 'TERMINATOR')) {
this.token('TERMINATOR', "\n");
}
return true;
};
// Use a `\` at a line-ending to suppress the newline.
// The slash is removed here once its job is done.
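// For example (a sketch):
//     sum = 1 + \
//           2
// lexes just like `sum = 1 + 2`: the `\` token is popped and the newline
// never becomes a TERMINATOR.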
Lexer.prototype.suppressNewlines = function() {
if (this.value() === "\\") {
this.tokens.pop();
}
return true;
};
// We treat all other single characters as a token, e.g.: `( ) , . !`
// Multi-character operators are also literal tokens, so that Jison can assign
// the proper order of operations. There are some symbols that we tag specially
// here. `;` and newlines are both treated as a `TERMINATOR`, we distinguish
// parentheses that indicate a method call from regular parentheses, and so on.
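// For example, `push(x)` tags the parenthesis as CALL_START because `push` is
// CALLABLE and not followed by a space, while `push (x)` leaves it as a plain
// `(` (a sketch; the Rewriter settles the rest).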
Lexer.prototype.literalToken = function() {
var match, prevSpaced, space, tag, value;
match = this.chunk.match(OPERATOR);
value = match && match[1];
space = match && match[2];
if (value && value.match(CODE)) {
this.tagParameters();
}
value = value || this.chunk.substr(0, 1);
prevSpaced = this.prev() && this.prev().spaced;
tag = value;
if (value.match(ASSIGNMENT)) {
tag = 'ASSIGN';
if (include(JS_FORBIDDEN, this.value())) {
this.assignmentError();
}
} else if (value === ';') {
tag = 'TERMINATOR';
} else if (include(CALLABLE, this.tag()) && !prevSpaced) {
if (value === '(') {
tag = 'CALL_START';
} else if (value === '[') {
tag = 'INDEX_START';
if (this.tag() === '?') {
this.tag(1, 'INDEX_SOAK');
}
if (this.tag() === '::') {
this.tag(1, 'INDEX_PROTO');
}
}
}
this.i += value.length;
if (space && prevSpaced && this.prev()[0] === 'ASSIGN' && include(HALF_ASSIGNMENTS, tag)) {
return this.tagHalfAssignment(tag);
}
this.token(tag, value);
return true;
};
// Token Manipulators
// ------------------
// As we consume a new `IDENTIFIER`, look at the previous token to determine
// if it's a special kind of accessor. Return `true` if any type of accessor
// is the previous token.
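// For example, lexing the `name` in `user.name` retags the preceding `.` as
// PROPERTY_ACCESS, `user::name` retags the `::` as PROTOTYPE_ACCESS, and
// `user?.name` retags the `.` as SOAK_ACCESS and splices out the `?` token
// (a sketch of the cases handled below).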
Lexer.prototype.tagAccessor = function() {
var accessor, prev;
if ((!(prev = this.prev())) || (prev && prev.spaced)) {
return false;
}
accessor = (function() {
if (prev[1] === '::') {
return this.tag(1, 'PROTOTYPE_ACCESS');
} else if (prev[1] === '.' && !(this.value(2) === '.')) {
if (this.tag(2) === '?') {
this.tag(1, 'SOAK_ACCESS');
return this.tokens.splice(-2, 1);
} else {
return this.tag(1, 'PROPERTY_ACCESS');
}
} else {
return prev[0] === '@';
}
}).call(this);
if (accessor) {
return 'accessor';
} else {
return false;
}
};
// Sanitize a heredoc or herecomment by escaping internal double quotes and
// erasing all external indentation on the left-hand side.
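// For example, if every line of a heredoc is indented by at least four spaces,
// those four spaces are stripped from each line; a double-quoted heredoc also
// gets its internal `"` characters escaped (a sketch of the two steps below).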
Lexer.prototype.sanitizeHeredoc = function(doc, options) {
var _d, attempt, indent, match;
while (match = HEREDOC_INDENT.exec(doc)) {
attempt = (typeof (_d = match[2]) !== "undefined" && _d !== null) ? match[2] : match[3];
if (!indent || attempt.length < indent.length) {
indent = attempt;
}
}
doc = doc.replace(new RegExp("^" + indent, 'gm'), '');
if (options.herecomment) {
return doc;
}
return doc.replace(MULTILINER, "\\n").replace(new RegExp(options.quote, 'g'), ("\\" + options.quote));
};
// Tag a half assignment.
Lexer.prototype.tagHalfAssignment = function(tag) {
var last;
last = this.tokens.pop();
this.tokens.push([("" + tag + "="), ("" + tag + "="), last[2]]);
return true;
};
// A source of ambiguity in our grammar used to be parameter lists in function
// definitions versus argument lists in function calls. Walk backwards, tagging
// parameters specially in order to make things easier for the parser.
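// For example, by the time the `->` in `(x, y) -> x + y` is reached, the
// tokens for `(x, y)` have already been emitted as a call-like group; this
// backwards walk retags them to roughly:
//     ['PARAM_START', '('] ['PARAM', 'x'] [',', ','] ['PARAM', 'y'] ['PARAM_END', ')']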
Lexer.prototype.tagParameters = function() {
var _d, i, tok;
if (this.tag() !== ')') {
return null;
}
i = 0;
while (true) {
i += 1;
tok = this.prev(i);
if (!tok) {
return null;
}
if ((_d = tok[0]) === 'IDENTIFIER') {
tok[0] = 'PARAM';
} else if (_d === ')') {
tok[0] = 'PARAM_END';
} else if (_d === '(' || _d === 'CALL_START') {
tok[0] = 'PARAM_START';
return tok[0];
}
}
return true;
};
// Close up all remaining open blocks at the end of the file.
Lexer.prototype.closeIndentation = function() {
return this.outdentToken(this.indent);
};
// The error for when you try to use a forbidden word in JavaScript as
// an identifier.
Lexer.prototype.identifierError = function(word) {
throw new Error(("SyntaxError: Reserved word \"" + word + "\" on line " + (this.line + 1)));
};
// The error for when you try to assign to a reserved word in JavaScript,
// like "function" or "default".
Lexer.prototype.assignmentError = function() {
throw new Error(("SyntaxError: Reserved word \"" + (this.value()) + "\" on line " + (this.line + 1) + " can't be assigned"));
};
// Expand variables and expressions inside double-quoted strings using
// [ECMA Harmony's interpolation syntax](http://wiki.ecmascript.org/doku.php?id=strawman:string_interpolation)
// for substitution of bare variables as well as arbitrary expressions.
// "Hello $name."
// "Hello ${name.capitalize()}."
// If it encounters an interpolation, this method will recursively create a
// new Lexer, tokenize the interpolated contents, and merge them into the
// token stream.
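// For example, `"Hello $name."` is emitted as the concatenation
// `("Hello " + name + ".")`, i.e. tokens roughly of the form:
//     ['(', '('] ['STRING', '"Hello "'] ['+', '+'] ['IDENTIFIER', 'name']
//     ['+', '+'] ['STRING', '"."'] [')', ')']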
Lexer.prototype.interpolateString = function(str, escapeQuotes) {
var _d, _e, _f, _g, _h, _i, _j, escaped, expr, group, i, idx, inner, interp, interpolated, lexer, match, nested, pi, quote, tag, tok, token, tokens, value;
if (str.length < 3 || !starts(str, '"')) {
return this.token('STRING', str);
} else {
lexer = new Lexer();
tokens = [];
quote = str.substring(0, 1);
_d = [1, 1];
i = _d[0];
pi = _d[1];
while (i < str.length - 1) {
if (starts(str, '\\', i)) {
i += 1;
} else if ((match = str.substring(i).match(INTERPOLATION))) {
_e = match;
group = _e[0];
interp = _e[1];
if (starts(interp, '@')) {
interp = ("this." + (interp.substring(1)));
}
if (pi < i) {
tokens.push(['STRING', ("" + quote + (str.substring(pi, i)) + quote)]);
}
tokens.push(['IDENTIFIER', interp]);
i += group.length - 1;
pi = i + 1;
} else if ((expr = balancedString(str.substring(i), [['${', '}']]))) {
if (pi < i) {
tokens.push(['STRING', ("" + quote + (str.substring(pi, i)) + quote)]);
}
inner = expr.substring(2, expr.length - 1);
if (inner.length) {
nested = lexer.tokenize(("(" + inner + ")"), {
line: this.line
});
_f = nested;
for (idx = 0, _g = _f.length; idx < _g; idx++) {
tok = _f[idx];
tok[0] === 'CALL_END' ? (tok[0] = ')') : null;
}
nested.pop();
tokens.push(['TOKENS', nested]);
} else {
tokens.push(['STRING', ("" + quote + quote)]);
}
i += expr.length - 1;
pi = i + 1;
}
i += 1;
}
if (pi < i && pi < str.length - 1) {
tokens.push(['STRING', ("" + quote + (str.substring(pi, i)) + quote)]);
}
if (!(tokens[0][0] === 'STRING')) {
tokens.unshift(['STRING', '""']);
}
interpolated = tokens.length > 1;
if (interpolated) {
this.token('(', '(');
}
_h = tokens;
for (i = 0, _i = _h.length; i < _i; i++) {
token = _h[i];
_j = token;
tag = _j[0];
value = _j[1];
if (tag === 'TOKENS') {
this.tokens = this.tokens.concat(value);
} else if (tag === 'STRING' && escapeQuotes) {
escaped = value.substring(1, value.length - 1).replace(/"/g, '\\"');
this.token(tag, ("\"" + escaped + "\""));
} else {
this.token(tag, value);
}
if (i < tokens.length - 1) {
this.token('+', '+');
}
}
if (interpolated) {
this.token(')', ')');
}
return tokens;
}
};
// Helpers
// -------
// Add a token to the results, taking note of the line number.
Lexer.prototype.token = function(tag, value) {
return this.tokens.push([tag, value, this.line]);
};
// Peek at a tag in the current token stream.
Lexer.prototype.tag = function(index, newTag) {
var tok;
if (!(tok = this.prev(index))) {
return null;
}
if ((typeof newTag !== "undefined" && newTag !== null)) {
tok[0] = newTag;
return tok[0];
}
return tok[0];
};
// Peek at a value in the current token stream.
Lexer.prototype.value = function(index, val) {
var tok;
if (!(tok = this.prev(index))) {
return null;
}
if ((typeof val !== "undefined" && val !== null)) {
tok[1] = val;
return tok[1];
}
return tok[1];
};
// Peek at a previous token, entire.
Lexer.prototype.prev = function(index) {
return this.tokens[this.tokens.length - (index || 1)];
};
// Attempt to match a string against the current chunk, returning the indexed
// match if successful, and `false` otherwise.
Lexer.prototype.match = function(regex, index) {
var m;
if (!(m = this.chunk.match(regex))) {
return false;
}
if (m) {
return m[index];
} else {
return false;
}
};
// Are we in the midst of an unfinished expression?
Lexer.prototype.unfinished = function() {
var prev;
prev = this.prev(2);
return this.value() && this.value().match && this.value().match(NO_NEWLINE) && prev && (prev[0] !== '.') && !this.value().match(CODE);
};
// Lexer Properties
// ----------------
// There are no extensions to the core lexer by default.
Lexer.extensions = [];
return Lexer;
}).call(this);
// Constants
// ---------
// Keywords that CoffeeScript shares in common with JavaScript.
JS_KEYWORDS = ["if", "else", "true", "false", "new", "return", "try", "catch", "finally", "throw", "break", "continue", "for", "in", "while", "delete", "instanceof", "typeof", "switch", "super", "extends", "class", "this", "null"];
// CoffeeScript-only keywords, which we're more relaxed about allowing. They can't
// be used standalone, but you can reference them as an attached property.
COFFEE_ALIASES = ["and", "or", "is", "isnt", "not"];
COFFEE_KEYWORDS = COFFEE_ALIASES.concat(["then", "unless", "until", "loop", "yes", "no", "on", "off", "of", "by", "where", "when"]);
// The list of keywords that are reserved by JavaScript, but not used, or are
// used by CoffeeScript internally. We throw an error when these are encountered,
// to avoid having a JavaScript error at runtime.
RESERVED = ["case", "default", "do", "function", "var", "void", "with", "const", "let", "enum", "export", "import", "native"];
// The superset of both JavaScript keywords and reserved words, none of which may
// be used as identifiers or properties.
JS_FORBIDDEN = JS_KEYWORDS.concat(RESERVED);
// Token matching regexes.
IDENTIFIER = /^([a-zA-Z\$_](\w|\$)*)/;
NUMBER = /^(((\b0(x|X)[0-9a-fA-F]+)|((\b[0-9]+(\.[0-9]+)?|\.[0-9]+)(e[+\-]?[0-9]+)?)))\b/i;
HEREDOC = /^("{6}|'{6}|"{3}\n?([\s\S]*?)\n?([ \t]*)"{3}|'{3}\n?([\s\S]*?)\n?([ \t]*)'{3})/;
INTERPOLATION = /^\$([a-zA-Z_@]\w*(\.\w+)*)/;
OPERATOR = /^([+\*&|\/\-%=<>:!?]+)([ \t]*)/;
WHITESPACE = /^([ \t]+)/;
COMMENT = /^((\n?[ \t]*)?#{3}(?!#)[ \t]*\n+([\s\S]*?)[ \t]*\n+[ \t]*#{3}|((\n?[ \t]*)?#[^\n]*)+)/;
CODE = /^((-|=)>)/;
MULTI_DENT = /^((\n([ \t]*))+)(\.)?/;
LAST_DENTS = /\n([ \t]*)/g;
LAST_DENT = /\n([ \t]*)/;
ASSIGNMENT = /^[:=]$/;
// Regex-matching-regexes.
REGEX_START = /^\/[^\/ ]/;
REGEX_INTERPOLATION = /([^\\]\$[a-zA-Z_@]|[^\\]\$\{.*[^\\]\})/;
REGEX_END = /^(([imgy]{1,4})\b|\W|$)/;
REGEX_ESCAPE = /\\[^\$]/g;
// Token cleaning regexes.
JS_CLEANER = /(^`|`$)/g;
MULTILINER = /\n/g;
STRING_NEWLINES = /\n[ \t]*/g;
COMMENT_CLEANER = /(^[ \t]*#|\n[ \t]*$)/mg;
NO_NEWLINE = /^([+\*&|\/\-%=<>:!.\\][<>=&|]*|and|or|is|isnt|not|delete|typeof|instanceof)$/;
HEREDOC_INDENT = /(\n+([ \t]*)|^([ \t]+))/g;
ASSIGNED = /^([a-zA-Z\$_]\w*[ \t]*?[:=])/;
NEXT_CHARACTER = /^\s*(\S)/;
// Tokens which a regular expression will never immediately follow, but which
// a division operator might.
// See: http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
// Our list is shorter, due to sans-parentheses method calls.
NOT_REGEX = ['NUMBER', 'REGEX', '++', '--', 'FALSE', 'NULL', 'TRUE', ']'];
// Tokens which could legitimately be invoked or indexed. An opening
// parenthesis or bracket following these tokens will be recorded as the start
// of a function invocation or indexing operation.
CALLABLE = ['IDENTIFIER', 'SUPER', ')', ']', '}', 'STRING', '@', 'THIS', '?', '::'];
// Tokens that, when immediately preceding a `WHEN`, indicate that the `WHEN`
// occurs at the start of a line. We disambiguate these from trailing whens to
// avoid an ambiguity in the grammar.
LINE_BREAK = ['INDENT', 'OUTDENT', 'TERMINATOR'];
// Operators that can combine with a preceding assignment to form a compound
// ("half") assignment token such as `+=` or `||=`.
HALF_ASSIGNMENTS = ['-', '+', '/', '*', '%', '||', '&&', '?'];
// Conversions from CoffeeScript operators into JavaScript ones.
CONVERSIONS = {
'and': '&&',
'or': '||',
'is': '==',
'isnt': '!=',
'not': '!'
};
})();