diff --git a/lib/lexer.js b/lib/lexer.js
index 2f453939..bdd3d368 100644
--- a/lib/lexer.js
+++ b/lib/lexer.js
@@ -1,5 +1,5 @@
 (function() {
-  var ASSIGNED, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMPARE, COMPOUND_ASSIGN, CONVERSIONS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, JS_CLEANER, JS_FORBIDDEN, JS_KEYWORDS, LAST_DENT, LAST_DENTS, LINE_BREAK, LOGIC, Lexer, MATH, MULTILINER, MULTI_DENT, NEXT_CHARACTER, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_END, REGEX_ESCAPE, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, SHIFT, UNARY, WHITESPACE, _ref, compact, count, include, starts;
+  var ASSIGNED, CALLABLE, CODE, COFFEE_ALIASES, COFFEE_KEYWORDS, COMMENT, COMPARE, COMPOUND_ASSIGN, CONVERSIONS, HEREDOC, HEREDOC_INDENT, IDENTIFIER, JS_FORBIDDEN, JS_KEYWORDS, LINE_BREAK, LOGIC, Lexer, MATH, MULTILINER, MULTI_DENT, NEXT_CHARACTER, NOT_REGEX, NO_NEWLINE, NUMBER, OPERATOR, REGEX_END, REGEX_ESCAPE, REGEX_INTERPOLATION, REGEX_START, RESERVED, Rewriter, SHIFT, UNARY, WHITESPACE, _ref, compact, count, include, starts;
   var __slice = Array.prototype.slice;
   _ref = require('./rewriter');
   Rewriter = _ref.Rewriter;
@@ -12,7 +12,7 @@
     Lexer = function() {};
     Lexer.prototype.tokenize = function(code, options) {
       var o;
-      code = code.replace(/(\r|\s+$)/g, '');
+      code = code.replace(/\r/g, '').replace(/\s+$/, '');
       o = options || {};
       this.code = code;
       this.i = 0;
@@ -22,8 +22,7 @@
       this.outdebt = 0;
       this.indents = [];
       this.tokens = [];
-      while (this.i < this.code.length) {
-        this.chunk = this.code.slice(this.i);
+      while ((this.chunk = code.slice(this.i))) {
         this.extractNextToken();
       }
       this.closeIndentation();
@@ -33,56 +32,27 @@
       return (new Rewriter()).rewrite(this.tokens);
     };
     Lexer.prototype.extractNextToken = function() {
-      if (this.identifierToken()) {
-        return null;
-      }
-      if (this.commentToken()) {
-        return null;
-      }
-      if (this.whitespaceToken()) {
-        return null;
-      }
-      if (this.lineToken()) {
-        return null;
-      }
-      if (this.heredocToken()) {
-        return null;
-      }
-      if (this.stringToken()) {
-        return null;
-      }
-      if (this.numberToken()) {
-        return null;
-      }
-      if (this.regexToken()) {
-        return null;
-      }
-      if (this.jsToken()) {
-        return null;
-      }
-      return this.literalToken();
+      return this.identifierToken() || this.commentToken() || this.whitespaceToken() || this.lineToken() || this.heredocToken() || this.stringToken() || this.numberToken() || this.regexToken() || this.jsToken() || this.literalToken();
     };
     Lexer.prototype.identifierToken = function() {
       var close_index, forcedIdentifier, id, tag;
-      if (!(id = this.match(IDENTIFIER, 1))) {
+      if (!(id = this.match(IDENTIFIER))) {
         return false;
       }
       this.i += id.length;
       forcedIdentifier = this.tagAccessor() || this.match(ASSIGNED, 1);
       tag = 'IDENTIFIER';
-      if (include(JS_KEYWORDS, id) || (!forcedIdentifier && include(COFFEE_KEYWORDS, id))) {
+      if (include(JS_KEYWORDS, id) || !forcedIdentifier && include(COFFEE_KEYWORDS, id)) {
         tag = id.toUpperCase();
-      }
-      if (tag === 'WHEN' && include(LINE_BREAK, this.tag())) {
-        tag = 'LEADING_WHEN';
-      }
-      if (id === 'all' && this.tag() === 'FOR') {
+        if (tag === 'WHEN' && include(LINE_BREAK, this.tag())) {
+          tag = 'LEADING_WHEN';
+        }
+      } else if (id === 'all' && this.tag() === 'FOR') {
        tag = 'ALL';
      }
      if (include(UNARY, tag)) {
        tag = 'UNARY';
-      }
-      if (include(JS_FORBIDDEN, id)) {
+      } else if (include(JS_FORBIDDEN, id)) {
        if (forcedIdentifier) {
          tag = 'STRING';
          id = ("\"" + (id) + "\"");
@@ -101,11 +71,10 @@
         if (include(COFFEE_ALIASES, id)) {
           tag = (id = CONVERSIONS[id]);
         }
-        if (include(LOGIC, id)) {
-          tag = 'LOGIC';
-        }
         if (id === '!') {
           tag = 'UNARY';
+        } else if (include(LOGIC, id)) {
+          tag = 'LOGIC';
         }
       }
       this.token(tag, id);
@@ -116,10 +85,10 @@
     };
     Lexer.prototype.numberToken = function() {
       var number;
-      if (!(number = this.match(NUMBER, 1))) {
+      if (!(number = this.match(NUMBER))) {
         return false;
       }
-      if (this.tag() === '.' && starts(number, '.')) {
+      if (this.tag() === '.' && number.charAt(0) === '.') {
         return false;
       }
       this.i += number.length;
@@ -127,43 +96,47 @@
       return true;
     };
     Lexer.prototype.stringToken = function() {
-      var string;
-      if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
+      var _ref2, string;
+      if (!(("'" === (_ref2 = this.chunk.charAt(0)) || '"' === _ref2))) {
         return false;
       }
       if (!(string = this.balancedToken(['"', '"'], ['#{', '}']) || this.balancedToken(["'", "'"]))) {
         return false;
       }
-      this.interpolateString(string.replace(/\n/g, '\\\n'));
-      this.line += count(string, "\n");
+      this.interpolateString(string.replace(MULTILINER, '\\\n'));
+      this.line += count(string, '\n');
       this.i += string.length;
       return true;
     };
     Lexer.prototype.heredocToken = function() {
-      var doc, match, quote;
+      var doc, heredoc, match, quote;
       if (!(match = this.chunk.match(HEREDOC))) {
         return false;
       }
-      quote = match[1].substr(0, 1);
-      doc = this.sanitizeHeredoc(match[2] || match[4] || '', {
+      heredoc = match[0];
+      quote = heredoc.charAt(0);
+      doc = this.sanitizeHeredoc(match[2], {
        quote: quote
      });
      this.interpolateString(quote + doc + quote, {
        heredoc: true
      });
-      this.line += count(match[1], "\n");
-      this.i += match[1].length;
+      this.line += count(heredoc, '\n');
+      this.i += heredoc.length;
       return true;
     };
     Lexer.prototype.commentToken = function() {
-      var match;
+      var _ref2, comment, here, match;
       if (!(match = this.chunk.match(COMMENT))) {
         return false;
       }
-      this.line += count(match[1], "\n");
-      this.i += match[1].length;
-      if (match[2]) {
-        this.token('HERECOMMENT', this.sanitizeHeredoc(match[2], {
+      _ref2 = match;
+      comment = _ref2[0];
+      here = _ref2[1];
+      this.line += count(comment, '\n');
+      this.i += comment.length;
+      if (here) {
+        this.token('HERECOMMENT', this.sanitizeHeredoc(here, {
          herecomment: true,
          indent: Array(this.indent + 1).join(' ')
        }));
@@ -173,13 +146,13 @@
     };
     Lexer.prototype.jsToken = function() {
       var script;
-      if (!(starts(this.chunk, '`'))) {
+      if (this.chunk.charAt(0) !== '`') {
         return false;
       }
       if (!(script = this.balancedToken(['`', '`']))) {
         return false;
       }
-      this.token('JS', script.replace(JS_CLEANER, ''));
+      this.token('JS', script.slice(1, -1));
       this.i += script.length;
       return true;
     };
@@ -197,29 +170,25 @@
       if (!(regex = this.balancedToken(['/', '/']))) {
         return false;
       }
-      if (!(end = this.chunk.substr(regex.length).match(REGEX_END))) {
+      if (!(end = this.chunk.slice(regex.length).match(REGEX_END))) {
         return false;
       }
-      if (end[2]) {
-        regex += (flags = end[2]);
-      }
-      if (regex.match(REGEX_INTERPOLATION)) {
-        str = regex.substring(1).split('/')[0];
-        str = str.replace(REGEX_ESCAPE, function(escaped) {
-          return '\\' + escaped;
-        });
-        this.tokens = this.tokens.concat([['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']]);
+      flags = end[0];
+      if (REGEX_INTERPOLATION.test(regex)) {
+        str = regex.slice(1, -1);
+        str = str.replace(REGEX_ESCAPE, '\\$&');
+        this.tokens.push(['(', '('], ['NEW', 'new'], ['IDENTIFIER', 'RegExp'], ['CALL_START', '(']);
         this.interpolateString("\"" + (str) + "\"", {
           escapeQuotes: true
         });
         if (flags) {
-          this.tokens.splice(this.tokens.length, 0, [',', ','], ['STRING', ("\"" + (flags) + "\"")]);
+          this.tokens.push([',', ','], ['STRING', ("\"" + (flags) + "\"")]);
         }
-        this.tokens.splice(this.tokens.length, 0, [')', ')'], [')', ')']);
+        this.tokens.push([')', ')'], [')', ')']);
       } else {
-        this.token('REGEX', regex);
+        this.token('REGEX', regex + flags);
       }
-      this.i += regex.length;
+      this.i += regex.length + flags.length;
       return true;
     };
     Lexer.prototype.balancedToken = function() {
@@ -229,13 +198,13 @@
     };
     Lexer.prototype.lineToken = function() {
       var diff, indent, nextCharacter, noNewlines, prev, size;
-      if (!(indent = this.match(MULTI_DENT, 1))) {
+      if (!(indent = this.match(MULTI_DENT))) {
         return false;
       }
-      this.line += count(indent, "\n");
+      this.line += count(indent, '\n');
       this.i += indent.length;
       prev = this.prev(2);
-      size = indent.match(LAST_DENTS).reverse()[0].match(LAST_DENT)[1].length;
+      size = indent.length - 1 - indent.lastIndexOf('\n');
       nextCharacter = this.match(NEXT_CHARACTER, 1);
       noNewlines = nextCharacter === '.' || nextCharacter === ',' || this.unfinished();
       if (size - this.indebt === this.indent) {
@@ -283,13 +252,13 @@
         this.outdebt -= moveOut;
       }
       if (!(this.tag() === 'TERMINATOR' || noNewlines)) {
-        this.token('TERMINATOR', "\n");
+        this.token('TERMINATOR', '\n');
       }
       return true;
     };
     Lexer.prototype.whitespaceToken = function() {
       var prev, space;
-      if (!(space = this.match(WHITESPACE, 1))) {
+      if (!(space = this.match(WHITESPACE))) {
         return false;
       }
       prev = this.prev();
@@ -301,25 +270,28 @@
     };
     Lexer.prototype.newlineToken = function(newlines) {
       if (this.tag() !== 'TERMINATOR') {
-        this.token('TERMINATOR', "\n");
+        this.token('TERMINATOR', '\n');
       }
       return true;
     };
     Lexer.prototype.suppressNewlines = function() {
-      if (this.value() === "\\") {
+      if (this.value() === '\\') {
        this.tokens.pop();
      }
      return true;
    };
    Lexer.prototype.literalToken = function() {
      var _ref2, match, prev, space, spaced, tag, value;
-      match = this.chunk.match(OPERATOR);
-      value = match && match[1];
-      space = match && match[2];
-      if (value && value.match(CODE)) {
-        this.tagParameters();
+      if (match = this.chunk.match(OPERATOR)) {
+        _ref2 = match;
+        value = _ref2[0];
+        space = _ref2[1];
+        if (CODE.test(value)) {
+          this.tagParameters();
+        }
+      } else {
+        value = this.chunk.charAt(0);
       }
-      value || (value = this.chunk.substr(0, 1));
       this.i += value.length;
       spaced = (prev = this.prev()) && prev.spaced;
       tag = value;
@@ -354,11 +326,13 @@
           tag = 'CALL_START';
         } else if (value === '[') {
           tag = 'INDEX_START';
-          if (this.tag() === '?') {
-            this.tag(1, 'INDEX_SOAK');
-          }
-          if (this.tag() === '::') {
-            this.tag(1, 'INDEX_PROTO');
+          switch (this.tag()) {
+            case '?':
+              this.tag(1, 'INDEX_SOAK');
+              break;
+            case '::':
+              this.tag(1, 'INDEX_PROTO');
+              break;
           }
         }
       }
@@ -373,7 +347,7 @@
       accessor = (function() {
         if (prev[1] === '::') {
           return this.tag(1, 'PROTOTYPE_ACCESS');
-        } else if (prev[1] === '.' && !(this.value(2) === '.')) {
+        } else if (prev[1] === '.' && this.value(2) !== '.') {
           if (this.tag(2) === '?') {
             this.tag(1, 'SOAK_ACCESS');
             return this.tokens.splice(-2, 1);
@@ -393,20 +367,19 @@
         return doc;
       }
       if (!(options.herecomment)) {
-        while ((match = HEREDOC_INDENT.exec(doc)) !== null) {
-          attempt = (typeof (_ref2 = match[2]) !== "undefined" && _ref2 !== null) ? match[2] : match[3];
+        while ((match = HEREDOC_INDENT.exec(doc))) {
+          attempt = (typeof (_ref2 = match[1]) !== "undefined" && _ref2 !== null) ? match[1] : match[2];
           if (!(typeof indent !== "undefined" && indent !== null) || (0 < attempt.length) && (attempt.length < indent.length)) {
            indent = attempt;
          }
        }
      }
      indent || (indent = '');
-      doc = doc.replace(new RegExp("^" + indent, 'gm'), '');
+      doc = doc.replace(new RegExp('^' + indent, 'gm'), '');
       if (options.herecomment) {
         return doc;
       }
-      doc = doc.replace(/^\n/, '');
-      return doc.replace(MULTILINER, "\\n").replace(new RegExp(options.quote, 'g'), "\\" + (options.quote));
+      return doc.replace(/^\n/, '').replace(MULTILINER, '\\n').replace(new RegExp(options.quote, 'g'), "\\" + (options.quote));
     };
     Lexer.prototype.tagParameters = function() {
       var i, tok;
@@ -444,13 +417,14 @@
       throw new Error("SyntaxError: Reserved word \"" + (this.value()) + "\" on line " + (this.line + 1) + " can't be assigned");
     };
     Lexer.prototype.balancedString = function(str, delimited, options) {
-      var _i, _len, _ref2, _ref3, close, i, levels, open, pair, slash;
+      var _i, _len, _ref2, _ref3, close, i, levels, open, pair, slash, slen;
       options || (options = {});
       slash = delimited[0][0] === '/';
       levels = [];
       i = 0;
-      while (i < str.length) {
-        if (levels.length && starts(str, '\\', i)) {
+      slen = str.length;
+      while (i < slen) {
+        if (levels.length && str.charAt(i) === '\\') {
           i += 1;
         } else {
           _ref2 = delimited;
@@ -473,7 +447,7 @@
             }
           }
         }
-        if (!levels.length || slash && starts(str, '\n', i)) {
+        if (!levels.length || slash && str.charAt(i) === '\n') {
           break;
         }
         i += 1;
@@ -484,28 +458,29 @@
       }
        throw new Error("SyntaxError: Unterminated " + (levels.pop()[0]) + " starting on line " + (this.line + 1));
      }
-      return !i ? false : str.substring(0, i);
+      return !i ? false : str.slice(0, i);
     };
     Lexer.prototype.interpolateString = function(str, options) {
-      var _len, _ref2, _ref3, escaped, expr, i, idx, inner, interpolated, lexer, nested, pi, quote, tag, tok, token, tokens, value;
+      var _len, _ref2, _ref3, end, escaped, expr, i, idx, inner, interpolated, lexer, nested, pi, quote, tag, tok, token, tokens, value;
       options || (options = {});
-      if (str.length < 3 || !starts(str, '"')) {
+      if (str.length < 3 || str.charAt(0) !== '"') {
         return this.token('STRING', str);
       } else {
         lexer = new Lexer();
         tokens = [];
-        quote = str.substring(0, 1);
+        quote = str.charAt(0);
         _ref2 = [1, 1];
         i = _ref2[0];
         pi = _ref2[1];
-        while (i < str.length - 1) {
-          if (starts(str, '\\', i)) {
+        end = str.length - 1;
+        while (i < end) {
+          if (str.charAt(i) === '\\') {
             i += 1;
-          } else if (expr = this.balancedString(str.substring(i), [['#{', '}']])) {
+          } else if (expr = this.balancedString(str.slice(i), [['#{', '}']])) {
             if (pi < i) {
-              tokens.push(['STRING', quote + str.substring(pi, i) + quote]);
+              tokens.push(['STRING', quote + str.slice(pi, i) + quote]);
             }
-            inner = expr.substring(2, expr.length - 1);
+            inner = expr.slice(2, -1);
             if (inner.length) {
               if (options.heredoc) {
                 inner = inner.replace(new RegExp('\\\\' + quote, 'g'), quote);
@@ -530,8 +505,8 @@
           }
           i += 1;
         }
-        if (pi < i && pi < str.length - 1) {
-          tokens.push(['STRING', quote + str.substring(pi, i) + quote]);
+        if ((i > pi) && (pi < str.length - 1)) {
+          tokens.push(['STRING', quote + str.slice(pi, i) + quote]);
         }
         if (tokens[0][0] !== 'STRING') {
           tokens.unshift(['STRING', '""']);
@@ -549,7 +524,7 @@
           if (tag === 'TOKENS') {
             this.tokens = this.tokens.concat(value);
           } else if (tag === 'STRING' && options.escapeQuotes) {
-            escaped = value.substring(1, value.length - 1).replace(/"/g, '\\"');
+            escaped = value.slice(1, -1).replace(/"/g, '\\"');
             this.token(tag, "\"" + (escaped) + "\"");
           } else {
             this.token(tag, value);
@@ -592,42 +567,37 @@
     };
     Lexer.prototype.match = function(regex, index) {
       var m;
-      if (!(m = this.chunk.match(regex))) {
-        return false;
-      }
-      return m ? m[index] : false;
+      return (m = this.chunk.match(regex)) ? m[index || 0] : false;
     };
     Lexer.prototype.unfinished = function() {
-      var prev;
+      var prev, value;
       prev = this.prev(2);
-      return this.value() && this.value().match && this.value().match(NO_NEWLINE) && prev && (prev[0] !== '.') && !this.value().match(CODE) && !this.chunk.match(ASSIGNED);
+      value = this.value();
+      return value && NO_NEWLINE.test(value) && prev && prev[0] !== '.' && !CODE.test(value) && !ASSIGNED.test(this.chunk);
     };
     return Lexer;
   })();
-  JS_KEYWORDS = ["if", "else", "true", "false", "new", "return", "try", "catch", "finally", "throw", "break", "continue", "for", "in", "while", "delete", "instanceof", "typeof", "switch", "super", "extends", "class", "this", "null", "debugger"];
-  COFFEE_ALIASES = ["and", "or", "is", "isnt", "not"];
-  COFFEE_KEYWORDS = COFFEE_ALIASES.concat(["then", "unless", "until", "loop", "yes", "no", "on", "off", "of", "by", "where", "when"]);
-  RESERVED = ["case", "default", "do", "function", "var", "void", "with", "const", "let", "enum", "export", "import", "native", "__hasProp", "__extends", "__slice"];
+  JS_KEYWORDS = ['if', 'else', 'true', 'false', 'new', 'return', 'try', 'catch', 'finally', 'throw', 'break', 'continue', 'for', 'in', 'while', 'delete', 'instanceof', 'typeof', 'switch', 'super', 'extends', 'class', 'this', 'null', 'debugger'];
+  COFFEE_ALIASES = ['and', 'or', 'is', 'isnt', 'not'];
+  COFFEE_KEYWORDS = COFFEE_ALIASES.concat(['then', 'unless', 'until', 'loop', 'yes', 'no', 'on', 'off', 'of', 'by', 'where', 'when']);
+  RESERVED = ['case', 'default', 'do', 'function', 'var', 'void', 'with', 'const', 'let', 'enum', 'export', 'import', 'native', '__hasProp', '__extends', '__slice'];
   JS_FORBIDDEN = JS_KEYWORDS.concat(RESERVED);
-  IDENTIFIER = /^([a-zA-Z\$_](\w|\$)*)/;
-  NUMBER = /^(((\b0(x|X)[0-9a-fA-F]+)|((\b[0-9]+(\.[0-9]+)?|\.[0-9]+)(e[+\-]?[0-9]+)?)))\b/i;
-  HEREDOC = /^("{6}|'{6}|"{3}([\s\S]*?)\n?([ \t]*)"{3}|'{3}([\s\S]*?)\n?([ \t]*)'{3})/;
-  OPERATOR = /^(-[\-=>]?|\+[+=]?|[*&|\/%=<>^:!?]+)([ \t]*)/;
-  WHITESPACE = /^([ \t]+)/;
-  COMMENT = /^(###([^#][\s\S]*?)(###[ \t]*\n|(###)?$)|(\s*#(?!##[^#])[^\n]*)+)/;
-  CODE = /^((-|=)>)/;
-  MULTI_DENT = /^((\n([ \t]*))+)(\.)?/;
-  LAST_DENTS = /\n([ \t]*)/g;
-  LAST_DENT = /\n([ \t]*)/;
+  IDENTIFIER = /^[a-zA-Z_$][\w$]*/;
+  NUMBER = /^(?:0x[\da-f]+)|^(?:\d+(\.\d+)?|\.\d+)(?:e[+-]?\d+)?/i;
+  HEREDOC = /^("""|''')([\s\S]*?)\n?[ \t]*\1/;
+  OPERATOR = /^(?:-[-=>]?|\+[+=]?|[*&|\/%=<>^:!?]+)(?=([ \t]*))/;
+  WHITESPACE = /^[ \t]+/;
+  COMMENT = /^###([^#][\s\S]*?)(?:###[ \t]*\n|(?:###)?$)|^(?:\s*#(?!##[^#])[^\n]*)+/;
+  CODE = /^[-=]>/;
+  MULTI_DENT = /^(?:\n[ \t]*)+/;
   REGEX_START = /^\/([^\/])/;
-  REGEX_INTERPOLATION = /([^\\]#\{.*[^\\]\})/;
-  REGEX_END = /^(([imgy]{1,4})\b|\W|$)/;
-  REGEX_ESCAPE = /\\[^\$]/g;
-  JS_CLEANER = /(^`|`$)/g;
+  REGEX_INTERPOLATION = /[^\\]#\{.*[^\\]\}/;
+  REGEX_END = /^[imgy]{0,4}(?![a-zA-Z])/;
+  REGEX_ESCAPE = /\\[^#]/g;
   MULTILINER = /\n/g;
-  NO_NEWLINE = /^([+\*&|\/\-%=<>!.\\][<>=&|]*|and|or|is|isnt|not|delete|typeof|instanceof)$/;
-  HEREDOC_INDENT = /(\n+([ \t]*)|^([ \t]+))/g;
-  ASSIGNED = /^\s*(([a-zA-Z\$_@]\w*|["'][^\r\n]+?["']|\d+)[ \t]*?[:=][^:=])/;
+  NO_NEWLINE = /^(?:[-+*&|\/%=<>!.\\][<>=&|]*|and|or|is(?:nt)?|not|delete|typeof|instanceof)$/;
+  HEREDOC_INDENT = /\n+([ \t]*)|^([ \t]+)/g;
+  ASSIGNED = /^\s*((?:[a-zA-Z$_@]\w*|["'][^\n]+?["']|\d+)[ \t]*?[:=][^:=])/;
   NEXT_CHARACTER = /^\s*(\S)/;
   COMPOUND_ASSIGN = ['-=', '+=', '/=', '*=', '%=', '||=', '&&=', '?=', '<<=', '>>=', '>>>=', '&=', '^=', '|='];
   UNARY = ['UMINUS', 'UPLUS', '!', '!!', '~', 'TYPEOF', 'DELETE'];
diff --git a/src/lexer.coffee b/src/lexer.coffee
index af013438..a265e15e 100644
--- a/src/lexer.coffee
+++ b/src/lexer.coffee
@@ -43,8 +43,7 @@ exports.Lexer = class Lexer
     @outdebt = 0 # The under-outdentation at the current level.
     @indents = [] # The stack of all current indentation levels.
     @tokens = [] # Stream of parsed tokens in the form ['TYPE', value, line]
-    while @i < @code.length
-      @chunk = @code[@i..]
+    while (@chunk = code[@i..])
       @extractNextToken()
     @closeIndentation()
     return @tokens if o.rewrite is off
@@ -54,16 +53,16 @@
   # short-circuiting if any of them succeed. Their order determines precedence:
   # `@literalToken` is the fallback catch-all.
   extractNextToken: ->
-    return if @identifierToken()
-    return if @commentToken()
-    return if @whitespaceToken()
-    return if @lineToken()
-    return if @heredocToken()
-    return if @stringToken()
-    return if @numberToken()
-    return if @regexToken()
-    return if @jsToken()
-    return @literalToken()
+    @identifierToken() or
+    @commentToken() or
+    @whitespaceToken() or
+    @lineToken() or
+    @heredocToken() or
+    @stringToken() or
+    @numberToken() or
+    @regexToken() or
+    @jsToken() or
+    @literalToken()

   # Tokenizers
   # ----------
@@ -79,11 +78,15 @@
     @i += id.length
     forcedIdentifier = @tagAccessor() or @match ASSIGNED, 1
     tag = 'IDENTIFIER'
-    tag = id.toUpperCase() if include(JS_KEYWORDS, id) or (not forcedIdentifier and include(COFFEE_KEYWORDS, id))
-    tag = 'LEADING_WHEN' if tag is 'WHEN' and include LINE_BREAK, @tag()
-    tag = 'ALL' if id is 'all' and @tag() is 'FOR'
-    tag = 'UNARY' if include UNARY, tag
-    if include(JS_FORBIDDEN, id)
+    if include(JS_KEYWORDS, id) or
+       not forcedIdentifier and include(COFFEE_KEYWORDS, id)
+      tag = id.toUpperCase()
+      tag = 'LEADING_WHEN' if tag is 'WHEN' and include LINE_BREAK, @tag()
+    else if id is 'all' and @tag() is 'FOR'
+      tag = 'ALL'
+    if include UNARY, tag
+      tag = 'UNARY'
+    else if include JS_FORBIDDEN, id
       if forcedIdentifier
         tag = 'STRING'
         id = "\"#{id}\""
@@ -95,8 +98,10 @@
       @identifierError id unless forcedIdentifier
     tag = id = CONVERSIONS[id] if include COFFEE_ALIASES, id
-    tag = 'LOGIC' if include LOGIC, id
-    tag = 'UNARY' if id is '!'
+    if id is '!'
+      tag = 'UNARY'
+    else if include LOGIC, id
+      tag = 'LOGIC'
     @token tag, id
     @token ']', ']' if close_index
     true

@@ -177,7 +182,7 @@
     @i += regex.length + flags.length
     true

-  # Matches a token in which which the passed delimiter pairs must be correctly
+  # Matches a token in which the passed delimiter pairs must be correctly
   # balanced (ie. strings, JS literals).
   balancedToken: (delimited...) ->
     @balancedString @chunk, delimited
@@ -292,8 +297,9 @@
       tag = 'CALL_START'
     else if value is '['
       tag = 'INDEX_START'
-      @tag 1, 'INDEX_SOAK' if @tag() is '?'
-      @tag 1, 'INDEX_PROTO' if @tag() is '::'
+      switch @tag()
+        when '?' then @tag 1, 'INDEX_SOAK'
+        when '::' then @tag 1, 'INDEX_PROTO'
     @token tag, value
     true

@@ -307,7 +313,7 @@
     return false if (not prev = @prev()) or (prev and prev.spaced)
     accessor = if prev[1] is '::'
       @tag 1, 'PROTOTYPE_ACCESS'
-    else if prev[1] is '.' and not (@value(2) is '.')
+    else if prev[1] is '.' and @value(2) isnt '.'
       if @tag(2) is '?'
         @tag(1, 'SOAK_ACCESS')
         @tokens.splice(-2, 1)