mirror of https://github.com/jashkenas/coffeescript.git synced 2022-11-09 12:23:24 -05:00

removing the 'supress' option from the lexer, just look at the slash.

Jeremy Ashkenas 2010-03-08 20:07:19 -05:00
parent 121f01c06f
commit 3291bd2a4a
4 changed files with 26 additions and 30 deletions
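The gist of the change: balanced_token and balanced_string no longer take an explicit 'supress' flag. When a token never closes, the lexer now throws only if the opening delimiter is something other than the regex slash; an unterminated slash simply fails the match. A minimal, simplified sketch of that rule in plain JavaScript (a hypothetical helper, not the lexer's actual implementation; delimiter nesting and interpolation are omitted):

// Hypothetical, simplified sketch: whether an unterminated token is an error
// is inferred from the delimiter itself rather than from a 'supress' argument.
function balancedSketch(str, delimited, line) {
  var open = delimited[0][0], close = delimited[0][1];
  if (str.charAt(0) !== open) return false;
  for (var i = 1; i < str.length; i++) {
    if (str.charAt(i) === close) return str.substring(0, i + 1);
  }
  if (open === '/') return false; // an unterminated slash is just division, not an error
  throw new Error("SyntaxError: Unterminated " + open + " starting on line " + (line + 1));
}

// balancedSketch('"abc" + 1', [['"', '"']], 0)  => '"abc"'
// balancedSketch('/ 2',       [['/', '/']], 0)  => false (no exception)
// balancedSketch('"abc',      [['"', '"']], 0)  => throws "SyntaxError: Unterminated ..."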

View file

@@ -29,7 +29,7 @@ task 'build', 'build the CoffeeScript language from source', ->
 task 'build:full', 'checkout /lib, rebuild the source twice, and run the tests', ->
-  exec 'git co lib && bin/cake build && bin/cake build && bin/cake test', (err, stdout, stderr) ->
+  exec 'git checkout lib && bin/cake build && bin/cake build && bin/cake test', (err, stdout, stderr) ->
     print stdout if stdout
     print stderr if stderr
     throw err if err

View file

@@ -150,7 +150,7 @@
 </dict>
 </dict>
 <key>end</key>
-<string>(/)[igm]*</string>
+<string>(/)[igmy]*</string>
 <key>endCaptures</key>
 <dict>
 <key>1</key>

View file

@@ -129,15 +129,11 @@
   // Matches strings, including multi-line strings. Ensures that quotation marks
   // are balanced within the string's contents, and within nested interpolations.
   Lexer.prototype.string_token = function string_token() {
-    var merge, string, supress;
+    var merge, string;
     if (!(starts(this.chunk, '"') || starts(this.chunk, "'"))) {
       return false;
     }
-    string = this.balanced_token((supress = false), ['"', '"'], ['${', '}']);
-    if (!(string)) {
-      string = this.balanced_token((supress = false), ["'", "'"]);
-    }
-    if (!(string)) {
+    if (!((string = this.balanced_token(['"', '"'], ['${', '}']) || this.balanced_token(["'", "'"])))) {
       return false;
     }
     this.interpolate_string(string.replace(STRING_NEWLINES, " \\\n"), (merge = true));
@@ -160,11 +156,11 @@
   };
   // Matches JavaScript interpolated directly into the source via backticks.
   Lexer.prototype.js_token = function js_token() {
-    var script, supress;
+    var script;
     if (!(starts(this.chunk, '`'))) {
       return false;
     }
-    if (!((script = this.balanced_token((supress = false), ['`', '`'])))) {
+    if (!((script = this.balanced_token(['`', '`'])))) {
       return false;
     }
     this.token('JS', script.replace(JS_CLEANER, ''));
@@ -175,11 +171,11 @@
   // to distinguish from division, so we borrow some basic heuristics from
   // JavaScript and Ruby.
   Lexer.prototype.regex_token = function regex_token() {
-    var _a, _b, _c, _d, _e, each, flags, i, index, interp_tokens, merge, regex, str, supress;
+    var _a, _b, _c, _d, _e, each, flags, i, index, interp_tokens, merge, regex, str;
     if (!(starts(this.chunk, '/'))) {
      return false;
     }
-    if (!((regex = this.balanced_token((supress = true), ['/', '/'])))) {
+    if (!((regex = this.balanced_token(['/', '/'])))) {
       return false;
     }
     if (regex.length < 3 || regex.match(/^\/\s+|\n/)) {
@@ -225,10 +221,10 @@
   };
   // Matches a token in which which the passed delimiter pairs must be correctly
   // balanced (ie. strings, JS literals).
-  Lexer.prototype.balanced_token = function balanced_token(supress) {
+  Lexer.prototype.balanced_token = function balanced_token() {
     var delimited;
-    delimited = Array.prototype.slice.call(arguments, 1);
-    return this.balanced_string.apply(this, [this.chunk].concat([supress]).concat(delimited));
+    delimited = Array.prototype.slice.call(arguments, 0);
+    return this.balanced_string.apply(this, [this.chunk].concat(delimited));
   };
   // Matches and conumes comments. We pass through comments into JavaScript,
   // so they're treated as real tokens, like any other part of the language.
@@ -432,9 +428,9 @@
   // a series of delimiters, all of which must be nested correctly within the
   // contents of the string. This method allows us to have strings within
   // interpolations within strings etc...
-  Lexer.prototype.balanced_string = function balanced_string(str, supress) {
+  Lexer.prototype.balanced_string = function balanced_string(str) {
     var _a, _b, _c, _d, close, delimited, i, levels, open, pair;
-    delimited = Array.prototype.slice.call(arguments, 2);
+    delimited = Array.prototype.slice.call(arguments, 1);
     levels = [];
     i = 0;
     while (i < str.length) {
@@ -466,7 +462,7 @@
       i += 1;
     }
     if (levels.length) {
-      if (!(supress)) {
+      if (!(delimited[0][0] === '/')) {
        throw new Error("SyntaxError: Unterminated " + (levels.pop()[0]) + " starting on line " + (this.line + 1));
       }
       return false;
@@ -485,7 +481,7 @@
   // new Lexer, tokenize the interpolated contents, and merge them into the
   // token stream.
   Lexer.prototype.interpolate_string = function interpolate_string(str, merge) {
-    var _a, _b, _c, _d, _e, _f, _g, each, expr, group, has_string, i, inner, interp, lexer, match, nested, pi, quote, supress, tokens;
+    var _a, _b, _c, _d, _e, _f, _g, each, expr, group, has_string, i, inner, interp, lexer, match, nested, pi, quote, tokens;
     if (str.length < 3 || !starts(str, '"')) {
       return this.token('STRING', str);
     } else {
@@ -511,7 +507,7 @@
           tokens.push(['IDENTIFIER', interp]);
           i += group.length - 1;
           pi = i + 1;
-        } else if (((expr = this.balanced_string(str.substring(i), (supress = false), ['${', '}'])))) {
+        } else if (((expr = this.balanced_string(str.substring(i), ['${', '}'])))) {
           if (pi < i) {
             tokens.push(['STRING', '' + quote + (str.substring(pi, i)) + quote]);
           }

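The other half of the change, visible in the compiled JavaScript above, is the calling convention: with 'supress' gone, every argument to balanced_token is a delimiter pair, so the arguments slice starts at 0 and nothing extra is spliced in before forwarding to balanced_string. A small standalone stub of that forwarding pattern (the stub and its names are hypothetical, not the real Lexer):

// Hypothetical stub of the variadic forwarding used by the new balanced_token:
// every argument is a delimiter pair, passed to balanced_string after the chunk.
function LexerStub(chunk) { this.chunk = chunk; }

LexerStub.prototype.balanced_string = function (str) {
  var delimited = Array.prototype.slice.call(arguments, 1);
  return { str: str, delimited: delimited }; // real scanning omitted
};

LexerStub.prototype.balanced_token = function () {
  // slice(arguments, 0): there is no leading 'supress' flag to skip any more
  var delimited = Array.prototype.slice.call(arguments, 0);
  return this.balanced_string.apply(this, [this.chunk].concat(delimited));
};

// new LexerStub('"a${b}c"').balanced_token(['"', '"'], ['${', '}'])
//   => { str: '"a${b}c"', delimited: [['"', '"'], ['${', '}']] }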
View file

@@ -96,9 +96,9 @@ exports.Lexer: class Lexer
   # are balanced within the string's contents, and within nested interpolations.
   string_token: ->
     return false unless starts(@chunk, '"') or starts(@chunk, "'")
-    string: @balanced_token supress: false, ['"', '"'], ['${', '}']
-    string: @balanced_token supress: false, ["'", "'"] unless string
-    return false unless string
+    return false unless string:
+      @balanced_token(['"', '"'], ['${', '}']) or
+      @balanced_token ["'", "'"]
     @interpolate_string string.replace(STRING_NEWLINES, " \\\n"), merge: true
     @line += count string, "\n"
     @i += string.length
@@ -117,7 +117,7 @@ exports.Lexer: class Lexer
   # Matches JavaScript interpolated directly into the source via backticks.
   js_token: ->
     return false unless starts @chunk, '`'
-    return false unless script: @balanced_token supress: false, ['`', '`']
+    return false unless script: @balanced_token ['`', '`']
     @token 'JS', script.replace(JS_CLEANER, '')
     @i += script.length
     true
@@ -127,7 +127,7 @@ exports.Lexer: class Lexer
   # JavaScript and Ruby.
   regex_token: ->
     return false unless starts @chunk, '/'
-    return false unless regex: @balanced_token supress: true, ['/', '/']
+    return false unless regex: @balanced_token ['/', '/']
     return false if regex.length < 3 or regex.match /^\/\s+|\n/
     return false if include NOT_REGEX, @tag()
     flags: ['i', 'm', 'g', 'y']
@@ -153,8 +153,8 @@ exports.Lexer: class Lexer
   # Matches a token in which which the passed delimiter pairs must be correctly
   # balanced (ie. strings, JS literals).
-  balanced_token: (supress, delimited...) ->
-    @balanced_string @chunk, supress, delimited...
+  balanced_token: (delimited...) ->
+    @balanced_string @chunk, delimited...
   # Matches and conumes comments. We pass through comments into JavaScript,
   # so they're treated as real tokens, like any other part of the language.
@@ -316,7 +316,7 @@ exports.Lexer: class Lexer
   # a series of delimiters, all of which must be nested correctly within the
   # contents of the string. This method allows us to have strings within
   # interpolations within strings etc...
-  balanced_string: (str, supress, delimited...) ->
+  balanced_string: (str, delimited...) ->
     levels: []
     i: 0
     while i < str.length
@@ -337,7 +337,7 @@ exports.Lexer: class Lexer
       break unless levels.length
       i += 1
     if levels.length
-      throw new Error "SyntaxError: Unterminated ${levels.pop()[0]} starting on line ${@line + 1}" unless supress
+      throw new Error "SyntaxError: Unterminated ${levels.pop()[0]} starting on line ${@line + 1}" unless delimited[0][0] is '/'
       return false
     return false if i is 0
     return str.substring(0, i)
@@ -370,7 +370,7 @@ exports.Lexer: class Lexer
         tokens.push ['IDENTIFIER', interp]
         i += group.length - 1
         pi: i + 1
-      else if (expr: @balanced_string str.substring(i), supress: false, ['${', '}'])
+      else if (expr: @balanced_string str.substring(i), ['${', '}'])
         tokens.push ['STRING', "$quote${ str.substring(pi, i) }$quote"] if pi < i
         inner: expr.substring(2, expr.length - 1)
         if inner.length
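
A toy illustration of why the slash case needs to fail quietly, which is what the "unless delimited[0][0] is '/'" guard above preserves: when the lexer speculatively reads a regex and the slash turns out to be division, it wants a plain false back so the slash can be lexed as an operator instead. This is not the lexer's code; the real regex_token also consults its NOT_REGEX tag list, omitted here:

// Toy version of the regex heuristics above (hypothetical helper).
function looksLikeRegex(chunk) {
  if (chunk.charAt(0) !== '/') return false;
  var end = chunk.indexOf('/', 1);
  if (end === -1) return false;                                 // unterminated: treat as division
  var body = chunk.substring(0, end + 1);
  if (body.length < 3 || /^\/\s+|\n/.test(body)) return false;  // too short, or "/ 2 ... /"
  return body;
}

// looksLikeRegex('/\\d+/g.test(s)') => '/\\d+/'
// looksLikeRegex('/ 2')             => false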