implementing string interpolation using string interpolation

Jeremy Ashkenas 2010-03-06 13:32:43 -05:00
parent 18e5f72a84
commit 15b00cb3ca
2 changed files with 34 additions and 28 deletions
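
The commit title is literal: the lexer's own error messages and string-building code are rewritten to use the $ string interpolation that this same lexer implements. For orientation, a minimal sketch of the syntax the diff relies on, as it existed at this point in the project's history (interpolation written as $name or ${ expr }, and : used for assignment); the names word, line, and message are illustrative only, not taken from the commit:

    # Hypothetical example of the rewrite pattern applied throughout this commit.
    word: "public"
    line: 12
    # before: the message is built by concatenation
    message: 'Reserved word "' + word + '" on line ' + line
    # after: the same string, via interpolation
    message: "Reserved word \"$word\" on line $line"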


@@ -184,7 +184,7 @@
       return false;
     }
     doc = this.sanitize_heredoc(match[2] || match[4]);
-    this.token('STRING', '"' + doc + '"');
+    this.token('STRING', "\"" + doc + "\"");
     this.line += count(match[1], "\n");
     this.i += match[1].length;
     return true;
@@ -394,19 +394,19 @@
   // Error for when you try to use a forbidden word in JavaScript as
   // an identifier.
   Lexer.prototype.identifier_error = function identifier_error(word) {
-    throw new Error('SyntaxError: Reserved word "' + word + '" on line ' + this.line);
+    throw new Error("SyntaxError: Reserved word \"" + word + "\" on line " + this.line);
   };
   // Error for when you try to assign to a reserved word in JavaScript,
   // like "function" or "default".
   Lexer.prototype.assignment_error = function assignment_error() {
-    throw new Error('SyntaxError: Reserved word "' + this.value() + '" on line ' + this.line + ' can\'t be assigned');
+    throw new Error("SyntaxError: Reserved word \"" + (this.value()) + "\" on line " + this.line + " can't be assigned");
   };
   // Expand variables and expressions inside double-quoted strings using
   // [ECMA Harmony's interpolation syntax](http://wiki.ecmascript.org/doku.php?id=strawman:string_interpolation).
   // "Hello $name."
   // "Hello ${name.capitalize()}."
   Lexer.prototype.interpolate_string = function interpolate_string(str) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, before, contents, each, group, i, interp, lexer, match, nested, prev, quote, tok, tokens;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, before, each, group, i, inner, interp, lexer, match, nested, prev, quote, tok, tokens;
     if (str.length < 3 || str.substring(0, 1) !== '"') {
       return this.token('STRING', str);
     } else {
@@ -422,22 +422,24 @@
           before = _a[1];
           interp = _a[2];
           if (before.substring(before.length - 1) === '\\') {
+            prev = before.substring(0, before.length - 1);
             if (before.length) {
-              tokens.push(['STRING', quote + before.substring(0, before.length - 1) + '$' + interp + quote]);
+              tokens.push(['STRING', quote + prev + "$" + interp + quote]);
             }
           } else {
             if (before.length) {
               tokens.push(['STRING', quote + before + quote]);
             }
             if (interp.substring(0, 1) === '{') {
-              nested = lexer.tokenize('(' + interp.substring(1, interp.length - 1) + ')', {
+              inner = interp.substring(1, interp.length - 1);
+              nested = lexer.tokenize("(" + inner + ")", {
                 rewrite: false
               });
               nested.pop();
               tokens.push(['TOKENS', nested]);
             } else {
               if (interp.substring(0, 1) === '@') {
-                interp = 'this.' + interp.substring(1);
+                interp = "this." + (interp.substring(1));
               }
               tokens.push(['IDENTIFIER', interp]);
             }
@@ -455,19 +457,21 @@
           prev = _f[0];
           tok = _f[1];
           if (tok[0] === 'STRING' && prev[0] === 'STRING') {
-            contents = quote + prev[1].substring(1, prev[1].length - 1) + tok[1].substring(1, tok[1].length - 1) + quote;
-            tokens.splice(i - 1, 2, ['STRING', contents]);
+            _g = [prev[1].substring(1, prev[1].length - 1), tok[1].substring(1, tok[1].length - 1)];
+            prev = _g[0];
+            tok = _g[1];
+            tokens.splice(i - 1, 2, ['STRING', quote + prev + tok + quote]);
           }
         }
       }
-      _g = []; _h = tokens;
-      for (i = 0, _i = _h.length; i < _i; i++) {
-        each = _h[i];
-        _g.push((function() {
+      _h = []; _i = tokens;
+      for (i = 0, _j = _i.length; i < _j; i++) {
+        each = _i[i];
+        _h.push((function() {
           if (each[0] === 'TOKENS') {
-            _j = each[1];
-            for (_k = 0, _l = _j.length; _k < _l; _k++) {
-              nested = _j[_k];
+            _k = each[1];
+            for (_l = 0, _m = _k.length; _l < _m; _l++) {
+              nested = _k[_l];
               this.token(nested[0], nested[1]);
             }
           } else {
@@ -478,7 +482,7 @@
           }
         }).call(this));
       }
-      return _g;
+      return _h;
     }
   };
   // Helpers


@@ -177,7 +177,7 @@ exports.Lexer: class Lexer
   heredoc_token: ->
     return false unless match = @chunk.match(HEREDOC)
     doc: @sanitize_heredoc match[2] or match[4]
-    @token 'STRING', '"' + doc + '"'
+    @token 'STRING', "\"$doc\""
     @line += count match[1], "\n"
     @i += match[1].length
     true
@@ -307,7 +307,7 @@ exports.Lexer: class Lexer
   # indentation on the left-hand side.
   sanitize_heredoc: (doc) ->
     indent: (doc.match(HEREDOC_INDENT) or ['']).sort()[0]
-    doc.replace(new RegExp("^" + indent, 'gm'), '')
+    doc.replace(new RegExp("^" +indent, 'gm'), '')
        .replace(MULTILINER, "\\n")
        .replace(/"/g, '\\"')
@@ -335,12 +335,12 @@ exports.Lexer: class Lexer
   # Error for when you try to use a forbidden word in JavaScript as
   # an identifier.
   identifier_error: (word) ->
-    throw new Error 'SyntaxError: Reserved word "' + word + '" on line ' + @line
+    throw new Error "SyntaxError: Reserved word \"$word\" on line $@line"
   # Error for when you try to assign to a reserved word in JavaScript,
   # like "function" or "default".
   assignment_error: ->
-    throw new Error 'SyntaxError: Reserved word "' + @value() + '" on line ' + @line + ' can\'t be assigned'
+    throw new Error "SyntaxError: Reserved word \"${@value()}\" on line $@line can't be assigned"
   # Expand variables and expressions inside double-quoted strings using
   # [ECMA Harmony's interpolation syntax](http://wiki.ecmascript.org/doku.php?id=strawman:string_interpolation).
@@ -361,26 +361,28 @@ exports.Lexer: class Lexer
         if match
           [group, before, interp]: match
           if before.substring(before.length - 1) is '\\'
-            tokens.push ['STRING', quote + before.substring(0, before.length - 1) + '$' + interp + quote] if before.length
+            prev: before.substring(0, before.length - 1)
+            tokens.push ['STRING', "$quote$prev$$interp$quote"] if before.length
           else
-            tokens.push ['STRING', quote + before + quote] if before.length
+            tokens.push ['STRING', "$quote$before$quote"] if before.length
             if interp.substring(0, 1) is '{'
-              nested: lexer.tokenize '(' + interp.substring(1, interp.length - 1) + ')', {rewrite: no}
+              inner: interp.substring(1, interp.length - 1)
+              nested: lexer.tokenize "($inner)", {rewrite: no}
              nested.pop()
              tokens.push ['TOKENS', nested]
            else
-              interp: 'this.' + interp.substring(1) if interp.substring(0, 1) is '@'
+              interp: "this.${ interp.substring(1) }" if interp.substring(0, 1) is '@'
              tokens.push ['IDENTIFIER', interp]
          str: str.substring(group.length)
        else
-          tokens.push ['STRING', quote + str + quote]
+          tokens.push ['STRING', "$quote$str$quote"]
          str: ''
      if tokens.length > 1
        for i in [tokens.length - 1..1]
          [prev, tok]: [tokens[i - 1], tokens[i]]
          if tok[0] is 'STRING' and prev[0] is 'STRING'
-            contents: quote + prev[1].substring(1, prev[1].length - 1) + tok[1].substring(1, tok[1].length - 1) + quote
-            tokens.splice i - 1, 2, ['STRING', contents]
+            [prev, tok]: [prev[1].substring(1, prev[1].length - 1), tok[1].substring(1, tok[1].length - 1)]
+            tokens.splice i - 1, 2, ['STRING', "$quote$prev$tok$quote"]
      for each, i in tokens
        if each[0] is 'TOKENS'
          @token nested[0], nested[1] for nested in each[1]
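
For readers tracing interpolate_string, a rough sketch of the token stream it emits, following the tokens.push calls visible above; the input and token values are hypothetical, chosen to mirror the "Hello $name." example in the code comments:

    # Input reaching interpolate_string:  "Hello $name."
    # Tokens pushed, per the branches above:
    #
    #   ['STRING',     '"Hello "']
    #   ['IDENTIFIER', 'name']
    #   ['STRING',     '"."']
    #
    # Adjacent STRING tokens (as produced around an escaped \$) are merged by
    # the splice pass; the pieces are presumably joined with + further along in
    # compilation, so the string ends up as roughly:  "Hello " + name + "."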