1
0
Fork 0
mirror of https://github.com/jashkenas/coffeescript.git synced 2022-11-09 12:23:24 -05:00

Put location data in token[2] instead of in token.locationData

This commit is contained in:
Jason Walton 2013-01-14 15:20:35 -05:00
parent a1ba0a89f8
commit bbbf612f29
10 changed files with 78 additions and 66 deletions

View file

@@ -153,9 +153,13 @@
parser.lexer = {
lex: function() {
var tag, token;
token = this.tokens[this.pos++] || [''];
tag = token[0], this.yytext = token[1], this.yylineno = token[2];
this.yylloc = token.locationData;
token = this.tokens[this.pos++];
if (token) {
tag = token[0], this.yytext = token[1], this.yylloc = token[2];
this.yylineno = this.yylloc.first_line;
} else {
tag = '';
}
return tag;
},
setInput: function(tokens) {

View file

@@ -447,14 +447,16 @@
};
printTokens = function(tokens) {
var strings, tag, token, value;
var locationData, strings, tag, token, value;
strings = (function() {
var _i, _len, _ref1, _results;
var _i, _len, _results;
_results = [];
for (_i = 0, _len = tokens.length; _i < _len; _i++) {
token = tokens[_i];
_ref1 = [token[0], token[1].toString().replace(/\n/, '\\n')], tag = _ref1[0], value = _ref1[1];
_results.push("[" + tag + " " + value + "]");
tag = token[0];
value = token[1].toString().replace(/\n/, '\\n');
locationData = helpers.locationDataToString(token[2]);
_results.push("[" + tag + " " + value + " " + locationData + "]");
}
return _results;
})();

View file

@@ -109,8 +109,8 @@
exports.locationDataToString = function(obj) {
var locationData;
if ("locationData" in obj) {
locationData = obj.locationData;
if (("2" in obj) && ("first_line" in obj[2])) {
locationData = obj[2];
} else if ("first_line" in obj) {
locationData = obj;
}

View file

@@ -144,7 +144,7 @@
}
tagToken = this.token(tag, id, 0, idLength);
if (poppedToken) {
_ref4 = [poppedToken.locationData.first_line, poppedToken.locationData.first_column], tagToken.locationData.first_line = _ref4[0], tagToken.locationData.first_column = _ref4[1];
_ref4 = [poppedToken[2].first_line, poppedToken[2].first_column], tagToken[2].first_line = _ref4[0], tagToken[2].first_column = _ref4[1];
}
if (colon) {
colonOffset = input.lastIndexOf(':');
@@ -320,7 +320,7 @@
}
prev = last(this.tokens);
plusToken = ['+', '+'];
plusToken.locationData = prev.locationData;
plusToken[2] = prev[2];
tokens.push(plusToken);
}
tokens.pop();
@@ -673,11 +673,11 @@
plusToken = this.token('+', '+');
}
locationToken = tag === 'TOKENS' ? value[0] : token;
plusToken.locationData = {
first_line: locationToken.locationData.first_line,
first_column: locationToken.locationData.first_column,
last_line: locationToken.locationData.first_line,
last_column: locationToken.locationData.first_column
plusToken[2] = {
first_line: locationToken[2].first_line,
first_column: locationToken[2].first_column,
last_line: locationToken[2].first_line,
last_column: locationToken[2].first_column
};
}
if (tag === 'TOKENS') {
@@ -739,8 +739,7 @@
locationData = {};
_ref2 = this.getLineAndColumnFromChunk(offsetInChunk), locationData.first_line = _ref2[0], locationData.first_column = _ref2[1];
_ref3 = this.getLineAndColumnFromChunk(offsetInChunk + length), locationData.last_line = _ref3[0], locationData.last_column = _ref3[1];
token = [tag, value, locationData.first_line];
token.locationData = locationData;
token = [tag, value, locationData];
return token;
};

View file

@@ -134,7 +134,7 @@
};
action = function(token, i) {
var tok;
tok = this.generate('}', '}', token[2]);
tok = this.generate('}', '}');
return this.tokens.splice(i, 0, tok);
};
return this.scanTokens(function(token, i, tokens) {
@@ -161,7 +161,7 @@
startsLine = !prevTag || (__indexOf.call(LINEBREAKS, prevTag) >= 0);
value = new String('{');
value.generated = true;
tok = this.generate('{', value, token[2]);
tok = this.generate('{', value);
tokens.splice(idx, 0, tok);
this.detectEnd(i + 2, condition, action);
return 2;
@@ -189,7 +189,7 @@
return !token.generated && this.tag(i - 1) !== ',' && (__indexOf.call(IMPLICIT_END, tag) >= 0 || (tag === 'INDENT' && !seenControl)) && (tag !== 'INDENT' || (((_ref = this.tag(i - 2)) !== 'CLASS' && _ref !== 'EXTENDS') && (_ref1 = this.tag(i - 1), __indexOf.call(IMPLICIT_BLOCK, _ref1) < 0) && !((post = this.tokens[i + 1]) && post.generated && post[0] === '{')));
};
action = function(token, i) {
return this.tokens.splice(i, 0, this.generate('CALL_END', ')', token[2]));
return this.tokens.splice(i, 0, this.generate('CALL_END', ')'));
};
return this.scanTokens(function(token, i, tokens) {
var callObject, current, next, prev, tag, _ref, _ref1, _ref2;
@@ -213,7 +213,7 @@
if (!(callObject || (prev != null ? prev.spaced : void 0) && (prev.call || (_ref2 = prev[0], __indexOf.call(IMPLICIT_FUNC, _ref2) >= 0)) && (__indexOf.call(IMPLICIT_CALL, tag) >= 0 || !(token.spaced || token.newLine) && __indexOf.call(IMPLICIT_UNSPACED_CALL, tag) >= 0))) {
return 1;
}
tokens.splice(i, 0, this.generate('CALL_START', '(', token[2]));
tokens.splice(i, 0, this.generate('CALL_START', '('));
this.detectEnd(i + 1, condition, action);
if (prev[0] === '?') {
prev[0] = 'FUNC_EXIST';
@@ -226,17 +226,17 @@
return this.scanTokens(function(token, i, tokens) {
var prevToken, tag;
tag = token[0];
if ((token.generated || token.explicit) && !token.locationData) {
if ((token.generated || token.explicit) && (!token[2])) {
if (i > 0) {
prevToken = tokens[i - 1];
token.locationData = {
first_line: prevToken.locationData.last_line,
first_column: prevToken.locationData.last_column,
last_line: prevToken.locationData.last_line,
last_column: prevToken.locationData.last_column
token[2] = {
first_line: prevToken[2].last_line,
first_column: prevToken[2].last_column,
last_line: prevToken[2].last_line,
last_column: prevToken[2].last_column
};
} else {
token.locationData = {
token[2] = {
first_line: 0,
first_column: 0,
last_line: 0,
@@ -317,8 +317,8 @@
if (implicit == null) {
implicit = false;
}
indent = ['INDENT', 2, token[2]];
outdent = ['OUTDENT', 2, token[2]];
indent = ['INDENT', 2];
outdent = ['OUTDENT', 2];
if (implicit) {
indent.generated = outdent.generated = true;
}
@@ -328,9 +328,9 @@
return [indent, outdent];
};
Rewriter.prototype.generate = function(tag, value, line) {
Rewriter.prototype.generate = function(tag, value) {
var tok;
tok = [tag, value, line];
tok = [tag, value];
tok.generated = true;
return tok;
};

View file

@@ -122,9 +122,13 @@ lexer = new Lexer
# directly as a "Jison lexer".
parser.lexer =
lex: ->
token = @tokens[@pos++] or ['']
[tag, @yytext, @yylineno] = token
@yylloc = token.locationData
token = @tokens[@pos++]
if token
[tag, @yytext, @yylloc] = token
@yylineno = @yylloc.first_line
else
tag = ''
tag
setInput: (@tokens) ->
@pos = 0

View file

@@ -301,8 +301,10 @@ lint = (file, js) ->
# Pretty-print a stream of tokens.
printTokens = (tokens) ->
strings = for token in tokens
[tag, value] = [token[0], token[1].toString().replace(/\n/, '\\n')]
"[#{tag} #{value}]"
tag = token[0]
value = token[1].toString().replace(/\n/, '\\n')
locationData = helpers.locationDataToString token[2]
"[#{tag} #{value} #{locationData}]"
printLine strings.join(' ')
# Use the [OptionParser module](optparse.html) to extract all options from

View file

@@ -83,7 +83,7 @@ exports.addLocationDataFn = (first, last) ->
# Convert jison location data to a string.
# `obj` can be a token, or a locationData.
exports.locationDataToString = (obj) ->
if "locationData" of obj then locationData = obj.locationData
if ("2" of obj) and ("first_line" of obj[2]) then locationData = obj[2]
else if "first_line" of obj then locationData = obj
if locationData

View file

@@ -3,9 +3,11 @@
# a token is produced, we consume the match, and start again. Tokens are in the
# form:
#
# [tag, value, lineNumber]
# [tag, value, locationData]
#
# Which is a format that can be fed directly into [Jison](http://github.com/zaach/jison).
# where locationData is {first_line, first_column, last_line, last_column}, which is a
# format that can be fed directly into [Jison](http://github.com/zaach/jison). These
# are read by jison in the `parser.lexer` function defined in coffee-script.coffee.
{Rewriter, INVERSES} = require './rewriter'
@@ -154,8 +156,8 @@ exports.Lexer = class Lexer
tagToken = @token tag, id, 0, idLength
if poppedToken
[tagToken.locationData.first_line, tagToken.locationData.first_column] =
[poppedToken.locationData.first_line, poppedToken.locationData.first_column]
[tagToken[2].first_line, tagToken[2].first_column] =
[poppedToken[2].first_line, poppedToken[2].first_column]
if colon
colonOffset = input.lastIndexOf ':'
@token ':', ':', colonOffset, colon.length
@@ -278,7 +280,7 @@ exports.Lexer = class Lexer
prev = last @tokens
plusToken = ['+', '+']
plusToken.locationData = prev.locationData
plusToken[2] = prev[2] # Copy location data
tokens.push plusToken
# Remove the extra "+"
@@ -574,11 +576,11 @@ exports.Lexer = class Lexer
# Create a 0-length "+" token.
plusToken = @token '+', '+' if i
locationToken = if tag == 'TOKENS' then value[0] else token
plusToken.locationData =
first_line: locationToken.locationData.first_line
first_column: locationToken.locationData.first_column
last_line: locationToken.locationData.first_line
last_column: locationToken.locationData.first_column
plusToken[2] =
first_line: locationToken[2].first_line
first_column: locationToken[2].first_column
last_line: locationToken[2].first_line
last_column: locationToken[2].first_column
if tag is 'TOKENS'
# Push all the tokens in the fake 'TOKENS' token. These already have
# sane location data.
@@ -646,8 +648,7 @@ exports.Lexer = class Lexer
[locationData.last_line, locationData.last_column] =
@getLineAndColumnFromChunk offsetInChunk + length
token = [tag, value, locationData.first_line]
token.locationData = locationData
token = [tag, value, locationData]
return token

View file

@@ -123,7 +123,7 @@ class exports.Rewriter
)
action = (token, i) ->
tok = @generate '}', '}', token[2]
tok = @generate '}', '}'
@tokens.splice i, 0, tok
@scanTokens (token, i, tokens) ->
@@ -144,7 +144,7 @@ class exports.Rewriter
startsLine = not prevTag or (prevTag in LINEBREAKS)
value = new String('{')
value.generated = yes
tok = @generate '{', value, token[2]
tok = @generate '{', value
tokens.splice idx, 0, tok
@detectEnd i + 2, condition, action
2
@@ -169,7 +169,7 @@ class exports.Rewriter
not ((post = @tokens[i + 1]) and post.generated and post[0] is '{')))
action = (token, i) ->
@tokens.splice i, 0, @generate 'CALL_END', ')', token[2]
@tokens.splice i, 0, @generate 'CALL_END', ')'
@scanTokens (token, i, tokens) ->
tag = token[0]
@@ -186,7 +186,7 @@ class exports.Rewriter
return 1 unless callObject or
prev?.spaced and (prev.call or prev[0] in IMPLICIT_FUNC) and
(tag in IMPLICIT_CALL or not (token.spaced or token.newLine) and tag in IMPLICIT_UNSPACED_CALL)
tokens.splice i, 0, @generate 'CALL_START', '(', token[2]
tokens.splice i, 0, @generate 'CALL_START', '('
@detectEnd i + 1, condition, action
prev[0] = 'FUNC_EXIST' if prev[0] is '?'
2
@@ -195,16 +195,16 @@ class exports.Rewriter
addLocationDataToGeneratedTokens: ->
@scanTokens (token, i, tokens) ->
tag = token[0]
if (token.generated or token.explicit) and not token.locationData
if (token.generated or token.explicit) and (not token[2])
if i > 0
prevToken = tokens[i-1]
token.locationData =
first_line: prevToken.locationData.last_line
first_column: prevToken.locationData.last_column
last_line: prevToken.locationData.last_line
last_column: prevToken.locationData.last_column
token[2] =
first_line: prevToken[2].last_line
first_column: prevToken[2].last_column
last_line: prevToken[2].last_line
last_column: prevToken[2].last_column
else
token.locationData =
token[2] =
first_line: 0
first_column: 0
last_line: 0
@@ -268,15 +268,15 @@ class exports.Rewriter
# Generate the indentation tokens, based on another token on the same line.
indentation: (token, implicit = no) ->
indent = ['INDENT', 2, token[2]]
outdent = ['OUTDENT', 2, token[2]]
indent = ['INDENT', 2]
outdent = ['OUTDENT', 2]
indent.generated = outdent.generated = yes if implicit
indent.explicit = outdent.explicit = yes if not implicit
[indent, outdent]
# Create a generated token: one that exists due to a use of implicit syntax.
generate: (tag, value, line) ->
tok = [tag, value, line]
generate: (tag, value) ->
tok = [tag, value]
tok.generated = yes
tok