mirror of
https://github.com/jashkenas/coffeescript.git
synced 2022-11-09 12:23:24 -05:00
Improve error messages for generated tokens
This commit is contained in:
parent
39cb8815f7
commit
f0463e9981
9 changed files with 111 additions and 25 deletions
|
@ -214,6 +214,7 @@
|
|||
token = this.tokens[this.pos++];
|
||||
if (token) {
|
||||
tag = token[0], this.yytext = token[1], this.yylloc = token[2];
|
||||
this.errorToken = token.origin || token;
|
||||
this.yylineno = this.yylloc.first_line;
|
||||
} else {
|
||||
tag = '';
|
||||
|
@ -232,10 +233,12 @@
|
|||
parser.yy = require('./nodes');
|
||||
|
||||
parser.yy.parseError = function(message, _arg) {
  // Override Jison's default error handling. We disregard both Jison's
  // `message` and its `token` argument: the lexer's `errorToken` is used
  // instead, so an error on a generated token can be reported against the
  // real source token it originated from (its `origin`).
  var errorLoc, errorText, errorToken, ignored, token, tokens, _ref;
  token = _arg.token;
  _ref = parser.lexer, errorToken = _ref.errorToken, tokens = _ref.tokens;
  ignored = errorToken[0], errorText = errorToken[1], errorLoc = errorToken[2];
  // If the offending token is the last one in the stream, the input ended
  // unexpectedly; otherwise name invisible whitespace so the message reads
  // "unexpected newline" rather than printing the raw character.
  errorText = errorToken === tokens[tokens.length - 1] ? 'end of input' : helpers.nameWhitespaceCharacter(errorText);
  return helpers.throwSyntaxError("unexpected " + errorText, errorLoc);
};
|
||||
|
||||
formatSourcePosition = function(frame, getSourceMapping) {
|
||||
|
|
|
@ -234,4 +234,19 @@
|
|||
return "" + filename + ":" + (first_line + 1) + ":" + (first_column + 1) + ": error: " + this.message + "\n" + codeLine + "\n" + marker;
|
||||
};
|
||||
|
||||
exports.nameWhitespaceCharacter = function(string) {
|
||||
switch (string) {
|
||||
case ' ':
|
||||
return 'space';
|
||||
case '\n':
|
||||
return 'newline';
|
||||
case '\r':
|
||||
return 'carriage return';
|
||||
case '\t':
|
||||
return 'tab';
|
||||
default:
|
||||
return string;
|
||||
}
|
||||
};
|
||||
|
||||
}).call(this);
|
||||
|
|
|
@ -588,14 +588,14 @@
|
|||
};
|
||||
|
||||
Lexer.prototype.interpolateString = function(str, options) {
|
||||
var column, expr, heredoc, i, inner, interpolated, len, letter, lexedLength, line, locationToken, nested, offsetInChunk, pi, plusToken, popped, regex, rparen, strOffset, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
|
||||
var column, errorToken, expr, heredoc, i, inner, interpolated, len, letter, lexedLength, line, locationToken, nested, offsetInChunk, pi, plusToken, popped, regex, rparen, strOffset, tag, token, tokens, value, _i, _len, _ref2, _ref3, _ref4;
|
||||
if (options == null) {
|
||||
options = {};
|
||||
}
|
||||
heredoc = options.heredoc, regex = options.regex, offsetInChunk = options.offsetInChunk, strOffset = options.strOffset, lexedLength = options.lexedLength;
|
||||
offsetInChunk = offsetInChunk || 0;
|
||||
strOffset = strOffset || 0;
|
||||
lexedLength = lexedLength || str.length;
|
||||
offsetInChunk || (offsetInChunk = 0);
|
||||
strOffset || (strOffset = 0);
|
||||
lexedLength || (lexedLength = str.length);
|
||||
tokens = [];
|
||||
pi = 0;
|
||||
i = -1;
|
||||
|
@ -610,6 +610,9 @@
|
|||
if (pi < i) {
|
||||
tokens.push(this.makeToken('NEOSTRING', str.slice(pi, i), strOffset + pi));
|
||||
}
|
||||
if (!errorToken) {
|
||||
errorToken = this.makeToken('', 'string interpolation', offsetInChunk + i + 1, 2);
|
||||
}
|
||||
inner = expr.slice(1, -1);
|
||||
if (inner.length) {
|
||||
_ref2 = this.getLineAndColumnFromChunk(strOffset + i + 1), line = _ref2[0], column = _ref2[1];
|
||||
|
@ -646,7 +649,7 @@
|
|||
tokens.unshift(this.makeToken('NEOSTRING', '', offsetInChunk));
|
||||
}
|
||||
if (interpolated = tokens.length > 1) {
|
||||
this.token('(', '(', offsetInChunk, 0);
|
||||
this.token('(', '(', offsetInChunk, 0, errorToken);
|
||||
}
|
||||
for (i = _i = 0, _len = tokens.length; _i < _len; i = ++_i) {
|
||||
token = tokens[i];
|
||||
|
@ -731,9 +734,12 @@
|
|||
return token;
|
||||
};
|
||||
|
||||
// Add a token to the results. `origin` is an optional source token for
// generated tokens, so later error reporting can point at the real token
// that caused the generated one to exist.
// Returns the new token.
Lexer.prototype.token = function(tag, value, offsetInChunk, length, origin) {
  var token;
  token = this.makeToken(tag, value, offsetInChunk, length);
  if (origin) {
    token.origin = origin;
  }
  this.tokens.push(token);
  return token;
};
|
||||
|
|
|
@ -4,10 +4,13 @@
|
|||
__indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; },
|
||||
__slice = [].slice;
|
||||
|
||||
// Create a generated token: one that exists due to a use of implicit syntax.
// `origin`, when given, is the real source token that caused this token to
// be generated, so parse errors can be reported against it.
generate = function(tag, value, origin) {
  var tok;
  tok = [tag, value];
  tok.generated = true;
  if (origin) {
    tok.origin = origin;
  }
  return tok;
};
|
||||
|
||||
|
@ -212,7 +215,7 @@
|
|||
ours: true
|
||||
}
|
||||
]);
|
||||
tokens.splice(idx, 0, generate('{', generate(new String('{'))));
|
||||
tokens.splice(idx, 0, generate('{', generate(new String('{')), token));
|
||||
if (j == null) {
|
||||
return i += 1;
|
||||
}
|
||||
|
@ -220,7 +223,7 @@
|
|||
// Close an implicit object at index `j` (defaulting to the current index
// `i` from the enclosing rewriter pass). The generated '}' is tagged with
// the current `token` as its origin so parse errors can point at it.
endImplicitObject = function(j) {
  j = j != null ? j : i;
  stack.pop();
  tokens.splice(j, 0, generate('}', '}', token));
  return i += 1;
};
|
||||
if (inImplicitCall() && (tag === 'IF' || tag === 'TRY' || tag === 'FINALLY' || tag === 'CATCH' || tag === 'CLASS' || tag === 'SWITCH')) {
|
||||
|
|
|
@ -183,6 +183,7 @@ parser.lexer =
|
|||
token = @tokens[@pos++]
|
||||
if token
|
||||
[tag, @yytext, @yylloc] = token
|
||||
@errorToken = token.origin or token
|
||||
@yylineno = @yylloc.first_line
|
||||
else
|
||||
tag = ''
|
||||
|
@ -198,12 +199,21 @@ parser.yy = require './nodes'
|
|||
# Override Jison's default error handling function.
parser.yy.parseError = (message, {token}) ->
  # Disregard Jison's message and token: we take the error token directly
  # from the lexer in case the error was caused by a generated token, which
  # refers back to its origin token.
  {errorToken, tokens} = parser.lexer
  [ignored, errorText, errorLoc] = errorToken

  errorText = if errorToken is tokens[tokens.length - 1]
    'end of input'
  else
    helpers.nameWhitespaceCharacter errorText

  # The second argument has a `loc` property, which should have the location
  # data for this token. Unfortunately, Jison seems to send an outdated `loc`
  # (from the previous token), so we take the location information directly
  # from the lexer's error token instead.
  helpers.throwSyntaxError "unexpected #{errorText}", errorLoc
|
||||
|
||||
# Based on http://v8.googlecode.com/svn/branches/bleeding_edge/src/messages.js
|
||||
# Modified to handle sourceMap
|
||||
|
|
|
@ -188,3 +188,11 @@ syntaxErrorToString = ->
|
|||
#{codeLine}
|
||||
#{marker}
|
||||
"""
|
||||
|
||||
# Give a readable name to otherwise-invisible whitespace characters, so
# error messages can say "unexpected newline" rather than printing the raw
# character. Anything unrecognized is returned unchanged.
exports.nameWhitespaceCharacter = (string) ->
  if string is ' ' then 'space'
  else if string is '\n' then 'newline'
  else if string is '\r' then 'carriage return'
  else if string is '\t' then 'tab'
  else string
|
||||
|
|
|
@ -520,9 +520,9 @@ exports.Lexer = class Lexer
|
|||
# current chunk.
|
||||
interpolateString: (str, options = {}) ->
|
||||
{heredoc, regex, offsetInChunk, strOffset, lexedLength} = options
|
||||
offsetInChunk = offsetInChunk || 0
|
||||
strOffset = strOffset || 0
|
||||
lexedLength = lexedLength || str.length
|
||||
offsetInChunk ||= 0
|
||||
strOffset ||= 0
|
||||
lexedLength ||= str.length
|
||||
|
||||
# Parse the string.
|
||||
tokens = []
|
||||
|
@ -537,6 +537,8 @@ exports.Lexer = class Lexer
|
|||
continue
|
||||
# NEOSTRING is a fake token. This will be converted to a string below.
|
||||
tokens.push @makeToken('NEOSTRING', str[pi...i], strOffset + pi) if pi < i
|
||||
unless errorToken
|
||||
errorToken = @makeToken '', 'string interpolation', offsetInChunk + i + 1, 2
|
||||
inner = expr[1...-1]
|
||||
if inner.length
|
||||
[line, column] = @getLineAndColumnFromChunk(strOffset + i + 1)
|
||||
|
@ -562,7 +564,9 @@ exports.Lexer = class Lexer
|
|||
# If the first token is not a string, add a fake empty string to the beginning.
|
||||
tokens.unshift @makeToken('NEOSTRING', '', offsetInChunk) unless tokens[0][0] is 'NEOSTRING'
|
||||
|
||||
@token '(', '(', offsetInChunk, 0 if interpolated = tokens.length > 1
|
||||
if interpolated = tokens.length > 1
|
||||
@token '(', '(', offsetInChunk, 0, errorToken
|
||||
|
||||
# Push all the tokens
|
||||
for token, i in tokens
|
||||
[tag, value] = token
|
||||
|
@ -656,8 +660,9 @@ exports.Lexer = class Lexer
|
|||
# not specified, the length of `value` will be used.
|
||||
#
|
||||
# Returns the new token.
|
||||
token: (tag, value, offsetInChunk, length, origin) ->
  # `origin` is an optional source token for generated tokens; when set,
  # error messages point at the origin instead of the generated token.
  token = @makeToken tag, value, offsetInChunk, length
  token.origin = origin if origin
  @tokens.push token
  token
|
||||
|
||||
|
|
|
@ -6,9 +6,10 @@
|
|||
# parentheses, and generally clean things up.
|
||||
|
||||
# Create a generated token: one that exists due to a use of implicit syntax.
# `origin`, when given, is the real source token that caused this token to be
# generated, so parse errors can be reported against it.
generate = (tag, value, origin) ->
  tok = [tag, value]
  tok.generated = yes
  tok.origin = origin if origin
  tok
||||
|
||||
# The **Rewriter** class is used by the [Lexer](lexer.html), directly against
|
||||
|
@ -167,13 +168,13 @@ class exports.Rewriter
|
|||
startImplicitObject = (j, startsLine = yes) ->
  idx = j ? i
  stack.push ['{', idx, sameLine: yes, startsLine: startsLine, ours: yes]
  # Tag the generated '{' with the current token as its origin, so errors on
  # the implicit object can point at the real source token.
  tokens.splice idx, 0, generate '{', generate(new String('{')), token
  i += 1 if not j?
|
||||
|
||||
endImplicitObject = (j) ->
  j = j ? i
  stack.pop()
  # Tag the generated '}' with the current token as its origin, so errors on
  # the implicit object can point at the real source token.
  tokens.splice j, 0, generate '}', '}', token
  i += 1
|
||||
|
||||
# Don't end an implicit call on next indent if any of these are in an argument
|
||||
|
|
|
@ -26,7 +26,7 @@ test "parser error formating", ->
|
|||
foo in bar or in baz
|
||||
''',
|
||||
'''
|
||||
[stdin]:1:15: error: unexpected RELATION
|
||||
[stdin]:1:15: error: unexpected in
|
||||
foo in bar or in baz
|
||||
^^
|
||||
'''
|
||||
|
@ -58,9 +58,44 @@ test "#2849: compilation error in a require()d file", ->
|
|||
require './test/syntax-error'
|
||||
''',
|
||||
"""
|
||||
#{path.join __dirname, 'syntax-error.coffee'}:1:15: error: unexpected RELATION
|
||||
#{path.join __dirname, 'syntax-error.coffee'}:1:15: error: unexpected in
|
||||
foo in bar or in baz
|
||||
^^
|
||||
"""
|
||||
finally
|
||||
fs.unlink 'test/syntax-error.coffee'
|
||||
fs.unlink 'test/syntax-error.coffee'
|
||||
|
||||
test "#1096: unexpected generated tokens", ->
  # Unexpected interpolation inside an object key.
  # NOTE(review): the caret-marker lines below are aligned to the reported
  # error columns; the scraped rendering had stripped their leading spaces.
  assertErrorFormat '{"#{key}": val}', '''
    [stdin]:1:3: error: unexpected string interpolation
    {"#{key}": val}
      ^^
  '''
  # Implicit ends
  assertErrorFormat 'a:, b', '''
    [stdin]:1:3: error: unexpected ,
    a:, b
      ^
  '''
  # Explicit ends
  assertErrorFormat '(a:)', '''
    [stdin]:1:4: error: unexpected )
    (a:)
       ^
  '''
  # Unexpected end of file
  assertErrorFormat 'a:', '''
    [stdin]:1:3: error: unexpected end of input
    a:
      ^
  '''
  # Unexpected implicit object
  assertErrorFormat '''
    for i in [1]:
      1
  ''', '''
    [stdin]:1:13: error: unexpected :
    for i in [1]:
                ^
  '''
|
||||
|
|
Loading…
Add table
Reference in a new issue