Mirror of https://github.com/jashkenas/coffeescript.git
Make CoffeeScript work with jison 0.4.14+
Since zaach/jison commit 3548861b, `parser.lexer` is no longer modified during parsing (a copy of it is made, and that copy is modified instead). CoffeeScript modifies `parser.lexer` and then reads those modifications back in its custom `parser.yy.parseError` function, which therefore no longer works. This commit puts the data that `parser.yy.parseError` needs directly on the `parser` object so that it is not lost. Supersedes #3603. Fixes #3608 and zaach/jison#243.
commit 28c07d30cb
parent 669e7fed10
3 changed files with 320 additions and 333 deletions
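For context, a minimal self-contained sketch of the behavior change described above (assumed behavior, not jison's actual source): once the generated parser works on a copy of `parser.lexer`, properties the lexer wrapper sets on itself are lost, while properties set on `parser` survive.

// Hypothetical illustration (not jison's actual source); Object.assign stands in
// for whatever copy the generated parser makes of the lexer it was given.
var parser = {};

parser.lexer = {
  lex: function() {
    this.errorToken = 'set on the lexer';     // ends up on the copy only
    parser.errorToken = 'set on the parser';  // `parser` itself is shared, so this survives
    return '';
  }
};

// Roughly what jison 0.4.14+ does before lexing (simplified):
var lexerCopy = Object.assign({}, parser.lexer);
lexerCopy.lex();

console.log(parser.lexer.errorToken); // undefined: the old approach loses the data
console.log(parser.errorToken);       // 'set on the parser': the approach this commit takes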
@@ -238,18 +238,18 @@
   parser.lexer = {
     lex: function() {
       var tag, token;
-      token = this.tokens[this.pos++];
+      token = parser.tokens[this.pos++];
       if (token) {
         tag = token[0], this.yytext = token[1], this.yylloc = token[2];
-        this.errorToken = token.origin || token;
+        parser.errorToken = token.origin || token;
         this.yylineno = this.yylloc.first_line;
       } else {
         tag = '';
       }
       return tag;
     },
-    setInput: function(_at_tokens) {
-      this.tokens = _at_tokens;
+    setInput: function(tokens) {
+      parser.tokens = tokens;
       return this.pos = 0;
     },
     upcomingInput: function() {
@@ -260,9 +260,9 @@
   parser.yy = require('./nodes');

   parser.yy.parseError = function(message, _arg) {
-    var errorLoc, errorTag, errorText, errorToken, token, tokens, _ref1;
+    var errorLoc, errorTag, errorText, errorToken, token, tokens;
     token = _arg.token;
-    _ref1 = parser.lexer, errorToken = _ref1.errorToken, tokens = _ref1.tokens;
+    errorToken = parser.errorToken, tokens = parser.tokens;
     errorTag = errorToken[0], errorText = errorToken[1], errorLoc = errorToken[2];
     errorText = errorToken === tokens[tokens.length - 1] ? 'end of input' : errorTag === 'INDENT' || errorTag === 'OUTDENT' ? 'indentation' : helpers.nameWhitespaceCharacter(errorText);
     return helpers.throwSyntaxError("unexpected " + errorText, errorLoc);
File diff suppressed because one or more lines are too long
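To make the control flow in the hunks above concrete, here is a toy harness; `driveParse` is invented for illustration and is not jison's real generated parser. It only shows the order in which the hooks are called, and why stashing state on `parser` keeps it reachable from `parser.yy.parseError` even though the driver only ever sees a copy of the lexer.

// Toy harness; hook bodies mirror the pattern in the diff above.
var parser = {};

parser.lexer = {
  lex: function() {
    var token = parser.tokens[this.pos++];
    if (!token) return '';
    this.yytext = token[1];
    this.yylloc = token[2];
    parser.errorToken = token.origin || token;  // stash on `parser`, as in the diff
    return token[0];
  },
  setInput: function(tokens) {
    parser.tokens = tokens;                     // stash on `parser`, as in the diff
    return this.pos = 0;
  }
};

parser.yy = {
  parseError: function(message, _arg) {
    // Reads the state the lexer wrapper stashed on `parser`.
    var errorToken = parser.errorToken;
    throw new SyntaxError('unexpected ' + errorToken[1]);
  }
};

// Mimics a jison-generated parse(): copy the lexer, feed it the input,
// call lex() in a loop, and invoke yy.parseError on a bad token.
function driveParse(tokens) {
  var lexer = Object.assign({}, parser.lexer);  // the copy newer jison makes
  lexer.setInput(tokens);
  var tag;
  while ((tag = lexer.lex()) !== '') {
    if (tag === 'UNEXPECTED') {
      parser.yy.parseError('Parse error', {token: tag});
    }
  }
}

try {
  driveParse([['IDENTIFIER', 'a', {}], ['UNEXPECTED', '%', {}]]);
} catch (err) {
  console.log(err.message);  // "unexpected %"
}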
@@ -202,16 +202,17 @@ lexer = new Lexer
 # directly as a "Jison lexer".
 parser.lexer =
   lex: ->
-    token = @tokens[@pos++]
+    token = parser.tokens[@pos++]
     if token
       [tag, @yytext, @yylloc] = token
-      @errorToken = token.origin or token
+      parser.errorToken = token.origin or token
       @yylineno = @yylloc.first_line
     else
       tag = ''

     tag
-  setInput: (@tokens) ->
+  setInput: (tokens) ->
+    parser.tokens = tokens
     @pos = 0
   upcomingInput: ->
     ""
@@ -223,7 +224,7 @@ parser.yy.parseError = (message, {token}) ->
   # Disregard Jison's message, it contains redundant line numer information.
   # Disregard the token, we take its value directly from the lexer in case
   # the error is caused by a generated token which might refer to its origin.
-  {errorToken, tokens} = parser.lexer
+  {errorToken, tokens} = parser
   [errorTag, errorText, errorLoc] = errorToken

   errorText = if errorToken is tokens[tokens.length - 1]
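Finally, a quick user-level way to see the effect of the commit. This is a sketch under assumptions: the require path points at the in-repo build under lib/coffee-script/, and the exact error text is illustrative.

// With jison 0.4.14+ installed, parseError can still find parser.errorToken
// and parser.tokens, so the usual friendly syntax error comes back instead of
// a crash inside parseError.
var CoffeeScript = require('./lib/coffee-script/coffee-script');

try {
  CoffeeScript.compile('a = ');  // incomplete expression
} catch (err) {
  console.log(err.message);      // something like "unexpected end of input"
}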