Fix suppression of empty tokens during error handling at included range boundaries

Max Brunsfeld 2022-11-14 12:20:39 -08:00
parent d07f864815
commit efd22e452b
2 changed files with 14 additions and 5 deletions

@@ -447,8 +447,14 @@ static Subtree ts_parser__lex(
     // avoid infinite loops which could otherwise occur, because the lexer is
     // looking for any possible token, instead of looking for the specific set of
     // tokens that are valid in some parse state.
+    //
+    // Note that it's possible that the token end position may be *before* the
+    // original position of the lexer because of the way that tokens are positioned
+    // at included range boundaries: when a token is terminated at the start of
+    // an included range, it is marked as ending at the *end* of the preceding
+    // included range.
     if (
-      self->lexer.token_end_position.bytes == current_position.bytes &&
+      self->lexer.token_end_position.bytes <= current_position.bytes &&
       (error_mode || !ts_stack_has_advanced_since_error(self->stack, version)) &&
       !external_scanner_state_changed
     ) {
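
The behavioral change in this hunk is the comparison loosening from `==` to `<=`. Below is a minimal standalone sketch of why that matters; the struct, helper name, and byte values are hypothetical stand-ins, not tree-sitter's actual types or API. Per the new comment, a token terminated at the start of an included range is recorded as ending at the end of the preceding range, which can be strictly before the lexer's current position, so an equality check alone misses such empty tokens.

#include <stdbool.h>
#include <stdio.h>

// Hypothetical stand-in for tree-sitter's Length type.
typedef struct { unsigned bytes; } Length;

// The new check: a token whose recorded end is at or before the current
// position is treated as empty (the old code required exact equality).
static bool token_is_empty(Length token_end, Length current_position) {
  return token_end.bytes <= current_position.bytes;
}

int main(void) {
  // Suppose the preceding included range ends at byte 10, and the lexer has
  // since advanced to byte 12, the start of the next included range.
  Length token_end = { .bytes = 10 };
  Length current   = { .bytes = 12 };

  printf("old (==) detects empty token: %s\n",
         token_end.bytes == current.bytes ? "yes" : "no");   // "no": the bug
  printf("new (<=) detects empty token: %s\n",
         token_is_empty(token_end, current) ? "yes" : "no"); // "yes": suppressed
  return 0;
}
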
@@ -525,6 +531,9 @@ static Subtree ts_parser__lex(
         self->language
       );
     } else {
+      if (self->lexer.token_end_position.bytes < self->lexer.token_start_position.bytes) {
+        self->lexer.token_start_position = self->lexer.token_end_position;
+      }
       bool is_keyword = false;
       TSSymbol symbol = self->lexer.data.result_symbol;
       Length padding = length_sub(self->lexer.token_start_position, start_position);
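
The second hunk adds a clamp so the token's start position never exceeds its end position. Here is a short sketch of the failure mode it guards against, assuming simplified stand-in types rather than tree-sitter's own: the surrounding code derives token extents by subtracting positions, and with unsigned byte counts an end that precedes the start would wrap around to a huge value.

#include <stdio.h>

// Hypothetical stand-ins for tree-sitter's Length type and length_sub helper.
typedef struct { unsigned bytes; } Length;

static Length length_sub(Length a, Length b) {
  // Assumes a.bytes >= b.bytes; the clamp below maintains that invariant.
  return (Length){ .bytes = a.bytes - b.bytes };
}

int main(void) {
  Length token_start = { .bytes = 12 };  // lexer had advanced to byte 12
  Length token_end   = { .bytes = 10 };  // but the token ends back at byte 10

  // The clamp from the diff: pull the start back to the end when needed.
  if (token_end.bytes < token_start.bytes) {
    token_start = token_end;
  }

  Length size = length_sub(token_end, token_start);
  printf("token size in bytes: %u\n", size.bytes);  // 0, not a wrapped value
  return 0;
}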