2015-09-19 13:19:49 -07:00
|
|
|
#include "runtime/parser.h"
|
2015-11-20 12:55:01 -08:00
|
|
|
#include <assert.h>
|
2014-07-10 13:14:52 -07:00
|
|
|
#include <stdio.h>
|
2016-06-02 14:04:48 -07:00
|
|
|
#include <limits.h>
|
2014-10-09 14:02:03 -07:00
|
|
|
#include <stdbool.h>
|
2014-07-10 13:14:52 -07:00
|
|
|
#include "tree_sitter/runtime.h"
|
|
|
|
|
#include "runtime/tree.h"
|
2014-07-30 23:40:02 -07:00
|
|
|
#include "runtime/lexer.h"
|
2014-09-26 16:15:07 -07:00
|
|
|
#include "runtime/length.h"
|
2016-02-17 20:41:29 -08:00
|
|
|
#include "runtime/array.h"
|
2015-11-20 12:00:49 -08:00
|
|
|
#include "runtime/language.h"
|
2016-01-15 15:08:42 -08:00
|
|
|
#include "runtime/alloc.h"
|
2016-05-09 14:31:44 -07:00
|
|
|
#include "runtime/reduce_action.h"
|
2016-08-31 10:51:59 -07:00
|
|
|
#include "runtime/error_costs.h"
|
2014-07-10 13:14:52 -07:00
|
|
|
|
2017-07-17 17:12:36 -07:00
|
|
|
// Format a message into the lexer's debug buffer and dispatch it through
// parser__log (user logger callback and/or dot-graph output on stderr).
// Wrapped in do/while(0) so the macro expands to a single statement and is
// safe inside unbraced if/else bodies.
#define LOG(...)                                                                              \
  do {                                                                                        \
    if (self->lexer.logger.log || self->print_debugging_graphs) {                             \
      snprintf(self->lexer.debug_buffer, TREE_SITTER_SERIALIZATION_BUFFER_SIZE, __VA_ARGS__); \
      parser__log(self);                                                                      \
    }                                                                                         \
  } while (0)
|
2016-02-23 09:45:27 -08:00
|
|
|
|
2017-09-14 10:54:09 -07:00
|
|
|
// Print the current parse stack as a dot graph to stderr when graph
// debugging is enabled. do/while(0) makes the macro a single statement,
// avoiding dangling-else hazards at call sites.
#define LOG_STACK()                                                                  \
  do {                                                                               \
    if (self->print_debugging_graphs) {                                              \
      ts_stack_print_dot_graph(self->stack, self->language->symbol_names, stderr);   \
      fputs("\n\n", stderr);                                                         \
    }                                                                                \
  } while (0)
|
|
|
|
|
|
2016-06-22 21:04:35 -07:00
|
|
|
// Print the finished tree as a dot graph to stderr when graph debugging is
// enabled. do/while(0) makes the macro a single statement, avoiding
// dangling-else hazards at call sites.
#define LOG_TREE()                                                            \
  do {                                                                        \
    if (self->print_debugging_graphs) {                                       \
      ts_tree_print_dot_graph(self->finished_tree, self->language, stderr);   \
      fputs("\n", stderr);                                                    \
    }                                                                         \
  } while (0)
|
|
|
|
|
|
2016-03-02 09:55:25 -08:00
|
|
|
// Convenience: human-readable name of a symbol in the current language.
#define SYM_NAME(symbol) ts_language_symbol_name(self->language, symbol)
|
2014-10-13 21:20:08 -07:00
|
|
|
|
2017-09-12 16:20:06 -07:00
|
|
|
// Maximum number of parse-stack versions kept alive at once; surplus
// versions are removed during stack condensation (see parser__condense_stack).
static const unsigned MAX_VERSION_COUNT = 6;

// NOTE(review): not referenced anywhere in this chunk — presumably bounds a
// stack-summary depth used elsewhere in the file; confirm before relying on it.
static const unsigned MAX_SUMMARY_DEPTH = 16;

// Error-cost gap (scaled by push count) beyond which one stack version is
// considered strictly better than another in parser__compare_versions.
static const unsigned MAX_COST_DIFFERENCE = 16 * ERROR_COST_PER_SKIPPED_TREE;
|
2017-06-29 14:58:20 -07:00
|
|
|
|
2016-03-07 20:06:46 -08:00
|
|
|
// Snapshot of one stack version's error-recovery standing, used when
// comparing competing versions of the parse stack.
typedef struct {
  unsigned cost;           // error cost reported by ts_stack_error_cost
  unsigned push_count;     // push count reported by ts_stack_push_count
  int dynamic_precedence;  // total dynamic precedence reported by the stack
  bool is_in_error;        // true when the version's state is ERROR_STATE
} ErrorStatus;
|
|
|
|
|
|
|
|
|
|
// Outcome of comparing two stack versions' error statuses, ordered from
// "left clearly wins" to "right clearly wins".
typedef enum {
  ErrorComparisonTakeLeft,     // discard the right version
  ErrorComparisonPreferLeft,   // left looks better; right may still be merged
  ErrorComparisonNone,         // no preference either way
  ErrorComparisonPreferRight,  // right looks better; left may still be merged
  ErrorComparisonTakeRight,    // discard the left version
} ErrorComparison;
|
2016-03-03 10:21:57 -08:00
|
|
|
|
2017-08-25 16:26:40 -07:00
|
|
|
// Emit the message currently held in the lexer's debug buffer: forward it to
// the user-supplied logger callback (if any), and, when dot-graph debugging
// is enabled, also print it to stderr as a standalone graph label.
static void parser__log(Parser *self) {
  if (self->lexer.logger.log) {
    self->lexer.logger.log(
      self->lexer.logger.payload,
      TSLogTypeParse,
      self->lexer.debug_buffer
    );
  }

  if (self->print_debugging_graphs) {
    // Wrap the message in a dot graph, escaping embedded double-quotes so
    // the label remains well-formed.
    fprintf(stderr, "graph {\nlabel=\"");
    for (const char *chr = self->lexer.debug_buffer; *chr != 0; chr++) {
      if (*chr == '"') fputc('\\', stderr);
      fputc(*chr, stderr);
    }
    fprintf(stderr, "\"\n}\n\n");
  }
}
|
|
|
|
|
|
2016-11-04 09:18:38 -07:00
|
|
|
// Pop the node on top of the given stack version and push its children back
// individually, so parsing can proceed *inside* a node that was previously
// treated as a unit. Repeats while the newly exposed top nodes are
// themselves non-leaves ("pending"). Returns true if anything was broken down.
static bool parser__breakdown_top_of_stack(Parser *self, StackVersion version) {
  bool did_break_down = false;
  bool pending = false;

  do {
    // Pop the pending entries; an empty result means nothing left to break down.
    StackSliceArray pop = ts_stack_pop_pending(self->stack, version);
    if (!pop.size) break;

    did_break_down = true;
    pending = false;
    for (uint32_t i = 0; i < pop.size; i++) {
      StackSlice slice = pop.contents[i];
      TSStateId state = ts_stack_state(self->stack, slice.version);
      Tree *parent = *array_front(&slice.trees);
      // Capture the symbol now: `parent` may be freed by ts_tree_release below.
      TSSymbol parent_symbol = parent->symbol;

      // Re-push each child of the popped node, threading the parse state
      // through successor states as if the children had been shifted normally.
      for (uint32_t j = 0; j < parent->child_count; j++) {
        Tree *child = parent->children[j];
        pending = child->child_count > 0;

        if (child->symbol == ts_builtin_sym_error) {
          state = ERROR_STATE;
        } else if (!child->extra) {
          state = ts_language_next_state(self->language, state, child->symbol);
        }

        ts_tree_retain(child);
        ts_stack_push(self->stack, slice.version, child, pending, state);
      }

      // Re-push any extra trees that were popped along with the parent.
      for (uint32_t j = 1; j < slice.trees.size; j++) {
        Tree *tree = slice.trees.contents[j];
        ts_stack_push(self->stack, slice.version, tree, false, state);
      }

      ts_stack_decrease_push_count(self->stack, slice.version, parent->child_count + 1);
      ts_tree_release(&self->tree_pool, parent);
      array_delete(&slice.trees);

      // Fix: log the symbol captured *before* the release above — reading
      // parent->symbol here would be a potential use-after-free once the
      // release drops the last reference.
      LOG("breakdown_top_of_stack tree:%s", SYM_NAME(parent_symbol));
      LOG_STACK();
    }
  } while (pending);

  return did_break_down;
}
|
|
|
|
|
|
2017-08-30 16:19:11 -07:00
|
|
|
static void parser__breakdown_lookahead(Parser *self, Tree **lookahead,
|
2016-08-29 12:08:58 -07:00
|
|
|
TSStateId state,
|
|
|
|
|
ReusableNode *reusable_node) {
|
2017-08-30 16:19:11 -07:00
|
|
|
bool did_break_down = false;
|
2017-08-30 16:17:10 -07:00
|
|
|
while (reusable_node->tree->child_count > 0 && reusable_node->tree->parse_state != state) {
|
2016-07-17 13:35:43 -07:00
|
|
|
LOG("state_mismatch sym:%s", SYM_NAME(reusable_node->tree->symbol));
|
2017-01-05 10:06:43 -08:00
|
|
|
reusable_node_breakdown(reusable_node);
|
2017-08-30 16:19:11 -07:00
|
|
|
did_break_down = true;
|
2016-07-17 13:35:43 -07:00
|
|
|
}
|
|
|
|
|
|
2017-08-30 16:19:11 -07:00
|
|
|
if (did_break_down) {
|
2017-10-05 17:32:21 -07:00
|
|
|
ts_tree_release(&self->tree_pool, *lookahead);
|
2016-07-17 13:35:43 -07:00
|
|
|
ts_tree_retain(*lookahead = reusable_node->tree);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-13 16:38:15 -07:00
|
|
|
// Decide which of two stack versions is in better shape, based on their
// error statuses. Versions currently inside an error are always worse than
// ones that are not; otherwise cheaper error cost wins, decisively so when
// the gap (scaled by the cheaper side's push count) exceeds
// MAX_COST_DIFFERENCE. Dynamic precedence breaks remaining ties.
static ErrorComparison parser__compare_versions(Parser *self, ErrorStatus a, ErrorStatus b) {
  // Exactly one side is inside an error: the other side is preferred, and
  // decisively so if it is also cheaper.
  if (!a.is_in_error && b.is_in_error) {
    return a.cost < b.cost ? ErrorComparisonTakeLeft : ErrorComparisonPreferLeft;
  }
  if (a.is_in_error && !b.is_in_error) {
    return b.cost < a.cost ? ErrorComparisonTakeRight : ErrorComparisonPreferRight;
  }

  // Both sides in the same error condition: compare costs, weighting the
  // difference by how much work the cheaper side has done since its error.
  if (a.cost < b.cost) {
    unsigned weighted_gap = (b.cost - a.cost) * (1 + a.push_count);
    return weighted_gap > MAX_COST_DIFFERENCE ? ErrorComparisonTakeLeft
                                              : ErrorComparisonPreferLeft;
  }
  if (b.cost < a.cost) {
    unsigned weighted_gap = (a.cost - b.cost) * (1 + b.push_count);
    return weighted_gap > MAX_COST_DIFFERENCE ? ErrorComparisonTakeRight
                                              : ErrorComparisonPreferRight;
  }

  // Equal costs: fall back to dynamic precedence.
  if (a.dynamic_precedence > b.dynamic_precedence) return ErrorComparisonPreferLeft;
  if (b.dynamic_precedence > a.dynamic_precedence) return ErrorComparisonPreferRight;
  return ErrorComparisonNone;
}
|
|
|
|
|
|
|
|
|
|
// Return true if some other live stack version (or the finished tree) is in
// a clearly better position than the given version would be with the given
// error status — i.e. continuing this version is not worthwhile.
static bool parser__better_version_exists(Parser *self, StackVersion version,
                                          bool is_in_error, unsigned cost) {
  // A completed tree that is no more costly always beats an in-progress version.
  if (self->finished_tree && self->finished_tree->error_cost <= cost) return true;

  Length position = ts_stack_position(self->stack, version);
  // Hypothetical status this version would have; push_count is 0 because the
  // comparison is made before any further pushes on this version.
  ErrorStatus status = {
    .cost = cost,
    .is_in_error = is_in_error,
    .dynamic_precedence = ts_stack_dynamic_precedence(self->stack, version),
    .push_count = 0,
  };

  for (StackVersion i = 0, n = ts_stack_version_count(self->stack); i < n; i++) {
    // Skip ourselves, halted versions, and versions that haven't caught up
    // to our byte position (they can't be compared fairly yet).
    if (i == version ||
        ts_stack_is_halted(self->stack, i) ||
        ts_stack_position(self->stack, i).bytes < position.bytes) continue;
    ErrorStatus status_i = {
      .cost = ts_stack_error_cost(self->stack, i),
      .is_in_error = ts_stack_state(self->stack, i) == ERROR_STATE,
      .dynamic_precedence = ts_stack_dynamic_precedence(self->stack, i),
      .push_count = ts_stack_push_count(self->stack, i)
    };
    switch (parser__compare_versions(self, status, status_i)) {
      case ErrorComparisonTakeRight:
        // The other version strictly dominates ours.
        return true;
      case ErrorComparisonPreferRight:
        // Only better if the two versions would merge anyway.
        if (ts_stack_can_merge(self->stack, i, version)) return true;
        /* fallthrough */
      default:
        break;
    }
  }

  return false;
}
|
|
|
|
|
|
2018-01-24 14:04:55 -08:00
|
|
|
// Tidy the parse stack: drop halted versions, remove or merge versions that
// are dominated by others (pairwise comparison via parser__compare_versions),
// and cap the total number of versions at MAX_VERSION_COUNT. Returns the
// minimum error cost among surviving versions that are not inside an error
// (UINT_MAX if there are none).
static unsigned parser__condense_stack(Parser *self) {
  bool made_changes = false;
  unsigned min_error_cost = UINT_MAX;
  // NOTE: removals/merges mutate the version list in place; `i--` / `j = i`
  // below deliberately restart comparison after each structural change.
  for (StackVersion i = 0; i < ts_stack_version_count(self->stack); i++) {
    if (ts_stack_is_halted(self->stack, i)) {
      ts_stack_remove_version(self->stack, i);
      i--;
      continue;
    }

    ErrorStatus status_i = {
      .cost = ts_stack_error_cost(self->stack, i),
      .push_count = ts_stack_push_count(self->stack, i),
      .dynamic_precedence = ts_stack_dynamic_precedence(self->stack, i),
      .is_in_error = ts_stack_state(self->stack, i) == ERROR_STATE,
    };
    // Track the cheapest non-errored version for the return value.
    if (!status_i.is_in_error && status_i.cost < min_error_cost) {
      min_error_cost = status_i.cost;
    }

    // Compare version i against every earlier version j.
    for (StackVersion j = 0; j < i; j++) {
      ErrorStatus status_j = {
        .cost = ts_stack_error_cost(self->stack, j),
        .push_count = ts_stack_push_count(self->stack, j),
        .dynamic_precedence = ts_stack_dynamic_precedence(self->stack, j),
        .is_in_error = ts_stack_state(self->stack, j) == ERROR_STATE,
      };

      bool can_merge = ts_stack_can_merge(self->stack, j, i);
      switch (parser__compare_versions(self, status_j, status_i)) {
        case ErrorComparisonTakeLeft:
          // j strictly dominates: drop i and restart the inner scan.
          made_changes = true;
          ts_stack_remove_version(self->stack, i);
          i--;
          j = i;
          break;
        case ErrorComparisonPreferLeft:
        case ErrorComparisonNone:
          // Weak or no preference: merge i into j if their tops coincide.
          if (can_merge) {
            made_changes = true;
            ts_stack_force_merge(self->stack, j, i);
            i--;
            j = i;
          }
          break;
        case ErrorComparisonPreferRight:
          // i is preferred: swap so the better version sits earlier, then
          // merge if possible.
          made_changes = true;
          ts_stack_swap_versions(self->stack, i, j);
          if (can_merge) {
            ts_stack_force_merge(self->stack, j, i);
            i--;
            j = i;
          }
          break;
        case ErrorComparisonTakeRight:
          // i strictly dominates: drop j and adjust both indices.
          made_changes = true;
          ts_stack_remove_version(self->stack, j);
          i--;
          j--;
          break;
      }
    }
  }

  // Hard cap: discard any versions beyond MAX_VERSION_COUNT (list is ordered
  // so the better versions sit at the front).
  while (ts_stack_version_count(self->stack) > MAX_VERSION_COUNT) {
    ts_stack_remove_version(self->stack, MAX_VERSION_COUNT);
    made_changes = true;
  }

  if (made_changes) {
    LOG("condense");
    LOG_STACK();
  }

  return min_error_cost;
}
|
|
|
|
|
|
2017-06-27 14:30:46 -07:00
|
|
|
// Restore the external scanner to the state it had after producing the given
// token; a NULL token resets it to its initial (empty) state.
static void parser__restore_external_scanner(Parser *self, Tree *external_token) {
  if (!external_token) {
    // No prior external token: deserialize from an empty buffer.
    self->language->external_scanner.deserialize(self->external_scanner_payload, NULL, 0);
    return;
  }

  self->language->external_scanner.deserialize(
    self->external_scanner_payload,
    ts_external_token_state_data(&external_token->external_token_state),
    external_token->external_token_state.length
  );
}
|
2016-06-21 07:28:04 -07:00
|
|
|
|
Simplify error recovery; eliminate recovery states
The previous approach to error recovery relied on special error-recovery
states in the parse table. For each token T, there was an error recovery
state in which the parser looked for *any* token that could follow T.
Unfortunately, sometimes the set of tokens that could follow T contained
conflicts. For example, in JS, the token '}' can be followed by the
open-ended 'template_chars' token, but also by ordinary tokens like
'identifier'. So with the old algorithm, when recovering from an
unexpected '}' token, the lexer had no way to distinguish identifiers
from template_chars.
This commit drops the error recovery states. Instead, when we encounter
an unexpected token T, we recover from the error by finding a previous
state S in the stack in which T would be valid, popping all of the nodes
after S, and wrapping them in an error.
This way, the lexer is always invoked in a normal parse state, in which
it is looking for a non-conflicting set of tokens. Eliminating the error
recovery states also shrinks the lex state machine significantly.
Signed-off-by: Rick Winfrey <rewinfrey@github.com>
2017-09-11 15:22:52 -07:00
|
|
|
// Run the lexer at the given stack version's position and return a new leaf
// tree for the next token. Tries the external scanner first (when the state
// enables external tokens), then the internal lexer; on failure, retries in
// the error-state lex mode, and finally skips unrecognized characters one at
// a time, producing an error node covering the skipped range.
static Tree *parser__lex(Parser *self, StackVersion version, TSStateId parse_state) {
  Length start_position = ts_stack_position(self->stack, version);
  Tree *external_token = ts_stack_last_external_token(self->stack, version);
  TSLexMode lex_mode = self->language->lex_modes[parse_state];
  // Non-NULL only when this lex state permits external tokens.
  const bool *valid_external_tokens = ts_language_enabled_external_tokens(
    self->language,
    lex_mode.external_lex_state
  );

  bool found_external_token = false;
  bool error_mode = parse_state == ERROR_STATE;
  bool skipped_error = false;
  int32_t first_error_character = 0;
  Length error_start_position = length_zero();
  Length error_end_position = length_zero();
  // Furthest byte the lexer looked at; recorded on the result so incremental
  // reparsing knows how far this token's validity extends.
  uint32_t last_byte_scanned = start_position.bytes;
  ts_lexer_reset(&self->lexer, start_position);

  for (;;) {
    Length current_position = self->lexer.current_position;

    // First, give the external scanner a chance (if enabled for this state).
    if (valid_external_tokens) {
      LOG(
        "lex_external state:%d, row:%u, column:%u",
        lex_mode.external_lex_state,
        current_position.extent.row,
        current_position.extent.column
      );
      ts_lexer_start(&self->lexer);
      parser__restore_external_scanner(self, external_token);
      if (self->language->external_scanner.scan(
        self->external_scanner_payload,
        &self->lexer.data,
        valid_external_tokens
      )) {
        // Scanner succeeded but didn't mark an end: use current position.
        if (length_is_undefined(self->lexer.token_end_position)) {
          self->lexer.token_end_position = self->lexer.current_position;
        }

        // In error mode, zero-width external tokens would loop forever.
        if (error_mode && self->lexer.token_end_position.bytes <= current_position.bytes) {
          LOG("disregard_empty_token");
        } else {
          found_external_token = true;
          break;
        }
      }

      if (self->lexer.current_position.bytes > last_byte_scanned) {
        last_byte_scanned = self->lexer.current_position.bytes;
      }
      // Rewind so the internal lexer starts from the same position.
      ts_lexer_reset(&self->lexer, current_position);
    }

    // Then try the internal (generated) lexer.
    LOG(
      "lex_internal state:%d, row:%u, column:%u",
      lex_mode.lex_state,
      current_position.extent.row,
      current_position.extent.column
    );
    ts_lexer_start(&self->lexer);
    if (self->language->lex_fn(&self->lexer.data, lex_mode.lex_state)) {
      break;
    }

    // Nothing matched: switch to the error-state lex mode and retry once
    // from the original start position.
    if (!error_mode) {
      LOG("retry_in_error_mode");
      error_mode = true;
      lex_mode = self->language->lex_modes[ERROR_STATE];
      valid_external_tokens = ts_language_enabled_external_tokens(
        self->language,
        lex_mode.external_lex_state
      );
      if (self->lexer.current_position.bytes > last_byte_scanned) {
        last_byte_scanned = self->lexer.current_position.bytes;
      }
      ts_lexer_reset(&self->lexer, start_position);
      continue;
    }

    // Still nothing, even in error mode: start (or continue) skipping
    // unrecognized characters, remembering where the skipped region began.
    if (!skipped_error) {
      LOG("skip_unrecognized_character");
      skipped_error = true;
      error_start_position = self->lexer.token_start_position;
      error_end_position = self->lexer.token_start_position;
      first_error_character = self->lexer.data.lookahead;
    }

    // If the last attempt made no progress, consume one character — or, at
    // end of input (lookahead == 0), give up and emit an error token.
    if (self->lexer.current_position.bytes == error_end_position.bytes) {
      if (self->lexer.data.lookahead == 0) {
        self->lexer.data.result_symbol = ts_builtin_sym_error;
        break;
      }
      self->lexer.data.advance(&self->lexer, false);
    }

    error_end_position = self->lexer.current_position;
  }

  if (self->lexer.current_position.bytes > last_byte_scanned) {
    last_byte_scanned = self->lexer.current_position.bytes;
  }

  Tree *result;
  if (skipped_error) {
    // Wrap the skipped characters in an error node.
    Length padding = length_sub(error_start_position, start_position);
    Length size = length_sub(error_end_position, error_start_position);
    result = ts_tree_make_error(&self->tree_pool, size, padding, first_error_character, self->language);
  } else {
    // Guard against a scanner that set an end before the recorded start.
    if (self->lexer.token_end_position.bytes < self->lexer.token_start_position.bytes) {
      self->lexer.token_start_position = self->lexer.token_end_position;
    }

    TSSymbol symbol = self->lexer.data.result_symbol;
    Length padding = length_sub(self->lexer.token_start_position, start_position);
    Length size = length_sub(self->lexer.token_end_position, self->lexer.token_start_position);

    if (found_external_token) {
      // Map the scanner's local symbol id into the grammar's symbol space.
      symbol = self->language->external_scanner.symbol_map[symbol];
    } else if (symbol == self->language->keyword_capture_token && symbol != 0) {
      // The token might actually be a keyword: re-lex it with the keyword
      // lexer and accept that symbol only if it spans the exact same bytes
      // and is valid in the current parse state.
      uint32_t end_byte = self->lexer.token_end_position.bytes;
      ts_lexer_reset(&self->lexer, self->lexer.token_start_position);
      ts_lexer_start(&self->lexer);
      if (
        self->language->keyword_lex_fn(&self->lexer.data, 0) &&
        self->lexer.token_end_position.bytes == end_byte &&
        ts_language_has_actions(self->language, parse_state, self->lexer.data.result_symbol)
      ) {
        symbol = self->lexer.data.result_symbol;
      }
    }

    result = ts_tree_make_leaf(&self->tree_pool, symbol, padding, size, self->language);

    if (found_external_token) {
      // Persist the scanner's serialized state on the token so it can be
      // restored when reparsing resumes after this token.
      result->has_external_tokens = true;
      unsigned length = self->language->external_scanner.serialize(
        self->external_scanner_payload,
        self->lexer.debug_buffer
      );
      ts_external_token_state_init(&result->external_token_state, self->lexer.debug_buffer, length);
    }
  }

  result->bytes_scanned = last_byte_scanned - start_position.bytes + 1;
  result->parse_state = parse_state;
  result->first_leaf.lex_mode = lex_mode;

  LOG("lexed_lookahead sym:%s, size:%u", SYM_NAME(result->symbol), result->size.bytes);
  return result;
}
|
|
|
|
|
|
2017-08-30 17:35:12 -07:00
|
|
|
// Return the single cached lexed token if it was produced at the same byte
// offset with an equivalent external-scanner state; otherwise NULL.
static Tree *parser__get_cached_token(Parser *self, size_t byte_index, Tree *last_external_token) {
  TokenCache *cache = &self->token_cache;
  if (!cache->token) return NULL;
  if (cache->byte_index != byte_index) return NULL;
  if (!ts_tree_external_token_state_eq(cache->last_external_token, last_external_token)) {
    return NULL;
  }
  return cache->token;
}
|
|
|
|
|
|
|
|
|
|
// Replace the parser's one-entry token cache. The incoming trees are
// retained *before* the old entries are released, so storing the same tree
// again cannot transiently drop its refcount to zero.
static void parser__set_cached_token(Parser *self, size_t byte_index, Tree *last_external_token,
                                     Tree *token) {
  TokenCache *cache = &self->token_cache;

  if (token) ts_tree_retain(token);
  if (last_external_token) ts_tree_retain(last_external_token);

  if (cache->token) ts_tree_release(&self->tree_pool, cache->token);
  if (cache->last_external_token) ts_tree_release(&self->tree_pool, cache->last_external_token);

  cache->byte_index = byte_index;
  cache->token = token;
  cache->last_external_token = last_external_token;
}
|
|
|
|
|
|
|
|
|
|
static bool parser__can_reuse_first_leaf(Parser *self, TSStateId state, Tree *tree,
|
2018-03-07 16:13:34 -08:00
|
|
|
TableEntry *table_entry,
|
|
|
|
|
ReusableNode *next_reusable_node) {
|
2017-08-30 17:35:12 -07:00
|
|
|
TSLexMode current_lex_mode = self->language->lex_modes[state];
|
2018-03-07 16:13:34 -08:00
|
|
|
|
|
|
|
|
// If the token was created in a state with the same set of lookaheads, it is reusable.
|
|
|
|
|
if (tree->first_leaf.lex_mode.lex_state == current_lex_mode.lex_state &&
|
|
|
|
|
tree->first_leaf.lex_mode.external_lex_state == current_lex_mode.external_lex_state &&
|
|
|
|
|
(tree->first_leaf.symbol != self->language->keyword_capture_token ||
|
|
|
|
|
tree->parse_state == state)) return true;
|
|
|
|
|
|
|
|
|
|
// Empty tokens are not reusable in states with different lookaheads.
|
|
|
|
|
if (tree->size.bytes == 0 && tree->symbol != ts_builtin_sym_end) return false;
|
|
|
|
|
|
|
|
|
|
// If the current state allows external tokens or other tokens that conflict with this
|
|
|
|
|
// token, this token is not reusable.
|
2018-03-28 10:11:05 -07:00
|
|
|
return current_lex_mode.external_lex_state == 0 && table_entry->is_reusable;
|
2016-07-01 15:08:19 -07:00
|
|
|
}
|
|
|
|
|
|
2017-08-30 17:35:12 -07:00
|
|
|
// Produce the next lookahead token for the given stack version. Prefers, in
// order: a node from the reusable-node stack (incremental reparsing), the
// one-entry token cache, and finally a fresh lex. Fills `table_entry` for the
// chosen token and may update *state if the stack top had to be broken down.
static Tree *parser__get_lookahead(Parser *self, StackVersion version, TSStateId *state,
                                   ReusableNode *reusable_node, TableEntry *table_entry) {
  Length position = ts_stack_position(self->stack, version);
  Tree *last_external_token = ts_stack_last_external_token(self->stack, version);

  Tree *result;
  while ((result = reusable_node->tree)) {
    // Reusable node starts beyond our position: nothing reusable yet.
    if (reusable_node->byte_index > position.bytes) {
      LOG("before_reusable_node symbol:%s", SYM_NAME(result->symbol));
      break;
    }

    // Reusable node is behind us: discard it and try the next one.
    if (reusable_node->byte_index < position.bytes) {
      LOG("past_reusable_node symbol:%s", SYM_NAME(result->symbol));
      reusable_node_pop(reusable_node);
      continue;
    }

    // External scanner state must match for the node's tokens to be valid.
    if (!ts_tree_external_token_state_eq(reusable_node->last_external_token, last_external_token)) {
      LOG("reusable_node_has_different_external_scanner_state symbol:%s", SYM_NAME(result->symbol));
      reusable_node_pop(reusable_node);
      continue;
    }

    // Nodes with edits, errors, missing tokens, fragile parses, or interior
    // structure during an ambiguity cannot be reused whole.
    const char *reason = NULL;
    if (result->has_changes) {
      reason = "has_changes";
    } else if (result->symbol == ts_builtin_sym_error) {
      reason = "is_error";
    } else if (result->is_missing) {
      reason = "is_missing";
    } else if (result->fragile_left || result->fragile_right) {
      reason = "is_fragile";
    } else if (self->in_ambiguity && result->child_count) {
      reason = "in_ambiguity";
    }

    if (reason) {
      LOG("cant_reuse_node_%s tree:%s", reason, SYM_NAME(result->symbol));
      // Try to descend into the node's children; if it is a leaf, discard it
      // and break down the stack top instead, refreshing the parse state.
      if (!reusable_node_breakdown(reusable_node)) {
        reusable_node_pop(reusable_node);
        parser__breakdown_top_of_stack(self, version);
        *state = ts_stack_state(self->stack, version);
      }
      continue;
    }

    ts_language_table_entry(self->language, *state, result->first_leaf.symbol, table_entry);
    ReusableNode next_reusable_node = reusable_node_after_leaf(reusable_node);
    if (!parser__can_reuse_first_leaf(self, *state, result, table_entry, &next_reusable_node)) {
      LOG(
        "cant_reuse_node symbol:%s, first_leaf_symbol:%s",
        SYM_NAME(result->symbol),
        SYM_NAME(result->first_leaf.symbol)
      );
      // Skip past this node's first leaf and fall through to cache/lex.
      *reusable_node = next_reusable_node;
      break;
    }

    LOG("reuse_node symbol:%s", SYM_NAME(result->symbol));
    ts_tree_retain(result);
    return result;
  }

  // Second choice: the single-token cache for this position and scanner state.
  if ((result = parser__get_cached_token(self, position.bytes, last_external_token))) {
    ts_language_table_entry(self->language, *state, result->first_leaf.symbol, table_entry);
    if (parser__can_reuse_first_leaf(self, *state, result, table_entry, NULL)) {
      ts_tree_retain(result);
      return result;
    }
  }

  // Last resort: lex a fresh token and cache it.
  result = parser__lex(self, version, *state);
  parser__set_cached_token(self, position.bytes, last_external_token, result);
  ts_language_table_entry(self->language, *state, result->symbol, table_entry);
  return result;
}
|
2015-10-06 16:22:58 -07:00
|
|
|
|
2016-11-09 20:59:05 -08:00
|
|
|
// Decide which of two alternative trees for the same span should survive.
// Returns true if `right` should replace `left`, false if `left` is kept.
//
// Preference order, checked in sequence:
//   1. a non-NULL tree beats a NULL one;
//   2. lower error cost wins;
//   3. higher dynamic precedence wins;
//   4. if the trees are tied and contain errors, prefer the new tree;
//   5. otherwise fall back to ts_tree_compare as a deterministic tie-break
//      (keeping the existing tree when they compare equal).
static bool parser__select_tree(Parser *self, Tree *left, Tree *right) {
  if (!left) return true;
  if (!right) return false;

  if (right->error_cost < left->error_cost) {
    LOG("select_smaller_error symbol:%s, over_symbol:%s",
        SYM_NAME(right->symbol), SYM_NAME(left->symbol));
    return true;
  }

  if (left->error_cost < right->error_cost) {
    LOG("select_smaller_error symbol:%s, over_symbol:%s",
        SYM_NAME(left->symbol), SYM_NAME(right->symbol));
    return false;
  }

  if (right->dynamic_precedence > left->dynamic_precedence) {
    LOG("select_higher_precedence symbol:%s, prec:%u, over_symbol:%s, other_prec:%u",
        SYM_NAME(right->symbol), right->dynamic_precedence, SYM_NAME(left->symbol),
        left->dynamic_precedence);
    return true;
  }

  if (left->dynamic_precedence > right->dynamic_precedence) {
    LOG("select_higher_precedence symbol:%s, prec:%u, over_symbol:%s, other_prec:%u",
        SYM_NAME(left->symbol), left->dynamic_precedence, SYM_NAME(right->symbol),
        right->dynamic_precedence);
    return false;
  }

  // Equal cost and precedence: if both trees contain errors, prefer the
  // newer tree.
  if (left->error_cost > 0) return true;

  // Deterministic tie-break. A comparison of -1 keeps `left`, +1 selects
  // `right`, and equality keeps the existing tree.
  int comparison = ts_tree_compare(left, right);
  switch (comparison) {
    case -1:
      LOG("select_earlier symbol:%s, over_symbol:%s", SYM_NAME(left->symbol),
          SYM_NAME(right->symbol));
      return false;
    case 1:
      LOG("select_earlier symbol:%s, over_symbol:%s", SYM_NAME(right->symbol),
          SYM_NAME(left->symbol));
      return true;
    default:
      LOG("select_existing symbol:%s, over_symbol:%s", SYM_NAME(left->symbol),
          SYM_NAME(right->symbol));
      return false;
  }
}
|
|
|
|
|
|
2016-11-04 09:18:38 -07:00
|
|
|
// Push `lookahead` onto the given stack version, transitioning it to `state`.
//
// The stack takes a reference to the pushed tree. If the tree's `extra` flag
// must change, the tree is not mutated in place when more than one stack
// version exists (it may be shared between versions) — a copy is made
// instead. In all other cases the tree's refcount is simply incremented.
static void parser__shift(Parser *self, StackVersion version, TSStateId state,
                          Tree *lookahead, bool extra) {
  if (extra != lookahead->extra) {
    if (ts_stack_version_count(self->stack) > 1) {
      // Other versions may still reference this tree with the old flag value,
      // so mutate a private copy rather than the shared node.
      lookahead = ts_tree_make_copy(&self->tree_pool, lookahead);
    } else {
      ts_tree_retain(lookahead);
    }
    lookahead->extra = extra;
  } else {
    ts_tree_retain(lookahead);
  }

  // A tree with children represents a partially re-used subtree; mark the
  // push as "pending" so the stack can distinguish it from a plain token.
  bool is_pending = lookahead->child_count > 0;
  ts_stack_push(self->stack, version, lookahead, is_pending, state);
  if (lookahead->has_external_tokens) {
    // Record the external scanner state so later lexing can resume from it.
    ts_stack_set_last_external_token(
      self->stack, version, ts_tree_last_external_token(lookahead)
    );
  }
}
|
|
|
|
|
|
2017-07-14 10:37:26 -07:00
|
|
|
// Try out an alternative set of children for `tree`.
//
// A scratch copy of the node is built with the candidate children, and
// parser__select_tree decides whether the candidate beats the current node.
// On success the candidate is written back into `*tree` and true is
// returned; otherwise `tree` is left untouched and false is returned.
static bool parser__replace_children(Parser *self, Tree *tree, Tree **children, uint32_t count) {
  self->scratch_tree = *tree;
  self->scratch_tree.child_count = 0;
  ts_tree_set_children(&self->scratch_tree, count, children, self->language);
  bool candidate_wins = parser__select_tree(self, tree, &self->scratch_tree);
  if (candidate_wins) *tree = self->scratch_tree;
  return candidate_wins;
}
|
|
|
|
|
|
2018-03-29 17:37:54 -07:00
|
|
|
// Pop `count` trees off the given stack version, wrap them in a new node
// with the given `symbol`, and push that node back onto the stack in the
// state reached by following `symbol` from the popped-to state.
//
// The stack may hold several diverged versions, so one pop can yield
// multiple slices; each slice produces its own parent node, and versions
// that end up in identical states are merged before returning. Returns
// the array of popped slices.
static StackSliceArray parser__reduce(Parser *self, StackVersion version, TSSymbol symbol,
                                      uint32_t count, int dynamic_precedence,
                                      uint16_t alias_sequence_id, bool fragile) {
  uint32_t initial_version_count = ts_stack_version_count(self->stack);

  StackSliceArray pop = ts_stack_pop_count(self->stack, version, count);

  for (uint32_t i = 0; i < pop.size; i++) {
    StackSlice slice = pop.contents[i];

    // Extra tokens on top of the stack should not be included in this new parent
    // node. They will be re-pushed onto the stack after the parent node is
    // created and pushed.
    uint32_t child_count = slice.trees.size;
    while (child_count > 0 && slice.trees.contents[child_count - 1]->extra) {
      child_count--;
    }

    Tree *parent = ts_tree_make_node(&self->tree_pool,
      symbol, child_count, slice.trees.contents, alias_sequence_id, self->language
    );

    // This pop operation may have caused multiple stack versions to collapse
    // into one, because they all diverged from a common state. In that case,
    // choose one of the arrays of trees to be the parent node's children, and
    // delete the rest of the tree arrays.
    while (i + 1 < pop.size) {
      StackSlice next_slice = pop.contents[i + 1];
      if (next_slice.version != slice.version) break;
      i++;

      // Again, exclude trailing extra tokens from the candidate children.
      uint32_t child_count = next_slice.trees.size;
      while (child_count > 0 && next_slice.trees.contents[child_count - 1]->extra) {
        child_count--;
      }

      // Keep whichever child array parser__select_tree prefers; free the other.
      if (parser__replace_children(self, parent, next_slice.trees.contents, child_count)) {
        ts_tree_array_delete(&self->tree_pool, &slice.trees);
        slice = next_slice;
      } else {
        ts_tree_array_delete(&self->tree_pool, &next_slice.trees);
      }
    }

    parent->dynamic_precedence += dynamic_precedence;
    parent->alias_sequence_id = alias_sequence_id;

    TSStateId state = ts_stack_state(self->stack, slice.version);
    TSStateId next_state = ts_language_next_state(self->language, state, symbol);
    // A reduction performed in an ambiguous or multi-version context cannot
    // be trusted for later incremental re-use, so mark the node fragile and
    // withhold its parse state.
    if (fragile || self->in_ambiguity || pop.size > 1 || initial_version_count > 1) {
      parent->fragile_left = true;
      parent->fragile_right = true;
      parent->parse_state = TS_TREE_STATE_NONE;
    } else {
      parent->parse_state = state;
    }

    // Push the parent node onto the stack, along with any extra tokens that
    // were previously on top of the stack.
    ts_stack_push(self->stack, slice.version, parent, false, next_state);
    for (uint32_t j = parent->child_count; j < slice.trees.size; j++) {
      Tree *tree = slice.trees.contents[j];
      ts_stack_push(self->stack, slice.version, tree, false, next_state);
    }
  }

  // Merge any versions created by this reduction that have converged onto
  // the same state.
  for (StackVersion i = initial_version_count; i < ts_stack_version_count(self->stack); i++) {
    for (StackVersion j = initial_version_count; j < i; j++) {
      if (ts_stack_merge(self->stack, j, i)) {
        i--;
        break;
      }
    }
  }

  return pop;
}
|
|
|
|
|
|
2016-11-09 20:59:05 -08:00
|
|
|
// Reset the parser's state to begin a parse over `input`. When
// `previous_tree` is non-NULL, it seeds the reusable-node cursor so that
// unchanged subtrees can be re-used during an incremental re-parse.
static void parser__start(Parser *self, TSInput input, Tree *previous_tree) {
  if (!previous_tree) {
    LOG("new_parse");
  } else {
    LOG("parse_after_edit");
  }

  // Reset the external scanner to its initial (empty) serialized state.
  if (self->language->external_scanner.deserialize) {
    self->language->external_scanner.deserialize(self->external_scanner_payload, NULL, 0);
  }

  ts_lexer_set_input(&self->lexer, input);
  ts_stack_clear(self->stack);

  // Clear per-parse bookkeeping.
  self->reusable_node = reusable_node_new(previous_tree);
  self->finished_tree = NULL;
  self->accept_count = 0;
  self->in_ambiguity = false;
}
|
|
|
|
|
|
2016-11-04 09:18:38 -07:00
|
|
|
// Finish the given stack version: pop its entire contents, build a root
// node from them, and record the result as a candidate finished tree.
//
// The end-of-input `lookahead` must be the builtin end symbol. The root is
// constructed by copying the topmost non-extra tree on the stack and
// splicing its children (plus any surrounding extras) in as the new root's
// children. If several versions accept, parser__select_tree keeps the best
// finished tree. The version is halted afterwards.
static void parser__accept(Parser *self, StackVersion version,
                           Tree *lookahead) {
  lookahead->extra = true;
  assert(lookahead->symbol == ts_builtin_sym_end);
  ts_tree_retain(lookahead);
  ts_stack_push(self->stack, version, lookahead, false, 1);
  StackSliceArray pop = ts_stack_pop_all(self->stack, version);

  for (uint32_t i = 0; i < pop.size; i++) {
    StackSlice slice = pop.contents[i];
    TreeArray trees = slice.trees;

    // Walk backward to the last non-extra tree; that becomes the root.
    // `j + 1 > 0` is the unsigned-safe form of `j >= 0`.
    Tree *root = NULL;
    for (uint32_t j = trees.size - 1; j + 1 > 0; j--) {
      Tree *child = trees.contents[j];
      if (!child->extra) {
        // Copy the node, retain its children, then replace the node in the
        // array with those children so the copy can adopt the full list
        // (including leading/trailing extras) as its own children.
        root = ts_tree_make_copy(&self->tree_pool, child);
        root->child_count = 0;
        for (uint32_t k = 0; k < child->child_count; k++) {
          ts_tree_retain(child->children[k]);
        }
        array_splice(&trees, j, 1, child->child_count, child->children);
        ts_tree_set_children(root, trees.size, trees.contents, self->language);
        ts_tree_release(&self->tree_pool, child);
        break;
      }
    }

    assert(root && root->ref_count > 0);
    self->accept_count++;

    // Keep whichever finished tree parser__select_tree prefers.
    if (self->finished_tree) {
      if (parser__select_tree(self, self->finished_tree, root)) {
        ts_tree_release(&self->tree_pool, self->finished_tree);
        self->finished_tree = root;
      } else {
        ts_tree_release(&self->tree_pool, root);
      }
    } else {
      self->finished_tree = root;
    }
  }

  // This version is done; remove the popped remnant and halt it.
  ts_stack_remove_version(self->stack, pop.contents[0].version);
  ts_stack_halt(self->stack, version);
}
|
|
|
|
|
|
2017-12-28 15:48:35 -08:00
|
|
|
static bool parser__do_all_potential_reductions(Parser *self, StackVersion starting_version,
|
|
|
|
|
TSSymbol lookahead_symbol) {
|
|
|
|
|
bool result = false;
|
2017-12-28 14:00:59 -08:00
|
|
|
for (StackVersion version = starting_version;;) {
|
|
|
|
|
uint32_t version_count = ts_stack_version_count(self->stack);
|
|
|
|
|
if (version >= version_count) break;
|
|
|
|
|
|
2018-03-29 17:37:54 -07:00
|
|
|
TSStateId state = ts_stack_state(self->stack, version);
|
2017-12-28 14:00:59 -08:00
|
|
|
bool has_shift_action = false;
|
|
|
|
|
array_clear(&self->reduce_actions);
|
|
|
|
|
|
2017-12-28 15:48:35 -08:00
|
|
|
TSSymbol first_symbol, end_symbol;
|
|
|
|
|
if (lookahead_symbol != 0) {
|
|
|
|
|
first_symbol = lookahead_symbol;
|
|
|
|
|
end_symbol = lookahead_symbol + 1;
|
|
|
|
|
} else {
|
|
|
|
|
first_symbol = 1;
|
|
|
|
|
end_symbol = self->language->token_count;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (TSSymbol symbol = first_symbol; symbol < end_symbol; symbol++) {
|
2017-12-28 14:00:59 -08:00
|
|
|
TableEntry entry;
|
|
|
|
|
ts_language_table_entry(self->language, state, symbol, &entry);
|
|
|
|
|
for (uint32_t i = 0; i < entry.action_count; i++) {
|
|
|
|
|
TSParseAction action = entry.actions[i];
|
|
|
|
|
switch (action.type) {
|
|
|
|
|
case TSParseActionTypeShift:
|
|
|
|
|
case TSParseActionTypeRecover:
|
2017-12-28 17:17:58 -08:00
|
|
|
if (!action.params.extra) has_shift_action = true;
|
2017-12-28 14:00:59 -08:00
|
|
|
break;
|
|
|
|
|
case TSParseActionTypeReduce:
|
|
|
|
|
if (action.params.child_count > 0)
|
|
|
|
|
ts_reduce_action_set_add(&self->reduce_actions, (ReduceAction){
|
|
|
|
|
.symbol = action.params.symbol,
|
|
|
|
|
.count = action.params.child_count,
|
|
|
|
|
.dynamic_precedence = action.params.dynamic_precedence,
|
|
|
|
|
.alias_sequence_id = action.params.alias_sequence_id,
|
|
|
|
|
});
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
2016-05-29 22:36:47 -07:00
|
|
|
}
|
2016-03-10 11:57:33 -08:00
|
|
|
}
|
|
|
|
|
|
2017-12-28 15:48:35 -08:00
|
|
|
bool has_reduce_action = self->reduce_actions.size > 0;
|
2017-12-28 14:00:59 -08:00
|
|
|
for (uint32_t i = 0; i < self->reduce_actions.size; i++) {
|
|
|
|
|
ReduceAction action = self->reduce_actions.contents[i];
|
|
|
|
|
parser__reduce(
|
|
|
|
|
self, version, action.symbol, action.count,
|
|
|
|
|
action.dynamic_precedence, action.alias_sequence_id,
|
|
|
|
|
true
|
|
|
|
|
);
|
|
|
|
|
}
|
2016-05-09 14:31:44 -07:00
|
|
|
|
2017-12-28 15:48:35 -08:00
|
|
|
if (has_shift_action) {
|
|
|
|
|
result = true;
|
|
|
|
|
} else {
|
|
|
|
|
if (has_reduce_action) {
|
|
|
|
|
ts_stack_renumber_version(self->stack, version_count, version);
|
|
|
|
|
continue;
|
|
|
|
|
} else if (lookahead_symbol != 0) {
|
|
|
|
|
ts_stack_remove_version(self->stack, version);
|
|
|
|
|
}
|
2017-12-28 14:00:59 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (version == starting_version) {
|
|
|
|
|
version = version_count;
|
2016-08-29 09:34:08 -07:00
|
|
|
} else {
|
2017-12-28 14:00:59 -08:00
|
|
|
version++;
|
2016-08-29 09:34:08 -07:00
|
|
|
}
|
|
|
|
|
}
|
2017-12-28 15:48:35 -08:00
|
|
|
return result;
|
2016-08-29 09:34:08 -07:00
|
|
|
}
|
|
|
|
|
|
Simplify error recovery; eliminate recovery states
The previous approach to error recovery relied on special error-recovery
states in the parse table. For each token T, there was an error recovery
state in which the parser looked for *any* token that could follow T.
Unfortunately, sometimes the set of tokens that could follow T contained
conflicts. For example, in JS, the token '}' can be followed by the
open-ended 'template_chars' token, but also by ordinary tokens like
'identifier'. So with the old algorithm, when recovering from an
unexpected '}' token, the lexer had no way to distinguish identifiers
from template_chars.
This commit drops the error recovery states. Instead, when we encounter
an unexpected token T, we recover from the error by finding a previous
state S in the stack in which T would be valid, popping all of the nodes
after S, and wrapping them in an error.
This way, the lexer is always invoked in a normal parse state, in which
it is looking for a non-conflicting set of tokens. Eliminating the error
recovery states also shrinks the lex state machine significantly.
Signed-off-by: Rick Winfrey <rewinfrey@github.com>
2017-09-11 15:22:52 -07:00
|
|
|
// Begin error recovery on the given stack version after encountering an
// unexpected `lookahead_symbol`.
//
// Strategy: bail out if enough versions have already finished or a clearly
// better version exists; otherwise perform all available reductions, try to
// repair the input by inserting a single "missing" token that would make the
// lookahead valid, push an error-state discontinuity onto every resulting
// version, merge them back together, and record a stack summary for the
// later recovery search.
static void parser__handle_error(Parser *self, StackVersion version, TSSymbol lookahead_symbol) {
  // If enough parse versions have already completed, just halt this version.
  if (self->accept_count > MAX_VERSION_COUNT) {
    ts_stack_halt(self->stack, version);
    LOG("bail_after_too_many_tries");
    return;
  }

  // If there are other in-progress versions that are clearly better than this one,
  // just halt this version.
  unsigned new_cost = ts_stack_error_cost(self->stack, version) + ERROR_COST_PER_SKIPPED_TREE;
  if (parser__better_version_exists(self, version, true, new_cost)) {
    ts_stack_halt(self->stack, version);
    LOG("bail_on_error");
    return;
  }

  // Perform any reductions that could have happened in this state, regardless of the lookahead.
  LOG("handle_error");
  uint32_t previous_version_count = ts_stack_version_count(self->stack);
  parser__do_all_potential_reductions(self, version, 0);
  uint32_t version_count = ts_stack_version_count(self->stack);

  // Push a discontinuity onto the stack. Merge all of the stack versions that
  // were created in the previous step.
  bool did_insert_missing_token = false;
  for (StackVersion v = version; v < version_count;) {
    if (!did_insert_missing_token) {
      TSStateId state = ts_stack_state(self->stack, v);
      // Try every token as a candidate "missing" insertion.
      for (TSSymbol missing_symbol = 1;
           missing_symbol < self->language->token_count;
           missing_symbol++) {
        TSStateId state_after_missing_symbol = ts_language_next_state(
          self->language, state, missing_symbol
        );
        if (state_after_missing_symbol == 0) continue;

        // Only worthwhile if the real lookahead becomes reducible after the
        // inserted token.
        if (ts_language_has_reduce_action(
          self->language,
          state_after_missing_symbol,
          lookahead_symbol
        )) {
          // Fork this version, push a zero-width "missing" leaf, and see if
          // the lookahead can then make progress.
          StackVersion version_with_missing_tree = ts_stack_copy_version(self->stack, v);
          Tree *missing_tree = ts_tree_make_missing_leaf(&self->tree_pool, missing_symbol, self->language);
          ts_stack_push(
            self->stack, version_with_missing_tree,
            missing_tree, false,
            state_after_missing_symbol
          );

          if (parser__do_all_potential_reductions(
            self, version_with_missing_tree,
            lookahead_symbol
          )) {
            LOG("recover_with_missing symbol:%s, state:%u", SYM_NAME(missing_symbol), state_after_missing_symbol);
            LOG_STACK();
            did_insert_missing_token = true;
            break;
          }
        }
      }
    }

    // Mark the error boundary: a NULL entry in the error state.
    ts_stack_push(self->stack, v, NULL, false, ERROR_STATE);
    v = (v == version) ? previous_version_count : v + 1;
  }

  // Collapse the versions created above back into `version`.
  // NOTE(review): this loop passes the same (version, previous_version_count)
  // pair to ts_stack_force_merge on every iteration and never uses `i` —
  // verify against upstream whether the loop index should participate here.
  for (unsigned i = previous_version_count; i < version_count; i++) {
    ts_stack_force_merge(self->stack, version, previous_version_count);
  }

  ts_stack_record_summary(self->stack, version, MAX_SUMMARY_DEPTH);
  LOG_STACK();
}
|
|
|
|
|
|
2017-05-01 14:41:55 -07:00
|
|
|
// Abort the parse: consume the rest of the input as one invisible error
// node, wrap everything on version 0 in a root-level error node, and accept
// with a synthetic end-of-input token so a (fully erroneous) tree exists.
static void parser__halt_parse(Parser *self) {
  LOG("halting_parse");
  LOG_STACK();

  // Skip the lexer to the end of the input and measure what was left unread.
  ts_lexer_advance_to_end(&self->lexer);
  Length remainder = length_sub(
    self->lexer.current_position,
    ts_stack_position(self->stack, 0)
  );

  // Cover the unread remainder with an invisible error leaf.
  Tree *filler = ts_tree_make_error(&self->tree_pool, remainder, length_zero(), 0, self->language);
  filler->visible = false;
  ts_stack_push(self->stack, 0, filler, false, 0);

  // Wrap the whole stack contents in a root error node.
  TreeArray child_list = array_new();
  Tree *error_root = ts_tree_make_error_node(&self->tree_pool, &child_list, self->language);
  ts_stack_push(self->stack, 0, error_root, false, 0);

  // Accept with a zero-width end-of-input token, then drop our reference.
  Tree *eof_leaf = ts_tree_make_leaf(&self->tree_pool, ts_builtin_sym_end, length_zero(), length_zero(), self->language);
  parser__accept(self, 0, eof_leaf);
  ts_tree_release(&self->tree_pool, eof_leaf);
}
|
|
|
|
|
|
2017-12-28 15:48:35 -08:00
|
|
|
// Recover a stack version by popping `depth` entries so that it lands back
// in `goal_state`, wrapping the popped trees in an error node.
//
// Slices that do not reach the goal state are halted and discarded. If an
// existing error precedes the popped region, its trees are folded into the
// same error node. Trailing extra tokens are re-pushed after the error node
// so they stay outside it. Returns true if at least one slice was
// successfully recovered.
static bool parser__recover_to_state(Parser *self, StackVersion version, unsigned depth,
                                     TSStateId goal_state) {
  StackSliceArray pop = ts_stack_pop_count(self->stack, version, depth);
  StackVersion previous_version = STACK_VERSION_NONE;

  for (unsigned i = 0; i < pop.size; i++) {
    StackSlice slice = pop.contents[i];

    // Several slices can share one version; only the first is kept.
    if (slice.version == previous_version) {
      ts_tree_array_delete(&self->tree_pool, &slice.trees);
      array_erase(&pop, i--);
      continue;
    }

    // Discard slices that did not land in the goal state.
    if (ts_stack_state(self->stack, slice.version) != goal_state) {
      ts_stack_halt(self->stack, slice.version);
      ts_tree_array_delete(&self->tree_pool, &slice.trees);
      array_erase(&pop, i--);
      continue;
    }

    // If an error node already sits below, absorb the popped trees into it
    // and renumber back onto this slice's version.
    StackSliceArray error_pop = ts_stack_pop_error(self->stack, slice.version);
    if (error_pop.size > 0) {
      StackSlice error_slice = error_pop.contents[0];
      array_push_all(&error_slice.trees, &slice.trees);
      array_delete(&slice.trees);
      slice.trees = error_slice.trees;
      ts_stack_renumber_version(self->stack, error_slice.version, slice.version);
    }

    // Keep trailing extras out of the error node; they are re-pushed below.
    TreeArray trailing_extras = ts_tree_array_remove_trailing_extras(&slice.trees);

    if (slice.trees.size > 0) {
      Tree *error = ts_tree_make_error_node(&self->tree_pool, &slice.trees, self->language);
      error->extra = true;
      ts_stack_push(self->stack, slice.version, error, false, goal_state);
    } else {
      array_delete(&slice.trees);
    }

    for (unsigned j = 0; j < trailing_extras.size; j++) {
      Tree *tree = trailing_extras.contents[j];
      ts_stack_push(self->stack, slice.version, tree, false, goal_state);
    }

    previous_version = slice.version;
    array_delete(&trailing_extras);
  }

  return previous_version != STACK_VERSION_NONE;
}
|
|
|
|
|
|
Simplify error recovery; eliminate recovery states
The previous approach to error recovery relied on special error-recovery
states in the parse table. For each token T, there was an error recovery
state in which the parser looked for *any* token that could follow T.
Unfortunately, sometimes the set of tokens that could follow T contained
conflicts. For example, in JS, the token '}' can be followed by the
open-ended 'template_chars' token, but also by ordinary tokens like
'identifier'. So with the old algorithm, when recovering from an
unexpected '}' token, the lexer had no way to distinguish identifiers
from template_chars.
This commit drops the error recovery states. Instead, when we encounter
an unexpected token T, we recover from the error by finding a previous
state S in the stack in which T would be valid, popping all of the nodes
after S, and wrapping them in an error.
This way, the lexer is always invoked in a normal parse state, in which
it is looking for a non-conflicting set of tokens. Eliminating the error
recovery states also shrinks the lex state machine significantly.
Signed-off-by: Rick Winfrey <rewinfrey@github.com>
2017-09-11 15:22:52 -07:00
|
|
|
// Attempt to recover from a syntax error at `lookahead`.
//
// Strategy (per the stack summary): walk previously-recorded stack states and
// look for an earlier state in which `lookahead` would be valid. If one is
// found, pop the intervening nodes into an ERROR node and resume from that
// state. Otherwise, shift `lookahead` onto the stack in the ERROR_STATE,
// i.e. skip the token. At end-of-input, wrap everything in an ERROR node and
// accept.
static void parser__recover(Parser *self, StackVersion version, Tree *lookahead) {
  bool did_recover = false;
  unsigned previous_version_count = ts_stack_version_count(self->stack);
  Length position = ts_stack_position(self->stack, version);
  StackSummary *summary = ts_stack_get_summary(self->stack, version);

  // Scan candidate recovery states recorded in the stack summary.
  for (unsigned i = 0; i < summary->size; i++) {
    StackSummaryEntry entry = summary->contents[i];

    // Never recover *into* the error state, and skip entries at the current
    // position (recovering there would make no progress).
    if (entry.state == ERROR_STATE) continue;
    if (entry.position.bytes == position.bytes) continue;

    // Estimate the cost of recovering to this entry: trees popped plus
    // bytes and lines skipped since that position.
    unsigned depth = entry.depth + ts_stack_depth_since_error(self->stack, version);
    unsigned new_cost =
      depth * ERROR_COST_PER_SKIPPED_TREE +
      (position.bytes - entry.position.bytes) * ERROR_COST_PER_SKIPPED_CHAR +
      (position.extent.row - entry.position.extent.row) * ERROR_COST_PER_SKIPPED_LINE;

    // Entries are in stack order, so once a cheaper competing version exists
    // there is no point considering deeper (more expensive) entries.
    if (parser__better_version_exists(self, version, false, new_cost)) break;

    // Only recover to a state that actually has an action for the lookahead.
    if (ts_language_has_actions(self->language, entry.state, lookahead->symbol)) {
      if (parser__recover_to_state(self, version, depth, entry.state)) {
        did_recover = true;
        LOG("recover state:%u, depth:%u", entry.state, depth);
        break;
      }
    }
  }

  // Recovery may have created new stack versions; drop the ones that were
  // halted, and drop duplicates that can merge with an existing version.
  for (unsigned i = previous_version_count; i < ts_stack_version_count(self->stack); i++) {
    if (ts_stack_is_halted(self->stack, i)) {
      ts_stack_remove_version(self->stack, i--);
    } else {
      for (unsigned j = 0; j < i; j++) {
        if (ts_stack_can_merge(self->stack, j, i)) {
          ts_stack_remove_version(self->stack, i--);
          break;
        }
      }
    }
  }

  // If recovery succeeded but produced too many concurrent versions, halt
  // this one to bound the amount of speculative parsing.
  if (did_recover && ts_stack_version_count(self->stack) > MAX_VERSION_COUNT) {
    ts_stack_halt(self->stack, version);
    return;
  }

  // At end-of-input there is nothing left to skip: wrap the remaining nodes
  // in an ERROR node and accept the parse.
  if (lookahead->symbol == ts_builtin_sym_end) {
    LOG("recover_eof");
    TreeArray children = array_new();
    Tree *parent = ts_tree_make_error_node(&self->tree_pool, &children, self->language);
    ts_stack_push(self->stack, version, parent, false, 1);
    parser__accept(self, version, lookahead);
    return;
  }

  // Could not recover to a prior state: skip the token by shifting it in the
  // error state. Treat it as "extra" if state 1's last action for this symbol
  // is an extra shift.
  unsigned n;
  const TSParseAction *actions = ts_language_actions(self->language, 1, lookahead->symbol, &n);
  bool extra = n > 0 && actions[n - 1].type == TSParseActionTypeShift && actions[n - 1].params.extra;

  parser__shift(self, version, ERROR_STATE, lookahead, extra);

  // If skipping made this version strictly worse than another, halt it.
  if (parser__better_version_exists(self, version, true, ts_stack_error_cost(self->stack, version))) {
    ts_stack_halt(self->stack, version);
  } else {
    LOG("skip_token symbol:%s", SYM_NAME(lookahead->symbol));
  }
}
|
|
|
|
|
|
2017-08-30 16:48:15 -07:00
|
|
|
// Advance one stack version by one token: fetch a lookahead tree, then apply
// the parse-table actions for (state, lookahead) — shift, reduce, accept, or
// recover — looping over reductions until a shift (or terminal action)
// consumes the lookahead.
//
// Ownership note: `lookahead` is released (and popped from `reusable_node` if
// it came from there) on every path that consumes it.
static void parser__advance(Parser *self, StackVersion version, ReusableNode *reusable_node) {
  TSStateId state = ts_stack_state(self->stack, version);
  TableEntry table_entry;
  Tree *lookahead = parser__get_lookahead(self, version, &state, reusable_node, &table_entry);

  for (;;) {
    StackVersion last_reduction_version = STACK_VERSION_NONE;

    for (uint32_t i = 0; i < table_entry.action_count; i++) {
      TSParseAction action = table_entry.actions[i];

      switch (action.type) {
        case TSParseActionTypeShift: {
          // Repetition shifts are skipped here — presumably handled
          // elsewhere; TODO(review): confirm.
          if (action.params.repetition) break;

          TSStateId next_state;
          if (action.params.extra) {
            // Extra tokens (e.g. comments) don't change the parse state.
            next_state = state;
            LOG("shift_extra");
          } else {
            next_state = action.params.state;
            LOG("shift state:%u", next_state);
          }

          // A reused subtree must be broken down into leaves before
          // shifting; recompute the goto state for the resulting leaf.
          if (lookahead->child_count > 0) {
            parser__breakdown_lookahead(self, &lookahead, state, reusable_node);
            next_state = ts_language_next_state(self->language, state, lookahead->symbol);
          }

          parser__shift(self, version, next_state, lookahead, action.params.extra);
          if (lookahead == reusable_node->tree) reusable_node_pop(reusable_node);
          ts_tree_release(&self->tree_pool, lookahead);
          return;
        }

        case TSParseActionTypeReduce: {
          // Multiple actions for one (state, token) means a conflict, so the
          // resulting node is fragile (may need to be re-parsed later).
          bool is_fragile = table_entry.action_count > 1;
          LOG("reduce sym:%s, child_count:%u", SYM_NAME(action.params.symbol), action.params.child_count);
          StackSliceArray reduction = parser__reduce(
            self, version, action.params.symbol, action.params.child_count,
            action.params.dynamic_precedence, action.params.alias_sequence_id,
            is_fragile
          );
          StackSlice slice = *array_front(&reduction);
          last_reduction_version = slice.version;
          break;
        }

        case TSParseActionTypeAccept: {
          LOG("accept");
          parser__accept(self, version, lookahead);
          ts_tree_release(&self->tree_pool, lookahead);
          return;
        }

        case TSParseActionTypeRecover: {
          // Error recovery operates on single tokens, so fully break down a
          // reused subtree first.
          while (lookahead->child_count > 0) {
            parser__breakdown_lookahead(self, &lookahead, state, reusable_node);
          }
          parser__recover(self, version, lookahead);
          if (lookahead == reusable_node->tree) reusable_node_pop(reusable_node);
          ts_tree_release(&self->tree_pool, lookahead);
          return;
        }
      }
    }

    if (last_reduction_version != STACK_VERSION_NONE) {
      // Continue parsing from the version produced by the last reduction,
      // renumbered back to this version's slot.
      ts_stack_renumber_version(self->stack, last_reduction_version, version);
      LOG_STACK();
    } else if (!parser__breakdown_top_of_stack(self, version)) {
      // No action applied and nothing on the stack can be broken down
      // further: this is a syntax error.
      if (state == ERROR_STATE) {
        // Already in the error state: just push the token and move on.
        ts_stack_push(self->stack, version, lookahead, false, ERROR_STATE);
        return;
      }

      parser__handle_error(self, version, lookahead->first_leaf.symbol);

      if (ts_stack_is_halted(self->stack, version)) {
        ts_tree_release(&self->tree_pool, lookahead);
        return;
      } else if (lookahead->size.bytes == 0) {
        // A zero-size lookahead may lex differently after error handling;
        // discard it and fetch a fresh one.
        ts_tree_release(&self->tree_pool, lookahead);
        state = ts_stack_state(self->stack, version);
        lookahead = parser__get_lookahead(self, version, &state, reusable_node, &table_entry);
      }
    }

    // Refresh the state and table entry before the next iteration.
    state = ts_stack_state(self->stack, version);
    ts_language_table_entry(self->language, state, lookahead->first_leaf.symbol, &table_entry);
  }
}
|
|
|
|
|
|
2016-08-29 12:08:58 -07:00
|
|
|
// Initialize a parser's subsystems: lexer, reduce-action scratch array,
// tree pool, and parse stack. Returns true (no failure path here; the
// underlying allocators abort or are checked elsewhere — TODO confirm).
//
// Note: the tree pool must be initialized before the stack, which holds a
// reference to it.
bool parser_init(Parser *self) {
  ts_lexer_init(&self->lexer);
  array_init(&self->reduce_actions);
  array_grow(&self->reduce_actions, 4);  // pre-size the common case
  ts_tree_pool_init(&self->tree_pool);
  self->stack = ts_stack_new(&self->tree_pool);
  self->finished_tree = NULL;
  parser__set_cached_token(self, 0, NULL, NULL);  // clear the token cache
  return true;
}
|
|
|
|
|
|
2016-12-02 22:03:48 -08:00
|
|
|
// Install `language` on the parser, tearing down the previous language's
// external-scanner payload (if any) and creating a payload for the new
// language's external scanner (if it has one). Passing NULL clears both the
// payload and the language, which is how parser_destroy releases them.
void parser_set_language(Parser *self, const TSLanguage *language) {
  // Destroy the old external-scanner state before switching languages.
  if (self->external_scanner_payload && self->language->external_scanner.destroy) {
    self->language->external_scanner.destroy(self->external_scanner_payload);
  }

  // Create fresh scanner state for the new language, or none at all.
  self->external_scanner_payload =
    (language && language->external_scanner.create)
      ? language->external_scanner.create()
      : NULL;

  self->language = language;
}
|
|
|
|
|
|
2016-08-29 12:08:58 -07:00
|
|
|
// Release all resources owned by the parser: the stack, the reduce-action
// array, the tree pool, and finally the external-scanner payload (dropped by
// setting the language to NULL).
void parser_destroy(Parser *self) {
  if (self->stack) {
    ts_stack_delete(self->stack);
  }
  if (self->reduce_actions.contents) {
    array_delete(&self->reduce_actions);
  }
  ts_tree_pool_delete(&self->tree_pool);
  // Clearing the language also destroys the external scanner payload.
  parser_set_language(self, NULL);
}
|
|
|
|
|
|
2017-05-01 13:04:06 -07:00
|
|
|
// Top-level parse driver. Repeatedly advances every live stack version in
// round-robin order (always working on the version that is furthest behind,
// so versions stay roughly in sync), condenses equivalent versions, and stops
// once a finished tree beats every remaining version's error cost — or, when
// `halt_on_error` is set, as soon as any error has been committed.
//
// Returns the finished tree (ownership semantics per ts_tree_* conventions —
// TODO(review): confirm whether the caller must retain it).
Tree *parser_parse(Parser *self, TSInput input, Tree *old_tree, bool halt_on_error) {
  parser__start(self, input, old_tree);

  StackVersion version = STACK_VERSION_NONE;
  uint32_t position = 0, last_position = 0;
  ReusableNode reusable_node;

  do {
    for (version = 0; version < ts_stack_version_count(self->stack); version++) {
      // Each version re-reads the shared reusable-node cursor; the last
      // version's cursor is written back after the loop.
      reusable_node = self->reusable_node;

      while (!ts_stack_is_halted(self->stack, version)) {
        position = ts_stack_position(self->stack, version).bytes;
        // Stop advancing this version once it has caught up with (or, for
        // secondary versions, reached) the furthest position processed so
        // far — this keeps all versions advancing in lock-step.
        if (position > last_position || (version > 0 && position == last_position)) {
          last_position = position;
          break;
        }

        LOG("process version:%d, version_count:%u, state:%d, row:%u, col:%u",
            version, ts_stack_version_count(self->stack),
            ts_stack_state(self->stack, version),
            ts_stack_position(self->stack, version).extent.row,
            ts_stack_position(self->stack, version).extent.column);

        parser__advance(self, version, &reusable_node);
        LOG_STACK();
      }
    }

    self->reusable_node = reusable_node;

    // Merge/discard redundant stack versions; get the best error cost among
    // the survivors.
    unsigned min_error_cost = parser__condense_stack(self);
    if (self->finished_tree && self->finished_tree->error_cost < min_error_cost) {
      // No remaining version can beat the finished tree — done.
      break;
    } else if (halt_on_error && min_error_cost > 0) {
      // Caller asked to bail out on the first committed error.
      parser__halt_parse(self);
      break;
    }

    self->in_ambiguity = version > 1;
  } while (version != 0);  // version == 0 means no versions remain to advance

  ts_stack_clear(self->stack);
  parser__set_cached_token(self, 0, NULL, NULL);
  ts_tree_assign_parents(self->finished_tree, &self->tree_pool, self->language);

  LOG("done");
  LOG_TREE();
  return self->finished_tree;
}
|