Merge pull request #475 from tree-sitter/null-characters

Don't assume that null characters mean EOF

commit 3214de55f0

10 changed files with 271 additions and 149 deletions
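Previously, both the runtime lexer and the generated lexers treated a null character in the source text as the end of the file, so a document containing an embedded '\0' byte was effectively truncated at that byte. This change threads an explicit end-of-file signal through the lexing ABI instead. A minimal sketch of the difference as seen from lexing code (`lookahead` and `eof` are the real TSLexer members; the variable names are illustrative):

    // Old convention: any null byte looked like the end of the input.
    bool at_eof_old = lexer->lookahead == 0;

    // New convention: EOF is reported explicitly by the lexer, so a '\0'
    // byte in the middle of a file is just another character to match.
    bool at_eof_new = lexer->eof(lexer);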
.gitmodules (vendored): 0 changes
@@ -2,7 +2,7 @@ use super::coincident_tokens::CoincidentTokenIndex;
 use super::token_conflicts::TokenConflictMap;
 use crate::generate::dedup::split_state_id_groups;
 use crate::generate::grammars::{LexicalGrammar, SyntaxGrammar};
-use crate::generate::nfa::{CharacterSet, NfaCursor};
+use crate::generate::nfa::NfaCursor;
 use crate::generate::rules::{Symbol, TokenSet};
 use crate::generate::tables::{AdvanceAction, LexState, LexTable, ParseStateId, ParseTable};
 use log::info;
@@ -189,13 +189,10 @@ impl<'a> LexTableBuilder<'a> {
         // character that leads to the empty set of NFA states.
         if eof_valid {
             let (next_state_id, _) = self.add_state(Vec::new(), false);
-            self.table.states[state_id].advance_actions.push((
-                CharacterSet::empty().add_char('\0'),
-                AdvanceAction {
-                    state: next_state_id,
-                    in_main_token: true,
-                },
-            ));
+            self.table.states[state_id].eof_action = Some(AdvanceAction {
+                state: next_state_id,
+                in_main_token: true,
+            });
         }

         for transition in transitions {
@@ -273,6 +270,7 @@ fn minimize_lex_table(table: &mut LexTable, parse_table: &mut ParseTable) {
         let signature = (
             i == 0,
             state.accept_action,
+            state.eof_action.is_some(),
             state
                 .advance_actions
                 .iter()
@@ -320,6 +318,9 @@ fn minimize_lex_table(table: &mut LexTable, parse_table: &mut ParseTable) {
         for (_, advance_action) in new_state.advance_actions.iter_mut() {
             advance_action.state = group_ids_by_state_id[advance_action.state];
         }
+        if let Some(eof_action) = &mut new_state.eof_action {
+            eof_action.state = group_ids_by_state_id[eof_action.state];
+        }
         new_states.push(new_state);
     }
@@ -364,6 +365,9 @@ fn sort_states(table: &mut LexTable, parse_table: &mut ParseTable) {
             for (_, advance_action) in state.advance_actions.iter_mut() {
                 advance_action.state = new_ids_by_old_id[advance_action.state];
             }
+            if let Some(eof_action) = &mut state.eof_action {
+                eof_action.state = new_ids_by_old_id[eof_action.state];
+            }
             state
         })
         .collect();
@@ -513,6 +513,13 @@ impl Generator {
         );
         indent!(self);
         add_line!(self, "START_LEXER();");
+
+        if self.next_abi {
+            add_line!(self, "eof = lexer->eof(lexer);");
+        } else {
+            add_line!(self, "eof = lookahead == 0;");
+        }
+
         add_line!(self, "switch (state) {{");
         indent!(self);
@@ -540,6 +547,10 @@ impl Generator {
             add_line!(self, "ACCEPT_TOKEN({});", self.symbol_ids[&accept_action]);
         }

+        if let Some(eof_action) = state.eof_action {
+            add_line!(self, "if (eof) ADVANCE({});", eof_action.state);
+        }
+
         let mut ruled_out_characters = HashSet::new();
         for (characters, action) in state.advance_actions {
             let previous_length = self.buffer.len();
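Together, these two hunks change the shape of the C code that the generator emits. Under the new ABI, a generated lexer function looks roughly like the sketch below (the state numbers and the accepted symbol are hypothetical; `START_LEXER`, `ADVANCE`, `ACCEPT_TOKEN`, and `END_STATE` are the macros generated parsers already rely on):

    static bool ts_lex(TSLexer *lexer, TSStateId state) {
      START_LEXER();
      eof = lexer->eof(lexer);  // under the old ABI: eof = lookahead == 0;
      switch (state) {
        case 0:
          if (eof) ADVANCE(1);               // emitted from the state's eof_action
          if (lookahead == '"') ADVANCE(2);  // ordinary advance_actions follow
          END_STATE();
        case 1:
          ACCEPT_TOKEN(ts_builtin_sym_end);  // accept the end-of-file token
          END_STATE();
        default:
          return false;
      }
    }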
@@ -77,6 +77,7 @@ pub(crate) struct AdvanceAction {
 #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
 pub(crate) struct LexState {
     pub accept_action: Option<Symbol>,
+    pub eof_action: Option<AdvanceAction>,
     pub advance_actions: Vec<(CharacterSet, AdvanceAction)>,
 }
@@ -21,6 +21,7 @@ const LANGUAGES: &'static [&'static str] = &[
     "go",
     "html",
     "javascript",
+    "json",
     "python",
 ];
@@ -258,6 +258,36 @@ fn test_parsing_text_with_byte_order_mark() {
     assert_eq!(tree.root_node().start_byte(), 3);
 }

+#[test]
+fn test_parsing_invalid_chars_at_eof() {
+    let mut parser = Parser::new();
+    parser.set_language(get_language("json")).unwrap();
+    let tree = parser.parse(b"\xdf", None).unwrap();
+    assert_eq!(tree.root_node().to_sexp(), "(ERROR (UNEXPECTED INVALID))");
+}
+
+#[test]
+fn test_parsing_ends_when_input_callback_returns_empty() {
+    let mut parser = Parser::new();
+    parser.set_language(get_language("javascript")).unwrap();
+    let mut i = 0;
+    let source = b"abcdefghijklmnoqrs";
+    let tree = parser
+        .parse_with(
+            &mut |offset, _| {
+                i += 1;
+                if offset >= 6 {
+                    b""
+                } else {
+                    &source[offset..usize::min(source.len(), offset + 3)]
+                }
+            },
+            None,
+        )
+        .unwrap();
+    assert_eq!(tree.root_node().end_byte(), 6);
+}
+
 // Incremental parsing

 #[test]
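The same guarantee is visible through the C API: `ts_parser_parse_string` takes an explicit byte length, so the input may contain embedded null bytes or, as in the new test above, a byte sequence that is invalid UTF-8 right at EOF. A sketch, assuming a JSON grammar is linked in via the conventional `tree_sitter_json` entry point:

    #include <stdio.h>
    #include <stdlib.h>
    #include <tree_sitter/api.h>

    const TSLanguage *tree_sitter_json(void);

    int main(void) {
      TSParser *parser = ts_parser_new();
      ts_parser_set_language(parser, tree_sitter_json());

      // 0xDF starts a two-byte UTF-8 sequence that is cut off by EOF.
      TSTree *tree = ts_parser_parse_string(parser, NULL, "\xdf", 1);

      char *sexp = ts_node_string(ts_tree_root_node(tree));
      printf("%s\n", sexp);  // prints: (ERROR (UNEXPECTED INVALID))

      free(sexp);
      ts_tree_delete(tree);
      ts_parser_delete(parser);
      return 0;
    }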
@@ -928,10 +958,10 @@ fn test_parsing_with_a_newly_included_range() {
     assert_eq!(
         tree.changed_ranges(&first_tree).collect::<Vec<_>>(),
         vec![Range {
-            start_byte: first_code_end_index + 1,
-            end_byte: second_code_end_index + 1,
-            start_point: Point::new(0, first_code_end_index + 1),
-            end_point: Point::new(0, second_code_end_index + 1),
+            start_byte: first_code_end_index,
+            end_byte: second_code_end_index,
+            start_point: Point::new(0, first_code_end_index),
+            end_point: Point::new(0, second_code_end_index),
         }]
     );
 }
@@ -45,7 +45,8 @@ struct TSLexer {
   void (*advance)(TSLexer *, bool);
   void (*mark_end)(TSLexer *);
   uint32_t (*get_column)(TSLexer *);
-  bool (*is_at_included_range_start)(TSLexer *);
+  bool (*is_at_included_range_start)(const TSLexer *);
+  bool (*eof)(const TSLexer *);
 };

 typedef enum {
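For external scanners, the new `eof` callback replaces the old idiom of treating `lexer->lookahead == 0` as end-of-file. A minimal scanner sketch (the grammar name `example` and the token are hypothetical):

    #include <tree_sitter/parser.h>

    enum TokenType { LINE_END };

    bool tree_sitter_example_external_scanner_scan(
      void *payload,
      TSLexer *lexer,
      const bool *valid_symbols
    ) {
      if (valid_symbols[LINE_END]) {
        // A '\0' byte no longer implies EOF, so ask the lexer directly.
        if (lexer->eof(lexer) || lexer->lookahead == '\n') {
          lexer->result_symbol = LINE_END;
          return true;
        }
      }
      return false;
    }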
@@ -126,6 +127,7 @@ struct TSLanguage {
 #define START_LEXER() \
   bool result = false; \
   bool skip = false; \
+  bool eof = false; \
   int32_t lookahead; \
   goto start; \
 next_state: \
lib/src/lexer.c: 320 changes
@@ -4,23 +4,56 @@
 #include "./length.h"
 #include "./unicode.h"

-#define LOG(...) \
-  if (self->logger.log) { \
-    snprintf(self->debug_buffer, TREE_SITTER_SERIALIZATION_BUFFER_SIZE, __VA_ARGS__); \
-    self->logger.log(self->logger.payload, TSLogTypeLex, self->debug_buffer); \
+#define LOG(message, character) \
+  if (self->logger.log) { \
+    snprintf( \
+      self->debug_buffer, \
+      TREE_SITTER_SERIALIZATION_BUFFER_SIZE, \
+      32 <= character && character < 127 ? \
+        message " character:'%c'" : \
+        message " character:%d", \
+      character \
+    ); \
+    self->logger.log( \
+      self->logger.payload, \
+      TSLogTypeLex, \
+      self->debug_buffer \
+    ); \
   }

-#define LOG_CHARACTER(message, character) \
-  LOG( \
-    32 <= character && character < 127 ? \
-      message " character:'%c'" : \
-      message " character:%d", character \
-  )
-
-static const char empty_chunk[3] = { 0, 0 };
-
 static const int32_t BYTE_ORDER_MARK = 0xFEFF;

+static const TSRange DEFAULT_RANGE = {
+  .start_point = {
+    .row = 0,
+    .column = 0,
+  },
+  .end_point = {
+    .row = UINT32_MAX,
+    .column = UINT32_MAX,
+  },
+  .start_byte = 0,
+  .end_byte = UINT32_MAX
+};
+
+// Check if the lexer has reached EOF. This state is stored
+// by setting the lexer's `current_included_range_index` such that
+// it has consumed all of its available ranges.
+static bool ts_lexer__eof(const TSLexer *_self) {
+  Lexer *self = (Lexer *)_self;
+  return self->current_included_range_index == self->included_range_count;
+}
+
+// Clear the currently stored chunk of source code, because the lexer's
+// position has changed.
+static void ts_lexer__clear_chunk(Lexer *self) {
+  self->chunk = NULL;
+  self->chunk_size = 0;
+  self->chunk_start = 0;
+}
+
+// Call the lexer's input callback to obtain a new chunk of source code
+// for the current position.
 static void ts_lexer__get_chunk(Lexer *self) {
   self->chunk_start = self->current_position.bytes;
   self->chunk = self->input.read(
@@ -29,9 +62,15 @@ static void ts_lexer__get_chunk(Lexer *self) {
     self->current_position.extent,
     &self->chunk_size
   );
-  if (!self->chunk_size) self->chunk = empty_chunk;
+  if (!self->chunk_size) {
+    self->current_included_range_index = self->included_range_count;
+    self->chunk = NULL;
+  }
 }

+// Decode the next unicode character in the current chunk of source code.
+// This assumes that the lexer has already retrieved a chunk of source
+// code that spans the current position.
 static void ts_lexer__get_lookahead(Lexer *self) {
   uint32_t position_in_chunk = self->current_position.bytes - self->chunk_start;
   const uint8_t *chunk = (const uint8_t *)self->chunk + position_in_chunk;
@@ -63,10 +102,17 @@ static void ts_lexer__get_lookahead(Lexer *self) {
   }
 }

-static void ts_lexer__advance(TSLexer *payload, bool skip) {
-  Lexer *self = (Lexer *)payload;
-  if (self->chunk == empty_chunk)
-    return;
+// Advance to the next character in the source code, retrieving a new
+// chunk of source code if needed.
+static void ts_lexer__advance(TSLexer *_self, bool skip) {
+  Lexer *self = (Lexer *)_self;
+  if (!self->chunk) return;
+
+  if (skip) {
+    LOG("skip", self->data.lookahead);
+  } else {
+    LOG("consume", self->data.lookahead);
+  }

   if (self->lookahead_size) {
     self->current_position.bytes += self->lookahead_size;
@@ -78,53 +124,65 @@ static void ts_lexer__advance(TSLexer *payload, bool skip) {
     }
   }

-  TSRange *current_range = &self->included_ranges[self->current_included_range_index];
-  if (self->current_position.bytes == current_range->end_byte) {
-    self->current_included_range_index++;
-    if (self->current_included_range_index == self->included_range_count) {
-      self->data.lookahead = '\0';
-      self->lookahead_size = 1;
-      return;
-    } else {
-      current_range++;
-      self->current_position = (Length) {
-        current_range->start_byte,
-        current_range->start_point,
-      };
+  const TSRange *current_range = NULL;
+  if (self->current_included_range_index < self->included_range_count) {
+    current_range = &self->included_ranges[self->current_included_range_index];
+    if (self->current_position.bytes == current_range->end_byte) {
+      self->current_included_range_index++;
+      if (self->current_included_range_index < self->included_range_count) {
+        current_range++;
+        self->current_position = (Length) {
+          current_range->start_byte,
+          current_range->start_point,
+        };
+      } else {
+        current_range = NULL;
+      }
     }
   }

-  if (skip) {
-    LOG_CHARACTER("skip", self->data.lookahead);
-    self->token_start_position = self->current_position;
-  } else {
-    LOG_CHARACTER("consume", self->data.lookahead);
-  }
+  if (skip) self->token_start_position = self->current_position;

-  if (self->current_position.bytes >= self->chunk_start + self->chunk_size) {
-    ts_lexer__get_chunk(self);
+  if (current_range) {
+    if (self->current_position.bytes >= self->chunk_start + self->chunk_size) {
+      ts_lexer__get_chunk(self);
+    }
+    ts_lexer__get_lookahead(self);
+  } else {
+    ts_lexer__clear_chunk(self);
+    self->data.lookahead = '\0';
+    self->lookahead_size = 1;
   }
-
-  ts_lexer__get_lookahead(self);
 }

-static void ts_lexer__mark_end(TSLexer *payload) {
-  Lexer *self = (Lexer *)payload;
-  TSRange *current_included_range = &self->included_ranges[self->current_included_range_index];
-  if (self->current_included_range_index > 0 &&
-      self->current_position.bytes == current_included_range->start_byte) {
-    TSRange *previous_included_range = current_included_range - 1;
-    self->token_end_position = (Length) {
-      previous_included_range->end_byte,
-      previous_included_range->end_point,
-    };
-  } else {
-    self->token_end_position = self->current_position;
+// Mark that a token match has completed. This can be called multiple
+// times if a longer match is found later.
+static void ts_lexer__mark_end(TSLexer *_self) {
+  Lexer *self = (Lexer *)_self;
+  if (!ts_lexer__eof(&self->data)) {
+    // If the lexer is right at the beginning of an included range,
+    // then the token should be considered to end at the *end* of the
+    // previous included range, rather than here.
+    TSRange *current_included_range = &self->included_ranges[
+      self->current_included_range_index
+    ];
+    if (
+      self->current_included_range_index > 0 &&
+      self->current_position.bytes == current_included_range->start_byte
+    ) {
+      TSRange *previous_included_range = current_included_range - 1;
+      self->token_end_position = (Length) {
+        previous_included_range->end_byte,
+        previous_included_range->end_point,
+      };
+      return;
+    }
   }
+  self->token_end_position = self->current_position;
 }

-static uint32_t ts_lexer__get_column(TSLexer *payload) {
-  Lexer *self = (Lexer *)payload;
+static uint32_t ts_lexer__get_column(TSLexer *_self) {
+  Lexer *self = (Lexer *)_self;
   uint32_t goal_byte = self->current_position.bytes;

   self->current_position.bytes -= self->current_position.extent.column;
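The comment on `ts_lexer__mark_end` above refers to how lexing code implements longest-match lookahead: mark a provisional token end, keep advancing, and mark again only if the longer match pans out. A sketch against the real TSLexer callbacks (the helper name is hypothetical):

    // Match "ab", extending the token to "abc" when possible.
    static bool scan_ab_or_abc(TSLexer *lexer) {
      if (lexer->lookahead != 'a') return false;
      lexer->advance(lexer, false);
      if (lexer->lookahead != 'b') return false;
      lexer->advance(lexer, false);
      lexer->mark_end(lexer);      // provisional end: token is "ab"
      if (lexer->lookahead == 'c') {
        lexer->advance(lexer, false);
        lexer->mark_end(lexer);    // longer match succeeded: token is "abc"
      }
      return true;
    }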
@@ -136,67 +194,69 @@ static uint32_t ts_lexer__get_column(TSLexer *payload) {

   uint32_t result = 0;
   while (self->current_position.bytes < goal_byte) {
-    ts_lexer__advance(payload, false);
+    ts_lexer__advance(&self->data, false);
     result++;
   }

   return result;
 }

-static bool ts_lexer__is_at_included_range_start(TSLexer *payload) {
-  const Lexer *self = (const Lexer *)payload;
-  TSRange *current_range = &self->included_ranges[self->current_included_range_index];
-  return self->current_position.bytes == current_range->start_byte;
+// Is the lexer at a boundary between two disjoint included ranges of
+// source code? This is exposed as an API because some languages' external
+// scanners need to perform custom actions at these boundaries.
+static bool ts_lexer__is_at_included_range_start(const TSLexer *_self) {
+  const Lexer *self = (const Lexer *)_self;
+  if (self->current_included_range_index < self->included_range_count) {
+    TSRange *current_range = &self->included_ranges[self->current_included_range_index];
+    return self->current_position.bytes == current_range->start_byte;
+  } else {
+    return false;
+  }
 }

-// The lexer's methods are stored as a struct field so that generated
-// parsers can call them without needing to be linked against this library.
-
 void ts_lexer_init(Lexer *self) {
   *self = (Lexer) {
     .data = {
+      // The lexer's methods are stored as struct fields so that generated
+      // parsers can call them without needing to be linked against this
+      // library.
       .advance = ts_lexer__advance,
       .mark_end = ts_lexer__mark_end,
       .get_column = ts_lexer__get_column,
       .is_at_included_range_start = ts_lexer__is_at_included_range_start,
+      .eof = ts_lexer__eof,
       .lookahead = 0,
       .result_symbol = 0,
     },
     .chunk = NULL,
     .chunk_size = 0,
     .chunk_start = 0,
-    .current_position = {UINT32_MAX, {0, 0}},
+    .current_position = {0, {0, 0}},
     .logger = {
       .payload = NULL,
       .log = NULL
     },
     .included_ranges = NULL,
     .included_range_count = 0,
     .current_included_range_index = 0,
   };
-
-  self->included_ranges = NULL;
   ts_lexer_set_included_ranges(self, NULL, 0);
+  ts_lexer_reset(self, length_zero());
 }

 void ts_lexer_delete(Lexer *self) {
   ts_free(self->included_ranges);
 }

-void ts_lexer_set_input(Lexer *self, TSInput input) {
-  self->input = input;
-  self->data.lookahead = 0;
-  self->lookahead_size = 0;
-  self->chunk = 0;
-  self->chunk_start = 0;
-  self->chunk_size = 0;
-}
-
 static void ts_lexer_goto(Lexer *self, Length position) {
   self->current_position = position;
   bool found_included_range = false;
+
+  // Move to the first valid position at or after the given position.
   for (unsigned i = 0; i < self->included_range_count; i++) {
     TSRange *included_range = &self->included_ranges[i];
     if (included_range->end_byte > position.bytes) {
       if (included_range->start_byte > position.bytes) {
-        position = (Length) {
+        self->current_position = (Length) {
           .bytes = included_range->start_byte,
           .extent = included_range->start_point,
         };
@@ -208,46 +268,61 @@ static void ts_lexer_goto(Lexer *self, Length position) {
     }
   }

-  if (!found_included_range) {
+  if (found_included_range) {
+    // If the current position is outside of the current chunk of text,
+    // then clear out the current chunk of text.
+    if (self->chunk && (
+      position.bytes < self->chunk_start ||
+      position.bytes >= self->chunk_start + self->chunk_size
+    )) {
+      ts_lexer__clear_chunk(self);
+    }
+
+    self->lookahead_size = 0;
+    self->data.lookahead = '\0';
+  }
+
+  // If the given position is beyond any of included ranges, move to the EOF
+  // state - past the end of the included ranges.
+  else {
+    self->current_included_range_index = self->included_range_count;
     TSRange *last_included_range = &self->included_ranges[self->included_range_count - 1];
-    position = (Length) {
+    self->current_position = (Length) {
       .bytes = last_included_range->end_byte,
       .extent = last_included_range->end_point,
     };
-    self->chunk = empty_chunk;
-    self->chunk_start = position.bytes;
-    self->chunk_size = 2;
+    ts_lexer__clear_chunk(self);
+    self->lookahead_size = 1;
+    self->data.lookahead = '\0';
   }
-
-  self->token_start_position = position;
-  self->token_end_position = LENGTH_UNDEFINED;
-  self->current_position = position;
-
-  if (self->chunk && (position.bytes < self->chunk_start ||
-                      position.bytes >= self->chunk_start + self->chunk_size)) {
-    self->chunk = 0;
-    self->chunk_start = 0;
-    self->chunk_size = 0;
-  }
-
-  self->lookahead_size = 0;
-  self->data.lookahead = 0;
 }

+void ts_lexer_set_input(Lexer *self, TSInput input) {
+  self->input = input;
+  ts_lexer__clear_chunk(self);
+  ts_lexer_goto(self, self->current_position);
+}
+
+// Move the lexer to the given position. This doesn't do any work
+// if the parser is already at the given position.
 void ts_lexer_reset(Lexer *self, Length position) {
-  if (position.bytes != self->current_position.bytes) ts_lexer_goto(self, position);
+  if (position.bytes != self->current_position.bytes) {
+    ts_lexer_goto(self, position);
+  }
 }

 void ts_lexer_start(Lexer *self) {
   self->token_start_position = self->current_position;
   self->token_end_position = LENGTH_UNDEFINED;
   self->data.result_symbol = 0;
-  if (!self->chunk) ts_lexer__get_chunk(self);
-  if (!self->lookahead_size) ts_lexer__get_lookahead(self);
-  if (
-    self->current_position.bytes == 0 &&
-    self->data.lookahead == BYTE_ORDER_MARK
-  ) ts_lexer__advance((TSLexer *)self, true);
+  if (!ts_lexer__eof(&self->data)) {
+    if (!self->chunk_size) ts_lexer__get_chunk(self);
+    if (!self->lookahead_size) ts_lexer__get_lookahead(self);
+    if (
+      self->current_position.bytes == 0 &&
+      self->data.lookahead == BYTE_ORDER_MARK
+    ) ts_lexer__advance(&self->data, true);
+  }
 }
@@ -271,8 +346,8 @@ void ts_lexer_finish(Lexer *self, uint32_t *lookahead_end_byte) {
 }

 void ts_lexer_advance_to_end(Lexer *self) {
-  while (self->data.lookahead != 0) {
-    ts_lexer__advance((TSLexer *)self, false);
+  while (self->chunk) {
+    ts_lexer__advance(&self->data, false);
   }
 }
@@ -280,30 +355,19 @@ void ts_lexer_mark_end(Lexer *self) {
   ts_lexer__mark_end(&self->data);
 }

-static const TSRange DEFAULT_RANGES[] = {
-  {
-    .start_point = {
-      .row = 0,
-      .column = 0,
-    },
-    .end_point = {
-      .row = UINT32_MAX,
-      .column = UINT32_MAX,
-    },
-    .start_byte = 0,
-    .end_byte = UINT32_MAX
-  }
-};
-
-void ts_lexer_set_included_ranges(Lexer *self, const TSRange *ranges, uint32_t count) {
+void ts_lexer_set_included_ranges(
+  Lexer *self,
+  const TSRange *ranges,
+  uint32_t count
+) {
   if (count == 0 || !ranges) {
-    ranges = DEFAULT_RANGES;
+    ranges = &DEFAULT_RANGE;
     count = 1;
   }

-  size_t sz = count * sizeof(TSRange);
-  self->included_ranges = ts_realloc(self->included_ranges, sz);
-  memcpy(self->included_ranges, ranges, sz);
+  size_t size = count * sizeof(TSRange);
+  self->included_ranges = ts_realloc(self->included_ranges, size);
+  memcpy(self->included_ranges, ranges, size);
   self->included_range_count = count;
   ts_lexer_goto(self, self->current_position);
 }
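Several of these functions now share one explicit EOF representation, `current_included_range_index == included_range_count`, which is also reached at the end of the last included range when only parts of a document are parsed via `ts_parser_set_included_ranges`. A sketch of parsing two slices of a buffer through the public C API (the byte offsets are illustrative):

    #include <tree_sitter/api.h>

    // Parse only bytes 0..5 and 10..15 of `source`. When the lexer moves
    // past byte 15 it enters the explicit EOF state instead of synthesizing
    // a '\0' character.
    TSTree *parse_two_slices(TSParser *parser, const char *source, uint32_t length) {
      TSRange ranges[2] = {
        {{0, 0}, {0, 5}, 0, 5},
        {{0, 10}, {0, 15}, 10, 15},
      };
      ts_parser_set_included_ranges(parser, ranges, 2);
      return ts_parser_parse_string(parser, NULL, source, length);
    }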
@@ -16,7 +16,7 @@ typedef struct {
   Length token_start_position;
   Length token_end_position;

-  TSRange * included_ranges;
+  TSRange *included_ranges;
   size_t included_range_count;
   size_t current_included_range_index;
test/fixtures/error_corpus/json_errors.txt (vendored): 23 changes
@@ -26,7 +26,7 @@ errors inside arrays

 ---

-(value (array
+(document (array
   (number)
   (ERROR)
   (number)))
@@ -39,8 +39,8 @@ errors inside objects

 ---

-(value (object
-  (pair (string) (number))
+(document (object
+  (pair (string (string_content)) (number))
   (ERROR (UNEXPECTED 'o'))))

 ==========================================
@@ -51,9 +51,18 @@ errors inside nested objects

 ---

-(value (object
-  (pair (string) (object
-    (pair (string) (number))
+(document (object
+  (pair (string (string_content)) (object
+    (pair (string (string_content)) (number))
     (ERROR (number))))
-  (pair (string) (number))
+  (pair (string (string_content)) (number))
   (ERROR)))
+
+===============================
+incomplete tokens at EOF
+========================
+
+nul
+---
+
+(ERROR (UNEXPECTED EOF))
|||
Loading…
Add table
Add a link
Reference in a new issue