Merge pull request #308 from tree-sitter/real-time-clock
Use CLOCK_MONOTONIC on platforms that support it
Commit 2e5d3d3770
2 changed files with 128 additions and 17 deletions
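For context, the change swaps the process clock for the operating system's monotonic clock where one is available. A minimal sketch, not part of this commit, of reading that clock on a POSIX system that defines CLOCK_MONOTONIC:

#include <stdio.h>
#include <time.h>

int main(void) {
  struct timespec now;
  // Monotonic time: unaffected by wall-clock adjustments and by other
  // processes, unlike the process-time clock() fallback.
  clock_gettime(CLOCK_MONOTONIC, &now);
  printf("%lld.%09ld seconds since an arbitrary start point\n",
         (long long)now.tv_sec, now.tv_nsec);
  return 0;
}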
lib/src/clock.h (125 changes)
@@ -3,30 +3,137 @@
 #include <stdint.h>
 
+typedef uint64_t TSDuration;
+
 #ifdef _WIN32
 
-#include <windows.h>
+// Windows:
+// * Represent a time as a performance counter value.
+// * Represent a duration as a number of performance counter ticks.
 
-static inline uint64_t get_clock() {
+#include <windows.h>
+typedef uint64_t TSClock;
+
+static inline TSDuration duration_from_micros(uint64_t micros) {
+  LARGE_INTEGER frequency;
+  QueryPerformanceFrequency(&frequency);
+  return micros * (uint64_t)frequency.QuadPart / 1000000;
+}
+
+static inline uint64_t duration_to_micros(TSDuration self) {
+  LARGE_INTEGER frequency;
+  QueryPerformanceFrequency(&frequency);
+  return self * 1000000 / (uint64_t)frequency.QuadPart;
+}
+
+static inline TSClock clock_null() {
+  return 0;
+}
+
+static inline TSClock clock_now() {
   LARGE_INTEGER result;
   QueryPerformanceCounter(&result);
   return (uint64_t)result.QuadPart;
 }
 
-static inline uint64_t get_clocks_per_second() {
-  LARGE_INTEGER result;
-  QueryPerformanceFrequency(&result);
-  return (uint64_t)result.QuadPart;
+static inline TSClock clock_after(TSClock base, TSDuration duration) {
+  return base + duration;
 }
 
+static inline bool clock_is_null(TSClock self) {
+  return !self;
+}
+
+static inline bool clock_is_gt(TSClock self, TSClock other) {
+  return self > other;
+}
+
+#elif defined(CLOCK_MONOTONIC)
+
+// POSIX with monotonic clock support (Linux, macOS >= 10.12)
+// * Represent a time as a monotonic (seconds, nanoseconds) pair.
+// * Represent a duration as a number of microseconds.
+//
+// On these platforms, parse timeouts will correspond accurately to
+// real time, regardless of what other processes are running.
+
+#include <time.h>
+typedef struct timespec TSClock;
+
+static inline TSDuration duration_from_micros(uint64_t micros) {
+  return micros;
+}
+
+static inline uint64_t duration_to_micros(TSDuration self) {
+  return self;
+}
+
+static inline TSClock clock_now() {
+  TSClock result;
+  clock_gettime(CLOCK_MONOTONIC, &result);
+  return result;
+}
+
+static inline TSClock clock_null() {
+  return (TSClock) {0, 0};
+}
+
+static inline TSClock clock_after(TSClock base, TSDuration duration) {
+  TSClock result = base;
+  result.tv_sec += duration / 1000000;
+  result.tv_nsec += (duration % 1000000) * 1000;
+  return result;
+}
+
+static inline bool clock_is_null(TSClock self) {
+  return !self.tv_sec;
+}
+
+static inline bool clock_is_gt(TSClock self, TSClock other) {
+  if (self.tv_sec > other.tv_sec) return true;
+  if (self.tv_sec < other.tv_sec) return false;
+  return self.tv_nsec > other.tv_nsec;
+}
+
 #else
 
-static inline uint64_t get_clock() {
+// POSIX without monotonic clock support
+// * Represent a time as a process clock value.
+// * Represent a duration as a number of process clock ticks.
+//
+// On these platforms, parse timeouts may be affected by other processes,
+// which is not ideal, but is better than using a non-monotonic time API
+// like `gettimeofday`.
+
+#include <time.h>
+typedef uint64_t TSClock;
+
+static inline TSDuration duration_from_micros(uint64_t micros) {
+  return micros * (uint64_t)CLOCKS_PER_SEC / 1000000;
+}
+
+static inline uint64_t duration_to_micros(TSDuration self) {
+  return self * 1000000 / (uint64_t)CLOCKS_PER_SEC;
+}
+
+static inline TSClock clock_null() {
+  return 0;
+}
+
+static inline TSClock clock_now() {
   return (uint64_t)clock();
 }
 
-static inline uint64_t get_clocks_per_second() {
-  return (uint64_t)CLOCKS_PER_SEC;
+static inline TSClock clock_after(TSClock base, TSDuration duration) {
+  return base + duration;
 }
 
+static inline bool clock_is_null(TSClock self) {
+  return !self;
+}
+
+static inline bool clock_is_gt(TSClock self, TSClock other) {
+  return self > other;
+}
+
 #endif
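Taken together, the new primitives replace the old start-clock-plus-limit arithmetic with an absolute deadline. A minimal sketch, not part of the commit, of how they compose, assuming the CLOCK_MONOTONIC branch above and a hypothetical local include path for clock.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "clock.h"  // hypothetical path to the header shown above

int main(void) {
  // Convert a microsecond budget into a TSDuration, then into an absolute
  // deadline -- the same shape the parser changes below use.
  TSDuration budget = duration_from_micros(500000);      // 500 ms
  TSClock end_clock = clock_after(clock_now(), budget);

  uint64_t iterations = 0;
  // Poll the deadline periodically, as the parser does every few operations.
  while (!clock_is_gt(clock_now(), end_clock)) {
    iterations++;
  }
  printf("%llu iterations fit in the budget\n", (unsigned long long)iterations);
  return 0;
}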
lib/src/parser.c
@@ -66,9 +66,9 @@ struct TSParser {
   ReusableNode reusable_node;
   void *external_scanner_payload;
   FILE *dot_graph_file;
+  TSClock end_clock;
+  TSDuration timeout_duration;
   unsigned accept_count;
-  uint64_t clock_limit;
-  uint64_t start_clock;
   unsigned operation_count;
   const volatile uint32_t *cancellation_flag;
   bool halt_on_error;
@@ -1288,7 +1288,7 @@ static bool ts_parser__advance(
     self->operation_count = 0;
     if (
       (self->cancellation_flag && atomic_load(self->cancellation_flag)) ||
-      (self->clock_limit && get_clock() - self->start_clock > self->clock_limit)
+      (!clock_is_null(self->end_clock) && clock_is_gt(clock_now(), self->end_clock))
     ) {
       ts_subtree_release(&self->tree_pool, lookahead);
       return false;
@@ -1513,8 +1513,8 @@ TSParser *ts_parser_new() {
   self->dot_graph_file = NULL;
   self->halt_on_error = false;
   self->cancellation_flag = NULL;
-  self->clock_limit = 0;
-  self->start_clock = 0;
+  self->timeout_duration = 0;
+  self->end_clock = clock_null();
   self->operation_count = 0;
   self->old_tree = NULL_SUBTREE;
   self->scratch_tree.ptr = &self->scratch_tree_data;
@@ -1598,11 +1598,11 @@ void ts_parser_set_cancellation_flag(TSParser *self, const uint32_t *flag) {
 }
 
 uint64_t ts_parser_timeout_micros(const TSParser *self) {
-  return self->clock_limit * 1000000 / get_clocks_per_second();
+  return duration_to_micros(self->timeout_duration);
 }
 
 void ts_parser_set_timeout_micros(TSParser *self, uint64_t timeout_micros) {
-  self->clock_limit = timeout_micros * get_clocks_per_second() / 1000000;
+  self->timeout_duration = duration_from_micros(timeout_micros);
 }
 
 void ts_parser_set_included_ranges(TSParser *self, const TSRange *ranges, uint32_t count) {
@@ -1666,7 +1666,11 @@ TSTree *ts_parser_parse(TSParser *self, const TSTree *old_tree, TSInput input) {
 
   uint32_t position = 0, last_position = 0, version_count = 0;
   self->operation_count = 0;
-  self->start_clock = get_clock();
+  if (self->timeout_duration) {
+    self->end_clock = clock_after(clock_now(), self->timeout_duration);
+  } else {
+    self->end_clock = clock_null();
+  }
 
   do {
     for (StackVersion version = 0;
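For callers nothing changes syntactically: ts_parser_set_timeout_micros still takes microseconds, but the deadline now tracks monotonic real time. A hedged usage sketch, assuming the public header tree_sitter/api.h and some linked-in grammar such as tree_sitter_json; a NULL result indicates the parse stopped early:

#include <string.h>
#include <tree_sitter/api.h>

const TSLanguage *tree_sitter_json(void);  // assumed grammar, for illustration

int main(void) {
  TSParser *parser = ts_parser_new();
  ts_parser_set_language(parser, tree_sitter_json());

  // 10 ms budget, measured against the monotonic clock after this change.
  ts_parser_set_timeout_micros(parser, 10000);

  const char *source = "[1, 2, 3]";
  TSTree *tree = ts_parser_parse_string(parser, NULL, source,
                                        (uint32_t)strlen(source));
  if (tree) {
    ts_tree_delete(tree);
  } else {
    // The deadline elapsed first; calling the parse function again would
    // resume from where it left off.
  }

  ts_parser_delete(parser);
  return 0;
}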