Fix typos

This commit is contained in:
Kian-Meng Ang 2022-06-28 19:57:42 +08:00
parent 01df16ca9f
commit b8552ec6c4
11 changed files with 17 additions and 17 deletions

View file

@ -598,7 +598,7 @@ impl<'a> ParseTableBuilder<'a> {
.unwrap();
write!(&mut msg, "Possible interpretations:\n\n").unwrap();
let mut interpretions = conflicting_items
let mut interpretations = conflicting_items
.iter()
.map(|item| {
let mut line = String::new();
@ -652,13 +652,13 @@ impl<'a> ParseTableBuilder<'a> {
})
.collect::<Vec<_>>();
let max_interpretation_length = interpretions
let max_interpretation_length = interpretations
.iter()
.map(|i| i.0.chars().count())
.max()
.unwrap();
interpretions.sort_unstable();
for (i, (line, prec_suffix)) in interpretions.into_iter().enumerate() {
interpretations.sort_unstable();
for (i, (line, prec_suffix)) in interpretations.into_iter().enumerate() {
write!(&mut msg, " {}:", i + 1).unwrap();
msg += &line;
if let Some(prec_suffix) = prec_suffix {

View file

@ -70,7 +70,7 @@ pub(crate) enum Rule {
// Because tokens are represented as small (~400 max) unsigned integers,
// sets of tokens can be efficiently represented as bit vectors with each
// index correspoding to a token, and each value representing whether or not
// index corresponding to a token, and each value representing whether or not
// the token is present in the set.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct TokenSet {

View file

@ -511,7 +511,7 @@ fn test_parsing_after_detecting_error_in_the_middle_of_a_string_token() {
// Delete a suffix of the source code, starting in the middle of the string
// literal, after some whitespace. With this deletion, the remaining string
// content: "c, " looks like two valid python tokens: an identifier and a comma.
// When this edit is undone, in order correctly recover the orginal tree, the
// When this edit is undone, in order to correctly recover the original tree, the
// parser needs to remember that before matching the `c` as an identifier, it
// looked ahead several bytes, trying to find the closing quotation mark in
// order to match the "string content" node.

View file

@ -2887,7 +2887,7 @@ fn test_query_captures_with_many_nested_results_with_fields() {
.unwrap();
// The outer expression does not match the pattern, but the consequence of the ternary
// is an object that *does* contain many occurences of the pattern.
// is an object that *does* contain many occurrences of the pattern.
let count = 50;
let mut source = "a ? {".to_owned();
for i in 0..count {

View file

@ -37,7 +37,7 @@ pub enum HighlightEvent {
HighlightEnd,
}
/// Contains the data neeeded to higlight code written in a particular language.
/// Contains the data needed to highlight code written in a particular language.
///
/// This struct is immutable and can be shared between threads.
pub struct HighlightConfiguration {

View file

@ -103,7 +103,7 @@ typedef struct {
/*
* Slice - A slice of an external array. Within a query, capture names,
* literal string values, and predicate step informations are stored in three
* literal string values, and predicate step information are stored in three
* contiguous arrays. Individual captures, string values, and predicates are
* represented as slices of these three arrays.
*/
@ -1645,7 +1645,7 @@ static bool ts_query__analyze_patterns(TSQuery *self, unsigned *error_offset) {
// If the state has advanced to a step with an alternative step, then add another state
// at that alternative step. This process is simpler than the process of actually matching a
// pattern during query exection, because for the purposes of query analysis, there is no
// pattern during query execution, because for the purposes of query analysis, there is no
// need to process repetitions.
if (
does_match &&
@ -2045,7 +2045,7 @@ static TSQueryError ts_query__parse_predicate(
// the query's internal state machine representation. For nested patterns,
// this function calls itself recursively.
//
// The caller is repsonsible for passing in a dedicated CaptureQuantifiers.
// The caller is responsible for passing in a dedicated CaptureQuantifiers.
// These should not be shared between different calls to ts_query__parse_pattern!
static TSQueryError ts_query__parse_pattern(
TSQuery *self,
@ -3520,7 +3520,7 @@ static inline bool ts_query_cursor__advance(
// If this state's next step has an alternative step, then copy the state in order
// to pursue both alternatives. The alternative step itself may have an alternative,
// so this is an interative process.
// so this is an iterative process.
unsigned end_index = i + 1;
for (unsigned j = i; j < end_index; j++) {
QueryState *state = &self->states.contents[j];

View file

@ -539,7 +539,7 @@ MutableSubtree ts_subtree_new_node(
return result;
}
// Create a new error node contaning the given children.
// Create a new error node containing the given children.
//
// This node is treated as 'extra'. Its children are prevented from
// having any effect on the parse state.

View file

@ -41,7 +41,7 @@ if (arg) {
{encoding: 'utf8'}
);
if (diff.length !== 0) {
console.error('There are uncommited changes.');
console.error('There are uncommitted changes.');
process.exit(1);
}

View file

@ -16,7 +16,7 @@ use tree_sitter::{
const MAX_LINE_LEN: usize = 180;
const CANCELLATION_CHECK_INTERVAL: usize = 100;
/// Contains the data neeeded to compute tags for code written in a
/// Contains the data needed to compute tags for code written in a
/// particular language.
#[derive(Debug)]
pub struct TagsConfiguration {

View file

@ -2,7 +2,7 @@
// specified directly in the body of some larger rule) are named according to their content. So when
// tokens contain characters that aren't valid in a C string literal, we need to escape those
// characters. This grammar tests that this escaping works. The test is basically that the generated
// parser compiles succesfully.
// parser compiles successfully.
module.exports = grammar({
name: "anonymous_tokens_with_escaped_chars",

View file

@ -30,7 +30,7 @@
},
// Tokens like '+' and '*' are described directly within the
// grammar's rules, as opposed to in a seperate lexer description.
// grammar's rules, as opposed to in a separate lexer description.
"sum": {
"type": "PREC_LEFT",
"value": 1,