diff --git a/cli/src/generate/build_tables/build_parse_table.rs b/cli/src/generate/build_tables/build_parse_table.rs
index 59ee631d..10320263 100644
--- a/cli/src/generate/build_tables/build_parse_table.rs
+++ b/cli/src/generate/build_tables/build_parse_table.rs
@@ -598,7 +598,7 @@ impl<'a> ParseTableBuilder<'a> {
             .unwrap();
         write!(&mut msg, "Possible interpretations:\n\n").unwrap();
 
-        let mut interpretions = conflicting_items
+        let mut interpretations = conflicting_items
             .iter()
             .map(|item| {
                 let mut line = String::new();
@@ -652,13 +652,13 @@ impl<'a> ParseTableBuilder<'a> {
             })
             .collect::<Vec<_>>();
 
-        let max_interpretation_length = interpretions
+        let max_interpretation_length = interpretations
             .iter()
             .map(|i| i.0.chars().count())
             .max()
             .unwrap();
-        interpretions.sort_unstable();
-        for (i, (line, prec_suffix)) in interpretions.into_iter().enumerate() {
+        interpretations.sort_unstable();
+        for (i, (line, prec_suffix)) in interpretations.into_iter().enumerate() {
             write!(&mut msg, " {}:", i + 1).unwrap();
             msg += &line;
             if let Some(prec_suffix) = prec_suffix {
diff --git a/cli/src/generate/rules.rs b/cli/src/generate/rules.rs
index 816dfc45..0e3ff898 100644
--- a/cli/src/generate/rules.rs
+++ b/cli/src/generate/rules.rs
@@ -70,7 +70,7 @@ pub(crate) enum Rule {
 
 // Because tokens are represented as small (~400 max) unsigned integers,
 // sets of tokens can be efficiently represented as bit vectors with each
-// index correspoding to a token, and each value representing whether or not
+// index corresponding to a token, and each value representing whether or not
 // the token is present in the set.
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub(crate) struct TokenSet {
diff --git a/cli/src/tests/parser_test.rs b/cli/src/tests/parser_test.rs
index 1a962e29..12b90af5 100644
--- a/cli/src/tests/parser_test.rs
+++ b/cli/src/tests/parser_test.rs
@@ -511,7 +511,7 @@ fn test_parsing_after_detecting_error_in_the_middle_of_a_string_token() {
     // Delete a suffix of the source code, starting in the middle of the string
     // literal, after some whitespace. With this deletion, the remaining string
     // content: "c, " looks like two valid python tokens: an identifier and a comma.
-    // When this edit is undone, in order correctly recover the orginal tree, the
+    // When this edit is undone, in order correctly recover the original tree, the
     // parser needs to remember that before matching the `c` as an identifier, it
     // lookahead ahead several bytes, trying to find the closing quotation mark in
     // order to match the "string content" node.
diff --git a/cli/src/tests/query_test.rs b/cli/src/tests/query_test.rs
index df892e96..f51386df 100644
--- a/cli/src/tests/query_test.rs
+++ b/cli/src/tests/query_test.rs
@@ -2887,7 +2887,7 @@ fn test_query_captures_with_many_nested_results_with_fields() {
     .unwrap();
 
     // The outer expression does not match the pattern, but the consequence of the ternary
-    // is an object that *does* contain many occurences of the pattern.
+    // is an object that *does* contain many occurrences of the pattern.
     let count = 50;
     let mut source = "a ? {".to_owned();
     for i in 0..count {
diff --git a/highlight/src/lib.rs b/highlight/src/lib.rs
index 0d097201..8a79c624 100644
--- a/highlight/src/lib.rs
+++ b/highlight/src/lib.rs
@@ -37,7 +37,7 @@ pub enum HighlightEvent {
     HighlightEnd,
 }
 
-/// Contains the data neeeded to higlight code written in a particular language.
+/// Contains the data needed to highlight code written in a particular language.
 ///
 /// This struct is immutable and can be shared between threads.
 pub struct HighlightConfiguration {
diff --git a/lib/src/query.c b/lib/src/query.c
index 33d67648..8fd322eb 100644
--- a/lib/src/query.c
+++ b/lib/src/query.c
@@ -103,7 +103,7 @@ typedef struct {
 
 /*
  * Slice - A slice of an external array. Within a query, capture names,
- * literal string values, and predicate step informations are stored in three
+ * literal string values, and predicate step information are stored in three
  * contiguous arrays. Individual captures, string values, and predicates are
  * represented as slices of these three arrays.
 */
@@ -1645,7 +1645,7 @@ static bool ts_query__analyze_patterns(TSQuery *self, unsigned *error_offset) {
 
       // If the state has advanced to a step with an alternative step, then add another state
       // at that alternative step. This process is simpler than the process of actually matching a
-      // pattern during query exection, because for the purposes of query analysis, there is no
+      // pattern during query execution, because for the purposes of query analysis, there is no
      // need to process repetitions.
       if (
         does_match &&
@@ -2045,7 +2045,7 @@ static TSQueryError ts_query__parse_predicate(
 // the query's internal state machine representation. For nested patterns,
 // this function calls itself recursively.
 //
-// The caller is repsonsible for passing in a dedicated CaptureQuantifiers.
+// The caller is responsible for passing in a dedicated CaptureQuantifiers.
 // These should not be shared between different calls to ts_query__parse_pattern!
 static TSQueryError ts_query__parse_pattern(
   TSQuery *self,
@@ -3520,7 +3520,7 @@ static inline bool ts_query_cursor__advance(
 
           // If this state's next step has an alternative step, then copy the state in order
           // to pursue both alternatives. The alternative step itself may have an alternative,
-          // so this is an interative process.
+          // so this is an iterative process.
           unsigned end_index = i + 1;
           for (unsigned j = i; j < end_index; j++) {
             QueryState *state = &self->states.contents[j];
diff --git a/lib/src/subtree.c b/lib/src/subtree.c
index d6cd2d71..25511574 100644
--- a/lib/src/subtree.c
+++ b/lib/src/subtree.c
@@ -539,7 +539,7 @@ MutableSubtree ts_subtree_new_node(
   return result;
 }
 
-// Create a new error node contaning the given children.
+// Create a new error node containing the given children.
 //
 // This node is treated as 'extra'. Its children are prevented from having
 // having any effect on the parse state.
diff --git a/script/version b/script/version
index 9463a6a0..ce4f6b82 100755
--- a/script/version
+++ b/script/version
@@ -41,7 +41,7 @@ if (arg) {
     {encoding: 'utf8'}
   );
   if (diff.length !== 0) {
-    console.error('There are uncommited changes.');
+    console.error('There are uncommitted changes.');
     process.exit(1);
   }
 
diff --git a/tags/src/lib.rs b/tags/src/lib.rs
index df147fef..13499d86 100644
--- a/tags/src/lib.rs
+++ b/tags/src/lib.rs
@@ -16,7 +16,7 @@ use tree_sitter::{
 const MAX_LINE_LEN: usize = 180;
 const CANCELLATION_CHECK_INTERVAL: usize = 100;
 
-/// Contains the data neeeded to compute tags for code written in a
+/// Contains the data needed to compute tags for code written in a
 /// particular language.
 #[derive(Debug)]
 pub struct TagsConfiguration {
diff --git a/test/fixtures/test_grammars/anonymous_tokens_with_escaped_chars/grammar.js b/test/fixtures/test_grammars/anonymous_tokens_with_escaped_chars/grammar.js
index 45b52968..3e7e294b 100644
--- a/test/fixtures/test_grammars/anonymous_tokens_with_escaped_chars/grammar.js
+++ b/test/fixtures/test_grammars/anonymous_tokens_with_escaped_chars/grammar.js
@@ -2,7 +2,7 @@
 // specified directly in the body of some larger rule) are named according their content. So when
 // tokens contains characters that aren't valid in a C string literal, we need to escape those
 // characters. This grammar tests that this escaping works. The test is basically that the generated
-// parser compiles succesfully.
+// parser compiles successfully.
 
 module.exports = grammar({
   name: "anonymous_tokens_with_escaped_chars",
diff --git a/test/fixtures/test_grammars/readme_grammar/grammar.json b/test/fixtures/test_grammars/readme_grammar/grammar.json
index fd496068..91958fd4 100644
--- a/test/fixtures/test_grammars/readme_grammar/grammar.json
+++ b/test/fixtures/test_grammars/readme_grammar/grammar.json
@@ -30,7 +30,7 @@
     },
 
     // Tokens like '+' and '*' are described directly within the
-    // grammar's rules, as opposed to in a seperate lexer description.
+    // grammar's rules, as opposed to in a separate lexer description.
     "sum": {
       "type": "PREC_LEFT",
      "value": 1,