diff --git a/cli/loader/src/lib.rs b/cli/loader/src/lib.rs index e8d6ea9e..d3428dc7 100644 --- a/cli/loader/src/lib.rs +++ b/cli/loader/src/lib.rs @@ -503,8 +503,8 @@ impl Loader { let library = unsafe { Library::new(&library_path) } .with_context(|| format!("Error opening dynamic library {library_path:?}"))?; let language = unsafe { - let language_fn: Symbol<unsafe extern "C" fn() -> Language> = library - .get(language_fn_name.as_bytes()) + let language_fn = library + .get::<Symbol<unsafe extern "C" fn() -> Language>>(language_fn_name.as_bytes()) .with_context(|| format!("Failed to load symbol {language_fn_name}"))?; language_fn() }; diff --git a/cli/src/generate/build_tables/build_lex_table.rs b/cli/src/generate/build_tables/build_lex_table.rs index 09e8b073..bc65447c 100644 --- a/cli/src/generate/build_tables/build_lex_table.rs +++ b/cli/src/generate/build_tables/build_lex_table.rs @@ -26,7 +26,7 @@ pub fn build_lex_table( LexTable::default() }; - let mut parse_state_ids_by_token_set: Vec<(TokenSet, Vec<ParseStateId>)> = Vec::new(); + let mut parse_state_ids_by_token_set = Vec::<(TokenSet, Vec<ParseStateId>)>::new(); for (i, state) in parse_table.states.iter().enumerate() { let tokens = state .terminal_entries diff --git a/cli/src/generate/build_tables/build_parse_table.rs b/cli/src/generate/build_tables/build_parse_table.rs index 30850d3d..a9be76a2 100644 --- a/cli/src/generate/build_tables/build_parse_table.rs +++ b/cli/src/generate/build_tables/build_parse_table.rs @@ -455,7 +455,7 @@ impl<'a> ParseTableBuilder<'a> { // REDUCE-REDUCE conflicts where all actions have the *same* // precedence, and there can still be SHIFT/REDUCE conflicts. 
let mut considered_associativity = false; - let mut shift_precedence: Vec<(&Precedence, Symbol)> = Vec::new(); + let mut shift_precedence = Vec::<(&Precedence, Symbol)>::new(); let mut conflicting_items = HashSet::new(); for (item, lookaheads) in &item_set.entries { if let Some(step) = item.step() { diff --git a/cli/src/generate/nfa.rs b/cli/src/generate/nfa.rs index 2433f520..66f78074 100644 --- a/cli/src/generate/nfa.rs +++ b/cli/src/generate/nfa.rs @@ -464,7 +464,7 @@ impl<'a> NfaCursor<'a> { fn group_transitions<'b>( iter: impl Iterator<Item = (&'b CharacterSet, bool, &'b Precedence, u32)>, ) -> Vec<NfaTransition> { - let mut result: Vec<NfaTransition> = Vec::new(); + let mut result = Vec::<NfaTransition>::new(); for (chars, is_sep, prec, state) in iter { let mut chars = chars.clone(); let mut i = 0; diff --git a/cli/src/generate/prepare_grammar/extract_tokens.rs b/cli/src/generate/prepare_grammar/extract_tokens.rs index 4ba89103..7d87bbd2 100644 --- a/cli/src/generate/prepare_grammar/extract_tokens.rs +++ b/cli/src/generate/prepare_grammar/extract_tokens.rs @@ -67,10 +67,10 @@ pub(super) fn extract_tokens( .expected_conflicts .into_iter() .map(|conflict| { - let mut result: Vec<_> = conflict + let mut result = conflict .iter() .map(|symbol| symbol_replacer.replace_symbol(*symbol)) - .collect(); + .collect::<Vec<_>>(); result.sort_unstable(); result.dedup(); result diff --git a/cli/src/generate/prepare_grammar/process_inlines.rs b/cli/src/generate/prepare_grammar/process_inlines.rs index c718a429..e37180b3 100644 --- a/cli/src/generate/prepare_grammar/process_inlines.rs +++ b/cli/src/generate/prepare_grammar/process_inlines.rs @@ -362,10 +362,10 @@ mod tests { let inline_map = process_inlines(&grammar, &LexicalGrammar::default()).unwrap(); - let productions: Vec<&Production> = inline_map + let productions = inline_map .inlined_productions(&grammar.variables[0].productions[0], 1) .unwrap() - .collect(); + .collect::<Vec<&Production>>(); assert_eq!( productions.iter().copied().cloned().collect::<Vec<_>>(), @@ -461,10 +461,10 @@ mod tests { let inline_map = process_inlines(&grammar, 
&LexicalGrammar::default()).unwrap(); - let productions: Vec<_> = inline_map + let productions = inline_map .inlined_productions(&grammar.variables[0].productions[0], 0) .unwrap() - .collect(); + .collect::<Vec<_>>(); assert_eq!( productions.iter().copied().cloned().collect::<Vec<_>>(), diff --git a/cli/src/generate/render.rs b/cli/src/generate/render.rs index f111ed08..b65e2ebe 100644 --- a/cli/src/generate/render.rs +++ b/cli/src/generate/render.rs @@ -675,7 +675,7 @@ impl Generator { // For each lex state, compute a summary of the code that needs to be // generated. - let state_transition_summaries: Vec<Vec<TransitionSummary>> = lex_table + let state_transition_summaries = lex_table .states .iter() .map(|state| { @@ -732,7 +732,7 @@ }) .collect() }) - .collect(); + .collect::<Vec<Vec<TransitionSummary>>>(); // Generate a helper function for each large character set. let mut sorted_large_char_sets = large_character_sets.iter().collect::<Vec<_>>(); @@ -1153,7 +1153,7 @@ impl Generator { let mut index = 0; let mut small_state_indices = Vec::new(); - let mut symbols_by_value: HashMap<(usize, SymbolType), Vec<Symbol>> = HashMap::new(); + let mut symbols_by_value = HashMap::<(usize, SymbolType), Vec<Symbol>>::new(); for state in self.parse_table.states.iter().skip(self.large_state_count) { small_state_indices.push(index); symbols_by_value.clear(); diff --git a/cli/src/parse.rs b/cli/src/parse.rs index ee395205..4849bda3 100644 --- a/cli/src/parse.rs +++ b/cli/src/parse.rs @@ -208,7 +208,7 @@ pub fn parse_file_at_path(parser: &mut Parser, opts: &ParseFileOptions) -> Resul let mut indent_level = 0; let mut did_visit_children = false; let mut had_named_children = false; - let mut tags: Vec<&str> = Vec::new(); + let mut tags = Vec::<&str>::new(); writeln!(&mut stdout, "<?xml version=\"1.0\"?>")?; loop { let node = cursor.node(); diff --git a/cli/src/tests/parser_test.rs b/cli/src/tests/parser_test.rs index 1df134e3..ecd54fcf 100644 --- a/cli/src/tests/parser_test.rs +++ b/cli/src/tests/parser_test.rs @@ -158,10 +158,10 @@ fn test_parsing_with_custom_utf16_input() 
{ let mut parser = Parser::new(); parser.set_language(&get_language("rust")).unwrap(); - let lines: Vec<Vec<u16>> = ["pub fn foo() {", " 1", "}"] + let lines = ["pub fn foo() {", " 1", "}"] .iter() - .map(|s| s.encode_utf16().collect()) - .collect(); + .map(|s| s.encode_utf16().collect::<Vec<u16>>()) + .collect::<Vec<_>>(); let tree = parser .parse_utf16_with( @@ -1014,11 +1014,11 @@ fn test_parsing_error_in_invalid_included_ranges() { #[test] fn test_parsing_utf16_code_with_errors_at_the_end_of_an_included_range() { let source_code = "<script>a.</script>"; - let utf16_source_code: Vec<u16> = source_code + let utf16_source_code = source_code .as_bytes() .iter() .map(|c| u16::from(*c)) - .collect(); + .collect::<Vec<u16>>(); let start_byte = 2 * source_code.find("a.").unwrap(); let end_byte = 2 * source_code.find("</script>").unwrap(); diff --git a/cli/src/tests/tags_test.rs b/cli/src/tests/tags_test.rs index 5719269b..6139f732 100644 --- a/cli/src/tests/tags_test.rs +++ b/cli/src/tests/tags_test.rs @@ -397,14 +397,14 @@ fn test_tags_via_c_api() { }) .unwrap(); - let syntax_types: Vec<&str> = unsafe { + let syntax_types = unsafe { let mut len: u32 = 0; let ptr = c::ts_tagger_syntax_kinds_for_scope_name(tagger, c_scope_name.as_ptr(), &mut len); slice::from_raw_parts(ptr, len as usize) .iter() .map(|i| CStr::from_ptr(*i).to_str().unwrap()) - .collect() + .collect::<Vec<&str>>() }; assert_eq!( diff --git a/cli/src/tests/text_provider_test.rs b/cli/src/tests/text_provider_test.rs index e04beca8..b0b70243 100644 --- a/cli/src/tests/text_provider_test.rs +++ b/cli/src/tests/text_provider_test.rs @@ -72,7 +72,7 @@ fn test_text_provider_for_string() { #[test] fn test_text_provider_for_box_of_str_slice() { - let text: Box<str> = "// comment".to_owned().into_boxed_str(); + let text = "// comment".to_owned().into_boxed_str(); check_parsing(text.as_bytes(), text.as_bytes()); check_parsing(<_ as AsRef<str>>::as_ref(&text), text.as_bytes()); @@ -82,7 +82,7 @@ #[test] fn test_text_provider_for_box_of_bytes_slice() { - 
let text: Box<[u8]> = "// comment".to_owned().into_boxed_str().into_boxed_bytes(); + let text = "// comment".to_owned().into_boxed_str().into_boxed_bytes(); check_parsing(text.as_ref(), text.as_ref()); check_parsing(text.as_ref(), &*text); @@ -91,15 +91,14 @@ #[test] fn test_text_provider_for_vec_of_bytes() { - let text: Vec<u8> = "// comment".to_owned().into_bytes(); + let text = "// comment".to_owned().into_bytes(); check_parsing(&*text, &*text); } #[test] fn test_text_provider_for_arc_of_bytes_slice() { - let text: Vec<u8> = "// comment".to_owned().into_bytes(); - let text: Arc<[u8]> = Arc::from(text); + let text: Arc<[u8]> = Arc::from("// comment".to_owned().into_bytes()); check_parsing(&*text, &*text); check_parsing(text.as_ref(), text.as_ref()); @@ -149,7 +148,7 @@ fn test_text_provider_callback_with_owned_bytes_vec_slice() { .unwrap_or_default() }, |_node: Node<'_>| { - let slice: Vec<u8> = text.to_owned().into_bytes(); + let slice = text.to_owned().into_bytes(); iter::once(slice) }, ); diff --git a/cli/src/wasm.rs b/cli/src/wasm.rs index 7ecdf50c..11dbb5c5 100644 --- a/cli/src/wasm.rs +++ b/cli/src/wasm.rs @@ -48,7 +48,7 @@ pub fn compile_language_to_wasm( // Exit with an error if the external scanner uses symbols from the // C or C++ standard libraries that aren't available to wasm parsers. - let stdlib_symbols: Vec<_> = wasm_stdlib_symbols().collect(); + let stdlib_symbols = wasm_stdlib_symbols().collect::<Vec<_>>(); let dylink_symbols = [ "__indirect_function_table", "__memory_base",