Merge pull request #2948 from amaanq/vec-to-slice

refactor: swap `&Vec<T>` with `&[T]` where appropriate
This commit is contained in:
Amaan Qureshi 2024-02-07 03:18:55 -05:00 committed by GitHub
commit 0feaddd0b5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 17 additions and 23 deletions

View file

@ -51,7 +51,7 @@ struct ParseTableBuilder<'a> {
item_set_builder: ParseItemSetBuilder<'a>,
syntax_grammar: &'a SyntaxGrammar,
lexical_grammar: &'a LexicalGrammar,
variable_info: &'a Vec<VariableInfo>,
variable_info: &'a [VariableInfo],
core_ids_by_core: HashMap<ParseItemSetCore<'a>, usize>,
state_ids_by_item_set: IndexMap<ParseItemSet<'a>, ParseStateId, BuildHasherDefault<FxHasher>>,
parse_state_info_by_id: Vec<ParseStateInfo<'a>>,
@ -965,7 +965,7 @@ pub fn build_parse_table<'a>(
syntax_grammar: &'a SyntaxGrammar,
lexical_grammar: &'a LexicalGrammar,
inlines: &'a InlinedProductionMap,
variable_info: &'a Vec<VariableInfo>,
variable_info: &'a [VariableInfo],
) -> Result<(ParseTable, Vec<TokenSet>, Vec<ParseStateInfo<'a>>)> {
let actual_conflicts = syntax_grammar.expected_conflicts.iter().cloned().collect();
let item_set_builder = ParseItemSetBuilder::new(syntax_grammar, lexical_grammar, inlines);

View file

@ -34,7 +34,7 @@ impl<'a> CoincidentTokenIndex<'a> {
result
}
pub fn states_with(&self, a: Symbol, b: Symbol) -> &Vec<ParseStateId> {
pub fn states_with(&self, a: Symbol, b: Symbol) -> &[ParseStateId] {
&self.entries[self.index(a.index, b.index)]
}

View file

@ -24,7 +24,7 @@ pub fn build_tables(
syntax_grammar: &SyntaxGrammar,
lexical_grammar: &LexicalGrammar,
simple_aliases: &AliasMap,
variable_info: &Vec<VariableInfo>,
variable_info: &[VariableInfo],
inlines: &InlinedProductionMap,
report_symbol_name: Option<&str>,
) -> Result<(ParseTable, LexTable, LexTable, Option<Symbol>)> {

View file

@ -666,10 +666,7 @@ pub fn generate_node_types_json(
result
}
fn process_supertypes(
info: &mut FieldInfoJSON,
subtype_map: &Vec<(NodeTypeJSON, Vec<NodeTypeJSON>)>,
) {
fn process_supertypes(info: &mut FieldInfoJSON, subtype_map: &[(NodeTypeJSON, Vec<NodeTypeJSON>)]) {
for (supertype, subtypes) in subtype_map {
if info.types.contains(supertype) {
info.types.retain(|t| !subtypes.contains(t));

View file

@ -172,7 +172,7 @@ fn flatten_variable(variable: Variable) -> SyntaxVariable {
}
}
fn symbol_is_used(variables: &Vec<SyntaxVariable>, symbol: Symbol) -> bool {
fn symbol_is_used(variables: &[SyntaxVariable], symbol: Symbol) -> bool {
for variable in variables {
for production in &variable.productions {
for step in &production.steps {

View file

@ -87,7 +87,7 @@ impl InlinedProductionMapBuilder {
&'a mut self,
step_id: ProductionStepId,
grammar: &'a SyntaxGrammar,
) -> &'a Vec<usize> {
) -> &'a [usize] {
// Build a list of productions produced by inlining rules.
let mut i = 0;
let step_index = step_id.step_index;

View file

@ -596,7 +596,7 @@ impl Generator {
let mut flat_field_maps = vec![];
let mut next_flat_field_map_index = 0;
self.get_field_map_id(
&Vec::new(),
Vec::new(),
&mut flat_field_maps,
&mut next_flat_field_map_index,
);
@ -614,7 +614,7 @@ impl Generator {
}
field_map_ids.push((
self.get_field_map_id(
&flat_field_map,
flat_field_map.clone(),
&mut flat_field_maps,
&mut next_flat_field_map_index,
),
@ -1438,7 +1438,7 @@ impl Generator {
fn get_field_map_id(
&self,
flat_field_map: &Vec<(String, FieldLocation)>,
flat_field_map: Vec<(String, FieldLocation)>,
flat_field_maps: &mut Vec<(usize, Vec<(String, FieldLocation)>)>,
next_flat_field_map_index: &mut usize,
) -> usize {
@ -1447,8 +1447,8 @@ impl Generator {
}
let result = *next_flat_field_map_index;
flat_field_maps.push((result, flat_field_map.clone()));
*next_flat_field_map_index += flat_field_map.len();
flat_field_maps.push((result, flat_field_map));
result
}

View file

@ -399,7 +399,7 @@ fn test_feature_corpus_files() {
}
fn check_consistent_sizes(tree: &Tree, input: &[u8]) {
fn check(node: Node, line_offsets: &Vec<usize>) {
fn check(node: Node, line_offsets: &[usize]) {
let start_byte = node.start_byte();
let end_byte = node.end_byte();
let start_point = node.start_position();

View file

@ -5,13 +5,13 @@ use std::str;
#[derive(Debug)]
pub struct ReadRecorder<'a> {
content: &'a Vec<u8>,
content: &'a [u8],
indices_read: Vec<usize>,
}
impl<'a> ReadRecorder<'a> {
#[must_use]
pub const fn new(content: &'a Vec<u8>) -> Self {
pub const fn new(content: &'a [u8]) -> Self {
Self {
content,
indices_read: Vec::new(),

View file

@ -678,14 +678,11 @@ fn test_get_changed_ranges() {
}
}
fn index_of(text: &Vec<u8>, substring: &str) -> usize {
str::from_utf8(text.as_slice())
.unwrap()
.find(substring)
.unwrap()
fn index_of(text: &[u8], substring: &str) -> usize {
str::from_utf8(text).unwrap().find(substring).unwrap()
}
fn range_of(text: &Vec<u8>, substring: &str) -> Range {
fn range_of(text: &[u8], substring: &str) -> Range {
let start_byte = index_of(text, substring);
let end_byte = start_byte + substring.as_bytes().len();
Range {