// SPDX-License-Identifier: Apache-2.0 OR MIT

use crate::alloc::Allocator;
use core::iter::TrustedLen;
use core::ptr::{self};
use core::slice::{self};

use super::{IntoIter, SetLenOnDrop, Vec};

// Specialization trait used for Vec::extend
pub(super) trait SpecExtend<T, I> {
    fn spec_extend(&mut self, iter: I);
}

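// How this trait is reached (sketch, assuming the surrounding vec/mod.rs):
// `Vec::extend` forwards into `SpecExtend`, and the compiler picks the most
// specific applicable impl below at compile time (note the `default fn`s,
// which rely on the unstable `min_specialization` feature). The forwarding
// looks roughly like this:
//
//     impl<T, A: Allocator> Extend<T> for Vec<T, A> {
//         fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
//             <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
//         }
//     }
//
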
impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
    I: Iterator<Item = T>,
{
    default fn spec_extend(&mut self, iter: I) {
        self.extend_desugared(iter)
    }
}

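// `extend_desugared` (defined in vec/mod.rs) is the generic fallback: a
// reserve-and-write loop that grows by the iterator's `size_hint()` lower
// bound as it goes. A minimal sketch of that loop, assuming it matches the
// upstream definition:
//
//     while let Some(element) = iterator.next() {
//         let len = self.len();
//         if len == self.capacity() {
//             let (lower, _) = iterator.size_hint();
//             self.reserve(lower.saturating_add(1));
//         }
//         unsafe {
//             ptr::write(self.as_mut_ptr().add(len), element);
//             // Can't overflow: the address space for `len + 1` elements
//             // was just reserved.
//             self.set_len(len + 1);
//         }
//     }
//
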
impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
    I: TrustedLen<Item = T>,
{
    default fn spec_extend(&mut self, iterator: I) {
        // This is the case for a TrustedLen iterator.
        let (low, high) = iterator.size_hint();
        if let Some(additional) = high {
            debug_assert_eq!(
                low,
                additional,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );
            self.reserve(additional);
            unsafe {
                let mut ptr = self.as_mut_ptr().add(self.len());
                let mut local_len = SetLenOnDrop::new(&mut self.len);
                iterator.for_each(move |element| {
                    ptr::write(ptr, element);
                    ptr = ptr.add(1);
                    // Since the loop executes user code which can panic we have to update
                    // the length after each step to correctly drop what we've written.
                    // NB can't overflow since we would have had to alloc the address space
                    local_len.increment_len(1);
                });
            }
        } else {
            // Per TrustedLen contract a `None` upper bound means that the iterator length
            // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
            // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
            // This avoids additional codegen for a fallback code path which would eventually
            // panic anyway.
            panic!("capacity overflow");
        }
    }
}

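// Why `SetLenOnDrop` above: the raw writes bypass `set_len`, so if user code
// inside the iterator panics mid-loop, the length still has to be committed
// for the already-written elements to be dropped exactly once. A sketch of
// the helper, assuming it matches set_len_on_drop.rs in this module:
//
//     pub(super) struct SetLenOnDrop<'a> {
//         len: &'a mut usize,
//         local_len: usize,
//     }
//
//     impl<'a> SetLenOnDrop<'a> {
//         pub(super) fn new(len: &'a mut usize) -> Self {
//             SetLenOnDrop { local_len: *len, len }
//         }
//
//         pub(super) fn increment_len(&mut self, increment: usize) {
//             self.local_len += increment;
//         }
//     }
//
//     impl Drop for SetLenOnDrop<'_> {
//         fn drop(&mut self) {
//             // Commit the count of initialized elements, even on unwind.
//             *self.len = self.local_len;
//         }
//     }
//
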
impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
    fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
        unsafe {
            self.append_elements(iterator.as_slice() as _);
        }
        iterator.forget_remaining_elements();
    }
}

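// Usage sketch: the impl above is why extending from another Vec's IntoIter
// is a single bulk copy rather than a per-element loop. `append_elements`
// copies the remaining elements with one `ptr::copy_nonoverlapping`, and
// `forget_remaining_elements` marks them as moved so the iterator's drop
// won't run their destructors a second time. For example:
//
//     let mut v = vec![1, 2, 3];
//     let w = vec![4, 5, 6];
//     v.extend(w.into_iter()); // dispatches to the IntoIter impl above
//     assert_eq!(v, [1, 2, 3, 4, 5, 6]);
//
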
impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
where
    I: Iterator<Item = &'a T>,
    T: Clone,
{
    default fn spec_extend(&mut self, iterator: I) {
        self.spec_extend(iterator.cloned())
    }
}

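// Note on the re-dispatch above: `iterator.cloned()` yields `T` by value, so
// the call re-enters specialization, and because `Cloned<I>` is `TrustedLen`
// whenever `I` is, a clone-based extend still takes the TrustedLen fast path
// instead of the generic push loop. For a non-Copy `T: Clone` this impl is
// reached via e.g. `extend_from_slice` (which, in this revision, forwards
// here with a `slice::Iter`):
//
//     let mut v: Vec<String> = Vec::new();
//     let src = [String::from("a"), String::from("b")];
//     v.extend_from_slice(&src); // clones, but with one up-front reserve
//
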
impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
    T: Copy,
{
    fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
        let slice = iterator.as_slice();
        unsafe { self.append_elements(slice) };
    }
}
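
// Usage sketch: for `T: Copy` the impl above skips per-element cloning and
// bulk-copies straight from the borrowed slice, so both calls below reduce
// to one `append_elements` call each:
//
//     let mut v: Vec<u32> = vec![1, 2];
//     let src = [3u32, 4];
//     v.extend(src.iter());      // Extend<&u32> forwards here
//     v.extend_from_slice(&src); // likewise
//     assert_eq!(v, [1, 2, 3, 4, 3, 4]);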