// SPDX-License-Identifier: Apache-2.0 OR MIT

#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]

use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;

#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;

#[cfg(test)]
mod tests;

enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    /// The new memory is guaranteed to be zeroed.
    #[allow(dead_code)]
    Zeroed,
}

/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    ptr: Unique<T>,
    cap: usize,
    alloc: A,
}

impl<T> RawVec<T, Global> {
    /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
    /// they cannot call `Self::new()`.
    ///
    /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
    /// that would truly const-call something unstable.
    pub const NEW: Self = Self::new();

    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub const fn new() -> Self {
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
}
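// Illustrative sketch (not compiled here): the type-level docs above say `RawVec` is meant as
// the allocation backbone for containers like `Vec`. A hypothetical wrapper pairs the buffer
// with its own `len` and writes elements through `ptr()`, roughly like this:
//
//     struct MyVec<T> {
//         buf: RawVec<T>,
//         len: usize,
//     }
//
//     impl<T> MyVec<T> {
//         fn push(&mut self, value: T) {
//             if self.len == self.buf.capacity() {
//                 self.buf.reserve_for_push(self.len);
//             }
//             unsafe { self.buf.ptr().add(self.len).write(value) };
//             self.len += 1;
//         }
//     }
//
// `MyVec` and `push` are made-up names for illustration; dropping the stored elements before the
// `RawVec` frees its buffer remains the caller's responsibility, as the docs above note.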
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr: Unique::dangling(), cap: 0, alloc }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `try_with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[inline]
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }
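    // Illustrative sketch (not compiled here): `into_box` is how a hypothetical caller hands the
    // whole buffer off as an owned slice once exactly `len` leading slots have been initialized,
    // e.g. when converting a Vec-like builder into a boxed slice:
    //
    //     // `buf` was created with `RawVec::with_capacity(len)` and its first `len` slots written.
    //     let boxed: Box<[MaybeUninit<u32>]> = unsafe { buf.into_box(len) };
    //
    // Passing the *initialized length* rather than `capacity()` matters here: as the type docs
    // note, the allocator may have returned excess, so `capacity()` can exceed what was asked for.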
    #[cfg(not(no_global_oom_handling))]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if T::IS_ZST || capacity == 0 {
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };

            // Allocators currently return a `NonNull<[u8]>` whose length
            // matches the size requested. If that ever changes, the capacity
            // here should change to `ptr.len() / mem::size_of::<T>()`.
            Self {
                ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
                cap: capacity,
                alloc,
            }
        }
    }

    fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
        // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
        if T::IS_ZST || capacity == 0 {
            return Ok(Self::new_in(alloc));
        }

        let layout = Layout::array::<T>(capacity).map_err(|_| CapacityOverflow)?;
        alloc_guard(layout.size())?;
        let result = match init {
            AllocInit::Uninitialized => alloc.allocate(layout),
            AllocInit::Zeroed => alloc.allocate_zeroed(layout),
        };
        let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?;

        // Allocators currently return a `NonNull<[u8]>` whose length
        // matches the size requested. If that ever changes, the capacity
        // here should change to `ptr.len() / mem::size_of::<T>()`.
        Ok(Self {
            ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
            cap: capacity,
            alloc,
        })
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if T::IS_ZST { usize::MAX } else { self.cap }
    }
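    // Illustrative sketch (not compiled here) of the zero-sized-type behavior documented above:
    // `cap` is never consulted for ZSTs, so even a freshly created buffer reports an "infinite"
    // capacity and no growth path is ever taken for it.
    //
    //     let v: RawVec<()> = RawVec::new();
    //     assert_eq!(v.capacity(), usize::MAX); // ZSTs: capacity is usize::MAX without allocating
    //
    //     let v: RawVec<u8> = RawVec::new();
    //     assert_eq!(v.capacity(), 0); // sized types start unallocated with capacity 0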
    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if T::IS_ZST || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
                Some((self.ptr.cast().into(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`
    /// (see the sketch following this method).
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }

        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }
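    // Illustrative sketch (not compiled here): a hypothetical `extend_from_slice` on the made-up
    // wrapper from the sketch near the top of the file would call `reserve` once up front and
    // then write the new elements unchecked, which is the bulk-push pattern the doc comment
    // above refers to:
    //
    //     fn extend_from_slice(&mut self, items: &[u32]) {
    //         self.buf.reserve(self.len, items.len());
    //         for &item in items {
    //             unsafe { self.buf.ptr().add(self.len).write(item) };
    //             self.len += 1;
    //         }
    //     }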
    /// A specialized version of `reserve()` used only by the hot and
    /// oft-instantiated `Vec::push()`, which does its own capacity check.
    #[cfg(not(no_global_oom_handling))]
    #[inline(never)]
    pub fn reserve_for_push(&mut self, len: usize) {
        handle_reserve(self.grow_amortized(len, 1));
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }

    /// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
    #[inline(never)]
    pub fn try_reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
        self.grow_amortized(len, 1)
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn shrink_to_fit(&mut self, cap: usize) {
        handle_reserve(self.shrink(cap));
    }
}

impl<T, A: Allocator> RawVec<T, A> {
    /// Returns `true` if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        additional > self.capacity().wrapping_sub(len)
    }

    fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / mem::size_of::<T>()`.
        self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = cap;
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if T::IS_ZST {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);

        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }
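    // Worked example (illustrative, assuming the allocator returns exactly the requested size):
    // repeatedly pushing `u32`s (4-byte elements, so `MIN_NON_ZERO_CAP == 4`) into an initially
    // empty buffer makes `grow_amortized` produce the capacity sequence
    //
    //     0 -> max(0 * 2, 1).max(4) = 4
    //     4 -> max(4 * 2, 5).max(4) = 8
    //     8 -> 16 -> 32 -> ...
    //
    // i.e. capacity doubles once the minimum is reached, which is what gives amortized O(1) pushes.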
    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if T::IS_ZST {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    #[cfg(not(no_global_oom_handling))]
    fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
        assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");

        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };

        let ptr = unsafe {
            // `Layout::array` cannot overflow here because it would have
            // overflowed earlier when capacity was larger.
            let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
        };
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }
}

// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(never)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;

    alloc_guard(new_layout.size())?;

    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            intrinsics::assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}

unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        if let Some((ptr, layout)) = self.current_memory() {
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}

// Central function for reserve error handling.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
    match result.map_err(|e| e.kind()) {
        Err(CapacityOverflow) => capacity_overflow(),
        Err(AllocError { layout, .. }) => handle_alloc_error(layout),
        Ok(()) => { /* yay */ }
    }
}
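// Illustrative sketch (not compiled here): the fallible `try_*` methods above surface the same
// two failure modes that `handle_reserve` turns into a panic or an abort, so a hypothetical
// caller can distinguish them through `TryReserveError::kind()`:
//
//     match buf.try_reserve(len, additional).map_err(|e| e.kind()) {
//         Ok(()) => { /* capacity is now sufficient */ }
//         Err(CapacityOverflow) => { /* `len + additional` overflowed or exceeded isize::MAX bytes */ }
//         Err(AllocError { layout, .. }) => { /* the allocator refused the request for `layout` */ }
//     }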
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space, e.g., PAE or x32.

#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
    if usize::BITS < 64 && alloc_size > isize::MAX as usize {
        Err(CapacityOverflow.into())
    } else {
        Ok(())
    }
}

// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}