// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping of units to possible cpus.  These units are grouped
 * based on the NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                      <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use a
 * second set.
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
58 * 59 * To use this allocator, arch code should do the following: 60 * 61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 62 * regular address to percpu pointer and back if they need to be 63 * different from the default 64 * 65 * - use pcpu_setup_first_chunk() during percpu area initialization to 66 * setup the first chunk containing the kernel static percpu area 67 */ 68 69 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 70 71 #include <linux/bitmap.h> 72 #include <linux/cpumask.h> 73 #include <linux/memblock.h> 74 #include <linux/err.h> 75 #include <linux/lcm.h> 76 #include <linux/list.h> 77 #include <linux/log2.h> 78 #include <linux/mm.h> 79 #include <linux/module.h> 80 #include <linux/mutex.h> 81 #include <linux/percpu.h> 82 #include <linux/pfn.h> 83 #include <linux/slab.h> 84 #include <linux/spinlock.h> 85 #include <linux/vmalloc.h> 86 #include <linux/workqueue.h> 87 #include <linux/kmemleak.h> 88 #include <linux/sched.h> 89 #include <linux/sched/mm.h> 90 #include <linux/memcontrol.h> 91 92 #include <asm/cacheflush.h> 93 #include <asm/sections.h> 94 #include <asm/tlbflush.h> 95 #include <asm/io.h> 96 97 #define CREATE_TRACE_POINTS 98 #include <trace/events/percpu.h> 99 100 #include "percpu-internal.h" 101 102 /* 103 * The slots are sorted by the size of the biggest continuous free area. 104 * 1-31 bytes share the same slot. 105 */ 106 #define PCPU_SLOT_BASE_SHIFT 5 107 /* chunks in slots below this are subject to being sidelined on failed alloc */ 108 #define PCPU_SLOT_FAIL_THRESHOLD 3 109 110 #define PCPU_EMPTY_POP_PAGES_LOW 2 111 #define PCPU_EMPTY_POP_PAGES_HIGH 4 112 113 #ifdef CONFIG_SMP 114 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 115 #ifndef __addr_to_pcpu_ptr 116 #define __addr_to_pcpu_ptr(addr) \ 117 (void __percpu *)((unsigned long)(addr) - \ 118 (unsigned long)pcpu_base_addr + \ 119 (unsigned long)__per_cpu_start) 120 #endif 121 #ifndef __pcpu_ptr_to_addr 122 #define __pcpu_ptr_to_addr(ptr) \ 123 (void __force *)((unsigned long)(ptr) + \ 124 (unsigned long)pcpu_base_addr - \ 125 (unsigned long)__per_cpu_start) 126 #endif 127 #else /* CONFIG_SMP */ 128 /* on UP, it's always identity mapped */ 129 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) 130 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) 131 #endif /* CONFIG_SMP */ 132 133 static int pcpu_unit_pages __ro_after_init; 134 static int pcpu_unit_size __ro_after_init; 135 static int pcpu_nr_units __ro_after_init; 136 static int pcpu_atom_size __ro_after_init; 137 int pcpu_nr_slots __ro_after_init; 138 static int pcpu_free_slot __ro_after_init; 139 int pcpu_sidelined_slot __ro_after_init; 140 int pcpu_to_depopulate_slot __ro_after_init; 141 static size_t pcpu_chunk_struct_size __ro_after_init; 142 143 /* cpus with the lowest and highest unit addresses */ 144 static unsigned int pcpu_low_unit_cpu __ro_after_init; 145 static unsigned int pcpu_high_unit_cpu __ro_after_init; 146 147 /* the address of the first chunk which starts with the kernel static area */ 148 void *pcpu_base_addr __ro_after_init; 149 EXPORT_SYMBOL_GPL(pcpu_base_addr); 150 151 static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */ 152 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */ 153 154 /* group information, used for vm allocation */ 155 static int pcpu_nr_groups __ro_after_init; 156 static const unsigned long *pcpu_group_offsets __ro_after_init; 157 static const size_t *pcpu_group_sizes __ro_after_init; 158 159 /* 160 * The first chunk 
which always exists. Note that unlike other 161 * chunks, this one can be allocated and mapped in several different 162 * ways and thus often doesn't live in the vmalloc area. 163 */ 164 struct pcpu_chunk *pcpu_first_chunk __ro_after_init; 165 166 /* 167 * Optional reserved chunk. This chunk reserves part of the first 168 * chunk and serves it for reserved allocations. When the reserved 169 * region doesn't exist, the following variable is NULL. 170 */ 171 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init; 172 173 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ 174 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ 175 176 struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */ 177 178 /* chunks which need their map areas extended, protected by pcpu_lock */ 179 static LIST_HEAD(pcpu_map_extend_chunks); 180 181 /* 182 * The number of empty populated pages, protected by pcpu_lock. 183 * The reserved chunk doesn't contribute to the count. 184 */ 185 int pcpu_nr_empty_pop_pages; 186 187 /* 188 * The number of populated pages in use by the allocator, protected by 189 * pcpu_lock. This number is kept per a unit per chunk (i.e. when a page gets 190 * allocated/deallocated, it is allocated/deallocated in all units of a chunk 191 * and increments/decrements this count by 1). 192 */ 193 static unsigned long pcpu_nr_populated; 194 195 /* 196 * Balance work is used to populate or destroy chunks asynchronously. We 197 * try to keep the number of populated free pages between 198 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one 199 * empty chunk. 200 */ 201 static void pcpu_balance_workfn(struct work_struct *work); 202 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); 203 static bool pcpu_async_enabled __read_mostly; 204 static bool pcpu_atomic_alloc_failed; 205 206 static void pcpu_schedule_balance_work(void) 207 { 208 if (pcpu_async_enabled) 209 schedule_work(&pcpu_balance_work); 210 } 211 212 /** 213 * pcpu_addr_in_chunk - check if the address is served from this chunk 214 * @chunk: chunk of interest 215 * @addr: percpu address 216 * 217 * RETURNS: 218 * True if the address is served from this chunk. 
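 *
 * As an illustrative sketch (values assumed, 4 KiB pages): a chunk with
 * base_addr 0xffff000000, start_offset 0x100, nr_pages 2 and end_offset
 * 0x80 only serves [0xffff000100, 0xffff001f80), so any address outside
 * that window makes this helper return false.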
219 */ 220 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) 221 { 222 void *start_addr, *end_addr; 223 224 if (!chunk) 225 return false; 226 227 start_addr = chunk->base_addr + chunk->start_offset; 228 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - 229 chunk->end_offset; 230 231 return addr >= start_addr && addr < end_addr; 232 } 233 234 static int __pcpu_size_to_slot(int size) 235 { 236 int highbit = fls(size); /* size is in bytes */ 237 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 238 } 239 240 static int pcpu_size_to_slot(int size) 241 { 242 if (size == pcpu_unit_size) 243 return pcpu_free_slot; 244 return __pcpu_size_to_slot(size); 245 } 246 247 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 248 { 249 const struct pcpu_block_md *chunk_md = &chunk->chunk_md; 250 251 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || 252 chunk_md->contig_hint == 0) 253 return 0; 254 255 return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE); 256 } 257 258 /* set the pointer to a chunk in a page struct */ 259 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 260 { 261 page->index = (unsigned long)pcpu; 262 } 263 264 /* obtain pointer to a chunk from a page struct */ 265 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 266 { 267 return (struct pcpu_chunk *)page->index; 268 } 269 270 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 271 { 272 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 273 } 274 275 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) 276 { 277 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); 278 } 279 280 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 281 unsigned int cpu, int page_idx) 282 { 283 return (unsigned long)chunk->base_addr + 284 pcpu_unit_page_offset(cpu, page_idx); 285 } 286 287 /* 288 * The following are helper functions to help access bitmaps and convert 289 * between bitmap offsets to address offsets. 290 */ 291 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) 292 { 293 return chunk->alloc_map + 294 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); 295 } 296 297 static unsigned long pcpu_off_to_block_index(int off) 298 { 299 return off / PCPU_BITMAP_BLOCK_BITS; 300 } 301 302 static unsigned long pcpu_off_to_block_off(int off) 303 { 304 return off & (PCPU_BITMAP_BLOCK_BITS - 1); 305 } 306 307 static unsigned long pcpu_block_off_to_off(int index, int off) 308 { 309 return index * PCPU_BITMAP_BLOCK_BITS + off; 310 } 311 312 /** 313 * pcpu_check_block_hint - check against the contig hint 314 * @block: block of interest 315 * @bits: size of allocation 316 * @align: alignment of area (max PAGE_SIZE) 317 * 318 * Check to see if the allocation can fit in the block's contig hint. 319 * Note, a chunk uses the same hints as a block so this can also check against 320 * the chunk's contig hint. 321 */ 322 static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits, 323 size_t align) 324 { 325 int bit_off = ALIGN(block->contig_hint_start, align) - 326 block->contig_hint_start; 327 328 return bit_off + bits <= block->contig_hint; 329 } 330 331 /* 332 * pcpu_next_hint - determine which hint to use 333 * @block: block of interest 334 * @alloc_bits: size of allocation 335 * 336 * This determines if we should scan based on the scan_hint or first_free. 337 * In general, we want to scan from first_free to fulfill allocations by 338 * first fit. 
However, if we know a scan_hint at position scan_hint_start 339 * cannot fulfill an allocation, we can begin scanning from there knowing 340 * the contig_hint will be our fallback. 341 */ 342 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits) 343 { 344 /* 345 * The three conditions below determine if we can skip past the 346 * scan_hint. First, does the scan hint exist. Second, is the 347 * contig_hint after the scan_hint (possibly not true iff 348 * contig_hint == scan_hint). Third, is the allocation request 349 * larger than the scan_hint. 350 */ 351 if (block->scan_hint && 352 block->contig_hint_start > block->scan_hint_start && 353 alloc_bits > block->scan_hint) 354 return block->scan_hint_start + block->scan_hint; 355 356 return block->first_free; 357 } 358 359 /** 360 * pcpu_next_md_free_region - finds the next hint free area 361 * @chunk: chunk of interest 362 * @bit_off: chunk offset 363 * @bits: size of free area 364 * 365 * Helper function for pcpu_for_each_md_free_region. It checks 366 * block->contig_hint and performs aggregation across blocks to find the 367 * next hint. It modifies bit_off and bits in-place to be consumed in the 368 * loop. 369 */ 370 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, 371 int *bits) 372 { 373 int i = pcpu_off_to_block_index(*bit_off); 374 int block_off = pcpu_off_to_block_off(*bit_off); 375 struct pcpu_block_md *block; 376 377 *bits = 0; 378 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 379 block++, i++) { 380 /* handles contig area across blocks */ 381 if (*bits) { 382 *bits += block->left_free; 383 if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 384 continue; 385 return; 386 } 387 388 /* 389 * This checks three things. First is there a contig_hint to 390 * check. Second, have we checked this hint before by 391 * comparing the block_off. Third, is this the same as the 392 * right contig hint. In the last case, it spills over into 393 * the next block and should be handled by the contig area 394 * across blocks code. 395 */ 396 *bits = block->contig_hint; 397 if (*bits && block->contig_hint_start >= block_off && 398 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) { 399 *bit_off = pcpu_block_off_to_off(i, 400 block->contig_hint_start); 401 return; 402 } 403 /* reset to satisfy the second predicate above */ 404 block_off = 0; 405 406 *bits = block->right_free; 407 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; 408 } 409 } 410 411 /** 412 * pcpu_next_fit_region - finds fit areas for a given allocation request 413 * @chunk: chunk of interest 414 * @alloc_bits: size of allocation 415 * @align: alignment of area (max PAGE_SIZE) 416 * @bit_off: chunk offset 417 * @bits: size of free area 418 * 419 * Finds the next free region that is viable for use with a given size and 420 * alignment. This only returns if there is a valid area to be used for this 421 * allocation. block->first_free is returned if the allocation request fits 422 * within the block to see if the request can be fulfilled prior to the contig 423 * hint. 
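 *
 * A small worked example (illustrative values): with scan_hint == 0,
 * first_free == 600 and a contig_hint of 32 bits starting at offset 600,
 * a request for 8 bits aligned to 4 bit-units reports *bit_off = 600 and
 * *bits = 8; pcpu_alloc_area() then verifies the fit against the real
 * alloc_map.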
424 */ 425 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, 426 int align, int *bit_off, int *bits) 427 { 428 int i = pcpu_off_to_block_index(*bit_off); 429 int block_off = pcpu_off_to_block_off(*bit_off); 430 struct pcpu_block_md *block; 431 432 *bits = 0; 433 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 434 block++, i++) { 435 /* handles contig area across blocks */ 436 if (*bits) { 437 *bits += block->left_free; 438 if (*bits >= alloc_bits) 439 return; 440 if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 441 continue; 442 } 443 444 /* check block->contig_hint */ 445 *bits = ALIGN(block->contig_hint_start, align) - 446 block->contig_hint_start; 447 /* 448 * This uses the block offset to determine if this has been 449 * checked in the prior iteration. 450 */ 451 if (block->contig_hint && 452 block->contig_hint_start >= block_off && 453 block->contig_hint >= *bits + alloc_bits) { 454 int start = pcpu_next_hint(block, alloc_bits); 455 456 *bits += alloc_bits + block->contig_hint_start - 457 start; 458 *bit_off = pcpu_block_off_to_off(i, start); 459 return; 460 } 461 /* reset to satisfy the second predicate above */ 462 block_off = 0; 463 464 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, 465 align); 466 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off; 467 *bit_off = pcpu_block_off_to_off(i, *bit_off); 468 if (*bits >= alloc_bits) 469 return; 470 } 471 472 /* no valid offsets were found - fail condition */ 473 *bit_off = pcpu_chunk_map_bits(chunk); 474 } 475 476 /* 477 * Metadata free area iterators. These perform aggregation of free areas 478 * based on the metadata blocks and return the offset @bit_off and size in 479 * bits of the free area @bits. pcpu_for_each_fit_region only returns when 480 * a fit is found for the allocation request. 481 */ 482 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ 483 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ 484 (bit_off) < pcpu_chunk_map_bits((chunk)); \ 485 (bit_off) += (bits) + 1, \ 486 pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) 487 488 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ 489 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 490 &(bits)); \ 491 (bit_off) < pcpu_chunk_map_bits((chunk)); \ 492 (bit_off) += (bits), \ 493 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 494 &(bits))) 495 496 /** 497 * pcpu_mem_zalloc - allocate memory 498 * @size: bytes to allocate 499 * @gfp: allocation flags 500 * 501 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 502 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used. 503 * This is to facilitate passing through whitelisted flags. The 504 * returned memory is always zeroed. 505 * 506 * RETURNS: 507 * Pointer to the allocated area on success, NULL on failure. 508 */ 509 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) 510 { 511 if (WARN_ON_ONCE(!slab_is_available())) 512 return NULL; 513 514 if (size <= PAGE_SIZE) 515 return kzalloc(size, gfp); 516 else 517 return __vmalloc(size, gfp | __GFP_ZERO); 518 } 519 520 /** 521 * pcpu_mem_free - free memory 522 * @ptr: memory to free 523 * 524 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 
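 *
 * A minimal usage sketch (sizes illustrative): requests at or below
 * PAGE_SIZE come from kzalloc() while larger ones use __vmalloc(), and
 * both are released here via kvfree():
 *
 *	void *buf = pcpu_mem_zalloc(2 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (buf)
 *		pcpu_mem_free(buf);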
525 */ 526 static void pcpu_mem_free(void *ptr) 527 { 528 kvfree(ptr); 529 } 530 531 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, 532 bool move_front) 533 { 534 if (chunk != pcpu_reserved_chunk) { 535 if (move_front) 536 list_move(&chunk->list, &pcpu_chunk_lists[slot]); 537 else 538 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]); 539 } 540 } 541 542 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) 543 { 544 __pcpu_chunk_move(chunk, slot, true); 545 } 546 547 /** 548 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 549 * @chunk: chunk of interest 550 * @oslot: the previous slot it was on 551 * 552 * This function is called after an allocation or free changed @chunk. 553 * New slot according to the changed state is determined and @chunk is 554 * moved to the slot. Note that the reserved chunk is never put on 555 * chunk slots. 556 * 557 * CONTEXT: 558 * pcpu_lock. 559 */ 560 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 561 { 562 int nslot = pcpu_chunk_slot(chunk); 563 564 /* leave isolated chunks in-place */ 565 if (chunk->isolated) 566 return; 567 568 if (oslot != nslot) 569 __pcpu_chunk_move(chunk, nslot, oslot < nslot); 570 } 571 572 static void pcpu_isolate_chunk(struct pcpu_chunk *chunk) 573 { 574 lockdep_assert_held(&pcpu_lock); 575 576 if (!chunk->isolated) { 577 chunk->isolated = true; 578 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages; 579 } 580 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]); 581 } 582 583 static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk) 584 { 585 lockdep_assert_held(&pcpu_lock); 586 587 if (chunk->isolated) { 588 chunk->isolated = false; 589 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages; 590 pcpu_chunk_relocate(chunk, -1); 591 } 592 } 593 594 /* 595 * pcpu_update_empty_pages - update empty page counters 596 * @chunk: chunk of interest 597 * @nr: nr of empty pages 598 * 599 * This is used to keep track of the empty pages now based on the premise 600 * a md_block covers a page. The hint update functions recognize if a block 601 * is made full or broken to calculate deltas for keeping track of free pages. 602 */ 603 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) 604 { 605 chunk->nr_empty_pop_pages += nr; 606 if (chunk != pcpu_reserved_chunk && !chunk->isolated) 607 pcpu_nr_empty_pop_pages += nr; 608 } 609 610 /* 611 * pcpu_region_overlap - determines if two regions overlap 612 * @a: start of first region, inclusive 613 * @b: end of first region, exclusive 614 * @x: start of second region, inclusive 615 * @y: end of second region, exclusive 616 * 617 * This is used to determine if the hint region [a, b) overlaps with the 618 * allocated region [x, y). 619 */ 620 static inline bool pcpu_region_overlap(int a, int b, int x, int y) 621 { 622 return (a < y) && (x < b); 623 } 624 625 /** 626 * pcpu_block_update - updates a block given a free area 627 * @block: block of interest 628 * @start: start offset in block 629 * @end: end offset in block 630 * 631 * Updates a block given a known free area. The region [start, end) is 632 * expected to be the entirety of the free area within a block. Chooses 633 * the best starting offset if the contig hints are equal. 
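 *
 * For example (illustrative values): a block with contig_hint 20 at offset
 * 50 and no scan_hint that is handed the free area [120, 160) promotes the
 * old hint to scan_hint (20 bits at 50) and records a new contig_hint of
 * 40 bits at 120, since the new area is larger and starts after the old one.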
634 */ 635 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) 636 { 637 int contig = end - start; 638 639 block->first_free = min(block->first_free, start); 640 if (start == 0) 641 block->left_free = contig; 642 643 if (end == block->nr_bits) 644 block->right_free = contig; 645 646 if (contig > block->contig_hint) { 647 /* promote the old contig_hint to be the new scan_hint */ 648 if (start > block->contig_hint_start) { 649 if (block->contig_hint > block->scan_hint) { 650 block->scan_hint_start = 651 block->contig_hint_start; 652 block->scan_hint = block->contig_hint; 653 } else if (start < block->scan_hint_start) { 654 /* 655 * The old contig_hint == scan_hint. But, the 656 * new contig is larger so hold the invariant 657 * scan_hint_start < contig_hint_start. 658 */ 659 block->scan_hint = 0; 660 } 661 } else { 662 block->scan_hint = 0; 663 } 664 block->contig_hint_start = start; 665 block->contig_hint = contig; 666 } else if (contig == block->contig_hint) { 667 if (block->contig_hint_start && 668 (!start || 669 __ffs(start) > __ffs(block->contig_hint_start))) { 670 /* start has a better alignment so use it */ 671 block->contig_hint_start = start; 672 if (start < block->scan_hint_start && 673 block->contig_hint > block->scan_hint) 674 block->scan_hint = 0; 675 } else if (start > block->scan_hint_start || 676 block->contig_hint > block->scan_hint) { 677 /* 678 * Knowing contig == contig_hint, update the scan_hint 679 * if it is farther than or larger than the current 680 * scan_hint. 681 */ 682 block->scan_hint_start = start; 683 block->scan_hint = contig; 684 } 685 } else { 686 /* 687 * The region is smaller than the contig_hint. So only update 688 * the scan_hint if it is larger than or equal and farther than 689 * the current scan_hint. 690 */ 691 if ((start < block->contig_hint_start && 692 (contig > block->scan_hint || 693 (contig == block->scan_hint && 694 start > block->scan_hint_start)))) { 695 block->scan_hint_start = start; 696 block->scan_hint = contig; 697 } 698 } 699 } 700 701 /* 702 * pcpu_block_update_scan - update a block given a free area from a scan 703 * @chunk: chunk of interest 704 * @bit_off: chunk offset 705 * @bits: size of free area 706 * 707 * Finding the final allocation spot first goes through pcpu_find_block_fit() 708 * to find a block that can hold the allocation and then pcpu_alloc_area() 709 * where a scan is used. When allocations require specific alignments, 710 * we can inadvertently create holes which will not be seen in the alloc 711 * or free paths. 712 * 713 * This takes a given free area hole and updates a block as it may change the 714 * scan_hint. We need to scan backwards to ensure we don't miss free bits 715 * from alignment. 716 */ 717 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, 718 int bits) 719 { 720 int s_off = pcpu_off_to_block_off(bit_off); 721 int e_off = s_off + bits; 722 int s_index, l_bit; 723 struct pcpu_block_md *block; 724 725 if (e_off > PCPU_BITMAP_BLOCK_BITS) 726 return; 727 728 s_index = pcpu_off_to_block_index(bit_off); 729 block = chunk->md_blocks + s_index; 730 731 /* scan backwards in case of alignment skipping free bits */ 732 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); 733 s_off = (s_off == l_bit) ? 
0 : l_bit + 1; 734 735 pcpu_block_update(block, s_off, e_off); 736 } 737 738 /** 739 * pcpu_chunk_refresh_hint - updates metadata about a chunk 740 * @chunk: chunk of interest 741 * @full_scan: if we should scan from the beginning 742 * 743 * Iterates over the metadata blocks to find the largest contig area. 744 * A full scan can be avoided on the allocation path as this is triggered 745 * if we broke the contig_hint. In doing so, the scan_hint will be before 746 * the contig_hint or after if the scan_hint == contig_hint. This cannot 747 * be prevented on freeing as we want to find the largest area possibly 748 * spanning blocks. 749 */ 750 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) 751 { 752 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 753 int bit_off, bits; 754 755 /* promote scan_hint to contig_hint */ 756 if (!full_scan && chunk_md->scan_hint) { 757 bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint; 758 chunk_md->contig_hint_start = chunk_md->scan_hint_start; 759 chunk_md->contig_hint = chunk_md->scan_hint; 760 chunk_md->scan_hint = 0; 761 } else { 762 bit_off = chunk_md->first_free; 763 chunk_md->contig_hint = 0; 764 } 765 766 bits = 0; 767 pcpu_for_each_md_free_region(chunk, bit_off, bits) 768 pcpu_block_update(chunk_md, bit_off, bit_off + bits); 769 } 770 771 /** 772 * pcpu_block_refresh_hint 773 * @chunk: chunk of interest 774 * @index: index of the metadata block 775 * 776 * Scans over the block beginning at first_free and updates the block 777 * metadata accordingly. 778 */ 779 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) 780 { 781 struct pcpu_block_md *block = chunk->md_blocks + index; 782 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); 783 unsigned int rs, re, start; /* region start, region end */ 784 785 /* promote scan_hint to contig_hint */ 786 if (block->scan_hint) { 787 start = block->scan_hint_start + block->scan_hint; 788 block->contig_hint_start = block->scan_hint_start; 789 block->contig_hint = block->scan_hint; 790 block->scan_hint = 0; 791 } else { 792 start = block->first_free; 793 block->contig_hint = 0; 794 } 795 796 block->right_free = 0; 797 798 /* iterate over free areas and update the contig hints */ 799 bitmap_for_each_clear_region(alloc_map, rs, re, start, 800 PCPU_BITMAP_BLOCK_BITS) 801 pcpu_block_update(block, rs, re); 802 } 803 804 /** 805 * pcpu_block_update_hint_alloc - update hint on allocation path 806 * @chunk: chunk of interest 807 * @bit_off: chunk offset 808 * @bits: size of request 809 * 810 * Updates metadata for the allocation path. The metadata only has to be 811 * refreshed by a full scan iff the chunk's contig hint is broken. Block level 812 * scans are required if the block's contig hint is broken. 813 */ 814 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, 815 int bits) 816 { 817 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 818 int nr_empty_pages = 0; 819 struct pcpu_block_md *s_block, *e_block, *block; 820 int s_index, e_index; /* block indexes of the freed allocation */ 821 int s_off, e_off; /* block offsets of the freed allocation */ 822 823 /* 824 * Calculate per block offsets. 825 * The calculation uses an inclusive range, but the resulting offsets 826 * are [start, end). e_index always points to the last block in the 827 * range. 
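	 *
	 * For example, assuming 4 KiB pages and the default 4 byte
	 * PCPU_MIN_ALLOC_SIZE (PCPU_BITMAP_BLOCK_BITS == 1024), an
	 * allocation of bits = 64 at bit_off = 1000 yields s_index = 0,
	 * e_index = 1, s_off = 1000 and e_off = 40.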
828 */ 829 s_index = pcpu_off_to_block_index(bit_off); 830 e_index = pcpu_off_to_block_index(bit_off + bits - 1); 831 s_off = pcpu_off_to_block_off(bit_off); 832 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 833 834 s_block = chunk->md_blocks + s_index; 835 e_block = chunk->md_blocks + e_index; 836 837 /* 838 * Update s_block. 839 * block->first_free must be updated if the allocation takes its place. 840 * If the allocation breaks the contig_hint, a scan is required to 841 * restore this hint. 842 */ 843 if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) 844 nr_empty_pages++; 845 846 if (s_off == s_block->first_free) 847 s_block->first_free = find_next_zero_bit( 848 pcpu_index_alloc_map(chunk, s_index), 849 PCPU_BITMAP_BLOCK_BITS, 850 s_off + bits); 851 852 if (pcpu_region_overlap(s_block->scan_hint_start, 853 s_block->scan_hint_start + s_block->scan_hint, 854 s_off, 855 s_off + bits)) 856 s_block->scan_hint = 0; 857 858 if (pcpu_region_overlap(s_block->contig_hint_start, 859 s_block->contig_hint_start + 860 s_block->contig_hint, 861 s_off, 862 s_off + bits)) { 863 /* block contig hint is broken - scan to fix it */ 864 if (!s_off) 865 s_block->left_free = 0; 866 pcpu_block_refresh_hint(chunk, s_index); 867 } else { 868 /* update left and right contig manually */ 869 s_block->left_free = min(s_block->left_free, s_off); 870 if (s_index == e_index) 871 s_block->right_free = min_t(int, s_block->right_free, 872 PCPU_BITMAP_BLOCK_BITS - e_off); 873 else 874 s_block->right_free = 0; 875 } 876 877 /* 878 * Update e_block. 879 */ 880 if (s_index != e_index) { 881 if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) 882 nr_empty_pages++; 883 884 /* 885 * When the allocation is across blocks, the end is along 886 * the left part of the e_block. 887 */ 888 e_block->first_free = find_next_zero_bit( 889 pcpu_index_alloc_map(chunk, e_index), 890 PCPU_BITMAP_BLOCK_BITS, e_off); 891 892 if (e_off == PCPU_BITMAP_BLOCK_BITS) { 893 /* reset the block */ 894 e_block++; 895 } else { 896 if (e_off > e_block->scan_hint_start) 897 e_block->scan_hint = 0; 898 899 e_block->left_free = 0; 900 if (e_off > e_block->contig_hint_start) { 901 /* contig hint is broken - scan to fix it */ 902 pcpu_block_refresh_hint(chunk, e_index); 903 } else { 904 e_block->right_free = 905 min_t(int, e_block->right_free, 906 PCPU_BITMAP_BLOCK_BITS - e_off); 907 } 908 } 909 910 /* update in-between md_blocks */ 911 nr_empty_pages += (e_index - s_index - 1); 912 for (block = s_block + 1; block < e_block; block++) { 913 block->scan_hint = 0; 914 block->contig_hint = 0; 915 block->left_free = 0; 916 block->right_free = 0; 917 } 918 } 919 920 if (nr_empty_pages) 921 pcpu_update_empty_pages(chunk, -nr_empty_pages); 922 923 if (pcpu_region_overlap(chunk_md->scan_hint_start, 924 chunk_md->scan_hint_start + 925 chunk_md->scan_hint, 926 bit_off, 927 bit_off + bits)) 928 chunk_md->scan_hint = 0; 929 930 /* 931 * The only time a full chunk scan is required is if the chunk 932 * contig hint is broken. Otherwise, it means a smaller space 933 * was used and therefore the chunk contig hint is still correct. 934 */ 935 if (pcpu_region_overlap(chunk_md->contig_hint_start, 936 chunk_md->contig_hint_start + 937 chunk_md->contig_hint, 938 bit_off, 939 bit_off + bits)) 940 pcpu_chunk_refresh_hint(chunk, false); 941 } 942 943 /** 944 * pcpu_block_update_hint_free - updates the block hints on the free path 945 * @chunk: chunk of interest 946 * @bit_off: chunk offset 947 * @bits: size of request 948 * 949 * Updates metadata for the allocation path. 
This avoids a blind block 950 * refresh by making use of the block contig hints. If this fails, it scans 951 * forward and backward to determine the extent of the free area. This is 952 * capped at the boundary of blocks. 953 * 954 * A chunk update is triggered if a page becomes free, a block becomes free, 955 * or the free spans across blocks. This tradeoff is to minimize iterating 956 * over the block metadata to update chunk_md->contig_hint. 957 * chunk_md->contig_hint may be off by up to a page, but it will never be more 958 * than the available space. If the contig hint is contained in one block, it 959 * will be accurate. 960 */ 961 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, 962 int bits) 963 { 964 int nr_empty_pages = 0; 965 struct pcpu_block_md *s_block, *e_block, *block; 966 int s_index, e_index; /* block indexes of the freed allocation */ 967 int s_off, e_off; /* block offsets of the freed allocation */ 968 int start, end; /* start and end of the whole free area */ 969 970 /* 971 * Calculate per block offsets. 972 * The calculation uses an inclusive range, but the resulting offsets 973 * are [start, end). e_index always points to the last block in the 974 * range. 975 */ 976 s_index = pcpu_off_to_block_index(bit_off); 977 e_index = pcpu_off_to_block_index(bit_off + bits - 1); 978 s_off = pcpu_off_to_block_off(bit_off); 979 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 980 981 s_block = chunk->md_blocks + s_index; 982 e_block = chunk->md_blocks + e_index; 983 984 /* 985 * Check if the freed area aligns with the block->contig_hint. 986 * If it does, then the scan to find the beginning/end of the 987 * larger free area can be avoided. 988 * 989 * start and end refer to beginning and end of the free area 990 * within each their respective blocks. This is not necessarily 991 * the entire free area as it may span blocks past the beginning 992 * or end of the block. 993 */ 994 start = s_off; 995 if (s_off == s_block->contig_hint + s_block->contig_hint_start) { 996 start = s_block->contig_hint_start; 997 } else { 998 /* 999 * Scan backwards to find the extent of the free area. 1000 * find_last_bit returns the starting bit, so if the start bit 1001 * is returned, that means there was no last bit and the 1002 * remainder of the chunk is free. 1003 */ 1004 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), 1005 start); 1006 start = (start == l_bit) ? 0 : l_bit + 1; 1007 } 1008 1009 end = e_off; 1010 if (e_off == e_block->contig_hint_start) 1011 end = e_block->contig_hint_start + e_block->contig_hint; 1012 else 1013 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), 1014 PCPU_BITMAP_BLOCK_BITS, end); 1015 1016 /* update s_block */ 1017 e_off = (s_index == e_index) ? 
		      end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the freed area spans blocks - update e_block and the middle blocks */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * True if the backing pages are populated.
 * @next_off is used to skip over unpopulated blocks in pcpu_find_block_fit().
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	unsigned int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit: if the allocation does not fit in the contig hint
 * of a block or chunk, that block or chunk is skipped.  This errs on the
 * side of caution to prevent excess iteration.  Poor alignment can cause
 * the allocator to skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
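 *
 * For example, assuming the default 4 byte PCPU_MIN_ALLOC_SIZE, a caller
 * asking for 1024 bytes aligned to 64 bytes passes alloc_bits = 256 and
 * align = 16; both values are in allocation (bit) units, not bytes.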
1106 */ 1107 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 1108 size_t align, bool pop_only) 1109 { 1110 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1111 int bit_off, bits, next_off; 1112 1113 /* 1114 * This is an optimization to prevent scanning by assuming if the 1115 * allocation cannot fit in the global hint, there is memory pressure 1116 * and creating a new chunk would happen soon. 1117 */ 1118 if (!pcpu_check_block_hint(chunk_md, alloc_bits, align)) 1119 return -1; 1120 1121 bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1122 bits = 0; 1123 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 1124 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1125 &next_off)) 1126 break; 1127 1128 bit_off = next_off; 1129 bits = 0; 1130 } 1131 1132 if (bit_off == pcpu_chunk_map_bits(chunk)) 1133 return -1; 1134 1135 return bit_off; 1136 } 1137 1138 /* 1139 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1140 * @map: the address to base the search on 1141 * @size: the bitmap size in bits 1142 * @start: the bitnumber to start searching at 1143 * @nr: the number of zeroed bits we're looking for 1144 * @align_mask: alignment mask for zero area 1145 * @largest_off: offset of the largest area skipped 1146 * @largest_bits: size of the largest area skipped 1147 * 1148 * The @align_mask should be one less than a power of 2. 1149 * 1150 * This is a modified version of bitmap_find_next_zero_area_off() to remember 1151 * the largest area that was skipped. This is imperfect, but in general is 1152 * good enough. The largest remembered region is the largest failed region 1153 * seen. This does not include anything we possibly skipped due to alignment. 1154 * pcpu_block_update_scan() does scan backwards to try and recover what was 1155 * lost to alignment. While this can cause scanning to miss earlier possible 1156 * free areas, smaller allocations will eventually fill those holes. 1157 */ 1158 static unsigned long pcpu_find_zero_area(unsigned long *map, 1159 unsigned long size, 1160 unsigned long start, 1161 unsigned long nr, 1162 unsigned long align_mask, 1163 unsigned long *largest_off, 1164 unsigned long *largest_bits) 1165 { 1166 unsigned long index, end, i, area_off, area_bits; 1167 again: 1168 index = find_next_zero_bit(map, size, start); 1169 1170 /* Align allocation */ 1171 index = __ALIGN_MASK(index, align_mask); 1172 area_off = index; 1173 1174 end = index + nr; 1175 if (end > size) 1176 return end; 1177 i = find_next_bit(map, end, index); 1178 if (i < end) { 1179 area_bits = i - area_off; 1180 /* remember largest unused area with best alignment */ 1181 if (area_bits > *largest_bits || 1182 (area_bits == *largest_bits && *largest_off && 1183 (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1184 *largest_off = area_off; 1185 *largest_bits = area_bits; 1186 } 1187 1188 start = i + 1; 1189 goto again; 1190 } 1191 return index; 1192 } 1193 1194 /** 1195 * pcpu_alloc_area - allocates an area from a pcpu_chunk 1196 * @chunk: chunk of interest 1197 * @alloc_bits: size of request in allocation units 1198 * @align: alignment of area (max PAGE_SIZE) 1199 * @start: bit_off to start searching 1200 * 1201 * This function takes in a @start offset to begin searching to fit an 1202 * allocation of @alloc_bits with alignment @align. It needs to scan 1203 * the allocation map because if it fits within the block's contig hint, 1204 * @start will be block->first_free. 
This is an attempt to fill the 1205 * allocation prior to breaking the contig hint. The allocation and 1206 * boundary maps are updated accordingly if it confirms a valid 1207 * free area. 1208 * 1209 * RETURNS: 1210 * Allocated addr offset in @chunk on success. 1211 * -1 if no matching area is found. 1212 */ 1213 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 1214 size_t align, int start) 1215 { 1216 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1217 size_t align_mask = (align) ? (align - 1) : 0; 1218 unsigned long area_off = 0, area_bits = 0; 1219 int bit_off, end, oslot; 1220 1221 lockdep_assert_held(&pcpu_lock); 1222 1223 oslot = pcpu_chunk_slot(chunk); 1224 1225 /* 1226 * Search to find a fit. 1227 */ 1228 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 1229 pcpu_chunk_map_bits(chunk)); 1230 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1231 align_mask, &area_off, &area_bits); 1232 if (bit_off >= end) 1233 return -1; 1234 1235 if (area_bits) 1236 pcpu_block_update_scan(chunk, area_off, area_bits); 1237 1238 /* update alloc map */ 1239 bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1240 1241 /* update boundary map */ 1242 set_bit(bit_off, chunk->bound_map); 1243 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 1244 set_bit(bit_off + alloc_bits, chunk->bound_map); 1245 1246 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 1247 1248 /* update first free bit */ 1249 if (bit_off == chunk_md->first_free) 1250 chunk_md->first_free = find_next_zero_bit( 1251 chunk->alloc_map, 1252 pcpu_chunk_map_bits(chunk), 1253 bit_off + alloc_bits); 1254 1255 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 1256 1257 pcpu_chunk_relocate(chunk, oslot); 1258 1259 return bit_off * PCPU_MIN_ALLOC_SIZE; 1260 } 1261 1262 /** 1263 * pcpu_free_area - frees the corresponding offset 1264 * @chunk: chunk of interest 1265 * @off: addr offset into chunk 1266 * 1267 * This function determines the size of an allocation to free using 1268 * the boundary bitmap and clears the allocation map. 1269 * 1270 * RETURNS: 1271 * Number of freed bytes. 
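 *
 * For example (illustrative, assuming the default 4 byte
 * PCPU_MIN_ALLOC_SIZE): an earlier 128 byte allocation at off = 400 set
 * bound_map bits 100 and 132.  Freeing off = 400 therefore scans from bit
 * 101, finds the boundary at 132, clears alloc_map bits 100..131 and
 * returns 128 freed bytes.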
1272 */ 1273 static int pcpu_free_area(struct pcpu_chunk *chunk, int off) 1274 { 1275 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1276 int bit_off, bits, end, oslot, freed; 1277 1278 lockdep_assert_held(&pcpu_lock); 1279 pcpu_stats_area_dealloc(chunk); 1280 1281 oslot = pcpu_chunk_slot(chunk); 1282 1283 bit_off = off / PCPU_MIN_ALLOC_SIZE; 1284 1285 /* find end index */ 1286 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 1287 bit_off + 1); 1288 bits = end - bit_off; 1289 bitmap_clear(chunk->alloc_map, bit_off, bits); 1290 1291 freed = bits * PCPU_MIN_ALLOC_SIZE; 1292 1293 /* update metadata */ 1294 chunk->free_bytes += freed; 1295 1296 /* update first free bit */ 1297 chunk_md->first_free = min(chunk_md->first_free, bit_off); 1298 1299 pcpu_block_update_hint_free(chunk, bit_off, bits); 1300 1301 pcpu_chunk_relocate(chunk, oslot); 1302 1303 return freed; 1304 } 1305 1306 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1307 { 1308 block->scan_hint = 0; 1309 block->contig_hint = nr_bits; 1310 block->left_free = nr_bits; 1311 block->right_free = nr_bits; 1312 block->first_free = 0; 1313 block->nr_bits = nr_bits; 1314 } 1315 1316 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1317 { 1318 struct pcpu_block_md *md_block; 1319 1320 /* init the chunk's block */ 1321 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 1322 1323 for (md_block = chunk->md_blocks; 1324 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1325 md_block++) 1326 pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1327 } 1328 1329 /** 1330 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 1331 * @tmp_addr: the start of the region served 1332 * @map_size: size of the region served 1333 * 1334 * This is responsible for creating the chunks that serve the first chunk. The 1335 * base_addr is page aligned down of @tmp_addr while the region end is page 1336 * aligned up. Offsets are kept track of to determine the region served. All 1337 * this is done to appease the bitmap allocator in avoiding partial blocks. 1338 * 1339 * RETURNS: 1340 * Chunk serving the region at @tmp_addr of @map_size. 1341 */ 1342 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 1343 int map_size) 1344 { 1345 struct pcpu_chunk *chunk; 1346 unsigned long aligned_addr, lcm_align; 1347 int start_offset, offset_bits, region_size, region_bits; 1348 size_t alloc_size; 1349 1350 /* region calculations */ 1351 aligned_addr = tmp_addr & PAGE_MASK; 1352 1353 start_offset = tmp_addr - aligned_addr; 1354 1355 /* 1356 * Align the end of the region with the LCM of PAGE_SIZE and 1357 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1358 * the other. 
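	 *
	 * For example, with 4 KiB pages and the default
	 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE, lcm_align is 4096: a
	 * tmp_addr of 0x100234 with a map_size of 0x3000 gives
	 * aligned_addr 0x100000, start_offset 0x234 and a region_size
	 * rounded up to 0x4000.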
1359 */ 1360 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1361 region_size = ALIGN(start_offset + map_size, lcm_align); 1362 1363 /* allocate chunk */ 1364 alloc_size = struct_size(chunk, populated, 1365 BITS_TO_LONGS(region_size >> PAGE_SHIFT)); 1366 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1367 if (!chunk) 1368 panic("%s: Failed to allocate %zu bytes\n", __func__, 1369 alloc_size); 1370 1371 INIT_LIST_HEAD(&chunk->list); 1372 1373 chunk->base_addr = (void *)aligned_addr; 1374 chunk->start_offset = start_offset; 1375 chunk->end_offset = region_size - chunk->start_offset - map_size; 1376 1377 chunk->nr_pages = region_size >> PAGE_SHIFT; 1378 region_bits = pcpu_chunk_map_bits(chunk); 1379 1380 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); 1381 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1382 if (!chunk->alloc_map) 1383 panic("%s: Failed to allocate %zu bytes\n", __func__, 1384 alloc_size); 1385 1386 alloc_size = 1387 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); 1388 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1389 if (!chunk->bound_map) 1390 panic("%s: Failed to allocate %zu bytes\n", __func__, 1391 alloc_size); 1392 1393 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1394 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1395 if (!chunk->md_blocks) 1396 panic("%s: Failed to allocate %zu bytes\n", __func__, 1397 alloc_size); 1398 1399 #ifdef CONFIG_MEMCG_KMEM 1400 /* first chunk is free to use */ 1401 chunk->obj_cgroups = NULL; 1402 #endif 1403 pcpu_init_md_blocks(chunk); 1404 1405 /* manage populated page bitmap */ 1406 chunk->immutable = true; 1407 bitmap_fill(chunk->populated, chunk->nr_pages); 1408 chunk->nr_populated = chunk->nr_pages; 1409 chunk->nr_empty_pop_pages = chunk->nr_pages; 1410 1411 chunk->free_bytes = map_size; 1412 1413 if (chunk->start_offset) { 1414 /* hide the beginning of the bitmap */ 1415 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 1416 bitmap_set(chunk->alloc_map, 0, offset_bits); 1417 set_bit(0, chunk->bound_map); 1418 set_bit(offset_bits, chunk->bound_map); 1419 1420 chunk->chunk_md.first_free = offset_bits; 1421 1422 pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1423 } 1424 1425 if (chunk->end_offset) { 1426 /* hide the end of the bitmap */ 1427 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 1428 bitmap_set(chunk->alloc_map, 1429 pcpu_chunk_map_bits(chunk) - offset_bits, 1430 offset_bits); 1431 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 1432 chunk->bound_map); 1433 set_bit(region_bits, chunk->bound_map); 1434 1435 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1436 - offset_bits, offset_bits); 1437 } 1438 1439 return chunk; 1440 } 1441 1442 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) 1443 { 1444 struct pcpu_chunk *chunk; 1445 int region_bits; 1446 1447 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 1448 if (!chunk) 1449 return NULL; 1450 1451 INIT_LIST_HEAD(&chunk->list); 1452 chunk->nr_pages = pcpu_unit_pages; 1453 region_bits = pcpu_chunk_map_bits(chunk); 1454 1455 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 1456 sizeof(chunk->alloc_map[0]), gfp); 1457 if (!chunk->alloc_map) 1458 goto alloc_map_fail; 1459 1460 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 1461 sizeof(chunk->bound_map[0]), gfp); 1462 if (!chunk->bound_map) 1463 goto bound_map_fail; 1464 1465 chunk->md_blocks = 
pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 1466 sizeof(chunk->md_blocks[0]), gfp); 1467 if (!chunk->md_blocks) 1468 goto md_blocks_fail; 1469 1470 #ifdef CONFIG_MEMCG_KMEM 1471 if (!mem_cgroup_kmem_disabled()) { 1472 chunk->obj_cgroups = 1473 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) * 1474 sizeof(struct obj_cgroup *), gfp); 1475 if (!chunk->obj_cgroups) 1476 goto objcg_fail; 1477 } 1478 #endif 1479 1480 pcpu_init_md_blocks(chunk); 1481 1482 /* init metadata */ 1483 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1484 1485 return chunk; 1486 1487 #ifdef CONFIG_MEMCG_KMEM 1488 objcg_fail: 1489 pcpu_mem_free(chunk->md_blocks); 1490 #endif 1491 md_blocks_fail: 1492 pcpu_mem_free(chunk->bound_map); 1493 bound_map_fail: 1494 pcpu_mem_free(chunk->alloc_map); 1495 alloc_map_fail: 1496 pcpu_mem_free(chunk); 1497 1498 return NULL; 1499 } 1500 1501 static void pcpu_free_chunk(struct pcpu_chunk *chunk) 1502 { 1503 if (!chunk) 1504 return; 1505 #ifdef CONFIG_MEMCG_KMEM 1506 pcpu_mem_free(chunk->obj_cgroups); 1507 #endif 1508 pcpu_mem_free(chunk->md_blocks); 1509 pcpu_mem_free(chunk->bound_map); 1510 pcpu_mem_free(chunk->alloc_map); 1511 pcpu_mem_free(chunk); 1512 } 1513 1514 /** 1515 * pcpu_chunk_populated - post-population bookkeeping 1516 * @chunk: pcpu_chunk which got populated 1517 * @page_start: the start page 1518 * @page_end: the end page 1519 * 1520 * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1521 * the bookkeeping information accordingly. Must be called after each 1522 * successful population. 1523 * 1524 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it 1525 * is to serve an allocation in that area. 1526 */ 1527 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 1528 int page_end) 1529 { 1530 int nr = page_end - page_start; 1531 1532 lockdep_assert_held(&pcpu_lock); 1533 1534 bitmap_set(chunk->populated, page_start, nr); 1535 chunk->nr_populated += nr; 1536 pcpu_nr_populated += nr; 1537 1538 pcpu_update_empty_pages(chunk, nr); 1539 } 1540 1541 /** 1542 * pcpu_chunk_depopulated - post-depopulation bookkeeping 1543 * @chunk: pcpu_chunk which got depopulated 1544 * @page_start: the start page 1545 * @page_end: the end page 1546 * 1547 * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1548 * Update the bookkeeping information accordingly. Must be called after 1549 * each successful depopulation. 1550 */ 1551 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1552 int page_start, int page_end) 1553 { 1554 int nr = page_end - page_start; 1555 1556 lockdep_assert_held(&pcpu_lock); 1557 1558 bitmap_clear(chunk->populated, page_start, nr); 1559 chunk->nr_populated -= nr; 1560 pcpu_nr_populated -= nr; 1561 1562 pcpu_update_empty_pages(chunk, -nr); 1563 } 1564 1565 /* 1566 * Chunk management implementation. 1567 * 1568 * To allow different implementations, chunk alloc/free and 1569 * [de]population are implemented in a separate file which is pulled 1570 * into this file and compiled together. The following functions 1571 * should be implemented. 
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate an address to the struct page backing it
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end);
static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * This is an internal function that handles all but static allocations.
 * Static percpu address values should never be passed into the allocator.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the dynamic region (first chunk)? */
	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
		return pcpu_first_chunk;

	/* is it in the reserved region? */
	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
		return pcpu_reserved_chunk;

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
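	 *
	 * As a sketch: on a single-group machine where the current CPU maps
	 * to unit 3, pcpu_unit_offsets[cpu] is 3 * pcpu_unit_size, so the
	 * page lookup below is done at addr + 3 * pcpu_unit_size rather
	 * than at the unit0 address.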
1621 */ 1622 addr += pcpu_unit_offsets[raw_smp_processor_id()]; 1623 return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 1624 } 1625 1626 #ifdef CONFIG_MEMCG_KMEM 1627 static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, 1628 struct obj_cgroup **objcgp) 1629 { 1630 struct obj_cgroup *objcg; 1631 1632 if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT)) 1633 return true; 1634 1635 objcg = get_obj_cgroup_from_current(); 1636 if (!objcg) 1637 return true; 1638 1639 if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) { 1640 obj_cgroup_put(objcg); 1641 return false; 1642 } 1643 1644 *objcgp = objcg; 1645 return true; 1646 } 1647 1648 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 1649 struct pcpu_chunk *chunk, int off, 1650 size_t size) 1651 { 1652 if (!objcg) 1653 return; 1654 1655 if (likely(chunk && chunk->obj_cgroups)) { 1656 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg; 1657 1658 rcu_read_lock(); 1659 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1660 size * num_possible_cpus()); 1661 rcu_read_unlock(); 1662 } else { 1663 obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 1664 obj_cgroup_put(objcg); 1665 } 1666 } 1667 1668 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 1669 { 1670 struct obj_cgroup *objcg; 1671 1672 if (unlikely(!chunk->obj_cgroups)) 1673 return; 1674 1675 objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT]; 1676 if (!objcg) 1677 return; 1678 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL; 1679 1680 obj_cgroup_uncharge(objcg, size * num_possible_cpus()); 1681 1682 rcu_read_lock(); 1683 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B, 1684 -(size * num_possible_cpus())); 1685 rcu_read_unlock(); 1686 1687 obj_cgroup_put(objcg); 1688 } 1689 1690 #else /* CONFIG_MEMCG_KMEM */ 1691 static bool 1692 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp) 1693 { 1694 return true; 1695 } 1696 1697 static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg, 1698 struct pcpu_chunk *chunk, int off, 1699 size_t size) 1700 { 1701 } 1702 1703 static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size) 1704 { 1705 } 1706 #endif /* CONFIG_MEMCG_KMEM */ 1707 1708 /** 1709 * pcpu_alloc - the percpu allocator 1710 * @size: size of area to allocate in bytes 1711 * @align: alignment of area (max PAGE_SIZE) 1712 * @reserved: allocate from the reserved chunk if available 1713 * @gfp: allocation flags 1714 * 1715 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 1716 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 1717 * then no warning will be triggered on invalid or failed allocation 1718 * requests. 1719 * 1720 * RETURNS: 1721 * Percpu pointer to the allocated area on success, NULL on failure. 
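 *
 * A minimal (illustrative) use via the public wrappers, which funnel into
 * this function:
 *
 *	int __percpu *cnt = alloc_percpu_gfp(int, GFP_KERNEL | __GFP_ACCOUNT);
 *
 *	if (cnt)
 *		this_cpu_inc(*cnt);
 *	free_percpu(cnt);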
1722 */ 1723 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 1724 gfp_t gfp) 1725 { 1726 gfp_t pcpu_gfp; 1727 bool is_atomic; 1728 bool do_warn; 1729 struct obj_cgroup *objcg = NULL; 1730 static int warn_limit = 10; 1731 struct pcpu_chunk *chunk, *next; 1732 const char *err; 1733 int slot, off, cpu, ret; 1734 unsigned long flags; 1735 void __percpu *ptr; 1736 size_t bits, bit_align; 1737 1738 gfp = current_gfp_context(gfp); 1739 /* whitelisted flags that can be passed to the backing allocators */ 1740 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 1741 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 1742 do_warn = !(gfp & __GFP_NOWARN); 1743 1744 /* 1745 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 1746 * therefore alignment must be a minimum of that many bytes. 1747 * An allocation may have internal fragmentation from rounding up 1748 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 1749 */ 1750 if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1751 align = PCPU_MIN_ALLOC_SIZE; 1752 1753 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 1754 bits = size >> PCPU_MIN_ALLOC_SHIFT; 1755 bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 1756 1757 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 1758 !is_power_of_2(align))) { 1759 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1760 size, align); 1761 return NULL; 1762 } 1763 1764 if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg))) 1765 return NULL; 1766 1767 if (!is_atomic) { 1768 /* 1769 * pcpu_balance_workfn() allocates memory under this mutex, 1770 * and it may wait for memory reclaim. Allow current task 1771 * to become OOM victim, in case of memory pressure. 1772 */ 1773 if (gfp & __GFP_NOFAIL) { 1774 mutex_lock(&pcpu_alloc_mutex); 1775 } else if (mutex_lock_killable(&pcpu_alloc_mutex)) { 1776 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1777 return NULL; 1778 } 1779 } 1780 1781 spin_lock_irqsave(&pcpu_lock, flags); 1782 1783 /* serve reserved allocations from the reserved chunk if available */ 1784 if (reserved && pcpu_reserved_chunk) { 1785 chunk = pcpu_reserved_chunk; 1786 1787 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 1788 if (off < 0) { 1789 err = "alloc from reserved chunk failed"; 1790 goto fail_unlock; 1791 } 1792 1793 off = pcpu_alloc_area(chunk, bits, bit_align, off); 1794 if (off >= 0) 1795 goto area_found; 1796 1797 err = "alloc from reserved chunk failed"; 1798 goto fail_unlock; 1799 } 1800 1801 restart: 1802 /* search through normal chunks */ 1803 for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) { 1804 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot], 1805 list) { 1806 off = pcpu_find_block_fit(chunk, bits, bit_align, 1807 is_atomic); 1808 if (off < 0) { 1809 if (slot < PCPU_SLOT_FAIL_THRESHOLD) 1810 pcpu_chunk_move(chunk, 0); 1811 continue; 1812 } 1813 1814 off = pcpu_alloc_area(chunk, bits, bit_align, off); 1815 if (off >= 0) { 1816 pcpu_reintegrate_chunk(chunk); 1817 goto area_found; 1818 } 1819 } 1820 } 1821 1822 spin_unlock_irqrestore(&pcpu_lock, flags); 1823 1824 /* 1825 * No space left. Create a new chunk. We don't want multiple 1826 * tasks to create chunks simultaneously. Serialize and create iff 1827 * there's still no empty chunk after grabbing the mutex. 
1828 */ 1829 if (is_atomic) { 1830 err = "atomic alloc failed, no space left"; 1831 goto fail; 1832 } 1833 1834 if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) { 1835 chunk = pcpu_create_chunk(pcpu_gfp); 1836 if (!chunk) { 1837 err = "failed to allocate new chunk"; 1838 goto fail; 1839 } 1840 1841 spin_lock_irqsave(&pcpu_lock, flags); 1842 pcpu_chunk_relocate(chunk, -1); 1843 } else { 1844 spin_lock_irqsave(&pcpu_lock, flags); 1845 } 1846 1847 goto restart; 1848 1849 area_found: 1850 pcpu_stats_area_alloc(chunk, size); 1851 spin_unlock_irqrestore(&pcpu_lock, flags); 1852 1853 /* populate if not all pages are already there */ 1854 if (!is_atomic) { 1855 unsigned int page_start, page_end, rs, re; 1856 1857 page_start = PFN_DOWN(off); 1858 page_end = PFN_UP(off + size); 1859 1860 bitmap_for_each_clear_region(chunk->populated, rs, re, 1861 page_start, page_end) { 1862 WARN_ON(chunk->immutable); 1863 1864 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1865 1866 spin_lock_irqsave(&pcpu_lock, flags); 1867 if (ret) { 1868 pcpu_free_area(chunk, off); 1869 err = "failed to populate"; 1870 goto fail_unlock; 1871 } 1872 pcpu_chunk_populated(chunk, rs, re); 1873 spin_unlock_irqrestore(&pcpu_lock, flags); 1874 } 1875 1876 mutex_unlock(&pcpu_alloc_mutex); 1877 } 1878 1879 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 1880 pcpu_schedule_balance_work(); 1881 1882 /* clear the areas and return address relative to base address */ 1883 for_each_possible_cpu(cpu) 1884 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1885 1886 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 1887 kmemleak_alloc_percpu(ptr, size, gfp); 1888 1889 trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1890 chunk->base_addr, off, ptr); 1891 1892 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); 1893 1894 return ptr; 1895 1896 fail_unlock: 1897 spin_unlock_irqrestore(&pcpu_lock, flags); 1898 fail: 1899 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1900 1901 if (!is_atomic && do_warn && warn_limit) { 1902 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1903 size, align, is_atomic, err); 1904 dump_stack(); 1905 if (!--warn_limit) 1906 pr_info("limit reached, disable warning\n"); 1907 } 1908 if (is_atomic) { 1909 /* see the flag handling in pcpu_balance_workfn() */ 1910 pcpu_atomic_alloc_failed = true; 1911 pcpu_schedule_balance_work(); 1912 } else { 1913 mutex_unlock(&pcpu_alloc_mutex); 1914 } 1915 1916 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size); 1917 1918 return NULL; 1919 } 1920 1921 /** 1922 * __alloc_percpu_gfp - allocate dynamic percpu area 1923 * @size: size of area to allocate in bytes 1924 * @align: alignment of area (max PAGE_SIZE) 1925 * @gfp: allocation flags 1926 * 1927 * Allocate zero-filled percpu area of @size bytes aligned at @align. If 1928 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 1929 * be called from any context but is a lot more likely to fail. If @gfp 1930 * has __GFP_NOWARN then no warning will be triggered on invalid or failed 1931 * allocation requests. 1932 * 1933 * RETURNS: 1934 * Percpu pointer to the allocated area on success, NULL on failure. 
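 *
 * A minimal usage sketch (the "counters" variable is hypothetical and
 * not part of this file):
 *
 *	u64 __percpu *counters;
 *
 *	counters = __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
 *				      GFP_KERNEL | __GFP_ACCOUNT);
 *	if (!counters)
 *		return -ENOMEM;
 *	this_cpu_add(*counters, 1);
 *	...
 *	free_percpu(counters);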
1935 */ 1936 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 1937 { 1938 return pcpu_alloc(size, align, false, gfp); 1939 } 1940 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 1941 1942 /** 1943 * __alloc_percpu - allocate dynamic percpu area 1944 * @size: size of area to allocate in bytes 1945 * @align: alignment of area (max PAGE_SIZE) 1946 * 1947 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). 1948 */ 1949 void __percpu *__alloc_percpu(size_t size, size_t align) 1950 { 1951 return pcpu_alloc(size, align, false, GFP_KERNEL); 1952 } 1953 EXPORT_SYMBOL_GPL(__alloc_percpu); 1954 1955 /** 1956 * __alloc_reserved_percpu - allocate reserved percpu area 1957 * @size: size of area to allocate in bytes 1958 * @align: alignment of area (max PAGE_SIZE) 1959 * 1960 * Allocate zero-filled percpu area of @size bytes aligned at @align 1961 * from reserved percpu area if arch has set it up; otherwise, 1962 * allocation is served from the same dynamic area. Might sleep. 1963 * Might trigger writeouts. 1964 * 1965 * CONTEXT: 1966 * Does GFP_KERNEL allocation. 1967 * 1968 * RETURNS: 1969 * Percpu pointer to the allocated area on success, NULL on failure. 1970 */ 1971 void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1972 { 1973 return pcpu_alloc(size, align, true, GFP_KERNEL); 1974 } 1975 1976 /** 1977 * pcpu_balance_free - manage the amount of free chunks 1978 * @empty_only: free chunks only if there are no populated pages 1979 * 1980 * If empty_only is %false, reclaim all fully free chunks regardless of the 1981 * number of populated pages. Otherwise, only reclaim chunks that have no 1982 * populated pages. 1983 * 1984 * CONTEXT: 1985 * pcpu_lock (can be dropped temporarily) 1986 */ 1987 static void pcpu_balance_free(bool empty_only) 1988 { 1989 LIST_HEAD(to_free); 1990 struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot]; 1991 struct pcpu_chunk *chunk, *next; 1992 1993 lockdep_assert_held(&pcpu_lock); 1994 1995 /* 1996 * There's no reason to keep around multiple unused chunks and VM 1997 * areas can be scarce. Destroy all free chunks except for one. 1998 */ 1999 list_for_each_entry_safe(chunk, next, free_head, list) { 2000 WARN_ON(chunk->immutable); 2001 2002 /* spare the first one */ 2003 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 2004 continue; 2005 2006 if (!empty_only || chunk->nr_empty_pop_pages == 0) 2007 list_move(&chunk->list, &to_free); 2008 } 2009 2010 if (list_empty(&to_free)) 2011 return; 2012 2013 spin_unlock_irq(&pcpu_lock); 2014 list_for_each_entry_safe(chunk, next, &to_free, list) { 2015 unsigned int rs, re; 2016 2017 bitmap_for_each_set_region(chunk->populated, rs, re, 0, 2018 chunk->nr_pages) { 2019 pcpu_depopulate_chunk(chunk, rs, re); 2020 spin_lock_irq(&pcpu_lock); 2021 pcpu_chunk_depopulated(chunk, rs, re); 2022 spin_unlock_irq(&pcpu_lock); 2023 } 2024 pcpu_destroy_chunk(chunk); 2025 cond_resched(); 2026 } 2027 spin_lock_irq(&pcpu_lock); 2028 } 2029 2030 /** 2031 * pcpu_balance_populated - manage the amount of populated pages 2032 * 2033 * Maintain a certain amount of populated pages to satisfy atomic allocations. 2034 * It is possible that this is called when physical memory is scarce causing 2035 * OOM killer to be triggered. We should avoid doing so until an actual 2036 * allocation causes the failure as it is possible that requests can be 2037 * serviced from already backed regions. 
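 *
 * For example (illustrative): if two of the PCPU_EMPTY_POP_PAGES_HIGH
 * reserve pages have been consumed since the last run, nr_to_pop below
 * works out to two, and only that many pages are repopulated, starting
 * from the most packed chunks.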
2038 * 2039 * CONTEXT: 2040 * pcpu_lock (can be dropped temporarily) 2041 */ 2042 static void pcpu_balance_populated(void) 2043 { 2044 /* gfp flags passed to underlying allocators */ 2045 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 2046 struct pcpu_chunk *chunk; 2047 int slot, nr_to_pop, ret; 2048 2049 lockdep_assert_held(&pcpu_lock); 2050 2051 /* 2052 * Ensure there are certain number of free populated pages for 2053 * atomic allocs. Fill up from the most packed so that atomic 2054 * allocs don't increase fragmentation. If atomic allocation 2055 * failed previously, always populate the maximum amount. This 2056 * should prevent atomic allocs larger than PAGE_SIZE from keeping 2057 * failing indefinitely; however, large atomic allocs are not 2058 * something we support properly and can be highly unreliable and 2059 * inefficient. 2060 */ 2061 retry_pop: 2062 if (pcpu_atomic_alloc_failed) { 2063 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 2064 /* best effort anyway, don't worry about synchronization */ 2065 pcpu_atomic_alloc_failed = false; 2066 } else { 2067 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 2068 pcpu_nr_empty_pop_pages, 2069 0, PCPU_EMPTY_POP_PAGES_HIGH); 2070 } 2071 2072 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) { 2073 unsigned int nr_unpop = 0, rs, re; 2074 2075 if (!nr_to_pop) 2076 break; 2077 2078 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) { 2079 nr_unpop = chunk->nr_pages - chunk->nr_populated; 2080 if (nr_unpop) 2081 break; 2082 } 2083 2084 if (!nr_unpop) 2085 continue; 2086 2087 /* @chunk can't go away while pcpu_alloc_mutex is held */ 2088 bitmap_for_each_clear_region(chunk->populated, rs, re, 0, 2089 chunk->nr_pages) { 2090 int nr = min_t(int, re - rs, nr_to_pop); 2091 2092 spin_unlock_irq(&pcpu_lock); 2093 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 2094 cond_resched(); 2095 spin_lock_irq(&pcpu_lock); 2096 if (!ret) { 2097 nr_to_pop -= nr; 2098 pcpu_chunk_populated(chunk, rs, rs + nr); 2099 } else { 2100 nr_to_pop = 0; 2101 } 2102 2103 if (!nr_to_pop) 2104 break; 2105 } 2106 } 2107 2108 if (nr_to_pop) { 2109 /* ran out of chunks to populate, create a new one and retry */ 2110 spin_unlock_irq(&pcpu_lock); 2111 chunk = pcpu_create_chunk(gfp); 2112 cond_resched(); 2113 spin_lock_irq(&pcpu_lock); 2114 if (chunk) { 2115 pcpu_chunk_relocate(chunk, -1); 2116 goto retry_pop; 2117 } 2118 } 2119 } 2120 2121 /** 2122 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages 2123 * 2124 * Scan over chunks in the depopulate list and try to release unused populated 2125 * pages back to the system. Depopulated chunks are sidelined to prevent 2126 * repopulating these pages unless required. Fully free chunks are reintegrated 2127 * and freed accordingly (1 is kept around). If we drop below the empty 2128 * populated pages threshold, reintegrate the chunk if it has empty free pages. 2129 * Each chunk is scanned in the reverse order to keep populated pages close to 2130 * the beginning of the chunk. 2131 * 2132 * CONTEXT: 2133 * pcpu_lock (can be dropped temporarily) 2134 * 2135 */ 2136 static void pcpu_reclaim_populated(void) 2137 { 2138 struct pcpu_chunk *chunk; 2139 struct pcpu_block_md *block; 2140 int i, end; 2141 2142 lockdep_assert_held(&pcpu_lock); 2143 2144 restart: 2145 /* 2146 * Once a chunk is isolated to the to_depopulate list, the chunk is no 2147 * longer discoverable to allocations whom may populate pages. 
The only 2148 * other accessor is the free path which only returns area back to the 2149 * allocator not touching the populated bitmap. 2150 */ 2151 while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) { 2152 chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot], 2153 struct pcpu_chunk, list); 2154 WARN_ON(chunk->immutable); 2155 2156 /* 2157 * Scan chunk's pages in the reverse order to keep populated 2158 * pages close to the beginning of the chunk. 2159 */ 2160 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) { 2161 /* no more work to do */ 2162 if (chunk->nr_empty_pop_pages == 0) 2163 break; 2164 2165 /* reintegrate chunk to prevent atomic alloc failures */ 2166 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) { 2167 pcpu_reintegrate_chunk(chunk); 2168 goto restart; 2169 } 2170 2171 /* 2172 * If the page is empty and populated, start or 2173 * extend the (i, end) range. If i == 0, decrease 2174 * i and perform the depopulation to cover the last 2175 * (first) page in the chunk. 2176 */ 2177 block = chunk->md_blocks + i; 2178 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS && 2179 test_bit(i, chunk->populated)) { 2180 if (end == -1) 2181 end = i; 2182 if (i > 0) 2183 continue; 2184 i--; 2185 } 2186 2187 /* depopulate if there is an active range */ 2188 if (end == -1) 2189 continue; 2190 2191 spin_unlock_irq(&pcpu_lock); 2192 pcpu_depopulate_chunk(chunk, i + 1, end + 1); 2193 cond_resched(); 2194 spin_lock_irq(&pcpu_lock); 2195 2196 pcpu_chunk_depopulated(chunk, i + 1, end + 1); 2197 2198 /* reset the range and continue */ 2199 end = -1; 2200 } 2201 2202 if (chunk->free_bytes == pcpu_unit_size) 2203 pcpu_reintegrate_chunk(chunk); 2204 else 2205 list_move(&chunk->list, 2206 &pcpu_chunk_lists[pcpu_sidelined_slot]); 2207 } 2208 } 2209 2210 /** 2211 * pcpu_balance_workfn - manage the amount of free chunks and populated pages 2212 * @work: unused 2213 * 2214 * For each chunk type, manage the number of fully free chunks and the number of 2215 * populated pages. An important thing to consider is when pages are freed and 2216 * how they contribute to the global counts. 2217 */ 2218 static void pcpu_balance_workfn(struct work_struct *work) 2219 { 2220 /* 2221 * pcpu_balance_free() is called twice because the first time we may 2222 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us 2223 * to grow other chunks. This then gives pcpu_reclaim_populated() time 2224 * to move fully free chunks to the active list to be freed if 2225 * appropriate. 2226 */ 2227 mutex_lock(&pcpu_alloc_mutex); 2228 spin_lock_irq(&pcpu_lock); 2229 2230 pcpu_balance_free(false); 2231 pcpu_reclaim_populated(); 2232 pcpu_balance_populated(); 2233 pcpu_balance_free(true); 2234 2235 spin_unlock_irq(&pcpu_lock); 2236 mutex_unlock(&pcpu_alloc_mutex); 2237 } 2238 2239 /** 2240 * free_percpu - free percpu area 2241 * @ptr: pointer to area to free 2242 * 2243 * Free percpu area @ptr. 2244 * 2245 * CONTEXT: 2246 * Can be called from atomic context. 
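 *
 * Like kfree(), freeing a NULL pointer is a no-op, so error unwinding
 * can call this unconditionally. A typical pairing (sketch, the type
 * and variable names are hypothetical):
 *
 *	struct foo_stats __percpu *stats = alloc_percpu(struct foo_stats);
 *
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(stats);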
2247 */ 2248 void free_percpu(void __percpu *ptr) 2249 { 2250 void *addr; 2251 struct pcpu_chunk *chunk; 2252 unsigned long flags; 2253 int size, off; 2254 bool need_balance = false; 2255 2256 if (!ptr) 2257 return; 2258 2259 kmemleak_free_percpu(ptr); 2260 2261 addr = __pcpu_ptr_to_addr(ptr); 2262 2263 spin_lock_irqsave(&pcpu_lock, flags); 2264 2265 chunk = pcpu_chunk_addr_search(addr); 2266 off = addr - chunk->base_addr; 2267 2268 size = pcpu_free_area(chunk, off); 2269 2270 pcpu_memcg_free_hook(chunk, off, size); 2271 2272 /* 2273 * If there are more than one fully free chunks, wake up grim reaper. 2274 * If the chunk is isolated, it may be in the process of being 2275 * reclaimed. Let reclaim manage cleaning up of that chunk. 2276 */ 2277 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) { 2278 struct pcpu_chunk *pos; 2279 2280 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list) 2281 if (pos != chunk) { 2282 need_balance = true; 2283 break; 2284 } 2285 } else if (pcpu_should_reclaim_chunk(chunk)) { 2286 pcpu_isolate_chunk(chunk); 2287 need_balance = true; 2288 } 2289 2290 trace_percpu_free_percpu(chunk->base_addr, off, ptr); 2291 2292 spin_unlock_irqrestore(&pcpu_lock, flags); 2293 2294 if (need_balance) 2295 pcpu_schedule_balance_work(); 2296 } 2297 EXPORT_SYMBOL_GPL(free_percpu); 2298 2299 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 2300 { 2301 #ifdef CONFIG_SMP 2302 const size_t static_size = __per_cpu_end - __per_cpu_start; 2303 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2304 unsigned int cpu; 2305 2306 for_each_possible_cpu(cpu) { 2307 void *start = per_cpu_ptr(base, cpu); 2308 void *va = (void *)addr; 2309 2310 if (va >= start && va < start + static_size) { 2311 if (can_addr) { 2312 *can_addr = (unsigned long) (va - start); 2313 *can_addr += (unsigned long) 2314 per_cpu_ptr(base, get_boot_cpu_id()); 2315 } 2316 return true; 2317 } 2318 } 2319 #endif 2320 /* on UP, can't distinguish from other static vars, always false */ 2321 return false; 2322 } 2323 2324 /** 2325 * is_kernel_percpu_address - test whether address is from static percpu area 2326 * @addr: address to test 2327 * 2328 * Test whether @addr belongs to in-kernel static percpu area. Module 2329 * static percpu areas are not considered. For those, use 2330 * is_module_percpu_address(). 2331 * 2332 * RETURNS: 2333 * %true if @addr is from in-kernel static percpu area, %false otherwise. 2334 */ 2335 bool is_kernel_percpu_address(unsigned long addr) 2336 { 2337 return __is_kernel_percpu_address(addr, NULL); 2338 } 2339 2340 /** 2341 * per_cpu_ptr_to_phys - convert translated percpu address to physical address 2342 * @addr: the address to be converted to physical address 2343 * 2344 * Given @addr which is dereferenceable address obtained via one of 2345 * percpu access macros, this function translates it into its physical 2346 * address. The caller is responsible for ensuring @addr stays valid 2347 * until this function finishes. 2348 * 2349 * percpu allocator has special setup for the first chunk, which currently 2350 * supports either embedding in linear address space or vmalloc mapping, 2351 * and, from the second one, the backing allocator (currently either vm or 2352 * km) provides translation. 2353 * 2354 * The addr can be translated simply without checking if it falls into the 2355 * first chunk. 
But the current code reflects better how percpu allocator 2356 * actually works, and the verification can discover both bugs in percpu 2357 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 2358 * code. 2359 * 2360 * RETURNS: 2361 * The physical address for @addr. 2362 */ 2363 phys_addr_t per_cpu_ptr_to_phys(void *addr) 2364 { 2365 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2366 bool in_first_chunk = false; 2367 unsigned long first_low, first_high; 2368 unsigned int cpu; 2369 2370 /* 2371 * The following test on unit_low/high isn't strictly 2372 * necessary but will speed up lookups of addresses which 2373 * aren't in the first chunk. 2374 * 2375 * The address check is against full chunk sizes. pcpu_base_addr 2376 * points to the beginning of the first chunk including the 2377 * static region. Assumes good intent as the first chunk may 2378 * not be full (ie. < pcpu_unit_pages in size). 2379 */ 2380 first_low = (unsigned long)pcpu_base_addr + 2381 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2382 first_high = (unsigned long)pcpu_base_addr + 2383 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2384 if ((unsigned long)addr >= first_low && 2385 (unsigned long)addr < first_high) { 2386 for_each_possible_cpu(cpu) { 2387 void *start = per_cpu_ptr(base, cpu); 2388 2389 if (addr >= start && addr < start + pcpu_unit_size) { 2390 in_first_chunk = true; 2391 break; 2392 } 2393 } 2394 } 2395 2396 if (in_first_chunk) { 2397 if (!is_vmalloc_addr(addr)) 2398 return __pa(addr); 2399 else 2400 return page_to_phys(vmalloc_to_page(addr)) + 2401 offset_in_page(addr); 2402 } else 2403 return page_to_phys(pcpu_addr_to_page(addr)) + 2404 offset_in_page(addr); 2405 } 2406 2407 /** 2408 * pcpu_alloc_alloc_info - allocate percpu allocation info 2409 * @nr_groups: the number of groups 2410 * @nr_units: the number of units 2411 * 2412 * Allocate ai which is large enough for @nr_groups groups containing 2413 * @nr_units units. The returned ai's groups[0].cpu_map points to the 2414 * cpu_map array which is long enough for @nr_units and filled with 2415 * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2416 * pointer of other groups. 2417 * 2418 * RETURNS: 2419 * Pointer to the allocated pcpu_alloc_info on success, NULL on 2420 * failure. 2421 */ 2422 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2423 int nr_units) 2424 { 2425 struct pcpu_alloc_info *ai; 2426 size_t base_size, ai_size; 2427 void *ptr; 2428 int unit; 2429 2430 base_size = ALIGN(struct_size(ai, groups, nr_groups), 2431 __alignof__(ai->groups[0].cpu_map[0])); 2432 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2433 2434 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2435 if (!ptr) 2436 return NULL; 2437 ai = ptr; 2438 ptr += base_size; 2439 2440 ai->groups[0].cpu_map = ptr; 2441 2442 for (unit = 0; unit < nr_units; unit++) 2443 ai->groups[0].cpu_map[unit] = NR_CPUS; 2444 2445 ai->nr_groups = nr_groups; 2446 ai->__ai_size = PFN_ALIGN(ai_size); 2447 2448 return ai; 2449 } 2450 2451 /** 2452 * pcpu_free_alloc_info - free percpu allocation info 2453 * @ai: pcpu_alloc_info to free 2454 * 2455 * Free @ai which was allocated by pcpu_alloc_alloc_info(). 
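 *
 * Typical pairing in first chunk setup code (sketch, loosely mirroring
 * the UP setup_per_cpu_areas() near the end of this file):
 *
 *	ai = pcpu_alloc_alloc_info(1, 1);
 *	if (!ai)
 *		panic("Failed to allocate memory for percpu areas.");
 *	...
 *	pcpu_setup_first_chunk(ai, fc);
 *	pcpu_free_alloc_info(ai);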
2456 */ 2457 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2458 { 2459 memblock_free_early(__pa(ai), ai->__ai_size); 2460 } 2461 2462 /** 2463 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2464 * @lvl: loglevel 2465 * @ai: allocation info to dump 2466 * 2467 * Print out information about @ai using loglevel @lvl. 2468 */ 2469 static void pcpu_dump_alloc_info(const char *lvl, 2470 const struct pcpu_alloc_info *ai) 2471 { 2472 int group_width = 1, cpu_width = 1, width; 2473 char empty_str[] = "--------"; 2474 int alloc = 0, alloc_end = 0; 2475 int group, v; 2476 int upa, apl; /* units per alloc, allocs per line */ 2477 2478 v = ai->nr_groups; 2479 while (v /= 10) 2480 group_width++; 2481 2482 v = num_possible_cpus(); 2483 while (v /= 10) 2484 cpu_width++; 2485 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2486 2487 upa = ai->alloc_size / ai->unit_size; 2488 width = upa * (cpu_width + 1) + group_width + 3; 2489 apl = rounddown_pow_of_two(max(60 / width, 1)); 2490 2491 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2492 lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2493 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2494 2495 for (group = 0; group < ai->nr_groups; group++) { 2496 const struct pcpu_group_info *gi = &ai->groups[group]; 2497 int unit = 0, unit_end = 0; 2498 2499 BUG_ON(gi->nr_units % upa); 2500 for (alloc_end += gi->nr_units / upa; 2501 alloc < alloc_end; alloc++) { 2502 if (!(alloc % apl)) { 2503 pr_cont("\n"); 2504 printk("%spcpu-alloc: ", lvl); 2505 } 2506 pr_cont("[%0*d] ", group_width, group); 2507 2508 for (unit_end += upa; unit < unit_end; unit++) 2509 if (gi->cpu_map[unit] != NR_CPUS) 2510 pr_cont("%0*d ", 2511 cpu_width, gi->cpu_map[unit]); 2512 else 2513 pr_cont("%s ", empty_str); 2514 } 2515 } 2516 pr_cont("\n"); 2517 } 2518 2519 /** 2520 * pcpu_setup_first_chunk - initialize the first percpu chunk 2521 * @ai: pcpu_alloc_info describing how to percpu area is shaped 2522 * @base_addr: mapped address 2523 * 2524 * Initialize the first percpu chunk which contains the kernel static 2525 * percpu area. This function is to be called from arch percpu area 2526 * setup path. 2527 * 2528 * @ai contains all information necessary to initialize the first 2529 * chunk and prime the dynamic percpu allocator. 2530 * 2531 * @ai->static_size is the size of static percpu area. 2532 * 2533 * @ai->reserved_size, if non-zero, specifies the amount of bytes to 2534 * reserve after the static area in the first chunk. This reserves 2535 * the first chunk such that it's available only through reserved 2536 * percpu allocation. This is primarily used to serve module percpu 2537 * static areas on architectures where the addressing model has 2538 * limited offset range for symbol relocations to guarantee module 2539 * percpu symbols fall inside the relocatable range. 2540 * 2541 * @ai->dyn_size determines the number of bytes available for dynamic 2542 * allocation in the first chunk. The area between @ai->static_size + 2543 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 2544 * 2545 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 2546 * and equal to or larger than @ai->static_size + @ai->reserved_size + 2547 * @ai->dyn_size. 2548 * 2549 * @ai->atom_size is the allocation atom size and used as alignment 2550 * for vm areas. 2551 * 2552 * @ai->alloc_size is the allocation size and always multiple of 2553 * @ai->atom_size. 
This is larger than @ai->atom_size if 2554 * @ai->unit_size is larger than @ai->atom_size. 2555 * 2556 * @ai->nr_groups and @ai->groups describe virtual memory layout of 2557 * percpu areas. Units which should be colocated are put into the 2558 * same group. Dynamic VM areas will be allocated according to these 2559 * groupings. If @ai->nr_groups is zero, a single group containing 2560 * all units is assumed. 2561 * 2562 * The caller should have mapped the first chunk at @base_addr and 2563 * copied static data to each unit. 2564 * 2565 * The first chunk will always contain a static and a dynamic region. 2566 * However, the static region is not managed by any chunk. If the first 2567 * chunk also contains a reserved region, it is served by two chunks - 2568 * one for the reserved region and one for the dynamic region. They 2569 * share the same vm, but use offset regions in the area allocation map. 2570 * The chunk serving the dynamic region is circulated in the chunk slots 2571 * and available for dynamic allocation like any other chunk. 2572 */ 2573 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2574 void *base_addr) 2575 { 2576 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2577 size_t static_size, dyn_size; 2578 struct pcpu_chunk *chunk; 2579 unsigned long *group_offsets; 2580 size_t *group_sizes; 2581 unsigned long *unit_off; 2582 unsigned int cpu; 2583 int *unit_map; 2584 int group, unit, i; 2585 int map_size; 2586 unsigned long tmp_addr; 2587 size_t alloc_size; 2588 2589 #define PCPU_SETUP_BUG_ON(cond) do { \ 2590 if (unlikely(cond)) { \ 2591 pr_emerg("failed to initialize, %s\n", #cond); \ 2592 pr_emerg("cpu_possible_mask=%*pb\n", \ 2593 cpumask_pr_args(cpu_possible_mask)); \ 2594 pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2595 BUG(); \ 2596 } \ 2597 } while (0) 2598 2599 /* sanity checks */ 2600 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2601 #ifdef CONFIG_SMP 2602 PCPU_SETUP_BUG_ON(!ai->static_size); 2603 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2604 #endif 2605 PCPU_SETUP_BUG_ON(!base_addr); 2606 PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2607 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2608 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2609 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2610 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2611 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2612 PCPU_SETUP_BUG_ON(!ai->dyn_size); 2613 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2614 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2615 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 2616 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 2617 2618 /* process group information and build config tables accordingly */ 2619 alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2620 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2621 if (!group_offsets) 2622 panic("%s: Failed to allocate %zu bytes\n", __func__, 2623 alloc_size); 2624 2625 alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2626 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2627 if (!group_sizes) 2628 panic("%s: Failed to allocate %zu bytes\n", __func__, 2629 alloc_size); 2630 2631 alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2632 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2633 if (!unit_map) 2634 panic("%s: Failed to allocate %zu bytes\n", __func__, 2635 alloc_size); 2636 2637 alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2638 
unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2639 if (!unit_off) 2640 panic("%s: Failed to allocate %zu bytes\n", __func__, 2641 alloc_size); 2642 2643 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2644 unit_map[cpu] = UINT_MAX; 2645 2646 pcpu_low_unit_cpu = NR_CPUS; 2647 pcpu_high_unit_cpu = NR_CPUS; 2648 2649 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2650 const struct pcpu_group_info *gi = &ai->groups[group]; 2651 2652 group_offsets[group] = gi->base_offset; 2653 group_sizes[group] = gi->nr_units * ai->unit_size; 2654 2655 for (i = 0; i < gi->nr_units; i++) { 2656 cpu = gi->cpu_map[i]; 2657 if (cpu == NR_CPUS) 2658 continue; 2659 2660 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2661 PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2662 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2663 2664 unit_map[cpu] = unit + i; 2665 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2666 2667 /* determine low/high unit_cpu */ 2668 if (pcpu_low_unit_cpu == NR_CPUS || 2669 unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2670 pcpu_low_unit_cpu = cpu; 2671 if (pcpu_high_unit_cpu == NR_CPUS || 2672 unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2673 pcpu_high_unit_cpu = cpu; 2674 } 2675 } 2676 pcpu_nr_units = unit; 2677 2678 for_each_possible_cpu(cpu) 2679 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2680 2681 /* we're done parsing the input, undefine BUG macro and dump config */ 2682 #undef PCPU_SETUP_BUG_ON 2683 pcpu_dump_alloc_info(KERN_DEBUG, ai); 2684 2685 pcpu_nr_groups = ai->nr_groups; 2686 pcpu_group_offsets = group_offsets; 2687 pcpu_group_sizes = group_sizes; 2688 pcpu_unit_map = unit_map; 2689 pcpu_unit_offsets = unit_off; 2690 2691 /* determine basic parameters */ 2692 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2693 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 2694 pcpu_atom_size = ai->atom_size; 2695 pcpu_chunk_struct_size = struct_size(chunk, populated, 2696 BITS_TO_LONGS(pcpu_unit_pages)); 2697 2698 pcpu_stats_save_ai(ai); 2699 2700 /* 2701 * Allocate chunk slots. The slots after the active slots are: 2702 * sidelined_slot - isolated, depopulated chunks 2703 * free_slot - fully free chunks 2704 * to_depopulate_slot - isolated, chunks to depopulate 2705 */ 2706 pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1; 2707 pcpu_free_slot = pcpu_sidelined_slot + 1; 2708 pcpu_to_depopulate_slot = pcpu_free_slot + 1; 2709 pcpu_nr_slots = pcpu_to_depopulate_slot + 1; 2710 pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * 2711 sizeof(pcpu_chunk_lists[0]), 2712 SMP_CACHE_BYTES); 2713 if (!pcpu_chunk_lists) 2714 panic("%s: Failed to allocate %zu bytes\n", __func__, 2715 pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); 2716 2717 for (i = 0; i < pcpu_nr_slots; i++) 2718 INIT_LIST_HEAD(&pcpu_chunk_lists[i]); 2719 2720 /* 2721 * The end of the static region needs to be aligned with the 2722 * minimum allocation size as this offsets the reserved and 2723 * dynamic region. The first chunk ends page aligned by 2724 * expanding the dynamic region, therefore the dynamic region 2725 * can be shrunk to compensate while still staying above the 2726 * configured sizes. 2727 */ 2728 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2729 dyn_size = ai->dyn_size - (static_size - ai->static_size); 2730 2731 /* 2732 * Initialize first chunk. 2733 * If the reserved_size is non-zero, this initializes the reserved 2734 * chunk. If the reserved_size is zero, the reserved chunk is NULL 2735 * and the dynamic region is initialized here. 
The first chunk, 2736 * pcpu_first_chunk, will always point to the chunk that serves 2737 * the dynamic region. 2738 */ 2739 tmp_addr = (unsigned long)base_addr + static_size; 2740 map_size = ai->reserved_size ?: dyn_size; 2741 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2742 2743 /* init dynamic chunk if necessary */ 2744 if (ai->reserved_size) { 2745 pcpu_reserved_chunk = chunk; 2746 2747 tmp_addr = (unsigned long)base_addr + static_size + 2748 ai->reserved_size; 2749 map_size = dyn_size; 2750 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2751 } 2752 2753 /* link the first chunk in */ 2754 pcpu_first_chunk = chunk; 2755 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2756 pcpu_chunk_relocate(pcpu_first_chunk, -1); 2757 2758 /* include all regions of the first chunk */ 2759 pcpu_nr_populated += PFN_DOWN(size_sum); 2760 2761 pcpu_stats_chunk_alloc(); 2762 trace_percpu_create_chunk(base_addr); 2763 2764 /* we're done */ 2765 pcpu_base_addr = base_addr; 2766 } 2767 2768 #ifdef CONFIG_SMP 2769 2770 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2771 [PCPU_FC_AUTO] = "auto", 2772 [PCPU_FC_EMBED] = "embed", 2773 [PCPU_FC_PAGE] = "page", 2774 }; 2775 2776 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2777 2778 static int __init percpu_alloc_setup(char *str) 2779 { 2780 if (!str) 2781 return -EINVAL; 2782 2783 if (0) 2784 /* nada */; 2785 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2786 else if (!strcmp(str, "embed")) 2787 pcpu_chosen_fc = PCPU_FC_EMBED; 2788 #endif 2789 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2790 else if (!strcmp(str, "page")) 2791 pcpu_chosen_fc = PCPU_FC_PAGE; 2792 #endif 2793 else 2794 pr_warn("unknown allocator %s specified\n", str); 2795 2796 return 0; 2797 } 2798 early_param("percpu_alloc", percpu_alloc_setup); 2799 2800 /* 2801 * pcpu_embed_first_chunk() is used by the generic percpu setup. 2802 * Build it if needed by the arch config or the generic setup is going 2803 * to be used. 2804 */ 2805 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 2806 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 2807 #define BUILD_EMBED_FIRST_CHUNK 2808 #endif 2809 2810 /* build pcpu_page_first_chunk() iff needed by the arch config */ 2811 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 2812 #define BUILD_PAGE_FIRST_CHUNK 2813 #endif 2814 2815 /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 2816 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 2817 /** 2818 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2819 * @reserved_size: the size of reserved percpu area in bytes 2820 * @dyn_size: minimum free size for dynamic allocation in bytes 2821 * @atom_size: allocation atom size 2822 * @cpu_distance_fn: callback to determine distance between cpus, optional 2823 * 2824 * This function determines grouping of units, their mappings to cpus 2825 * and other parameters considering needed percpu size, allocation 2826 * atom size and distances between CPUs. 2827 * 2828 * Groups are always multiples of atom size and CPUs which are of 2829 * LOCAL_DISTANCE both ways are grouped together and share space for 2830 * units in the same group. The returned configuration is guaranteed 2831 * to have CPUs on different nodes on different groups and >=75% usage 2832 * of allocated virtual address space. 2833 * 2834 * RETURNS: 2835 * On success, pointer to the new allocation_info is returned. On 2836 * failure, ERR_PTR value is returned. 
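 *
 * Rough worked example (numbers are illustrative only): with a 2 MiB
 * @atom_size and a minimum unit size of ~44 KiB, the allocation is
 * rounded up to 2 MiB; the largest units-per-alloc whose per-unit
 * share both divides 2 MiB evenly and stays page aligned is 32, i.e. a
 * 64 KiB unit size. The wastage pass below may then lower upa further
 * to keep >= 75% of the allocated space in use.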
2837 */ 2838 static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info( 2839 size_t reserved_size, size_t dyn_size, 2840 size_t atom_size, 2841 pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2842 { 2843 static int group_map[NR_CPUS] __initdata; 2844 static int group_cnt[NR_CPUS] __initdata; 2845 static struct cpumask mask __initdata; 2846 const size_t static_size = __per_cpu_end - __per_cpu_start; 2847 int nr_groups = 1, nr_units = 0; 2848 size_t size_sum, min_unit_size, alloc_size; 2849 int upa, max_upa, best_upa; /* units_per_alloc */ 2850 int last_allocs, group, unit; 2851 unsigned int cpu, tcpu; 2852 struct pcpu_alloc_info *ai; 2853 unsigned int *cpu_map; 2854 2855 /* this function may be called multiple times */ 2856 memset(group_map, 0, sizeof(group_map)); 2857 memset(group_cnt, 0, sizeof(group_cnt)); 2858 cpumask_clear(&mask); 2859 2860 /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2861 size_sum = PFN_ALIGN(static_size + reserved_size + 2862 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2863 dyn_size = size_sum - static_size - reserved_size; 2864 2865 /* 2866 * Determine min_unit_size, alloc_size and max_upa such that 2867 * alloc_size is multiple of atom_size and is the smallest 2868 * which can accommodate 4k aligned segments which are equal to 2869 * or larger than min_unit_size. 2870 */ 2871 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2872 2873 /* determine the maximum # of units that can fit in an allocation */ 2874 alloc_size = roundup(min_unit_size, atom_size); 2875 upa = alloc_size / min_unit_size; 2876 while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2877 upa--; 2878 max_upa = upa; 2879 2880 cpumask_copy(&mask, cpu_possible_mask); 2881 2882 /* group cpus according to their proximity */ 2883 for (group = 0; !cpumask_empty(&mask); group++) { 2884 /* pop the group's first cpu */ 2885 cpu = cpumask_first(&mask); 2886 group_map[cpu] = group; 2887 group_cnt[group]++; 2888 cpumask_clear_cpu(cpu, &mask); 2889 2890 for_each_cpu(tcpu, &mask) { 2891 if (!cpu_distance_fn || 2892 (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE && 2893 cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) { 2894 group_map[tcpu] = group; 2895 group_cnt[group]++; 2896 cpumask_clear_cpu(tcpu, &mask); 2897 } 2898 } 2899 } 2900 nr_groups = group; 2901 2902 /* 2903 * Wasted space is caused by a ratio imbalance of upa to group_cnt. 2904 * Expand the unit_size until we use >= 75% of the units allocated. 2905 * Related to atom_size, which could be much larger than the unit_size. 2906 */ 2907 last_allocs = INT_MAX; 2908 best_upa = 0; 2909 for (upa = max_upa; upa; upa--) { 2910 int allocs = 0, wasted = 0; 2911 2912 if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2913 continue; 2914 2915 for (group = 0; group < nr_groups; group++) { 2916 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2917 allocs += this_allocs; 2918 wasted += this_allocs * upa - group_cnt[group]; 2919 } 2920 2921 /* 2922 * Don't accept if wastage is over 1/3. The 2923 * greater-than comparison ensures upa==1 always 2924 * passes the following check. 
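 *
 * Worked example (illustrative): with 4 possible CPUs in a
 * single group and upa == 8, this_allocs is DIV_ROUND_UP(4, 8)
 * == 1 and wasted is 1 * 8 - 4 == 4; that exceeds 4 / 3 == 1,
 * so upa == 8 is skipped. With upa == 1, wasted is always 0 and
 * the check passes.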
2925 */ 2926 if (wasted > num_possible_cpus() / 3) 2927 continue; 2928 2929 /* and then don't consume more memory */ 2930 if (allocs > last_allocs) 2931 break; 2932 last_allocs = allocs; 2933 best_upa = upa; 2934 } 2935 BUG_ON(!best_upa); 2936 upa = best_upa; 2937 2938 /* allocate and fill alloc_info */ 2939 for (group = 0; group < nr_groups; group++) 2940 nr_units += roundup(group_cnt[group], upa); 2941 2942 ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2943 if (!ai) 2944 return ERR_PTR(-ENOMEM); 2945 cpu_map = ai->groups[0].cpu_map; 2946 2947 for (group = 0; group < nr_groups; group++) { 2948 ai->groups[group].cpu_map = cpu_map; 2949 cpu_map += roundup(group_cnt[group], upa); 2950 } 2951 2952 ai->static_size = static_size; 2953 ai->reserved_size = reserved_size; 2954 ai->dyn_size = dyn_size; 2955 ai->unit_size = alloc_size / upa; 2956 ai->atom_size = atom_size; 2957 ai->alloc_size = alloc_size; 2958 2959 for (group = 0, unit = 0; group < nr_groups; group++) { 2960 struct pcpu_group_info *gi = &ai->groups[group]; 2961 2962 /* 2963 * Initialize base_offset as if all groups are located 2964 * back-to-back. The caller should update this to 2965 * reflect actual allocation. 2966 */ 2967 gi->base_offset = unit * ai->unit_size; 2968 2969 for_each_possible_cpu(cpu) 2970 if (group_map[cpu] == group) 2971 gi->cpu_map[gi->nr_units++] = cpu; 2972 gi->nr_units = roundup(gi->nr_units, upa); 2973 unit += gi->nr_units; 2974 } 2975 BUG_ON(unit != nr_units); 2976 2977 return ai; 2978 } 2979 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2980 2981 #if defined(BUILD_EMBED_FIRST_CHUNK) 2982 /** 2983 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 2984 * @reserved_size: the size of reserved percpu area in bytes 2985 * @dyn_size: minimum free size for dynamic allocation in bytes 2986 * @atom_size: allocation atom size 2987 * @cpu_distance_fn: callback to determine distance between cpus, optional 2988 * @alloc_fn: function to allocate percpu page 2989 * @free_fn: function to free percpu page 2990 * 2991 * This is a helper to ease setting up embedded first percpu chunk and 2992 * can be called where pcpu_setup_first_chunk() is expected. 2993 * 2994 * If this function is used to setup the first chunk, it is allocated 2995 * by calling @alloc_fn and used as-is without being mapped into 2996 * vmalloc area. Allocations are always whole multiples of @atom_size 2997 * aligned to @atom_size. 2998 * 2999 * This enables the first chunk to piggy back on the linear physical 3000 * mapping which often uses larger page size. Please note that this 3001 * can result in very sparse cpu->unit mapping on NUMA machines thus 3002 * requiring large vmalloc address space. Don't use this allocator if 3003 * vmalloc space is not orders of magnitude larger than distances 3004 * between node memory addresses (ie. 32bit NUMA machines). 3005 * 3006 * @dyn_size specifies the minimum dynamic area size. 3007 * 3008 * If the needed size is smaller than the minimum or specified unit 3009 * size, the leftover is returned using @free_fn. 3010 * 3011 * RETURNS: 3012 * 0 on success, -errno on failure. 
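 *
 * A reference caller is the generic setup_per_cpu_areas() near the end
 * of this file; a typical invocation looks like (sketch):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
 *				    cpu_distance_fn, alloc_fn, free_fn);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");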
3013 */ 3014 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 3015 size_t atom_size, 3016 pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 3017 pcpu_fc_alloc_fn_t alloc_fn, 3018 pcpu_fc_free_fn_t free_fn) 3019 { 3020 void *base = (void *)ULONG_MAX; 3021 void **areas = NULL; 3022 struct pcpu_alloc_info *ai; 3023 size_t size_sum, areas_size; 3024 unsigned long max_distance; 3025 int group, i, highest_group, rc = 0; 3026 3027 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 3028 cpu_distance_fn); 3029 if (IS_ERR(ai)) 3030 return PTR_ERR(ai); 3031 3032 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 3033 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 3034 3035 areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 3036 if (!areas) { 3037 rc = -ENOMEM; 3038 goto out_free; 3039 } 3040 3041 /* allocate, copy and determine base address & max_distance */ 3042 highest_group = 0; 3043 for (group = 0; group < ai->nr_groups; group++) { 3044 struct pcpu_group_info *gi = &ai->groups[group]; 3045 unsigned int cpu = NR_CPUS; 3046 void *ptr; 3047 3048 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 3049 cpu = gi->cpu_map[i]; 3050 BUG_ON(cpu == NR_CPUS); 3051 3052 /* allocate space for the whole group */ 3053 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 3054 if (!ptr) { 3055 rc = -ENOMEM; 3056 goto out_free_areas; 3057 } 3058 /* kmemleak tracks the percpu allocations separately */ 3059 kmemleak_free(ptr); 3060 areas[group] = ptr; 3061 3062 base = min(ptr, base); 3063 if (ptr > areas[highest_group]) 3064 highest_group = group; 3065 } 3066 max_distance = areas[highest_group] - base; 3067 max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 3068 3069 /* warn if maximum distance is further than 75% of vmalloc space */ 3070 if (max_distance > VMALLOC_TOTAL * 3 / 4) { 3071 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 3072 max_distance, VMALLOC_TOTAL); 3073 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 3074 /* and fail if we have fallback */ 3075 rc = -EINVAL; 3076 goto out_free_areas; 3077 #endif 3078 } 3079 3080 /* 3081 * Copy data and free unused parts. This should happen after all 3082 * allocations are complete; otherwise, we may end up with 3083 * overlapping groups. 
3084 */ 3085 for (group = 0; group < ai->nr_groups; group++) { 3086 struct pcpu_group_info *gi = &ai->groups[group]; 3087 void *ptr = areas[group]; 3088 3089 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 3090 if (gi->cpu_map[i] == NR_CPUS) { 3091 /* unused unit, free whole */ 3092 free_fn(ptr, ai->unit_size); 3093 continue; 3094 } 3095 /* copy and return the unused part */ 3096 memcpy(ptr, __per_cpu_load, ai->static_size); 3097 free_fn(ptr + size_sum, ai->unit_size - size_sum); 3098 } 3099 } 3100 3101 /* base address is now known, determine group base offsets */ 3102 for (group = 0; group < ai->nr_groups; group++) { 3103 ai->groups[group].base_offset = areas[group] - base; 3104 } 3105 3106 pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 3107 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 3108 ai->dyn_size, ai->unit_size); 3109 3110 pcpu_setup_first_chunk(ai, base); 3111 goto out_free; 3112 3113 out_free_areas: 3114 for (group = 0; group < ai->nr_groups; group++) 3115 if (areas[group]) 3116 free_fn(areas[group], 3117 ai->groups[group].nr_units * ai->unit_size); 3118 out_free: 3119 pcpu_free_alloc_info(ai); 3120 if (areas) 3121 memblock_free_early(__pa(areas), areas_size); 3122 return rc; 3123 } 3124 #endif /* BUILD_EMBED_FIRST_CHUNK */ 3125 3126 #ifdef BUILD_PAGE_FIRST_CHUNK 3127 /** 3128 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 3129 * @reserved_size: the size of reserved percpu area in bytes 3130 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 3131 * @free_fn: function to free percpu page, always called with PAGE_SIZE 3132 * @populate_pte_fn: function to populate pte 3133 * 3134 * This is a helper to ease setting up page-remapped first percpu 3135 * chunk and can be called where pcpu_setup_first_chunk() is expected. 3136 * 3137 * This is the basic allocator. Static percpu area is allocated 3138 * page-by-page into vmalloc area. 3139 * 3140 * RETURNS: 3141 * 0 on success, -errno on failure. 
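 *
 * A typical arch invocation looks like (sketch, the three callbacks
 * are arch-provided and their names here are hypothetical):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   pcpu_fc_alloc, pcpu_fc_free,
 *				   pcpu_populate_pte);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");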
3142 */ 3143 int __init pcpu_page_first_chunk(size_t reserved_size, 3144 pcpu_fc_alloc_fn_t alloc_fn, 3145 pcpu_fc_free_fn_t free_fn, 3146 pcpu_fc_populate_pte_fn_t populate_pte_fn) 3147 { 3148 static struct vm_struct vm; 3149 struct pcpu_alloc_info *ai; 3150 char psize_str[16]; 3151 int unit_pages; 3152 size_t pages_size; 3153 struct page **pages; 3154 int unit, i, j, rc = 0; 3155 int upa; 3156 int nr_g0_units; 3157 3158 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 3159 3160 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 3161 if (IS_ERR(ai)) 3162 return PTR_ERR(ai); 3163 BUG_ON(ai->nr_groups != 1); 3164 upa = ai->alloc_size/ai->unit_size; 3165 nr_g0_units = roundup(num_possible_cpus(), upa); 3166 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { 3167 pcpu_free_alloc_info(ai); 3168 return -EINVAL; 3169 } 3170 3171 unit_pages = ai->unit_size >> PAGE_SHIFT; 3172 3173 /* unaligned allocations can't be freed, round up to page size */ 3174 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 3175 sizeof(pages[0])); 3176 pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 3177 if (!pages) 3178 panic("%s: Failed to allocate %zu bytes\n", __func__, 3179 pages_size); 3180 3181 /* allocate pages */ 3182 j = 0; 3183 for (unit = 0; unit < num_possible_cpus(); unit++) { 3184 unsigned int cpu = ai->groups[0].cpu_map[unit]; 3185 for (i = 0; i < unit_pages; i++) { 3186 void *ptr; 3187 3188 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 3189 if (!ptr) { 3190 pr_warn("failed to allocate %s page for cpu%u\n", 3191 psize_str, cpu); 3192 goto enomem; 3193 } 3194 /* kmemleak tracks the percpu allocations separately */ 3195 kmemleak_free(ptr); 3196 pages[j++] = virt_to_page(ptr); 3197 } 3198 } 3199 3200 /* allocate vm area, map the pages and copy static data */ 3201 vm.flags = VM_ALLOC; 3202 vm.size = num_possible_cpus() * ai->unit_size; 3203 vm_area_register_early(&vm, PAGE_SIZE); 3204 3205 for (unit = 0; unit < num_possible_cpus(); unit++) { 3206 unsigned long unit_addr = 3207 (unsigned long)vm.addr + unit * ai->unit_size; 3208 3209 for (i = 0; i < unit_pages; i++) 3210 populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 3211 3212 /* pte already populated, the following shouldn't fail */ 3213 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 3214 unit_pages); 3215 if (rc < 0) 3216 panic("failed to map percpu area, err=%d\n", rc); 3217 3218 /* 3219 * FIXME: Archs with virtual cache should flush local 3220 * cache for the linear mapping here - something 3221 * equivalent to flush_cache_vmap() on the local cpu. 3222 * flush_cache_vmap() can't be used as most supporting 3223 * data structures are not set up yet. 3224 */ 3225 3226 /* copy static data */ 3227 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 3228 } 3229 3230 /* we're ready, commit */ 3231 pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", 3232 unit_pages, psize_str, ai->static_size, 3233 ai->reserved_size, ai->dyn_size); 3234 3235 pcpu_setup_first_chunk(ai, vm.addr); 3236 goto out_free_ar; 3237 3238 enomem: 3239 while (--j >= 0) 3240 free_fn(page_address(pages[j]), PAGE_SIZE); 3241 rc = -ENOMEM; 3242 out_free_ar: 3243 memblock_free_early(__pa(pages), pages_size); 3244 pcpu_free_alloc_info(ai); 3245 return rc; 3246 } 3247 #endif /* BUILD_PAGE_FIRST_CHUNK */ 3248 3249 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 3250 /* 3251 * Generic SMP percpu area setup. 3252 * 3253 * The embedding helper is used because its behavior closely resembles 3254 * the original non-dynamic generic percpu area setup. 
This is 3255 * important because many archs have addressing restrictions and might 3256 * fail if the percpu area is located far away from the previous 3257 * location. As an added bonus, in non-NUMA cases, embedding is 3258 * generally a good idea TLB-wise because percpu area can piggy back 3259 * on the physical linear memory mapping which uses large page 3260 * mappings on applicable archs. 3261 */ 3262 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 3263 EXPORT_SYMBOL(__per_cpu_offset); 3264 3265 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 3266 size_t align) 3267 { 3268 return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); 3269 } 3270 3271 static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 3272 { 3273 memblock_free_early(__pa(ptr), size); 3274 } 3275 3276 void __init setup_per_cpu_areas(void) 3277 { 3278 unsigned long delta; 3279 unsigned int cpu; 3280 int rc; 3281 3282 /* 3283 * Always reserve area for module percpu variables. That's 3284 * what the legacy allocator did. 3285 */ 3286 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 3287 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 3288 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 3289 if (rc < 0) 3290 panic("Failed to initialize percpu areas."); 3291 3292 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 3293 for_each_possible_cpu(cpu) 3294 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 3295 } 3296 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 3297 3298 #else /* CONFIG_SMP */ 3299 3300 /* 3301 * UP percpu area setup. 3302 * 3303 * UP always uses km-based percpu allocator with identity mapping. 3304 * Static percpu variables are indistinguishable from the usual static 3305 * variables and don't require any special preparation. 3306 */ 3307 void __init setup_per_cpu_areas(void) 3308 { 3309 const size_t unit_size = 3310 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 3311 PERCPU_DYNAMIC_RESERVE)); 3312 struct pcpu_alloc_info *ai; 3313 void *fc; 3314 3315 ai = pcpu_alloc_alloc_info(1, 1); 3316 fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 3317 if (!ai || !fc) 3318 panic("Failed to allocate memory for percpu areas."); 3319 /* kmemleak tracks the percpu allocations separately */ 3320 kmemleak_free(fc); 3321 3322 ai->dyn_size = unit_size; 3323 ai->unit_size = unit_size; 3324 ai->atom_size = unit_size; 3325 ai->alloc_size = unit_size; 3326 ai->groups[0].nr_units = 1; 3327 ai->groups[0].cpu_map[0] = 0; 3328 3329 pcpu_setup_first_chunk(ai, fc); 3330 pcpu_free_alloc_info(ai); 3331 } 3332 3333 #endif /* CONFIG_SMP */ 3334 3335 /* 3336 * pcpu_nr_pages - calculate total number of populated backing pages 3337 * 3338 * This reflects the number of pages populated to back chunks. Metadata is 3339 * excluded in the number exposed in meminfo as the number of backing pages 3340 * scales with the number of cpus and can quickly outweigh the memory used for 3341 * metadata. It also keeps this calculation nice and simple. 3342 * 3343 * RETURNS: 3344 * Total number of populated backing pages in use by the allocator. 3345 */ 3346 unsigned long pcpu_nr_pages(void) 3347 { 3348 return pcpu_nr_populated * pcpu_nr_units; 3349 } 3350 3351 /* 3352 * Percpu allocator is initialized early during boot when neither slab or 3353 * workqueue is available. Plug async management until everything is up 3354 * and running. 
3355 */ 3356 static int __init percpu_enable_async(void) 3357 { 3358 pcpu_async_enabled = true; 3359 return 0; 3360 } 3361 subsys_initcall(percpu_enable_async); 3362