1 /* 2 * mm/percpu.c - percpu memory allocator 3 * 4 * Copyright (C) 2009 SUSE Linux Products GmbH 5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org> 6 * 7 * Copyright (C) 2017 Facebook Inc. 8 * Copyright (C) 2017 Dennis Zhou <dennisszhou@gmail.com> 9 * 10 * This file is released under the GPLv2 license. 11 * 12 * The percpu allocator handles both static and dynamic areas. Percpu 13 * areas are allocated in chunks which are divided into units. There is 14 * a 1-to-1 mapping for units to possible cpus. These units are grouped 15 * based on NUMA properties of the machine. 16 * 17 * c0 c1 c2 18 * ------------------- ------------------- ------------ 19 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u 20 * ------------------- ...... ------------------- .... ------------ 21 * 22 * Allocation is done by offsets into a unit's address space. Ie., an 23 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0, 24 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear 25 * and even sparse. Access is handled by configuring percpu base 26 * registers according to the cpu to unit mappings and offsetting the 27 * base address using pcpu_unit_size. 28 * 29 * There is special consideration for the first chunk which must handle 30 * the static percpu variables in the kernel image as allocation services 31 * are not online yet. In short, the first chunk is structured like so: 32 * 33 * <Static | [Reserved] | Dynamic> 34 * 35 * The static data is copied from the original section managed by the 36 * linker. The reserved section, if non-zero, primarily manages static 37 * percpu variables from kernel modules. Finally, the dynamic section 38 * takes care of normal allocations. 39 * 40 * The allocator organizes chunks into lists according to free size and 41 * tries to allocate from the fullest chunk first. Each chunk is managed 42 * by a bitmap with metadata blocks. The allocation map is updated on 43 * every allocation and free to reflect the current state while the boundary 44 * map is only updated on allocation. Each metadata block contains 45 * information to help mitigate the need to iterate over large portions 46 * of the bitmap. The reverse mapping from page to chunk is stored in 47 * the page's index. Lastly, units are lazily backed and grow in unison. 48 * 49 * There is a unique conversion that goes on here between bytes and bits. 50 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk 51 * tracks the number of pages it is responsible for in nr_pages. Helper 52 * functions are used to convert from between the bytes, bits, and blocks. 53 * All hints are managed in bits unless explicitly stated. 54 * 55 * To use this allocator, arch code should do the following: 56 * 57 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 58 * regular address to percpu pointer and back if they need to be 59 * different from the default 60 * 61 * - use pcpu_setup_first_chunk() during percpu area initialization to 62 * setup the first chunk containing the kernel static percpu area 63 */ 64 65 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 66 67 #include <linux/bitmap.h> 68 #include <linux/memblock.h> 69 #include <linux/err.h> 70 #include <linux/lcm.h> 71 #include <linux/list.h> 72 #include <linux/log2.h> 73 #include <linux/mm.h> 74 #include <linux/module.h> 75 #include <linux/mutex.h> 76 #include <linux/percpu.h> 77 #include <linux/pfn.h> 78 #include <linux/slab.h> 79 #include <linux/spinlock.h> 80 #include <linux/vmalloc.h> 81 #include <linux/workqueue.h> 82 #include <linux/kmemleak.h> 83 #include <linux/sched.h> 84 85 #include <asm/cacheflush.h> 86 #include <asm/sections.h> 87 #include <asm/tlbflush.h> 88 #include <asm/io.h> 89 90 #define CREATE_TRACE_POINTS 91 #include <trace/events/percpu.h> 92 93 #include "percpu-internal.h" 94 95 /* the slots are sorted by free bytes left, 1-31 bytes share the same slot */ 96 #define PCPU_SLOT_BASE_SHIFT 5 97 /* chunks in slots below this are subject to being sidelined on failed alloc */ 98 #define PCPU_SLOT_FAIL_THRESHOLD 3 99 100 #define PCPU_EMPTY_POP_PAGES_LOW 2 101 #define PCPU_EMPTY_POP_PAGES_HIGH 4 102 103 #ifdef CONFIG_SMP 104 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 105 #ifndef __addr_to_pcpu_ptr 106 #define __addr_to_pcpu_ptr(addr) \ 107 (void __percpu *)((unsigned long)(addr) - \ 108 (unsigned long)pcpu_base_addr + \ 109 (unsigned long)__per_cpu_start) 110 #endif 111 #ifndef __pcpu_ptr_to_addr 112 #define __pcpu_ptr_to_addr(ptr) \ 113 (void __force *)((unsigned long)(ptr) + \ 114 (unsigned long)pcpu_base_addr - \ 115 (unsigned long)__per_cpu_start) 116 #endif 117 #else /* CONFIG_SMP */ 118 /* on UP, it's always identity mapped */ 119 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) 120 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) 121 #endif /* CONFIG_SMP */ 122 123 static int pcpu_unit_pages __ro_after_init; 124 static int pcpu_unit_size __ro_after_init; 125 static int pcpu_nr_units __ro_after_init; 126 static int pcpu_atom_size __ro_after_init; 127 int pcpu_nr_slots __ro_after_init; 128 static size_t pcpu_chunk_struct_size __ro_after_init; 129 130 /* cpus with the lowest and highest unit addresses */ 131 static unsigned int pcpu_low_unit_cpu __ro_after_init; 132 static unsigned int pcpu_high_unit_cpu __ro_after_init; 133 134 /* the address of the first chunk which starts with the kernel static area */ 135 void *pcpu_base_addr __ro_after_init; 136 EXPORT_SYMBOL_GPL(pcpu_base_addr); 137 138 static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */ 139 const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */ 140 141 /* group information, used for vm allocation */ 142 static int pcpu_nr_groups __ro_after_init; 143 static const unsigned long *pcpu_group_offsets __ro_after_init; 144 static const size_t *pcpu_group_sizes __ro_after_init; 145 146 /* 147 * The first chunk which always exists. Note that unlike other 148 * chunks, this one can be allocated and mapped in several different 149 * ways and thus often doesn't live in the vmalloc area. 150 */ 151 struct pcpu_chunk *pcpu_first_chunk __ro_after_init; 152 153 /* 154 * Optional reserved chunk. This chunk reserves part of the first 155 * chunk and serves it for reserved allocations. When the reserved 156 * region doesn't exist, the following variable is NULL. 157 */ 158 struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init; 159 160 DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ 161 static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ 162 163 struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */ 164 165 /* chunks which need their map areas extended, protected by pcpu_lock */ 166 static LIST_HEAD(pcpu_map_extend_chunks); 167 168 /* 169 * The number of empty populated pages, protected by pcpu_lock. The 170 * reserved chunk doesn't contribute to the count. 171 */ 172 int pcpu_nr_empty_pop_pages; 173 174 /* 175 * The number of populated pages in use by the allocator, protected by 176 * pcpu_lock. This number is kept per a unit per chunk (i.e. when a page gets 177 * allocated/deallocated, it is allocated/deallocated in all units of a chunk 178 * and increments/decrements this count by 1). 179 */ 180 static unsigned long pcpu_nr_populated; 181 182 /* 183 * Balance work is used to populate or destroy chunks asynchronously. We 184 * try to keep the number of populated free pages between 185 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one 186 * empty chunk. 187 */ 188 static void pcpu_balance_workfn(struct work_struct *work); 189 static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn); 190 static bool pcpu_async_enabled __read_mostly; 191 static bool pcpu_atomic_alloc_failed; 192 193 static void pcpu_schedule_balance_work(void) 194 { 195 if (pcpu_async_enabled) 196 schedule_work(&pcpu_balance_work); 197 } 198 199 /** 200 * pcpu_addr_in_chunk - check if the address is served from this chunk 201 * @chunk: chunk of interest 202 * @addr: percpu address 203 * 204 * RETURNS: 205 * True if the address is served from this chunk. 206 */ 207 static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr) 208 { 209 void *start_addr, *end_addr; 210 211 if (!chunk) 212 return false; 213 214 start_addr = chunk->base_addr + chunk->start_offset; 215 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE - 216 chunk->end_offset; 217 218 return addr >= start_addr && addr < end_addr; 219 } 220 221 static int __pcpu_size_to_slot(int size) 222 { 223 int highbit = fls(size); /* size is in bytes */ 224 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 225 } 226 227 static int pcpu_size_to_slot(int size) 228 { 229 if (size == pcpu_unit_size) 230 return pcpu_nr_slots - 1; 231 return __pcpu_size_to_slot(size); 232 } 233 234 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 235 { 236 const struct pcpu_block_md *chunk_md = &chunk->chunk_md; 237 238 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE || 239 chunk_md->contig_hint == 0) 240 return 0; 241 242 return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE); 243 } 244 245 /* set the pointer to a chunk in a page struct */ 246 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 247 { 248 page->index = (unsigned long)pcpu; 249 } 250 251 /* obtain pointer to a chunk from a page struct */ 252 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 253 { 254 return (struct pcpu_chunk *)page->index; 255 } 256 257 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 258 { 259 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 260 } 261 262 static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx) 263 { 264 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT); 265 } 266 267 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 268 unsigned int cpu, int page_idx) 269 { 270 return (unsigned long)chunk->base_addr + 271 pcpu_unit_page_offset(cpu, page_idx); 272 } 273 274 static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end) 275 { 276 *rs = find_next_zero_bit(bitmap, end, *rs); 277 *re = find_next_bit(bitmap, end, *rs + 1); 278 } 279 280 static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end) 281 { 282 *rs = find_next_bit(bitmap, end, *rs); 283 *re = find_next_zero_bit(bitmap, end, *rs + 1); 284 } 285 286 /* 287 * Bitmap region iterators. Iterates over the bitmap between 288 * [@start, @end) in @chunk. @rs and @re should be integer variables 289 * and will be set to start and end index of the current free region. 290 */ 291 #define pcpu_for_each_unpop_region(bitmap, rs, re, start, end) \ 292 for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \ 293 (rs) < (re); \ 294 (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end))) 295 296 #define pcpu_for_each_pop_region(bitmap, rs, re, start, end) \ 297 for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end)); \ 298 (rs) < (re); \ 299 (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end))) 300 301 /* 302 * The following are helper functions to help access bitmaps and convert 303 * between bitmap offsets to address offsets. 304 */ 305 static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index) 306 { 307 return chunk->alloc_map + 308 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG); 309 } 310 311 static unsigned long pcpu_off_to_block_index(int off) 312 { 313 return off / PCPU_BITMAP_BLOCK_BITS; 314 } 315 316 static unsigned long pcpu_off_to_block_off(int off) 317 { 318 return off & (PCPU_BITMAP_BLOCK_BITS - 1); 319 } 320 321 static unsigned long pcpu_block_off_to_off(int index, int off) 322 { 323 return index * PCPU_BITMAP_BLOCK_BITS + off; 324 } 325 326 /* 327 * pcpu_next_hint - determine which hint to use 328 * @block: block of interest 329 * @alloc_bits: size of allocation 330 * 331 * This determines if we should scan based on the scan_hint or first_free. 332 * In general, we want to scan from first_free to fulfill allocations by 333 * first fit. However, if we know a scan_hint at position scan_hint_start 334 * cannot fulfill an allocation, we can begin scanning from there knowing 335 * the contig_hint will be our fallback. 336 */ 337 static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits) 338 { 339 /* 340 * The three conditions below determine if we can skip past the 341 * scan_hint. First, does the scan hint exist. Second, is the 342 * contig_hint after the scan_hint (possibly not true iff 343 * contig_hint == scan_hint). Third, is the allocation request 344 * larger than the scan_hint. 345 */ 346 if (block->scan_hint && 347 block->contig_hint_start > block->scan_hint_start && 348 alloc_bits > block->scan_hint) 349 return block->scan_hint_start + block->scan_hint; 350 351 return block->first_free; 352 } 353 354 /** 355 * pcpu_next_md_free_region - finds the next hint free area 356 * @chunk: chunk of interest 357 * @bit_off: chunk offset 358 * @bits: size of free area 359 * 360 * Helper function for pcpu_for_each_md_free_region. It checks 361 * block->contig_hint and performs aggregation across blocks to find the 362 * next hint. It modifies bit_off and bits in-place to be consumed in the 363 * loop. 364 */ 365 static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off, 366 int *bits) 367 { 368 int i = pcpu_off_to_block_index(*bit_off); 369 int block_off = pcpu_off_to_block_off(*bit_off); 370 struct pcpu_block_md *block; 371 372 *bits = 0; 373 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 374 block++, i++) { 375 /* handles contig area across blocks */ 376 if (*bits) { 377 *bits += block->left_free; 378 if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 379 continue; 380 return; 381 } 382 383 /* 384 * This checks three things. First is there a contig_hint to 385 * check. Second, have we checked this hint before by 386 * comparing the block_off. Third, is this the same as the 387 * right contig hint. In the last case, it spills over into 388 * the next block and should be handled by the contig area 389 * across blocks code. 390 */ 391 *bits = block->contig_hint; 392 if (*bits && block->contig_hint_start >= block_off && 393 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) { 394 *bit_off = pcpu_block_off_to_off(i, 395 block->contig_hint_start); 396 return; 397 } 398 /* reset to satisfy the second predicate above */ 399 block_off = 0; 400 401 *bits = block->right_free; 402 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free; 403 } 404 } 405 406 /** 407 * pcpu_next_fit_region - finds fit areas for a given allocation request 408 * @chunk: chunk of interest 409 * @alloc_bits: size of allocation 410 * @align: alignment of area (max PAGE_SIZE) 411 * @bit_off: chunk offset 412 * @bits: size of free area 413 * 414 * Finds the next free region that is viable for use with a given size and 415 * alignment. This only returns if there is a valid area to be used for this 416 * allocation. block->first_free is returned if the allocation request fits 417 * within the block to see if the request can be fulfilled prior to the contig 418 * hint. 419 */ 420 static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits, 421 int align, int *bit_off, int *bits) 422 { 423 int i = pcpu_off_to_block_index(*bit_off); 424 int block_off = pcpu_off_to_block_off(*bit_off); 425 struct pcpu_block_md *block; 426 427 *bits = 0; 428 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk); 429 block++, i++) { 430 /* handles contig area across blocks */ 431 if (*bits) { 432 *bits += block->left_free; 433 if (*bits >= alloc_bits) 434 return; 435 if (block->left_free == PCPU_BITMAP_BLOCK_BITS) 436 continue; 437 } 438 439 /* check block->contig_hint */ 440 *bits = ALIGN(block->contig_hint_start, align) - 441 block->contig_hint_start; 442 /* 443 * This uses the block offset to determine if this has been 444 * checked in the prior iteration. 445 */ 446 if (block->contig_hint && 447 block->contig_hint_start >= block_off && 448 block->contig_hint >= *bits + alloc_bits) { 449 int start = pcpu_next_hint(block, alloc_bits); 450 451 *bits += alloc_bits + block->contig_hint_start - 452 start; 453 *bit_off = pcpu_block_off_to_off(i, start); 454 return; 455 } 456 /* reset to satisfy the second predicate above */ 457 block_off = 0; 458 459 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free, 460 align); 461 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off; 462 *bit_off = pcpu_block_off_to_off(i, *bit_off); 463 if (*bits >= alloc_bits) 464 return; 465 } 466 467 /* no valid offsets were found - fail condition */ 468 *bit_off = pcpu_chunk_map_bits(chunk); 469 } 470 471 /* 472 * Metadata free area iterators. These perform aggregation of free areas 473 * based on the metadata blocks and return the offset @bit_off and size in 474 * bits of the free area @bits. pcpu_for_each_fit_region only returns when 475 * a fit is found for the allocation request. 476 */ 477 #define pcpu_for_each_md_free_region(chunk, bit_off, bits) \ 478 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \ 479 (bit_off) < pcpu_chunk_map_bits((chunk)); \ 480 (bit_off) += (bits) + 1, \ 481 pcpu_next_md_free_region((chunk), &(bit_off), &(bits))) 482 483 #define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \ 484 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 485 &(bits)); \ 486 (bit_off) < pcpu_chunk_map_bits((chunk)); \ 487 (bit_off) += (bits), \ 488 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \ 489 &(bits))) 490 491 /** 492 * pcpu_mem_zalloc - allocate memory 493 * @size: bytes to allocate 494 * @gfp: allocation flags 495 * 496 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 497 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used. 498 * This is to facilitate passing through whitelisted flags. The 499 * returned memory is always zeroed. 500 * 501 * RETURNS: 502 * Pointer to the allocated area on success, NULL on failure. 503 */ 504 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp) 505 { 506 if (WARN_ON_ONCE(!slab_is_available())) 507 return NULL; 508 509 if (size <= PAGE_SIZE) 510 return kzalloc(size, gfp); 511 else 512 return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL); 513 } 514 515 /** 516 * pcpu_mem_free - free memory 517 * @ptr: memory to free 518 * 519 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 520 */ 521 static void pcpu_mem_free(void *ptr) 522 { 523 kvfree(ptr); 524 } 525 526 static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot, 527 bool move_front) 528 { 529 if (chunk != pcpu_reserved_chunk) { 530 if (move_front) 531 list_move(&chunk->list, &pcpu_slot[slot]); 532 else 533 list_move_tail(&chunk->list, &pcpu_slot[slot]); 534 } 535 } 536 537 static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot) 538 { 539 __pcpu_chunk_move(chunk, slot, true); 540 } 541 542 /** 543 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 544 * @chunk: chunk of interest 545 * @oslot: the previous slot it was on 546 * 547 * This function is called after an allocation or free changed @chunk. 548 * New slot according to the changed state is determined and @chunk is 549 * moved to the slot. Note that the reserved chunk is never put on 550 * chunk slots. 551 * 552 * CONTEXT: 553 * pcpu_lock. 554 */ 555 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 556 { 557 int nslot = pcpu_chunk_slot(chunk); 558 559 if (oslot != nslot) 560 __pcpu_chunk_move(chunk, nslot, oslot < nslot); 561 } 562 563 /* 564 * pcpu_update_empty_pages - update empty page counters 565 * @chunk: chunk of interest 566 * @nr: nr of empty pages 567 * 568 * This is used to keep track of the empty pages now based on the premise 569 * a md_block covers a page. The hint update functions recognize if a block 570 * is made full or broken to calculate deltas for keeping track of free pages. 571 */ 572 static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr) 573 { 574 chunk->nr_empty_pop_pages += nr; 575 if (chunk != pcpu_reserved_chunk) 576 pcpu_nr_empty_pop_pages += nr; 577 } 578 579 /* 580 * pcpu_region_overlap - determines if two regions overlap 581 * @a: start of first region, inclusive 582 * @b: end of first region, exclusive 583 * @x: start of second region, inclusive 584 * @y: end of second region, exclusive 585 * 586 * This is used to determine if the hint region [a, b) overlaps with the 587 * allocated region [x, y). 588 */ 589 static inline bool pcpu_region_overlap(int a, int b, int x, int y) 590 { 591 return (a < y) && (x < b); 592 } 593 594 /** 595 * pcpu_block_update - updates a block given a free area 596 * @block: block of interest 597 * @start: start offset in block 598 * @end: end offset in block 599 * 600 * Updates a block given a known free area. The region [start, end) is 601 * expected to be the entirety of the free area within a block. Chooses 602 * the best starting offset if the contig hints are equal. 603 */ 604 static void pcpu_block_update(struct pcpu_block_md *block, int start, int end) 605 { 606 int contig = end - start; 607 608 block->first_free = min(block->first_free, start); 609 if (start == 0) 610 block->left_free = contig; 611 612 if (end == block->nr_bits) 613 block->right_free = contig; 614 615 if (contig > block->contig_hint) { 616 /* promote the old contig_hint to be the new scan_hint */ 617 if (start > block->contig_hint_start) { 618 if (block->contig_hint > block->scan_hint) { 619 block->scan_hint_start = 620 block->contig_hint_start; 621 block->scan_hint = block->contig_hint; 622 } else if (start < block->scan_hint_start) { 623 /* 624 * The old contig_hint == scan_hint. But, the 625 * new contig is larger so hold the invariant 626 * scan_hint_start < contig_hint_start. 627 */ 628 block->scan_hint = 0; 629 } 630 } else { 631 block->scan_hint = 0; 632 } 633 block->contig_hint_start = start; 634 block->contig_hint = contig; 635 } else if (contig == block->contig_hint) { 636 if (block->contig_hint_start && 637 (!start || 638 __ffs(start) > __ffs(block->contig_hint_start))) { 639 /* start has a better alignment so use it */ 640 block->contig_hint_start = start; 641 if (start < block->scan_hint_start && 642 block->contig_hint > block->scan_hint) 643 block->scan_hint = 0; 644 } else if (start > block->scan_hint_start || 645 block->contig_hint > block->scan_hint) { 646 /* 647 * Knowing contig == contig_hint, update the scan_hint 648 * if it is farther than or larger than the current 649 * scan_hint. 650 */ 651 block->scan_hint_start = start; 652 block->scan_hint = contig; 653 } 654 } else { 655 /* 656 * The region is smaller than the contig_hint. So only update 657 * the scan_hint if it is larger than or equal and farther than 658 * the current scan_hint. 659 */ 660 if ((start < block->contig_hint_start && 661 (contig > block->scan_hint || 662 (contig == block->scan_hint && 663 start > block->scan_hint_start)))) { 664 block->scan_hint_start = start; 665 block->scan_hint = contig; 666 } 667 } 668 } 669 670 /* 671 * pcpu_block_update_scan - update a block given a free area from a scan 672 * @chunk: chunk of interest 673 * @bit_off: chunk offset 674 * @bits: size of free area 675 * 676 * Finding the final allocation spot first goes through pcpu_find_block_fit() 677 * to find a block that can hold the allocation and then pcpu_alloc_area() 678 * where a scan is used. When allocations require specific alignments, 679 * we can inadvertently create holes which will not be seen in the alloc 680 * or free paths. 681 * 682 * This takes a given free area hole and updates a block as it may change the 683 * scan_hint. We need to scan backwards to ensure we don't miss free bits 684 * from alignment. 685 */ 686 static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off, 687 int bits) 688 { 689 int s_off = pcpu_off_to_block_off(bit_off); 690 int e_off = s_off + bits; 691 int s_index, l_bit; 692 struct pcpu_block_md *block; 693 694 if (e_off > PCPU_BITMAP_BLOCK_BITS) 695 return; 696 697 s_index = pcpu_off_to_block_index(bit_off); 698 block = chunk->md_blocks + s_index; 699 700 /* scan backwards in case of alignment skipping free bits */ 701 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off); 702 s_off = (s_off == l_bit) ? 0 : l_bit + 1; 703 704 pcpu_block_update(block, s_off, e_off); 705 } 706 707 /** 708 * pcpu_chunk_refresh_hint - updates metadata about a chunk 709 * @chunk: chunk of interest 710 * @full_scan: if we should scan from the beginning 711 * 712 * Iterates over the metadata blocks to find the largest contig area. 713 * A full scan can be avoided on the allocation path as this is triggered 714 * if we broke the contig_hint. In doing so, the scan_hint will be before 715 * the contig_hint or after if the scan_hint == contig_hint. This cannot 716 * be prevented on freeing as we want to find the largest area possibly 717 * spanning blocks. 718 */ 719 static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan) 720 { 721 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 722 int bit_off, bits; 723 724 /* promote scan_hint to contig_hint */ 725 if (!full_scan && chunk_md->scan_hint) { 726 bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint; 727 chunk_md->contig_hint_start = chunk_md->scan_hint_start; 728 chunk_md->contig_hint = chunk_md->scan_hint; 729 chunk_md->scan_hint = 0; 730 } else { 731 bit_off = chunk_md->first_free; 732 chunk_md->contig_hint = 0; 733 } 734 735 bits = 0; 736 pcpu_for_each_md_free_region(chunk, bit_off, bits) { 737 pcpu_block_update(chunk_md, bit_off, bit_off + bits); 738 } 739 } 740 741 /** 742 * pcpu_block_refresh_hint 743 * @chunk: chunk of interest 744 * @index: index of the metadata block 745 * 746 * Scans over the block beginning at first_free and updates the block 747 * metadata accordingly. 748 */ 749 static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index) 750 { 751 struct pcpu_block_md *block = chunk->md_blocks + index; 752 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); 753 int rs, re, start; /* region start, region end */ 754 755 /* promote scan_hint to contig_hint */ 756 if (block->scan_hint) { 757 start = block->scan_hint_start + block->scan_hint; 758 block->contig_hint_start = block->scan_hint_start; 759 block->contig_hint = block->scan_hint; 760 block->scan_hint = 0; 761 } else { 762 start = block->first_free; 763 block->contig_hint = 0; 764 } 765 766 block->right_free = 0; 767 768 /* iterate over free areas and update the contig hints */ 769 pcpu_for_each_unpop_region(alloc_map, rs, re, start, 770 PCPU_BITMAP_BLOCK_BITS) { 771 pcpu_block_update(block, rs, re); 772 } 773 } 774 775 /** 776 * pcpu_block_update_hint_alloc - update hint on allocation path 777 * @chunk: chunk of interest 778 * @bit_off: chunk offset 779 * @bits: size of request 780 * 781 * Updates metadata for the allocation path. The metadata only has to be 782 * refreshed by a full scan iff the chunk's contig hint is broken. Block level 783 * scans are required if the block's contig hint is broken. 784 */ 785 static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off, 786 int bits) 787 { 788 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 789 int nr_empty_pages = 0; 790 struct pcpu_block_md *s_block, *e_block, *block; 791 int s_index, e_index; /* block indexes of the freed allocation */ 792 int s_off, e_off; /* block offsets of the freed allocation */ 793 794 /* 795 * Calculate per block offsets. 796 * The calculation uses an inclusive range, but the resulting offsets 797 * are [start, end). e_index always points to the last block in the 798 * range. 799 */ 800 s_index = pcpu_off_to_block_index(bit_off); 801 e_index = pcpu_off_to_block_index(bit_off + bits - 1); 802 s_off = pcpu_off_to_block_off(bit_off); 803 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 804 805 s_block = chunk->md_blocks + s_index; 806 e_block = chunk->md_blocks + e_index; 807 808 /* 809 * Update s_block. 810 * block->first_free must be updated if the allocation takes its place. 811 * If the allocation breaks the contig_hint, a scan is required to 812 * restore this hint. 813 */ 814 if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) 815 nr_empty_pages++; 816 817 if (s_off == s_block->first_free) 818 s_block->first_free = find_next_zero_bit( 819 pcpu_index_alloc_map(chunk, s_index), 820 PCPU_BITMAP_BLOCK_BITS, 821 s_off + bits); 822 823 if (pcpu_region_overlap(s_block->scan_hint_start, 824 s_block->scan_hint_start + s_block->scan_hint, 825 s_off, 826 s_off + bits)) 827 s_block->scan_hint = 0; 828 829 if (pcpu_region_overlap(s_block->contig_hint_start, 830 s_block->contig_hint_start + 831 s_block->contig_hint, 832 s_off, 833 s_off + bits)) { 834 /* block contig hint is broken - scan to fix it */ 835 if (!s_off) 836 s_block->left_free = 0; 837 pcpu_block_refresh_hint(chunk, s_index); 838 } else { 839 /* update left and right contig manually */ 840 s_block->left_free = min(s_block->left_free, s_off); 841 if (s_index == e_index) 842 s_block->right_free = min_t(int, s_block->right_free, 843 PCPU_BITMAP_BLOCK_BITS - e_off); 844 else 845 s_block->right_free = 0; 846 } 847 848 /* 849 * Update e_block. 850 */ 851 if (s_index != e_index) { 852 if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS) 853 nr_empty_pages++; 854 855 /* 856 * When the allocation is across blocks, the end is along 857 * the left part of the e_block. 858 */ 859 e_block->first_free = find_next_zero_bit( 860 pcpu_index_alloc_map(chunk, e_index), 861 PCPU_BITMAP_BLOCK_BITS, e_off); 862 863 if (e_off == PCPU_BITMAP_BLOCK_BITS) { 864 /* reset the block */ 865 e_block++; 866 } else { 867 if (e_off > e_block->scan_hint_start) 868 e_block->scan_hint = 0; 869 870 e_block->left_free = 0; 871 if (e_off > e_block->contig_hint_start) { 872 /* contig hint is broken - scan to fix it */ 873 pcpu_block_refresh_hint(chunk, e_index); 874 } else { 875 e_block->right_free = 876 min_t(int, e_block->right_free, 877 PCPU_BITMAP_BLOCK_BITS - e_off); 878 } 879 } 880 881 /* update in-between md_blocks */ 882 nr_empty_pages += (e_index - s_index - 1); 883 for (block = s_block + 1; block < e_block; block++) { 884 block->scan_hint = 0; 885 block->contig_hint = 0; 886 block->left_free = 0; 887 block->right_free = 0; 888 } 889 } 890 891 if (nr_empty_pages) 892 pcpu_update_empty_pages(chunk, -nr_empty_pages); 893 894 if (pcpu_region_overlap(chunk_md->scan_hint_start, 895 chunk_md->scan_hint_start + 896 chunk_md->scan_hint, 897 bit_off, 898 bit_off + bits)) 899 chunk_md->scan_hint = 0; 900 901 /* 902 * The only time a full chunk scan is required is if the chunk 903 * contig hint is broken. Otherwise, it means a smaller space 904 * was used and therefore the chunk contig hint is still correct. 905 */ 906 if (pcpu_region_overlap(chunk_md->contig_hint_start, 907 chunk_md->contig_hint_start + 908 chunk_md->contig_hint, 909 bit_off, 910 bit_off + bits)) 911 pcpu_chunk_refresh_hint(chunk, false); 912 } 913 914 /** 915 * pcpu_block_update_hint_free - updates the block hints on the free path 916 * @chunk: chunk of interest 917 * @bit_off: chunk offset 918 * @bits: size of request 919 * 920 * Updates metadata for the allocation path. This avoids a blind block 921 * refresh by making use of the block contig hints. If this fails, it scans 922 * forward and backward to determine the extent of the free area. This is 923 * capped at the boundary of blocks. 924 * 925 * A chunk update is triggered if a page becomes free, a block becomes free, 926 * or the free spans across blocks. This tradeoff is to minimize iterating 927 * over the block metadata to update chunk_md->contig_hint. 928 * chunk_md->contig_hint may be off by up to a page, but it will never be more 929 * than the available space. If the contig hint is contained in one block, it 930 * will be accurate. 931 */ 932 static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off, 933 int bits) 934 { 935 int nr_empty_pages = 0; 936 struct pcpu_block_md *s_block, *e_block, *block; 937 int s_index, e_index; /* block indexes of the freed allocation */ 938 int s_off, e_off; /* block offsets of the freed allocation */ 939 int start, end; /* start and end of the whole free area */ 940 941 /* 942 * Calculate per block offsets. 943 * The calculation uses an inclusive range, but the resulting offsets 944 * are [start, end). e_index always points to the last block in the 945 * range. 946 */ 947 s_index = pcpu_off_to_block_index(bit_off); 948 e_index = pcpu_off_to_block_index(bit_off + bits - 1); 949 s_off = pcpu_off_to_block_off(bit_off); 950 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1; 951 952 s_block = chunk->md_blocks + s_index; 953 e_block = chunk->md_blocks + e_index; 954 955 /* 956 * Check if the freed area aligns with the block->contig_hint. 957 * If it does, then the scan to find the beginning/end of the 958 * larger free area can be avoided. 959 * 960 * start and end refer to beginning and end of the free area 961 * within each their respective blocks. This is not necessarily 962 * the entire free area as it may span blocks past the beginning 963 * or end of the block. 964 */ 965 start = s_off; 966 if (s_off == s_block->contig_hint + s_block->contig_hint_start) { 967 start = s_block->contig_hint_start; 968 } else { 969 /* 970 * Scan backwards to find the extent of the free area. 971 * find_last_bit returns the starting bit, so if the start bit 972 * is returned, that means there was no last bit and the 973 * remainder of the chunk is free. 974 */ 975 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), 976 start); 977 start = (start == l_bit) ? 0 : l_bit + 1; 978 } 979 980 end = e_off; 981 if (e_off == e_block->contig_hint_start) 982 end = e_block->contig_hint_start + e_block->contig_hint; 983 else 984 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index), 985 PCPU_BITMAP_BLOCK_BITS, end); 986 987 /* update s_block */ 988 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS; 989 if (!start && e_off == PCPU_BITMAP_BLOCK_BITS) 990 nr_empty_pages++; 991 pcpu_block_update(s_block, start, e_off); 992 993 /* freeing in the same block */ 994 if (s_index != e_index) { 995 /* update e_block */ 996 if (end == PCPU_BITMAP_BLOCK_BITS) 997 nr_empty_pages++; 998 pcpu_block_update(e_block, 0, end); 999 1000 /* reset md_blocks in the middle */ 1001 nr_empty_pages += (e_index - s_index - 1); 1002 for (block = s_block + 1; block < e_block; block++) { 1003 block->first_free = 0; 1004 block->scan_hint = 0; 1005 block->contig_hint_start = 0; 1006 block->contig_hint = PCPU_BITMAP_BLOCK_BITS; 1007 block->left_free = PCPU_BITMAP_BLOCK_BITS; 1008 block->right_free = PCPU_BITMAP_BLOCK_BITS; 1009 } 1010 } 1011 1012 if (nr_empty_pages) 1013 pcpu_update_empty_pages(chunk, nr_empty_pages); 1014 1015 /* 1016 * Refresh chunk metadata when the free makes a block free or spans 1017 * across blocks. The contig_hint may be off by up to a page, but if 1018 * the contig_hint is contained in a block, it will be accurate with 1019 * the else condition below. 1020 */ 1021 if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index) 1022 pcpu_chunk_refresh_hint(chunk, true); 1023 else 1024 pcpu_block_update(&chunk->chunk_md, 1025 pcpu_block_off_to_off(s_index, start), 1026 end); 1027 } 1028 1029 /** 1030 * pcpu_is_populated - determines if the region is populated 1031 * @chunk: chunk of interest 1032 * @bit_off: chunk offset 1033 * @bits: size of area 1034 * @next_off: return value for the next offset to start searching 1035 * 1036 * For atomic allocations, check if the backing pages are populated. 1037 * 1038 * RETURNS: 1039 * Bool if the backing pages are populated. 1040 * next_index is to skip over unpopulated blocks in pcpu_find_block_fit. 1041 */ 1042 static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, 1043 int *next_off) 1044 { 1045 int page_start, page_end, rs, re; 1046 1047 page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); 1048 page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); 1049 1050 rs = page_start; 1051 pcpu_next_unpop(chunk->populated, &rs, &re, page_end); 1052 if (rs >= page_end) 1053 return true; 1054 1055 *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE; 1056 return false; 1057 } 1058 1059 /** 1060 * pcpu_find_block_fit - finds the block index to start searching 1061 * @chunk: chunk of interest 1062 * @alloc_bits: size of request in allocation units 1063 * @align: alignment of area (max PAGE_SIZE bytes) 1064 * @pop_only: use populated regions only 1065 * 1066 * Given a chunk and an allocation spec, find the offset to begin searching 1067 * for a free region. This iterates over the bitmap metadata blocks to 1068 * find an offset that will be guaranteed to fit the requirements. It is 1069 * not quite first fit as if the allocation does not fit in the contig hint 1070 * of a block or chunk, it is skipped. This errs on the side of caution 1071 * to prevent excess iteration. Poor alignment can cause the allocator to 1072 * skip over blocks and chunks that have valid free areas. 1073 * 1074 * RETURNS: 1075 * The offset in the bitmap to begin searching. 1076 * -1 if no offset is found. 1077 */ 1078 static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits, 1079 size_t align, bool pop_only) 1080 { 1081 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1082 int bit_off, bits, next_off; 1083 1084 /* 1085 * Check to see if the allocation can fit in the chunk's contig hint. 1086 * This is an optimization to prevent scanning by assuming if it 1087 * cannot fit in the global hint, there is memory pressure and creating 1088 * a new chunk would happen soon. 1089 */ 1090 bit_off = ALIGN(chunk_md->contig_hint_start, align) - 1091 chunk_md->contig_hint_start; 1092 if (bit_off + alloc_bits > chunk_md->contig_hint) 1093 return -1; 1094 1095 bit_off = pcpu_next_hint(chunk_md, alloc_bits); 1096 bits = 0; 1097 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) { 1098 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits, 1099 &next_off)) 1100 break; 1101 1102 bit_off = next_off; 1103 bits = 0; 1104 } 1105 1106 if (bit_off == pcpu_chunk_map_bits(chunk)) 1107 return -1; 1108 1109 return bit_off; 1110 } 1111 1112 /* 1113 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off() 1114 * @map: the address to base the search on 1115 * @size: the bitmap size in bits 1116 * @start: the bitnumber to start searching at 1117 * @nr: the number of zeroed bits we're looking for 1118 * @align_mask: alignment mask for zero area 1119 * @largest_off: offset of the largest area skipped 1120 * @largest_bits: size of the largest area skipped 1121 * 1122 * The @align_mask should be one less than a power of 2. 1123 * 1124 * This is a modified version of bitmap_find_next_zero_area_off() to remember 1125 * the largest area that was skipped. This is imperfect, but in general is 1126 * good enough. The largest remembered region is the largest failed region 1127 * seen. This does not include anything we possibly skipped due to alignment. 1128 * pcpu_block_update_scan() does scan backwards to try and recover what was 1129 * lost to alignment. While this can cause scanning to miss earlier possible 1130 * free areas, smaller allocations will eventually fill those holes. 1131 */ 1132 static unsigned long pcpu_find_zero_area(unsigned long *map, 1133 unsigned long size, 1134 unsigned long start, 1135 unsigned long nr, 1136 unsigned long align_mask, 1137 unsigned long *largest_off, 1138 unsigned long *largest_bits) 1139 { 1140 unsigned long index, end, i, area_off, area_bits; 1141 again: 1142 index = find_next_zero_bit(map, size, start); 1143 1144 /* Align allocation */ 1145 index = __ALIGN_MASK(index, align_mask); 1146 area_off = index; 1147 1148 end = index + nr; 1149 if (end > size) 1150 return end; 1151 i = find_next_bit(map, end, index); 1152 if (i < end) { 1153 area_bits = i - area_off; 1154 /* remember largest unused area with best alignment */ 1155 if (area_bits > *largest_bits || 1156 (area_bits == *largest_bits && *largest_off && 1157 (!area_off || __ffs(area_off) > __ffs(*largest_off)))) { 1158 *largest_off = area_off; 1159 *largest_bits = area_bits; 1160 } 1161 1162 start = i + 1; 1163 goto again; 1164 } 1165 return index; 1166 } 1167 1168 /** 1169 * pcpu_alloc_area - allocates an area from a pcpu_chunk 1170 * @chunk: chunk of interest 1171 * @alloc_bits: size of request in allocation units 1172 * @align: alignment of area (max PAGE_SIZE) 1173 * @start: bit_off to start searching 1174 * 1175 * This function takes in a @start offset to begin searching to fit an 1176 * allocation of @alloc_bits with alignment @align. It needs to scan 1177 * the allocation map because if it fits within the block's contig hint, 1178 * @start will be block->first_free. This is an attempt to fill the 1179 * allocation prior to breaking the contig hint. The allocation and 1180 * boundary maps are updated accordingly if it confirms a valid 1181 * free area. 1182 * 1183 * RETURNS: 1184 * Allocated addr offset in @chunk on success. 1185 * -1 if no matching area is found. 1186 */ 1187 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits, 1188 size_t align, int start) 1189 { 1190 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1191 size_t align_mask = (align) ? (align - 1) : 0; 1192 unsigned long area_off = 0, area_bits = 0; 1193 int bit_off, end, oslot; 1194 1195 lockdep_assert_held(&pcpu_lock); 1196 1197 oslot = pcpu_chunk_slot(chunk); 1198 1199 /* 1200 * Search to find a fit. 1201 */ 1202 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS, 1203 pcpu_chunk_map_bits(chunk)); 1204 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits, 1205 align_mask, &area_off, &area_bits); 1206 if (bit_off >= end) 1207 return -1; 1208 1209 if (area_bits) 1210 pcpu_block_update_scan(chunk, area_off, area_bits); 1211 1212 /* update alloc map */ 1213 bitmap_set(chunk->alloc_map, bit_off, alloc_bits); 1214 1215 /* update boundary map */ 1216 set_bit(bit_off, chunk->bound_map); 1217 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1); 1218 set_bit(bit_off + alloc_bits, chunk->bound_map); 1219 1220 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE; 1221 1222 /* update first free bit */ 1223 if (bit_off == chunk_md->first_free) 1224 chunk_md->first_free = find_next_zero_bit( 1225 chunk->alloc_map, 1226 pcpu_chunk_map_bits(chunk), 1227 bit_off + alloc_bits); 1228 1229 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits); 1230 1231 pcpu_chunk_relocate(chunk, oslot); 1232 1233 return bit_off * PCPU_MIN_ALLOC_SIZE; 1234 } 1235 1236 /** 1237 * pcpu_free_area - frees the corresponding offset 1238 * @chunk: chunk of interest 1239 * @off: addr offset into chunk 1240 * 1241 * This function determines the size of an allocation to free using 1242 * the boundary bitmap and clears the allocation map. 1243 */ 1244 static void pcpu_free_area(struct pcpu_chunk *chunk, int off) 1245 { 1246 struct pcpu_block_md *chunk_md = &chunk->chunk_md; 1247 int bit_off, bits, end, oslot; 1248 1249 lockdep_assert_held(&pcpu_lock); 1250 pcpu_stats_area_dealloc(chunk); 1251 1252 oslot = pcpu_chunk_slot(chunk); 1253 1254 bit_off = off / PCPU_MIN_ALLOC_SIZE; 1255 1256 /* find end index */ 1257 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), 1258 bit_off + 1); 1259 bits = end - bit_off; 1260 bitmap_clear(chunk->alloc_map, bit_off, bits); 1261 1262 /* update metadata */ 1263 chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE; 1264 1265 /* update first free bit */ 1266 chunk_md->first_free = min(chunk_md->first_free, bit_off); 1267 1268 pcpu_block_update_hint_free(chunk, bit_off, bits); 1269 1270 pcpu_chunk_relocate(chunk, oslot); 1271 } 1272 1273 static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits) 1274 { 1275 block->scan_hint = 0; 1276 block->contig_hint = nr_bits; 1277 block->left_free = nr_bits; 1278 block->right_free = nr_bits; 1279 block->first_free = 0; 1280 block->nr_bits = nr_bits; 1281 } 1282 1283 static void pcpu_init_md_blocks(struct pcpu_chunk *chunk) 1284 { 1285 struct pcpu_block_md *md_block; 1286 1287 /* init the chunk's block */ 1288 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk)); 1289 1290 for (md_block = chunk->md_blocks; 1291 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk); 1292 md_block++) 1293 pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS); 1294 } 1295 1296 /** 1297 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk 1298 * @tmp_addr: the start of the region served 1299 * @map_size: size of the region served 1300 * 1301 * This is responsible for creating the chunks that serve the first chunk. The 1302 * base_addr is page aligned down of @tmp_addr while the region end is page 1303 * aligned up. Offsets are kept track of to determine the region served. All 1304 * this is done to appease the bitmap allocator in avoiding partial blocks. 1305 * 1306 * RETURNS: 1307 * Chunk serving the region at @tmp_addr of @map_size. 1308 */ 1309 static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, 1310 int map_size) 1311 { 1312 struct pcpu_chunk *chunk; 1313 unsigned long aligned_addr, lcm_align; 1314 int start_offset, offset_bits, region_size, region_bits; 1315 size_t alloc_size; 1316 1317 /* region calculations */ 1318 aligned_addr = tmp_addr & PAGE_MASK; 1319 1320 start_offset = tmp_addr - aligned_addr; 1321 1322 /* 1323 * Align the end of the region with the LCM of PAGE_SIZE and 1324 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of 1325 * the other. 1326 */ 1327 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE); 1328 region_size = ALIGN(start_offset + map_size, lcm_align); 1329 1330 /* allocate chunk */ 1331 alloc_size = sizeof(struct pcpu_chunk) + 1332 BITS_TO_LONGS(region_size >> PAGE_SHIFT); 1333 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1334 if (!chunk) 1335 panic("%s: Failed to allocate %zu bytes\n", __func__, 1336 alloc_size); 1337 1338 INIT_LIST_HEAD(&chunk->list); 1339 1340 chunk->base_addr = (void *)aligned_addr; 1341 chunk->start_offset = start_offset; 1342 chunk->end_offset = region_size - chunk->start_offset - map_size; 1343 1344 chunk->nr_pages = region_size >> PAGE_SHIFT; 1345 region_bits = pcpu_chunk_map_bits(chunk); 1346 1347 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); 1348 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1349 if (!chunk->alloc_map) 1350 panic("%s: Failed to allocate %zu bytes\n", __func__, 1351 alloc_size); 1352 1353 alloc_size = 1354 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); 1355 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1356 if (!chunk->bound_map) 1357 panic("%s: Failed to allocate %zu bytes\n", __func__, 1358 alloc_size); 1359 1360 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); 1361 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 1362 if (!chunk->md_blocks) 1363 panic("%s: Failed to allocate %zu bytes\n", __func__, 1364 alloc_size); 1365 1366 pcpu_init_md_blocks(chunk); 1367 1368 /* manage populated page bitmap */ 1369 chunk->immutable = true; 1370 bitmap_fill(chunk->populated, chunk->nr_pages); 1371 chunk->nr_populated = chunk->nr_pages; 1372 chunk->nr_empty_pop_pages = chunk->nr_pages; 1373 1374 chunk->free_bytes = map_size; 1375 1376 if (chunk->start_offset) { 1377 /* hide the beginning of the bitmap */ 1378 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE; 1379 bitmap_set(chunk->alloc_map, 0, offset_bits); 1380 set_bit(0, chunk->bound_map); 1381 set_bit(offset_bits, chunk->bound_map); 1382 1383 chunk->chunk_md.first_free = offset_bits; 1384 1385 pcpu_block_update_hint_alloc(chunk, 0, offset_bits); 1386 } 1387 1388 if (chunk->end_offset) { 1389 /* hide the end of the bitmap */ 1390 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE; 1391 bitmap_set(chunk->alloc_map, 1392 pcpu_chunk_map_bits(chunk) - offset_bits, 1393 offset_bits); 1394 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE, 1395 chunk->bound_map); 1396 set_bit(region_bits, chunk->bound_map); 1397 1398 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk) 1399 - offset_bits, offset_bits); 1400 } 1401 1402 return chunk; 1403 } 1404 1405 static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp) 1406 { 1407 struct pcpu_chunk *chunk; 1408 int region_bits; 1409 1410 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp); 1411 if (!chunk) 1412 return NULL; 1413 1414 INIT_LIST_HEAD(&chunk->list); 1415 chunk->nr_pages = pcpu_unit_pages; 1416 region_bits = pcpu_chunk_map_bits(chunk); 1417 1418 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) * 1419 sizeof(chunk->alloc_map[0]), gfp); 1420 if (!chunk->alloc_map) 1421 goto alloc_map_fail; 1422 1423 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) * 1424 sizeof(chunk->bound_map[0]), gfp); 1425 if (!chunk->bound_map) 1426 goto bound_map_fail; 1427 1428 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) * 1429 sizeof(chunk->md_blocks[0]), gfp); 1430 if (!chunk->md_blocks) 1431 goto md_blocks_fail; 1432 1433 pcpu_init_md_blocks(chunk); 1434 1435 /* init metadata */ 1436 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE; 1437 1438 return chunk; 1439 1440 md_blocks_fail: 1441 pcpu_mem_free(chunk->bound_map); 1442 bound_map_fail: 1443 pcpu_mem_free(chunk->alloc_map); 1444 alloc_map_fail: 1445 pcpu_mem_free(chunk); 1446 1447 return NULL; 1448 } 1449 1450 static void pcpu_free_chunk(struct pcpu_chunk *chunk) 1451 { 1452 if (!chunk) 1453 return; 1454 pcpu_mem_free(chunk->md_blocks); 1455 pcpu_mem_free(chunk->bound_map); 1456 pcpu_mem_free(chunk->alloc_map); 1457 pcpu_mem_free(chunk); 1458 } 1459 1460 /** 1461 * pcpu_chunk_populated - post-population bookkeeping 1462 * @chunk: pcpu_chunk which got populated 1463 * @page_start: the start page 1464 * @page_end: the end page 1465 * 1466 * Pages in [@page_start,@page_end) have been populated to @chunk. Update 1467 * the bookkeeping information accordingly. Must be called after each 1468 * successful population. 1469 * 1470 * If this is @for_alloc, do not increment pcpu_nr_empty_pop_pages because it 1471 * is to serve an allocation in that area. 1472 */ 1473 static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start, 1474 int page_end) 1475 { 1476 int nr = page_end - page_start; 1477 1478 lockdep_assert_held(&pcpu_lock); 1479 1480 bitmap_set(chunk->populated, page_start, nr); 1481 chunk->nr_populated += nr; 1482 pcpu_nr_populated += nr; 1483 1484 pcpu_update_empty_pages(chunk, nr); 1485 } 1486 1487 /** 1488 * pcpu_chunk_depopulated - post-depopulation bookkeeping 1489 * @chunk: pcpu_chunk which got depopulated 1490 * @page_start: the start page 1491 * @page_end: the end page 1492 * 1493 * Pages in [@page_start,@page_end) have been depopulated from @chunk. 1494 * Update the bookkeeping information accordingly. Must be called after 1495 * each successful depopulation. 1496 */ 1497 static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk, 1498 int page_start, int page_end) 1499 { 1500 int nr = page_end - page_start; 1501 1502 lockdep_assert_held(&pcpu_lock); 1503 1504 bitmap_clear(chunk->populated, page_start, nr); 1505 chunk->nr_populated -= nr; 1506 pcpu_nr_populated -= nr; 1507 1508 pcpu_update_empty_pages(chunk, -nr); 1509 } 1510 1511 /* 1512 * Chunk management implementation. 1513 * 1514 * To allow different implementations, chunk alloc/free and 1515 * [de]population are implemented in a separate file which is pulled 1516 * into this file and compiled together. The following functions 1517 * should be implemented. 1518 * 1519 * pcpu_populate_chunk - populate the specified range of a chunk 1520 * pcpu_depopulate_chunk - depopulate the specified range of a chunk 1521 * pcpu_create_chunk - create a new chunk 1522 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 1523 * pcpu_addr_to_page - translate address to physical address 1524 * pcpu_verify_alloc_info - check alloc_info is acceptable during init 1525 */ 1526 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, 1527 int page_start, int page_end, gfp_t gfp); 1528 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, 1529 int page_start, int page_end); 1530 static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp); 1531 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 1532 static struct page *pcpu_addr_to_page(void *addr); 1533 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 1534 1535 #ifdef CONFIG_NEED_PER_CPU_KM 1536 #include "percpu-km.c" 1537 #else 1538 #include "percpu-vm.c" 1539 #endif 1540 1541 /** 1542 * pcpu_chunk_addr_search - determine chunk containing specified address 1543 * @addr: address for which the chunk needs to be determined. 1544 * 1545 * This is an internal function that handles all but static allocations. 1546 * Static percpu address values should never be passed into the allocator. 1547 * 1548 * RETURNS: 1549 * The address of the found chunk. 1550 */ 1551 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 1552 { 1553 /* is it in the dynamic region (first chunk)? */ 1554 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr)) 1555 return pcpu_first_chunk; 1556 1557 /* is it in the reserved region? */ 1558 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr)) 1559 return pcpu_reserved_chunk; 1560 1561 /* 1562 * The address is relative to unit0 which might be unused and 1563 * thus unmapped. Offset the address to the unit space of the 1564 * current processor before looking it up in the vmalloc 1565 * space. Note that any possible cpu id can be used here, so 1566 * there's no need to worry about preemption or cpu hotplug. 1567 */ 1568 addr += pcpu_unit_offsets[raw_smp_processor_id()]; 1569 return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 1570 } 1571 1572 /** 1573 * pcpu_alloc - the percpu allocator 1574 * @size: size of area to allocate in bytes 1575 * @align: alignment of area (max PAGE_SIZE) 1576 * @reserved: allocate from the reserved chunk if available 1577 * @gfp: allocation flags 1578 * 1579 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't 1580 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN 1581 * then no warning will be triggered on invalid or failed allocation 1582 * requests. 1583 * 1584 * RETURNS: 1585 * Percpu pointer to the allocated area on success, NULL on failure. 1586 */ 1587 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, 1588 gfp_t gfp) 1589 { 1590 /* whitelisted flags that can be passed to the backing allocators */ 1591 gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); 1592 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; 1593 bool do_warn = !(gfp & __GFP_NOWARN); 1594 static int warn_limit = 10; 1595 struct pcpu_chunk *chunk, *next; 1596 const char *err; 1597 int slot, off, cpu, ret; 1598 unsigned long flags; 1599 void __percpu *ptr; 1600 size_t bits, bit_align; 1601 1602 /* 1603 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE, 1604 * therefore alignment must be a minimum of that many bytes. 1605 * An allocation may have internal fragmentation from rounding up 1606 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes. 1607 */ 1608 if (unlikely(align < PCPU_MIN_ALLOC_SIZE)) 1609 align = PCPU_MIN_ALLOC_SIZE; 1610 1611 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE); 1612 bits = size >> PCPU_MIN_ALLOC_SHIFT; 1613 bit_align = align >> PCPU_MIN_ALLOC_SHIFT; 1614 1615 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || 1616 !is_power_of_2(align))) { 1617 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", 1618 size, align); 1619 return NULL; 1620 } 1621 1622 if (!is_atomic) { 1623 /* 1624 * pcpu_balance_workfn() allocates memory under this mutex, 1625 * and it may wait for memory reclaim. Allow current task 1626 * to become OOM victim, in case of memory pressure. 1627 */ 1628 if (gfp & __GFP_NOFAIL) 1629 mutex_lock(&pcpu_alloc_mutex); 1630 else if (mutex_lock_killable(&pcpu_alloc_mutex)) 1631 return NULL; 1632 } 1633 1634 spin_lock_irqsave(&pcpu_lock, flags); 1635 1636 /* serve reserved allocations from the reserved chunk if available */ 1637 if (reserved && pcpu_reserved_chunk) { 1638 chunk = pcpu_reserved_chunk; 1639 1640 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic); 1641 if (off < 0) { 1642 err = "alloc from reserved chunk failed"; 1643 goto fail_unlock; 1644 } 1645 1646 off = pcpu_alloc_area(chunk, bits, bit_align, off); 1647 if (off >= 0) 1648 goto area_found; 1649 1650 err = "alloc from reserved chunk failed"; 1651 goto fail_unlock; 1652 } 1653 1654 restart: 1655 /* search through normal chunks */ 1656 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 1657 list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) { 1658 off = pcpu_find_block_fit(chunk, bits, bit_align, 1659 is_atomic); 1660 if (off < 0) { 1661 if (slot < PCPU_SLOT_FAIL_THRESHOLD) 1662 pcpu_chunk_move(chunk, 0); 1663 continue; 1664 } 1665 1666 off = pcpu_alloc_area(chunk, bits, bit_align, off); 1667 if (off >= 0) 1668 goto area_found; 1669 1670 } 1671 } 1672 1673 spin_unlock_irqrestore(&pcpu_lock, flags); 1674 1675 /* 1676 * No space left. Create a new chunk. We don't want multiple 1677 * tasks to create chunks simultaneously. Serialize and create iff 1678 * there's still no empty chunk after grabbing the mutex. 1679 */ 1680 if (is_atomic) { 1681 err = "atomic alloc failed, no space left"; 1682 goto fail; 1683 } 1684 1685 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { 1686 chunk = pcpu_create_chunk(pcpu_gfp); 1687 if (!chunk) { 1688 err = "failed to allocate new chunk"; 1689 goto fail; 1690 } 1691 1692 spin_lock_irqsave(&pcpu_lock, flags); 1693 pcpu_chunk_relocate(chunk, -1); 1694 } else { 1695 spin_lock_irqsave(&pcpu_lock, flags); 1696 } 1697 1698 goto restart; 1699 1700 area_found: 1701 pcpu_stats_area_alloc(chunk, size); 1702 spin_unlock_irqrestore(&pcpu_lock, flags); 1703 1704 /* populate if not all pages are already there */ 1705 if (!is_atomic) { 1706 int page_start, page_end, rs, re; 1707 1708 page_start = PFN_DOWN(off); 1709 page_end = PFN_UP(off + size); 1710 1711 pcpu_for_each_unpop_region(chunk->populated, rs, re, 1712 page_start, page_end) { 1713 WARN_ON(chunk->immutable); 1714 1715 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); 1716 1717 spin_lock_irqsave(&pcpu_lock, flags); 1718 if (ret) { 1719 pcpu_free_area(chunk, off); 1720 err = "failed to populate"; 1721 goto fail_unlock; 1722 } 1723 pcpu_chunk_populated(chunk, rs, re); 1724 spin_unlock_irqrestore(&pcpu_lock, flags); 1725 } 1726 1727 mutex_unlock(&pcpu_alloc_mutex); 1728 } 1729 1730 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW) 1731 pcpu_schedule_balance_work(); 1732 1733 /* clear the areas and return address relative to base address */ 1734 for_each_possible_cpu(cpu) 1735 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); 1736 1737 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 1738 kmemleak_alloc_percpu(ptr, size, gfp); 1739 1740 trace_percpu_alloc_percpu(reserved, is_atomic, size, align, 1741 chunk->base_addr, off, ptr); 1742 1743 return ptr; 1744 1745 fail_unlock: 1746 spin_unlock_irqrestore(&pcpu_lock, flags); 1747 fail: 1748 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); 1749 1750 if (!is_atomic && do_warn && warn_limit) { 1751 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", 1752 size, align, is_atomic, err); 1753 dump_stack(); 1754 if (!--warn_limit) 1755 pr_info("limit reached, disable warning\n"); 1756 } 1757 if (is_atomic) { 1758 /* see the flag handling in pcpu_blance_workfn() */ 1759 pcpu_atomic_alloc_failed = true; 1760 pcpu_schedule_balance_work(); 1761 } else { 1762 mutex_unlock(&pcpu_alloc_mutex); 1763 } 1764 return NULL; 1765 } 1766 1767 /** 1768 * __alloc_percpu_gfp - allocate dynamic percpu area 1769 * @size: size of area to allocate in bytes 1770 * @align: alignment of area (max PAGE_SIZE) 1771 * @gfp: allocation flags 1772 * 1773 * Allocate zero-filled percpu area of @size bytes aligned at @align. If 1774 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can 1775 * be called from any context but is a lot more likely to fail. If @gfp 1776 * has __GFP_NOWARN then no warning will be triggered on invalid or failed 1777 * allocation requests. 1778 * 1779 * RETURNS: 1780 * Percpu pointer to the allocated area on success, NULL on failure. 1781 */ 1782 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) 1783 { 1784 return pcpu_alloc(size, align, false, gfp); 1785 } 1786 EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); 1787 1788 /** 1789 * __alloc_percpu - allocate dynamic percpu area 1790 * @size: size of area to allocate in bytes 1791 * @align: alignment of area (max PAGE_SIZE) 1792 * 1793 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). 1794 */ 1795 void __percpu *__alloc_percpu(size_t size, size_t align) 1796 { 1797 return pcpu_alloc(size, align, false, GFP_KERNEL); 1798 } 1799 EXPORT_SYMBOL_GPL(__alloc_percpu); 1800 1801 /** 1802 * __alloc_reserved_percpu - allocate reserved percpu area 1803 * @size: size of area to allocate in bytes 1804 * @align: alignment of area (max PAGE_SIZE) 1805 * 1806 * Allocate zero-filled percpu area of @size bytes aligned at @align 1807 * from reserved percpu area if arch has set it up; otherwise, 1808 * allocation is served from the same dynamic area. Might sleep. 1809 * Might trigger writeouts. 1810 * 1811 * CONTEXT: 1812 * Does GFP_KERNEL allocation. 1813 * 1814 * RETURNS: 1815 * Percpu pointer to the allocated area on success, NULL on failure. 1816 */ 1817 void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 1818 { 1819 return pcpu_alloc(size, align, true, GFP_KERNEL); 1820 } 1821 1822 /** 1823 * pcpu_balance_workfn - manage the amount of free chunks and populated pages 1824 * @work: unused 1825 * 1826 * Reclaim all fully free chunks except for the first one. This is also 1827 * responsible for maintaining the pool of empty populated pages. However, 1828 * it is possible that this is called when physical memory is scarce causing 1829 * OOM killer to be triggered. We should avoid doing so until an actual 1830 * allocation causes the failure as it is possible that requests can be 1831 * serviced from already backed regions. 1832 */ 1833 static void pcpu_balance_workfn(struct work_struct *work) 1834 { 1835 /* gfp flags passed to underlying allocators */ 1836 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 1837 LIST_HEAD(to_free); 1838 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1]; 1839 struct pcpu_chunk *chunk, *next; 1840 int slot, nr_to_pop, ret; 1841 1842 /* 1843 * There's no reason to keep around multiple unused chunks and VM 1844 * areas can be scarce. Destroy all free chunks except for one. 1845 */ 1846 mutex_lock(&pcpu_alloc_mutex); 1847 spin_lock_irq(&pcpu_lock); 1848 1849 list_for_each_entry_safe(chunk, next, free_head, list) { 1850 WARN_ON(chunk->immutable); 1851 1852 /* spare the first one */ 1853 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) 1854 continue; 1855 1856 list_move(&chunk->list, &to_free); 1857 } 1858 1859 spin_unlock_irq(&pcpu_lock); 1860 1861 list_for_each_entry_safe(chunk, next, &to_free, list) { 1862 int rs, re; 1863 1864 pcpu_for_each_pop_region(chunk->populated, rs, re, 0, 1865 chunk->nr_pages) { 1866 pcpu_depopulate_chunk(chunk, rs, re); 1867 spin_lock_irq(&pcpu_lock); 1868 pcpu_chunk_depopulated(chunk, rs, re); 1869 spin_unlock_irq(&pcpu_lock); 1870 } 1871 pcpu_destroy_chunk(chunk); 1872 cond_resched(); 1873 } 1874 1875 /* 1876 * Ensure there are certain number of free populated pages for 1877 * atomic allocs. Fill up from the most packed so that atomic 1878 * allocs don't increase fragmentation. If atomic allocation 1879 * failed previously, always populate the maximum amount. This 1880 * should prevent atomic allocs larger than PAGE_SIZE from keeping 1881 * failing indefinitely; however, large atomic allocs are not 1882 * something we support properly and can be highly unreliable and 1883 * inefficient. 1884 */ 1885 retry_pop: 1886 if (pcpu_atomic_alloc_failed) { 1887 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH; 1888 /* best effort anyway, don't worry about synchronization */ 1889 pcpu_atomic_alloc_failed = false; 1890 } else { 1891 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH - 1892 pcpu_nr_empty_pop_pages, 1893 0, PCPU_EMPTY_POP_PAGES_HIGH); 1894 } 1895 1896 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { 1897 int nr_unpop = 0, rs, re; 1898 1899 if (!nr_to_pop) 1900 break; 1901 1902 spin_lock_irq(&pcpu_lock); 1903 list_for_each_entry(chunk, &pcpu_slot[slot], list) { 1904 nr_unpop = chunk->nr_pages - chunk->nr_populated; 1905 if (nr_unpop) 1906 break; 1907 } 1908 spin_unlock_irq(&pcpu_lock); 1909 1910 if (!nr_unpop) 1911 continue; 1912 1913 /* @chunk can't go away while pcpu_alloc_mutex is held */ 1914 pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, 1915 chunk->nr_pages) { 1916 int nr = min(re - rs, nr_to_pop); 1917 1918 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); 1919 if (!ret) { 1920 nr_to_pop -= nr; 1921 spin_lock_irq(&pcpu_lock); 1922 pcpu_chunk_populated(chunk, rs, rs + nr); 1923 spin_unlock_irq(&pcpu_lock); 1924 } else { 1925 nr_to_pop = 0; 1926 } 1927 1928 if (!nr_to_pop) 1929 break; 1930 } 1931 } 1932 1933 if (nr_to_pop) { 1934 /* ran out of chunks to populate, create a new one and retry */ 1935 chunk = pcpu_create_chunk(gfp); 1936 if (chunk) { 1937 spin_lock_irq(&pcpu_lock); 1938 pcpu_chunk_relocate(chunk, -1); 1939 spin_unlock_irq(&pcpu_lock); 1940 goto retry_pop; 1941 } 1942 } 1943 1944 mutex_unlock(&pcpu_alloc_mutex); 1945 } 1946 1947 /** 1948 * free_percpu - free percpu area 1949 * @ptr: pointer to area to free 1950 * 1951 * Free percpu area @ptr. 1952 * 1953 * CONTEXT: 1954 * Can be called from atomic context. 1955 */ 1956 void free_percpu(void __percpu *ptr) 1957 { 1958 void *addr; 1959 struct pcpu_chunk *chunk; 1960 unsigned long flags; 1961 int off; 1962 bool need_balance = false; 1963 1964 if (!ptr) 1965 return; 1966 1967 kmemleak_free_percpu(ptr); 1968 1969 addr = __pcpu_ptr_to_addr(ptr); 1970 1971 spin_lock_irqsave(&pcpu_lock, flags); 1972 1973 chunk = pcpu_chunk_addr_search(addr); 1974 off = addr - chunk->base_addr; 1975 1976 pcpu_free_area(chunk, off); 1977 1978 /* if there are more than one fully free chunks, wake up grim reaper */ 1979 if (chunk->free_bytes == pcpu_unit_size) { 1980 struct pcpu_chunk *pos; 1981 1982 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 1983 if (pos != chunk) { 1984 need_balance = true; 1985 break; 1986 } 1987 } 1988 1989 trace_percpu_free_percpu(chunk->base_addr, off, ptr); 1990 1991 spin_unlock_irqrestore(&pcpu_lock, flags); 1992 1993 if (need_balance) 1994 pcpu_schedule_balance_work(); 1995 } 1996 EXPORT_SYMBOL_GPL(free_percpu); 1997 1998 bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) 1999 { 2000 #ifdef CONFIG_SMP 2001 const size_t static_size = __per_cpu_end - __per_cpu_start; 2002 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2003 unsigned int cpu; 2004 2005 for_each_possible_cpu(cpu) { 2006 void *start = per_cpu_ptr(base, cpu); 2007 void *va = (void *)addr; 2008 2009 if (va >= start && va < start + static_size) { 2010 if (can_addr) { 2011 *can_addr = (unsigned long) (va - start); 2012 *can_addr += (unsigned long) 2013 per_cpu_ptr(base, get_boot_cpu_id()); 2014 } 2015 return true; 2016 } 2017 } 2018 #endif 2019 /* on UP, can't distinguish from other static vars, always false */ 2020 return false; 2021 } 2022 2023 /** 2024 * is_kernel_percpu_address - test whether address is from static percpu area 2025 * @addr: address to test 2026 * 2027 * Test whether @addr belongs to in-kernel static percpu area. Module 2028 * static percpu areas are not considered. For those, use 2029 * is_module_percpu_address(). 2030 * 2031 * RETURNS: 2032 * %true if @addr is from in-kernel static percpu area, %false otherwise. 2033 */ 2034 bool is_kernel_percpu_address(unsigned long addr) 2035 { 2036 return __is_kernel_percpu_address(addr, NULL); 2037 } 2038 2039 /** 2040 * per_cpu_ptr_to_phys - convert translated percpu address to physical address 2041 * @addr: the address to be converted to physical address 2042 * 2043 * Given @addr which is dereferenceable address obtained via one of 2044 * percpu access macros, this function translates it into its physical 2045 * address. The caller is responsible for ensuring @addr stays valid 2046 * until this function finishes. 2047 * 2048 * percpu allocator has special setup for the first chunk, which currently 2049 * supports either embedding in linear address space or vmalloc mapping, 2050 * and, from the second one, the backing allocator (currently either vm or 2051 * km) provides translation. 2052 * 2053 * The addr can be translated simply without checking if it falls into the 2054 * first chunk. But the current code reflects better how percpu allocator 2055 * actually works, and the verification can discover both bugs in percpu 2056 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 2057 * code. 2058 * 2059 * RETURNS: 2060 * The physical address for @addr. 2061 */ 2062 phys_addr_t per_cpu_ptr_to_phys(void *addr) 2063 { 2064 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 2065 bool in_first_chunk = false; 2066 unsigned long first_low, first_high; 2067 unsigned int cpu; 2068 2069 /* 2070 * The following test on unit_low/high isn't strictly 2071 * necessary but will speed up lookups of addresses which 2072 * aren't in the first chunk. 2073 * 2074 * The address check is against full chunk sizes. pcpu_base_addr 2075 * points to the beginning of the first chunk including the 2076 * static region. Assumes good intent as the first chunk may 2077 * not be full (ie. < pcpu_unit_pages in size). 2078 */ 2079 first_low = (unsigned long)pcpu_base_addr + 2080 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0); 2081 first_high = (unsigned long)pcpu_base_addr + 2082 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages); 2083 if ((unsigned long)addr >= first_low && 2084 (unsigned long)addr < first_high) { 2085 for_each_possible_cpu(cpu) { 2086 void *start = per_cpu_ptr(base, cpu); 2087 2088 if (addr >= start && addr < start + pcpu_unit_size) { 2089 in_first_chunk = true; 2090 break; 2091 } 2092 } 2093 } 2094 2095 if (in_first_chunk) { 2096 if (!is_vmalloc_addr(addr)) 2097 return __pa(addr); 2098 else 2099 return page_to_phys(vmalloc_to_page(addr)) + 2100 offset_in_page(addr); 2101 } else 2102 return page_to_phys(pcpu_addr_to_page(addr)) + 2103 offset_in_page(addr); 2104 } 2105 2106 /** 2107 * pcpu_alloc_alloc_info - allocate percpu allocation info 2108 * @nr_groups: the number of groups 2109 * @nr_units: the number of units 2110 * 2111 * Allocate ai which is large enough for @nr_groups groups containing 2112 * @nr_units units. The returned ai's groups[0].cpu_map points to the 2113 * cpu_map array which is long enough for @nr_units and filled with 2114 * NR_CPUS. It's the caller's responsibility to initialize cpu_map 2115 * pointer of other groups. 2116 * 2117 * RETURNS: 2118 * Pointer to the allocated pcpu_alloc_info on success, NULL on 2119 * failure. 2120 */ 2121 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 2122 int nr_units) 2123 { 2124 struct pcpu_alloc_info *ai; 2125 size_t base_size, ai_size; 2126 void *ptr; 2127 int unit; 2128 2129 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 2130 __alignof__(ai->groups[0].cpu_map[0])); 2131 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 2132 2133 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE); 2134 if (!ptr) 2135 return NULL; 2136 ai = ptr; 2137 ptr += base_size; 2138 2139 ai->groups[0].cpu_map = ptr; 2140 2141 for (unit = 0; unit < nr_units; unit++) 2142 ai->groups[0].cpu_map[unit] = NR_CPUS; 2143 2144 ai->nr_groups = nr_groups; 2145 ai->__ai_size = PFN_ALIGN(ai_size); 2146 2147 return ai; 2148 } 2149 2150 /** 2151 * pcpu_free_alloc_info - free percpu allocation info 2152 * @ai: pcpu_alloc_info to free 2153 * 2154 * Free @ai which was allocated by pcpu_alloc_alloc_info(). 2155 */ 2156 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 2157 { 2158 memblock_free_early(__pa(ai), ai->__ai_size); 2159 } 2160 2161 /** 2162 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 2163 * @lvl: loglevel 2164 * @ai: allocation info to dump 2165 * 2166 * Print out information about @ai using loglevel @lvl. 2167 */ 2168 static void pcpu_dump_alloc_info(const char *lvl, 2169 const struct pcpu_alloc_info *ai) 2170 { 2171 int group_width = 1, cpu_width = 1, width; 2172 char empty_str[] = "--------"; 2173 int alloc = 0, alloc_end = 0; 2174 int group, v; 2175 int upa, apl; /* units per alloc, allocs per line */ 2176 2177 v = ai->nr_groups; 2178 while (v /= 10) 2179 group_width++; 2180 2181 v = num_possible_cpus(); 2182 while (v /= 10) 2183 cpu_width++; 2184 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 2185 2186 upa = ai->alloc_size / ai->unit_size; 2187 width = upa * (cpu_width + 1) + group_width + 3; 2188 apl = rounddown_pow_of_two(max(60 / width, 1)); 2189 2190 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 2191 lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 2192 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 2193 2194 for (group = 0; group < ai->nr_groups; group++) { 2195 const struct pcpu_group_info *gi = &ai->groups[group]; 2196 int unit = 0, unit_end = 0; 2197 2198 BUG_ON(gi->nr_units % upa); 2199 for (alloc_end += gi->nr_units / upa; 2200 alloc < alloc_end; alloc++) { 2201 if (!(alloc % apl)) { 2202 pr_cont("\n"); 2203 printk("%spcpu-alloc: ", lvl); 2204 } 2205 pr_cont("[%0*d] ", group_width, group); 2206 2207 for (unit_end += upa; unit < unit_end; unit++) 2208 if (gi->cpu_map[unit] != NR_CPUS) 2209 pr_cont("%0*d ", 2210 cpu_width, gi->cpu_map[unit]); 2211 else 2212 pr_cont("%s ", empty_str); 2213 } 2214 } 2215 pr_cont("\n"); 2216 } 2217 2218 /** 2219 * pcpu_setup_first_chunk - initialize the first percpu chunk 2220 * @ai: pcpu_alloc_info describing how to percpu area is shaped 2221 * @base_addr: mapped address 2222 * 2223 * Initialize the first percpu chunk which contains the kernel static 2224 * perpcu area. This function is to be called from arch percpu area 2225 * setup path. 2226 * 2227 * @ai contains all information necessary to initialize the first 2228 * chunk and prime the dynamic percpu allocator. 2229 * 2230 * @ai->static_size is the size of static percpu area. 2231 * 2232 * @ai->reserved_size, if non-zero, specifies the amount of bytes to 2233 * reserve after the static area in the first chunk. This reserves 2234 * the first chunk such that it's available only through reserved 2235 * percpu allocation. This is primarily used to serve module percpu 2236 * static areas on architectures where the addressing model has 2237 * limited offset range for symbol relocations to guarantee module 2238 * percpu symbols fall inside the relocatable range. 2239 * 2240 * @ai->dyn_size determines the number of bytes available for dynamic 2241 * allocation in the first chunk. The area between @ai->static_size + 2242 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 2243 * 2244 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 2245 * and equal to or larger than @ai->static_size + @ai->reserved_size + 2246 * @ai->dyn_size. 2247 * 2248 * @ai->atom_size is the allocation atom size and used as alignment 2249 * for vm areas. 2250 * 2251 * @ai->alloc_size is the allocation size and always multiple of 2252 * @ai->atom_size. This is larger than @ai->atom_size if 2253 * @ai->unit_size is larger than @ai->atom_size. 2254 * 2255 * @ai->nr_groups and @ai->groups describe virtual memory layout of 2256 * percpu areas. Units which should be colocated are put into the 2257 * same group. Dynamic VM areas will be allocated according to these 2258 * groupings. If @ai->nr_groups is zero, a single group containing 2259 * all units is assumed. 2260 * 2261 * The caller should have mapped the first chunk at @base_addr and 2262 * copied static data to each unit. 2263 * 2264 * The first chunk will always contain a static and a dynamic region. 2265 * However, the static region is not managed by any chunk. If the first 2266 * chunk also contains a reserved region, it is served by two chunks - 2267 * one for the reserved region and one for the dynamic region. They 2268 * share the same vm, but use offset regions in the area allocation map. 2269 * The chunk serving the dynamic region is circulated in the chunk slots 2270 * and available for dynamic allocation like any other chunk. 2271 * 2272 * RETURNS: 2273 * 0 on success, -errno on failure. 2274 */ 2275 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 2276 void *base_addr) 2277 { 2278 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2279 size_t static_size, dyn_size; 2280 struct pcpu_chunk *chunk; 2281 unsigned long *group_offsets; 2282 size_t *group_sizes; 2283 unsigned long *unit_off; 2284 unsigned int cpu; 2285 int *unit_map; 2286 int group, unit, i; 2287 int map_size; 2288 unsigned long tmp_addr; 2289 size_t alloc_size; 2290 2291 #define PCPU_SETUP_BUG_ON(cond) do { \ 2292 if (unlikely(cond)) { \ 2293 pr_emerg("failed to initialize, %s\n", #cond); \ 2294 pr_emerg("cpu_possible_mask=%*pb\n", \ 2295 cpumask_pr_args(cpu_possible_mask)); \ 2296 pcpu_dump_alloc_info(KERN_EMERG, ai); \ 2297 BUG(); \ 2298 } \ 2299 } while (0) 2300 2301 /* sanity checks */ 2302 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 2303 #ifdef CONFIG_SMP 2304 PCPU_SETUP_BUG_ON(!ai->static_size); 2305 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start)); 2306 #endif 2307 PCPU_SETUP_BUG_ON(!base_addr); 2308 PCPU_SETUP_BUG_ON(offset_in_page(base_addr)); 2309 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 2310 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size)); 2311 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 2312 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE)); 2313 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 2314 PCPU_SETUP_BUG_ON(!ai->dyn_size); 2315 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE)); 2316 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) || 2317 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE))); 2318 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 2319 2320 /* process group information and build config tables accordingly */ 2321 alloc_size = ai->nr_groups * sizeof(group_offsets[0]); 2322 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2323 if (!group_offsets) 2324 panic("%s: Failed to allocate %zu bytes\n", __func__, 2325 alloc_size); 2326 2327 alloc_size = ai->nr_groups * sizeof(group_sizes[0]); 2328 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2329 if (!group_sizes) 2330 panic("%s: Failed to allocate %zu bytes\n", __func__, 2331 alloc_size); 2332 2333 alloc_size = nr_cpu_ids * sizeof(unit_map[0]); 2334 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2335 if (!unit_map) 2336 panic("%s: Failed to allocate %zu bytes\n", __func__, 2337 alloc_size); 2338 2339 alloc_size = nr_cpu_ids * sizeof(unit_off[0]); 2340 unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); 2341 if (!unit_off) 2342 panic("%s: Failed to allocate %zu bytes\n", __func__, 2343 alloc_size); 2344 2345 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 2346 unit_map[cpu] = UINT_MAX; 2347 2348 pcpu_low_unit_cpu = NR_CPUS; 2349 pcpu_high_unit_cpu = NR_CPUS; 2350 2351 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 2352 const struct pcpu_group_info *gi = &ai->groups[group]; 2353 2354 group_offsets[group] = gi->base_offset; 2355 group_sizes[group] = gi->nr_units * ai->unit_size; 2356 2357 for (i = 0; i < gi->nr_units; i++) { 2358 cpu = gi->cpu_map[i]; 2359 if (cpu == NR_CPUS) 2360 continue; 2361 2362 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids); 2363 PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 2364 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 2365 2366 unit_map[cpu] = unit + i; 2367 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 2368 2369 /* determine low/high unit_cpu */ 2370 if (pcpu_low_unit_cpu == NR_CPUS || 2371 unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 2372 pcpu_low_unit_cpu = cpu; 2373 if (pcpu_high_unit_cpu == NR_CPUS || 2374 unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 2375 pcpu_high_unit_cpu = cpu; 2376 } 2377 } 2378 pcpu_nr_units = unit; 2379 2380 for_each_possible_cpu(cpu) 2381 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 2382 2383 /* we're done parsing the input, undefine BUG macro and dump config */ 2384 #undef PCPU_SETUP_BUG_ON 2385 pcpu_dump_alloc_info(KERN_DEBUG, ai); 2386 2387 pcpu_nr_groups = ai->nr_groups; 2388 pcpu_group_offsets = group_offsets; 2389 pcpu_group_sizes = group_sizes; 2390 pcpu_unit_map = unit_map; 2391 pcpu_unit_offsets = unit_off; 2392 2393 /* determine basic parameters */ 2394 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 2395 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 2396 pcpu_atom_size = ai->atom_size; 2397 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 2398 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 2399 2400 pcpu_stats_save_ai(ai); 2401 2402 /* 2403 * Allocate chunk slots. The additional last slot is for 2404 * empty chunks. 2405 */ 2406 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 2407 pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]), 2408 SMP_CACHE_BYTES); 2409 if (!pcpu_slot) 2410 panic("%s: Failed to allocate %zu bytes\n", __func__, 2411 pcpu_nr_slots * sizeof(pcpu_slot[0])); 2412 for (i = 0; i < pcpu_nr_slots; i++) 2413 INIT_LIST_HEAD(&pcpu_slot[i]); 2414 2415 /* 2416 * The end of the static region needs to be aligned with the 2417 * minimum allocation size as this offsets the reserved and 2418 * dynamic region. The first chunk ends page aligned by 2419 * expanding the dynamic region, therefore the dynamic region 2420 * can be shrunk to compensate while still staying above the 2421 * configured sizes. 2422 */ 2423 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE); 2424 dyn_size = ai->dyn_size - (static_size - ai->static_size); 2425 2426 /* 2427 * Initialize first chunk. 2428 * If the reserved_size is non-zero, this initializes the reserved 2429 * chunk. If the reserved_size is zero, the reserved chunk is NULL 2430 * and the dynamic region is initialized here. The first chunk, 2431 * pcpu_first_chunk, will always point to the chunk that serves 2432 * the dynamic region. 2433 */ 2434 tmp_addr = (unsigned long)base_addr + static_size; 2435 map_size = ai->reserved_size ?: dyn_size; 2436 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2437 2438 /* init dynamic chunk if necessary */ 2439 if (ai->reserved_size) { 2440 pcpu_reserved_chunk = chunk; 2441 2442 tmp_addr = (unsigned long)base_addr + static_size + 2443 ai->reserved_size; 2444 map_size = dyn_size; 2445 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size); 2446 } 2447 2448 /* link the first chunk in */ 2449 pcpu_first_chunk = chunk; 2450 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages; 2451 pcpu_chunk_relocate(pcpu_first_chunk, -1); 2452 2453 /* include all regions of the first chunk */ 2454 pcpu_nr_populated += PFN_DOWN(size_sum); 2455 2456 pcpu_stats_chunk_alloc(); 2457 trace_percpu_create_chunk(base_addr); 2458 2459 /* we're done */ 2460 pcpu_base_addr = base_addr; 2461 return 0; 2462 } 2463 2464 #ifdef CONFIG_SMP 2465 2466 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 2467 [PCPU_FC_AUTO] = "auto", 2468 [PCPU_FC_EMBED] = "embed", 2469 [PCPU_FC_PAGE] = "page", 2470 }; 2471 2472 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 2473 2474 static int __init percpu_alloc_setup(char *str) 2475 { 2476 if (!str) 2477 return -EINVAL; 2478 2479 if (0) 2480 /* nada */; 2481 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 2482 else if (!strcmp(str, "embed")) 2483 pcpu_chosen_fc = PCPU_FC_EMBED; 2484 #endif 2485 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2486 else if (!strcmp(str, "page")) 2487 pcpu_chosen_fc = PCPU_FC_PAGE; 2488 #endif 2489 else 2490 pr_warn("unknown allocator %s specified\n", str); 2491 2492 return 0; 2493 } 2494 early_param("percpu_alloc", percpu_alloc_setup); 2495 2496 /* 2497 * pcpu_embed_first_chunk() is used by the generic percpu setup. 2498 * Build it if needed by the arch config or the generic setup is going 2499 * to be used. 2500 */ 2501 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 2502 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 2503 #define BUILD_EMBED_FIRST_CHUNK 2504 #endif 2505 2506 /* build pcpu_page_first_chunk() iff needed by the arch config */ 2507 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 2508 #define BUILD_PAGE_FIRST_CHUNK 2509 #endif 2510 2511 /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 2512 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 2513 /** 2514 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 2515 * @reserved_size: the size of reserved percpu area in bytes 2516 * @dyn_size: minimum free size for dynamic allocation in bytes 2517 * @atom_size: allocation atom size 2518 * @cpu_distance_fn: callback to determine distance between cpus, optional 2519 * 2520 * This function determines grouping of units, their mappings to cpus 2521 * and other parameters considering needed percpu size, allocation 2522 * atom size and distances between CPUs. 2523 * 2524 * Groups are always multiples of atom size and CPUs which are of 2525 * LOCAL_DISTANCE both ways are grouped together and share space for 2526 * units in the same group. The returned configuration is guaranteed 2527 * to have CPUs on different nodes on different groups and >=75% usage 2528 * of allocated virtual address space. 2529 * 2530 * RETURNS: 2531 * On success, pointer to the new allocation_info is returned. On 2532 * failure, ERR_PTR value is returned. 2533 */ 2534 static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 2535 size_t reserved_size, size_t dyn_size, 2536 size_t atom_size, 2537 pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 2538 { 2539 static int group_map[NR_CPUS] __initdata; 2540 static int group_cnt[NR_CPUS] __initdata; 2541 const size_t static_size = __per_cpu_end - __per_cpu_start; 2542 int nr_groups = 1, nr_units = 0; 2543 size_t size_sum, min_unit_size, alloc_size; 2544 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 2545 int last_allocs, group, unit; 2546 unsigned int cpu, tcpu; 2547 struct pcpu_alloc_info *ai; 2548 unsigned int *cpu_map; 2549 2550 /* this function may be called multiple times */ 2551 memset(group_map, 0, sizeof(group_map)); 2552 memset(group_cnt, 0, sizeof(group_cnt)); 2553 2554 /* calculate size_sum and ensure dyn_size is enough for early alloc */ 2555 size_sum = PFN_ALIGN(static_size + reserved_size + 2556 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 2557 dyn_size = size_sum - static_size - reserved_size; 2558 2559 /* 2560 * Determine min_unit_size, alloc_size and max_upa such that 2561 * alloc_size is multiple of atom_size and is the smallest 2562 * which can accommodate 4k aligned segments which are equal to 2563 * or larger than min_unit_size. 2564 */ 2565 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 2566 2567 /* determine the maximum # of units that can fit in an allocation */ 2568 alloc_size = roundup(min_unit_size, atom_size); 2569 upa = alloc_size / min_unit_size; 2570 while (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2571 upa--; 2572 max_upa = upa; 2573 2574 /* group cpus according to their proximity */ 2575 for_each_possible_cpu(cpu) { 2576 group = 0; 2577 next_group: 2578 for_each_possible_cpu(tcpu) { 2579 if (cpu == tcpu) 2580 break; 2581 if (group_map[tcpu] == group && cpu_distance_fn && 2582 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 2583 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 2584 group++; 2585 nr_groups = max(nr_groups, group + 1); 2586 goto next_group; 2587 } 2588 } 2589 group_map[cpu] = group; 2590 group_cnt[group]++; 2591 } 2592 2593 /* 2594 * Wasted space is caused by a ratio imbalance of upa to group_cnt. 2595 * Expand the unit_size until we use >= 75% of the units allocated. 2596 * Related to atom_size, which could be much larger than the unit_size. 2597 */ 2598 last_allocs = INT_MAX; 2599 for (upa = max_upa; upa; upa--) { 2600 int allocs = 0, wasted = 0; 2601 2602 if (alloc_size % upa || (offset_in_page(alloc_size / upa))) 2603 continue; 2604 2605 for (group = 0; group < nr_groups; group++) { 2606 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 2607 allocs += this_allocs; 2608 wasted += this_allocs * upa - group_cnt[group]; 2609 } 2610 2611 /* 2612 * Don't accept if wastage is over 1/3. The 2613 * greater-than comparison ensures upa==1 always 2614 * passes the following check. 2615 */ 2616 if (wasted > num_possible_cpus() / 3) 2617 continue; 2618 2619 /* and then don't consume more memory */ 2620 if (allocs > last_allocs) 2621 break; 2622 last_allocs = allocs; 2623 best_upa = upa; 2624 } 2625 upa = best_upa; 2626 2627 /* allocate and fill alloc_info */ 2628 for (group = 0; group < nr_groups; group++) 2629 nr_units += roundup(group_cnt[group], upa); 2630 2631 ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 2632 if (!ai) 2633 return ERR_PTR(-ENOMEM); 2634 cpu_map = ai->groups[0].cpu_map; 2635 2636 for (group = 0; group < nr_groups; group++) { 2637 ai->groups[group].cpu_map = cpu_map; 2638 cpu_map += roundup(group_cnt[group], upa); 2639 } 2640 2641 ai->static_size = static_size; 2642 ai->reserved_size = reserved_size; 2643 ai->dyn_size = dyn_size; 2644 ai->unit_size = alloc_size / upa; 2645 ai->atom_size = atom_size; 2646 ai->alloc_size = alloc_size; 2647 2648 for (group = 0, unit = 0; group < nr_groups; group++) { 2649 struct pcpu_group_info *gi = &ai->groups[group]; 2650 2651 /* 2652 * Initialize base_offset as if all groups are located 2653 * back-to-back. The caller should update this to 2654 * reflect actual allocation. 2655 */ 2656 gi->base_offset = unit * ai->unit_size; 2657 2658 for_each_possible_cpu(cpu) 2659 if (group_map[cpu] == group) 2660 gi->cpu_map[gi->nr_units++] = cpu; 2661 gi->nr_units = roundup(gi->nr_units, upa); 2662 unit += gi->nr_units; 2663 } 2664 BUG_ON(unit != nr_units); 2665 2666 return ai; 2667 } 2668 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 2669 2670 #if defined(BUILD_EMBED_FIRST_CHUNK) 2671 /** 2672 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 2673 * @reserved_size: the size of reserved percpu area in bytes 2674 * @dyn_size: minimum free size for dynamic allocation in bytes 2675 * @atom_size: allocation atom size 2676 * @cpu_distance_fn: callback to determine distance between cpus, optional 2677 * @alloc_fn: function to allocate percpu page 2678 * @free_fn: function to free percpu page 2679 * 2680 * This is a helper to ease setting up embedded first percpu chunk and 2681 * can be called where pcpu_setup_first_chunk() is expected. 2682 * 2683 * If this function is used to setup the first chunk, it is allocated 2684 * by calling @alloc_fn and used as-is without being mapped into 2685 * vmalloc area. Allocations are always whole multiples of @atom_size 2686 * aligned to @atom_size. 2687 * 2688 * This enables the first chunk to piggy back on the linear physical 2689 * mapping which often uses larger page size. Please note that this 2690 * can result in very sparse cpu->unit mapping on NUMA machines thus 2691 * requiring large vmalloc address space. Don't use this allocator if 2692 * vmalloc space is not orders of magnitude larger than distances 2693 * between node memory addresses (ie. 32bit NUMA machines). 2694 * 2695 * @dyn_size specifies the minimum dynamic area size. 2696 * 2697 * If the needed size is smaller than the minimum or specified unit 2698 * size, the leftover is returned using @free_fn. 2699 * 2700 * RETURNS: 2701 * 0 on success, -errno on failure. 2702 */ 2703 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 2704 size_t atom_size, 2705 pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 2706 pcpu_fc_alloc_fn_t alloc_fn, 2707 pcpu_fc_free_fn_t free_fn) 2708 { 2709 void *base = (void *)ULONG_MAX; 2710 void **areas = NULL; 2711 struct pcpu_alloc_info *ai; 2712 size_t size_sum, areas_size; 2713 unsigned long max_distance; 2714 int group, i, highest_group, rc; 2715 2716 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 2717 cpu_distance_fn); 2718 if (IS_ERR(ai)) 2719 return PTR_ERR(ai); 2720 2721 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 2722 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 2723 2724 areas = memblock_alloc(areas_size, SMP_CACHE_BYTES); 2725 if (!areas) { 2726 rc = -ENOMEM; 2727 goto out_free; 2728 } 2729 2730 /* allocate, copy and determine base address & max_distance */ 2731 highest_group = 0; 2732 for (group = 0; group < ai->nr_groups; group++) { 2733 struct pcpu_group_info *gi = &ai->groups[group]; 2734 unsigned int cpu = NR_CPUS; 2735 void *ptr; 2736 2737 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 2738 cpu = gi->cpu_map[i]; 2739 BUG_ON(cpu == NR_CPUS); 2740 2741 /* allocate space for the whole group */ 2742 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 2743 if (!ptr) { 2744 rc = -ENOMEM; 2745 goto out_free_areas; 2746 } 2747 /* kmemleak tracks the percpu allocations separately */ 2748 kmemleak_free(ptr); 2749 areas[group] = ptr; 2750 2751 base = min(ptr, base); 2752 if (ptr > areas[highest_group]) 2753 highest_group = group; 2754 } 2755 max_distance = areas[highest_group] - base; 2756 max_distance += ai->unit_size * ai->groups[highest_group].nr_units; 2757 2758 /* warn if maximum distance is further than 75% of vmalloc space */ 2759 if (max_distance > VMALLOC_TOTAL * 3 / 4) { 2760 pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n", 2761 max_distance, VMALLOC_TOTAL); 2762 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 2763 /* and fail if we have fallback */ 2764 rc = -EINVAL; 2765 goto out_free_areas; 2766 #endif 2767 } 2768 2769 /* 2770 * Copy data and free unused parts. This should happen after all 2771 * allocations are complete; otherwise, we may end up with 2772 * overlapping groups. 2773 */ 2774 for (group = 0; group < ai->nr_groups; group++) { 2775 struct pcpu_group_info *gi = &ai->groups[group]; 2776 void *ptr = areas[group]; 2777 2778 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 2779 if (gi->cpu_map[i] == NR_CPUS) { 2780 /* unused unit, free whole */ 2781 free_fn(ptr, ai->unit_size); 2782 continue; 2783 } 2784 /* copy and return the unused part */ 2785 memcpy(ptr, __per_cpu_load, ai->static_size); 2786 free_fn(ptr + size_sum, ai->unit_size - size_sum); 2787 } 2788 } 2789 2790 /* base address is now known, determine group base offsets */ 2791 for (group = 0; group < ai->nr_groups; group++) { 2792 ai->groups[group].base_offset = areas[group] - base; 2793 } 2794 2795 pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n", 2796 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size, 2797 ai->dyn_size, ai->unit_size); 2798 2799 rc = pcpu_setup_first_chunk(ai, base); 2800 goto out_free; 2801 2802 out_free_areas: 2803 for (group = 0; group < ai->nr_groups; group++) 2804 if (areas[group]) 2805 free_fn(areas[group], 2806 ai->groups[group].nr_units * ai->unit_size); 2807 out_free: 2808 pcpu_free_alloc_info(ai); 2809 if (areas) 2810 memblock_free_early(__pa(areas), areas_size); 2811 return rc; 2812 } 2813 #endif /* BUILD_EMBED_FIRST_CHUNK */ 2814 2815 #ifdef BUILD_PAGE_FIRST_CHUNK 2816 /** 2817 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 2818 * @reserved_size: the size of reserved percpu area in bytes 2819 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 2820 * @free_fn: function to free percpu page, always called with PAGE_SIZE 2821 * @populate_pte_fn: function to populate pte 2822 * 2823 * This is a helper to ease setting up page-remapped first percpu 2824 * chunk and can be called where pcpu_setup_first_chunk() is expected. 2825 * 2826 * This is the basic allocator. Static percpu area is allocated 2827 * page-by-page into vmalloc area. 2828 * 2829 * RETURNS: 2830 * 0 on success, -errno on failure. 2831 */ 2832 int __init pcpu_page_first_chunk(size_t reserved_size, 2833 pcpu_fc_alloc_fn_t alloc_fn, 2834 pcpu_fc_free_fn_t free_fn, 2835 pcpu_fc_populate_pte_fn_t populate_pte_fn) 2836 { 2837 static struct vm_struct vm; 2838 struct pcpu_alloc_info *ai; 2839 char psize_str[16]; 2840 int unit_pages; 2841 size_t pages_size; 2842 struct page **pages; 2843 int unit, i, j, rc; 2844 int upa; 2845 int nr_g0_units; 2846 2847 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 2848 2849 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 2850 if (IS_ERR(ai)) 2851 return PTR_ERR(ai); 2852 BUG_ON(ai->nr_groups != 1); 2853 upa = ai->alloc_size/ai->unit_size; 2854 nr_g0_units = roundup(num_possible_cpus(), upa); 2855 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) { 2856 pcpu_free_alloc_info(ai); 2857 return -EINVAL; 2858 } 2859 2860 unit_pages = ai->unit_size >> PAGE_SHIFT; 2861 2862 /* unaligned allocations can't be freed, round up to page size */ 2863 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 2864 sizeof(pages[0])); 2865 pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); 2866 if (!pages) 2867 panic("%s: Failed to allocate %zu bytes\n", __func__, 2868 pages_size); 2869 2870 /* allocate pages */ 2871 j = 0; 2872 for (unit = 0; unit < num_possible_cpus(); unit++) { 2873 unsigned int cpu = ai->groups[0].cpu_map[unit]; 2874 for (i = 0; i < unit_pages; i++) { 2875 void *ptr; 2876 2877 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 2878 if (!ptr) { 2879 pr_warn("failed to allocate %s page for cpu%u\n", 2880 psize_str, cpu); 2881 goto enomem; 2882 } 2883 /* kmemleak tracks the percpu allocations separately */ 2884 kmemleak_free(ptr); 2885 pages[j++] = virt_to_page(ptr); 2886 } 2887 } 2888 2889 /* allocate vm area, map the pages and copy static data */ 2890 vm.flags = VM_ALLOC; 2891 vm.size = num_possible_cpus() * ai->unit_size; 2892 vm_area_register_early(&vm, PAGE_SIZE); 2893 2894 for (unit = 0; unit < num_possible_cpus(); unit++) { 2895 unsigned long unit_addr = 2896 (unsigned long)vm.addr + unit * ai->unit_size; 2897 2898 for (i = 0; i < unit_pages; i++) 2899 populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 2900 2901 /* pte already populated, the following shouldn't fail */ 2902 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 2903 unit_pages); 2904 if (rc < 0) 2905 panic("failed to map percpu area, err=%d\n", rc); 2906 2907 /* 2908 * FIXME: Archs with virtual cache should flush local 2909 * cache for the linear mapping here - something 2910 * equivalent to flush_cache_vmap() on the local cpu. 2911 * flush_cache_vmap() can't be used as most supporting 2912 * data structures are not set up yet. 2913 */ 2914 2915 /* copy static data */ 2916 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 2917 } 2918 2919 /* we're ready, commit */ 2920 pr_info("%d %s pages/cpu s%zu r%zu d%zu\n", 2921 unit_pages, psize_str, ai->static_size, 2922 ai->reserved_size, ai->dyn_size); 2923 2924 rc = pcpu_setup_first_chunk(ai, vm.addr); 2925 goto out_free_ar; 2926 2927 enomem: 2928 while (--j >= 0) 2929 free_fn(page_address(pages[j]), PAGE_SIZE); 2930 rc = -ENOMEM; 2931 out_free_ar: 2932 memblock_free_early(__pa(pages), pages_size); 2933 pcpu_free_alloc_info(ai); 2934 return rc; 2935 } 2936 #endif /* BUILD_PAGE_FIRST_CHUNK */ 2937 2938 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 2939 /* 2940 * Generic SMP percpu area setup. 2941 * 2942 * The embedding helper is used because its behavior closely resembles 2943 * the original non-dynamic generic percpu area setup. This is 2944 * important because many archs have addressing restrictions and might 2945 * fail if the percpu area is located far away from the previous 2946 * location. As an added bonus, in non-NUMA cases, embedding is 2947 * generally a good idea TLB-wise because percpu area can piggy back 2948 * on the physical linear memory mapping which uses large page 2949 * mappings on applicable archs. 2950 */ 2951 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 2952 EXPORT_SYMBOL(__per_cpu_offset); 2953 2954 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 2955 size_t align) 2956 { 2957 return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS)); 2958 } 2959 2960 static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 2961 { 2962 memblock_free_early(__pa(ptr), size); 2963 } 2964 2965 void __init setup_per_cpu_areas(void) 2966 { 2967 unsigned long delta; 2968 unsigned int cpu; 2969 int rc; 2970 2971 /* 2972 * Always reserve area for module percpu variables. That's 2973 * what the legacy allocator did. 2974 */ 2975 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 2976 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 2977 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 2978 if (rc < 0) 2979 panic("Failed to initialize percpu areas."); 2980 2981 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 2982 for_each_possible_cpu(cpu) 2983 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 2984 } 2985 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 2986 2987 #else /* CONFIG_SMP */ 2988 2989 /* 2990 * UP percpu area setup. 2991 * 2992 * UP always uses km-based percpu allocator with identity mapping. 2993 * Static percpu variables are indistinguishable from the usual static 2994 * variables and don't require any special preparation. 2995 */ 2996 void __init setup_per_cpu_areas(void) 2997 { 2998 const size_t unit_size = 2999 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 3000 PERCPU_DYNAMIC_RESERVE)); 3001 struct pcpu_alloc_info *ai; 3002 void *fc; 3003 3004 ai = pcpu_alloc_alloc_info(1, 1); 3005 fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); 3006 if (!ai || !fc) 3007 panic("Failed to allocate memory for percpu areas."); 3008 /* kmemleak tracks the percpu allocations separately */ 3009 kmemleak_free(fc); 3010 3011 ai->dyn_size = unit_size; 3012 ai->unit_size = unit_size; 3013 ai->atom_size = unit_size; 3014 ai->alloc_size = unit_size; 3015 ai->groups[0].nr_units = 1; 3016 ai->groups[0].cpu_map[0] = 0; 3017 3018 if (pcpu_setup_first_chunk(ai, fc) < 0) 3019 panic("Failed to initialize percpu areas."); 3020 pcpu_free_alloc_info(ai); 3021 } 3022 3023 #endif /* CONFIG_SMP */ 3024 3025 /* 3026 * pcpu_nr_pages - calculate total number of populated backing pages 3027 * 3028 * This reflects the number of pages populated to back chunks. Metadata is 3029 * excluded in the number exposed in meminfo as the number of backing pages 3030 * scales with the number of cpus and can quickly outweigh the memory used for 3031 * metadata. It also keeps this calculation nice and simple. 3032 * 3033 * RETURNS: 3034 * Total number of populated backing pages in use by the allocator. 3035 */ 3036 unsigned long pcpu_nr_pages(void) 3037 { 3038 return pcpu_nr_populated * pcpu_nr_units; 3039 } 3040 3041 /* 3042 * Percpu allocator is initialized early during boot when neither slab or 3043 * workqueue is available. Plug async management until everything is up 3044 * and running. 3045 */ 3046 static int __init percpu_enable_async(void) 3047 { 3048 pcpu_async_enabled = true; 3049 return 0; 3050 } 3051 subsys_initcall(percpu_enable_async); 3052