1 /* 2 * mm/percpu.c - percpu memory allocator 3 * 4 * Copyright (C) 2009 SUSE Linux Products GmbH 5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org> 6 * 7 * This file is released under the GPLv2. 8 * 9 * This is percpu allocator which can handle both static and dynamic 10 * areas. Percpu areas are allocated in chunks. Each chunk is 11 * consisted of boot-time determined number of units and the first 12 * chunk is used for static percpu variables in the kernel image 13 * (special boot time alloc/init handling necessary as these areas 14 * need to be brought up before allocation services are running). 15 * Unit grows as necessary and all units grow or shrink in unison. 16 * When a chunk is filled up, another chunk is allocated. 17 * 18 * c0 c1 c2 19 * ------------------- ------------------- ------------ 20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u 21 * ------------------- ...... ------------------- .... ------------ 22 * 23 * Allocation is done in offset-size areas of single unit space. Ie, 24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, 25 * c1:u1, c1:u2 and c1:u3. On UMA, units corresponds directly to 26 * cpus. On NUMA, the mapping can be non-linear and even sparse. 27 * Percpu access can be done by configuring percpu base registers 28 * according to cpu to unit mapping and pcpu_unit_size. 29 * 30 * There are usually many small percpu allocations many of them being 31 * as small as 4 bytes. The allocator organizes chunks into lists 32 * according to free size and tries to allocate from the fullest one. 33 * Each chunk keeps the maximum contiguous area size hint which is 34 * guaranteed to be equal to or larger than the maximum contiguous 35 * area in the chunk. This helps the allocator not to iterate the 36 * chunk maps unnecessarily. 37 * 38 * Allocation state in each chunk is kept using an array of integers 39 * on chunk->map. A positive value in the map represents a free 40 * region and negative allocated. Allocation inside a chunk is done 41 * by scanning this map sequentially and serving the first matching 42 * entry. This is mostly copied from the percpu_modalloc() allocator. 43 * Chunks can be determined from the address using the index field 44 * in the page struct. The index field contains a pointer to the chunk. 45 * 46 * To use this allocator, arch code should do the followings. 47 * 48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate 49 * regular address to percpu pointer and back if they need to be 50 * different from the default 51 * 52 * - use pcpu_setup_first_chunk() during percpu area initialization to 53 * setup the first chunk containing the kernel static percpu area 54 */ 55 56 #include <linux/bitmap.h> 57 #include <linux/bootmem.h> 58 #include <linux/err.h> 59 #include <linux/list.h> 60 #include <linux/log2.h> 61 #include <linux/mm.h> 62 #include <linux/module.h> 63 #include <linux/mutex.h> 64 #include <linux/percpu.h> 65 #include <linux/pfn.h> 66 #include <linux/slab.h> 67 #include <linux/spinlock.h> 68 #include <linux/vmalloc.h> 69 #include <linux/workqueue.h> 70 #include <linux/kmemleak.h> 71 72 #include <asm/cacheflush.h> 73 #include <asm/sections.h> 74 #include <asm/tlbflush.h> 75 #include <asm/io.h> 76 77 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ 78 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ 79 80 #ifdef CONFIG_SMP 81 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ 82 #ifndef __addr_to_pcpu_ptr 83 #define __addr_to_pcpu_ptr(addr) \ 84 (void __percpu *)((unsigned long)(addr) - \ 85 (unsigned long)pcpu_base_addr + \ 86 (unsigned long)__per_cpu_start) 87 #endif 88 #ifndef __pcpu_ptr_to_addr 89 #define __pcpu_ptr_to_addr(ptr) \ 90 (void __force *)((unsigned long)(ptr) + \ 91 (unsigned long)pcpu_base_addr - \ 92 (unsigned long)__per_cpu_start) 93 #endif 94 #else /* CONFIG_SMP */ 95 /* on UP, it's always identity mapped */ 96 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) 97 #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) 98 #endif /* CONFIG_SMP */ 99 100 struct pcpu_chunk { 101 struct list_head list; /* linked to pcpu_slot lists */ 102 int free_size; /* free bytes in the chunk */ 103 int contig_hint; /* max contiguous size hint */ 104 void *base_addr; /* base address of this chunk */ 105 int map_used; /* # of map entries used before the sentry */ 106 int map_alloc; /* # of map entries allocated */ 107 int *map; /* allocation map */ 108 void *data; /* chunk data */ 109 int first_free; /* no free below this */ 110 bool immutable; /* no [de]population allowed */ 111 unsigned long populated[]; /* populated bitmap */ 112 }; 113 114 static int pcpu_unit_pages __read_mostly; 115 static int pcpu_unit_size __read_mostly; 116 static int pcpu_nr_units __read_mostly; 117 static int pcpu_atom_size __read_mostly; 118 static int pcpu_nr_slots __read_mostly; 119 static size_t pcpu_chunk_struct_size __read_mostly; 120 121 /* cpus with the lowest and highest unit addresses */ 122 static unsigned int pcpu_low_unit_cpu __read_mostly; 123 static unsigned int pcpu_high_unit_cpu __read_mostly; 124 125 /* the address of the first chunk which starts with the kernel static area */ 126 void *pcpu_base_addr __read_mostly; 127 EXPORT_SYMBOL_GPL(pcpu_base_addr); 128 129 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ 130 const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */ 131 132 /* group information, used for vm allocation */ 133 static int pcpu_nr_groups __read_mostly; 134 static const unsigned long *pcpu_group_offsets __read_mostly; 135 static const size_t *pcpu_group_sizes __read_mostly; 136 137 /* 138 * The first chunk which always exists. Note that unlike other 139 * chunks, this one can be allocated and mapped in several different 140 * ways and thus often doesn't live in the vmalloc area. 141 */ 142 static struct pcpu_chunk *pcpu_first_chunk; 143 144 /* 145 * Optional reserved chunk. This chunk reserves part of the first 146 * chunk and serves it for reserved allocations. The amount of 147 * reserved offset is in pcpu_reserved_chunk_limit. When reserved 148 * area doesn't exist, the following variables contain NULL and 0 149 * respectively. 150 */ 151 static struct pcpu_chunk *pcpu_reserved_chunk; 152 static int pcpu_reserved_chunk_limit; 153 154 /* 155 * Synchronization rules. 156 * 157 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former 158 * protects allocation/reclaim paths, chunks, populated bitmap and 159 * vmalloc mapping. The latter is a spinlock and protects the index 160 * data structures - chunk slots, chunks and area maps in chunks. 161 * 162 * During allocation, pcpu_alloc_mutex is kept locked all the time and 163 * pcpu_lock is grabbed and released as necessary. All actual memory 164 * allocations are done using GFP_KERNEL with pcpu_lock released. In 165 * general, percpu memory can't be allocated with irq off but 166 * irqsave/restore are still used in alloc path so that it can be used 167 * from early init path - sched_init() specifically. 168 * 169 * Free path accesses and alters only the index data structures, so it 170 * can be safely called from atomic context. When memory needs to be 171 * returned to the system, free path schedules reclaim_work which 172 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be 173 * reclaimed, release both locks and frees the chunks. Note that it's 174 * necessary to grab both locks to remove a chunk from circulation as 175 * allocation path might be referencing the chunk with only 176 * pcpu_alloc_mutex locked. 177 */ 178 static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */ 179 static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */ 180 181 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ 182 183 /* reclaim work to release fully free chunks, scheduled from free path */ 184 static void pcpu_reclaim(struct work_struct *work); 185 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim); 186 187 static bool pcpu_addr_in_first_chunk(void *addr) 188 { 189 void *first_start = pcpu_first_chunk->base_addr; 190 191 return addr >= first_start && addr < first_start + pcpu_unit_size; 192 } 193 194 static bool pcpu_addr_in_reserved_chunk(void *addr) 195 { 196 void *first_start = pcpu_first_chunk->base_addr; 197 198 return addr >= first_start && 199 addr < first_start + pcpu_reserved_chunk_limit; 200 } 201 202 static int __pcpu_size_to_slot(int size) 203 { 204 int highbit = fls(size); /* size is in bytes */ 205 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); 206 } 207 208 static int pcpu_size_to_slot(int size) 209 { 210 if (size == pcpu_unit_size) 211 return pcpu_nr_slots - 1; 212 return __pcpu_size_to_slot(size); 213 } 214 215 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) 216 { 217 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) 218 return 0; 219 220 return pcpu_size_to_slot(chunk->free_size); 221 } 222 223 /* set the pointer to a chunk in a page struct */ 224 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu) 225 { 226 page->index = (unsigned long)pcpu; 227 } 228 229 /* obtain pointer to a chunk from a page struct */ 230 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page) 231 { 232 return (struct pcpu_chunk *)page->index; 233 } 234 235 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx) 236 { 237 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx; 238 } 239 240 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, 241 unsigned int cpu, int page_idx) 242 { 243 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] + 244 (page_idx << PAGE_SHIFT); 245 } 246 247 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk, 248 int *rs, int *re, int end) 249 { 250 *rs = find_next_zero_bit(chunk->populated, end, *rs); 251 *re = find_next_bit(chunk->populated, end, *rs + 1); 252 } 253 254 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk, 255 int *rs, int *re, int end) 256 { 257 *rs = find_next_bit(chunk->populated, end, *rs); 258 *re = find_next_zero_bit(chunk->populated, end, *rs + 1); 259 } 260 261 /* 262 * (Un)populated page region iterators. Iterate over (un)populated 263 * page regions between @start and @end in @chunk. @rs and @re should 264 * be integer variables and will be set to start and end page index of 265 * the current region. 266 */ 267 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \ 268 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \ 269 (rs) < (re); \ 270 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end))) 271 272 #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \ 273 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \ 274 (rs) < (re); \ 275 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end))) 276 277 /** 278 * pcpu_mem_zalloc - allocate memory 279 * @size: bytes to allocate 280 * 281 * Allocate @size bytes. If @size is smaller than PAGE_SIZE, 282 * kzalloc() is used; otherwise, vzalloc() is used. The returned 283 * memory is always zeroed. 284 * 285 * CONTEXT: 286 * Does GFP_KERNEL allocation. 287 * 288 * RETURNS: 289 * Pointer to the allocated area on success, NULL on failure. 290 */ 291 static void *pcpu_mem_zalloc(size_t size) 292 { 293 if (WARN_ON_ONCE(!slab_is_available())) 294 return NULL; 295 296 if (size <= PAGE_SIZE) 297 return kzalloc(size, GFP_KERNEL); 298 else 299 return vzalloc(size); 300 } 301 302 /** 303 * pcpu_mem_free - free memory 304 * @ptr: memory to free 305 * @size: size of the area 306 * 307 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc(). 308 */ 309 static void pcpu_mem_free(void *ptr, size_t size) 310 { 311 if (size <= PAGE_SIZE) 312 kfree(ptr); 313 else 314 vfree(ptr); 315 } 316 317 /** 318 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot 319 * @chunk: chunk of interest 320 * @oslot: the previous slot it was on 321 * 322 * This function is called after an allocation or free changed @chunk. 323 * New slot according to the changed state is determined and @chunk is 324 * moved to the slot. Note that the reserved chunk is never put on 325 * chunk slots. 326 * 327 * CONTEXT: 328 * pcpu_lock. 329 */ 330 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) 331 { 332 int nslot = pcpu_chunk_slot(chunk); 333 334 if (chunk != pcpu_reserved_chunk && oslot != nslot) { 335 if (oslot < nslot) 336 list_move(&chunk->list, &pcpu_slot[nslot]); 337 else 338 list_move_tail(&chunk->list, &pcpu_slot[nslot]); 339 } 340 } 341 342 /** 343 * pcpu_need_to_extend - determine whether chunk area map needs to be extended 344 * @chunk: chunk of interest 345 * 346 * Determine whether area map of @chunk needs to be extended to 347 * accommodate a new allocation. 348 * 349 * CONTEXT: 350 * pcpu_lock. 351 * 352 * RETURNS: 353 * New target map allocation length if extension is necessary, 0 354 * otherwise. 355 */ 356 static int pcpu_need_to_extend(struct pcpu_chunk *chunk) 357 { 358 int new_alloc; 359 360 if (chunk->map_alloc >= chunk->map_used + 3) 361 return 0; 362 363 new_alloc = PCPU_DFL_MAP_ALLOC; 364 while (new_alloc < chunk->map_used + 3) 365 new_alloc *= 2; 366 367 return new_alloc; 368 } 369 370 /** 371 * pcpu_extend_area_map - extend area map of a chunk 372 * @chunk: chunk of interest 373 * @new_alloc: new target allocation length of the area map 374 * 375 * Extend area map of @chunk to have @new_alloc entries. 376 * 377 * CONTEXT: 378 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock. 379 * 380 * RETURNS: 381 * 0 on success, -errno on failure. 382 */ 383 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) 384 { 385 int *old = NULL, *new = NULL; 386 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); 387 unsigned long flags; 388 389 new = pcpu_mem_zalloc(new_size); 390 if (!new) 391 return -ENOMEM; 392 393 /* acquire pcpu_lock and switch to new area map */ 394 spin_lock_irqsave(&pcpu_lock, flags); 395 396 if (new_alloc <= chunk->map_alloc) 397 goto out_unlock; 398 399 old_size = chunk->map_alloc * sizeof(chunk->map[0]); 400 old = chunk->map; 401 402 memcpy(new, old, old_size); 403 404 chunk->map_alloc = new_alloc; 405 chunk->map = new; 406 new = NULL; 407 408 out_unlock: 409 spin_unlock_irqrestore(&pcpu_lock, flags); 410 411 /* 412 * pcpu_mem_free() might end up calling vfree() which uses 413 * IRQ-unsafe lock and thus can't be called under pcpu_lock. 414 */ 415 pcpu_mem_free(old, old_size); 416 pcpu_mem_free(new, new_size); 417 418 return 0; 419 } 420 421 /** 422 * pcpu_alloc_area - allocate area from a pcpu_chunk 423 * @chunk: chunk of interest 424 * @size: wanted size in bytes 425 * @align: wanted align 426 * 427 * Try to allocate @size bytes area aligned at @align from @chunk. 428 * Note that this function only allocates the offset. It doesn't 429 * populate or map the area. 430 * 431 * @chunk->map must have at least two free slots. 432 * 433 * CONTEXT: 434 * pcpu_lock. 435 * 436 * RETURNS: 437 * Allocated offset in @chunk on success, -1 if no matching area is 438 * found. 439 */ 440 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) 441 { 442 int oslot = pcpu_chunk_slot(chunk); 443 int max_contig = 0; 444 int i, off; 445 bool seen_free = false; 446 int *p; 447 448 for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) { 449 int head, tail; 450 int this_size; 451 452 off = *p; 453 if (off & 1) 454 continue; 455 456 /* extra for alignment requirement */ 457 head = ALIGN(off, align) - off; 458 459 this_size = (p[1] & ~1) - off; 460 if (this_size < head + size) { 461 if (!seen_free) { 462 chunk->first_free = i; 463 seen_free = true; 464 } 465 max_contig = max(this_size, max_contig); 466 continue; 467 } 468 469 /* 470 * If head is small or the previous block is free, 471 * merge'em. Note that 'small' is defined as smaller 472 * than sizeof(int), which is very small but isn't too 473 * uncommon for percpu allocations. 474 */ 475 if (head && (head < sizeof(int) || !(p[-1] & 1))) { 476 *p = off += head; 477 if (p[-1] & 1) 478 chunk->free_size -= head; 479 else 480 max_contig = max(*p - p[-1], max_contig); 481 this_size -= head; 482 head = 0; 483 } 484 485 /* if tail is small, just keep it around */ 486 tail = this_size - head - size; 487 if (tail < sizeof(int)) { 488 tail = 0; 489 size = this_size - head; 490 } 491 492 /* split if warranted */ 493 if (head || tail) { 494 int nr_extra = !!head + !!tail; 495 496 /* insert new subblocks */ 497 memmove(p + nr_extra + 1, p + 1, 498 sizeof(chunk->map[0]) * (chunk->map_used - i)); 499 chunk->map_used += nr_extra; 500 501 if (head) { 502 if (!seen_free) { 503 chunk->first_free = i; 504 seen_free = true; 505 } 506 *++p = off += head; 507 ++i; 508 max_contig = max(head, max_contig); 509 } 510 if (tail) { 511 p[1] = off + size; 512 max_contig = max(tail, max_contig); 513 } 514 } 515 516 if (!seen_free) 517 chunk->first_free = i + 1; 518 519 /* update hint and mark allocated */ 520 if (i + 1 == chunk->map_used) 521 chunk->contig_hint = max_contig; /* fully scanned */ 522 else 523 chunk->contig_hint = max(chunk->contig_hint, 524 max_contig); 525 526 chunk->free_size -= size; 527 *p |= 1; 528 529 pcpu_chunk_relocate(chunk, oslot); 530 return off; 531 } 532 533 chunk->contig_hint = max_contig; /* fully scanned */ 534 pcpu_chunk_relocate(chunk, oslot); 535 536 /* tell the upper layer that this chunk has no matching area */ 537 return -1; 538 } 539 540 /** 541 * pcpu_free_area - free area to a pcpu_chunk 542 * @chunk: chunk of interest 543 * @freeme: offset of area to free 544 * 545 * Free area starting from @freeme to @chunk. Note that this function 546 * only modifies the allocation map. It doesn't depopulate or unmap 547 * the area. 548 * 549 * CONTEXT: 550 * pcpu_lock. 551 */ 552 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) 553 { 554 int oslot = pcpu_chunk_slot(chunk); 555 int off = 0; 556 unsigned i, j; 557 int to_free = 0; 558 int *p; 559 560 freeme |= 1; /* we are searching for <given offset, in use> pair */ 561 562 i = 0; 563 j = chunk->map_used; 564 while (i != j) { 565 unsigned k = (i + j) / 2; 566 off = chunk->map[k]; 567 if (off < freeme) 568 i = k + 1; 569 else if (off > freeme) 570 j = k; 571 else 572 i = j = k; 573 } 574 BUG_ON(off != freeme); 575 576 if (i < chunk->first_free) 577 chunk->first_free = i; 578 579 p = chunk->map + i; 580 *p = off &= ~1; 581 chunk->free_size += (p[1] & ~1) - off; 582 583 /* merge with next? */ 584 if (!(p[1] & 1)) 585 to_free++; 586 /* merge with previous? */ 587 if (i > 0 && !(p[-1] & 1)) { 588 to_free++; 589 i--; 590 p--; 591 } 592 if (to_free) { 593 chunk->map_used -= to_free; 594 memmove(p + 1, p + 1 + to_free, 595 (chunk->map_used - i) * sizeof(chunk->map[0])); 596 } 597 598 chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint); 599 pcpu_chunk_relocate(chunk, oslot); 600 } 601 602 static struct pcpu_chunk *pcpu_alloc_chunk(void) 603 { 604 struct pcpu_chunk *chunk; 605 606 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size); 607 if (!chunk) 608 return NULL; 609 610 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC * 611 sizeof(chunk->map[0])); 612 if (!chunk->map) { 613 pcpu_mem_free(chunk, pcpu_chunk_struct_size); 614 return NULL; 615 } 616 617 chunk->map_alloc = PCPU_DFL_MAP_ALLOC; 618 chunk->map[0] = 0; 619 chunk->map[1] = pcpu_unit_size | 1; 620 chunk->map_used = 1; 621 622 INIT_LIST_HEAD(&chunk->list); 623 chunk->free_size = pcpu_unit_size; 624 chunk->contig_hint = pcpu_unit_size; 625 626 return chunk; 627 } 628 629 static void pcpu_free_chunk(struct pcpu_chunk *chunk) 630 { 631 if (!chunk) 632 return; 633 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0])); 634 pcpu_mem_free(chunk, pcpu_chunk_struct_size); 635 } 636 637 /* 638 * Chunk management implementation. 639 * 640 * To allow different implementations, chunk alloc/free and 641 * [de]population are implemented in a separate file which is pulled 642 * into this file and compiled together. The following functions 643 * should be implemented. 644 * 645 * pcpu_populate_chunk - populate the specified range of a chunk 646 * pcpu_depopulate_chunk - depopulate the specified range of a chunk 647 * pcpu_create_chunk - create a new chunk 648 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop 649 * pcpu_addr_to_page - translate address to physical address 650 * pcpu_verify_alloc_info - check alloc_info is acceptable during init 651 */ 652 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size); 653 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size); 654 static struct pcpu_chunk *pcpu_create_chunk(void); 655 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk); 656 static struct page *pcpu_addr_to_page(void *addr); 657 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai); 658 659 #ifdef CONFIG_NEED_PER_CPU_KM 660 #include "percpu-km.c" 661 #else 662 #include "percpu-vm.c" 663 #endif 664 665 /** 666 * pcpu_chunk_addr_search - determine chunk containing specified address 667 * @addr: address for which the chunk needs to be determined. 668 * 669 * RETURNS: 670 * The address of the found chunk. 671 */ 672 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) 673 { 674 /* is it in the first chunk? */ 675 if (pcpu_addr_in_first_chunk(addr)) { 676 /* is it in the reserved area? */ 677 if (pcpu_addr_in_reserved_chunk(addr)) 678 return pcpu_reserved_chunk; 679 return pcpu_first_chunk; 680 } 681 682 /* 683 * The address is relative to unit0 which might be unused and 684 * thus unmapped. Offset the address to the unit space of the 685 * current processor before looking it up in the vmalloc 686 * space. Note that any possible cpu id can be used here, so 687 * there's no need to worry about preemption or cpu hotplug. 688 */ 689 addr += pcpu_unit_offsets[raw_smp_processor_id()]; 690 return pcpu_get_page_chunk(pcpu_addr_to_page(addr)); 691 } 692 693 /** 694 * pcpu_alloc - the percpu allocator 695 * @size: size of area to allocate in bytes 696 * @align: alignment of area (max PAGE_SIZE) 697 * @reserved: allocate from the reserved chunk if available 698 * 699 * Allocate percpu area of @size bytes aligned at @align. 700 * 701 * CONTEXT: 702 * Does GFP_KERNEL allocation. 703 * 704 * RETURNS: 705 * Percpu pointer to the allocated area on success, NULL on failure. 706 */ 707 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved) 708 { 709 static int warn_limit = 10; 710 struct pcpu_chunk *chunk; 711 const char *err; 712 int slot, off, new_alloc; 713 unsigned long flags; 714 void __percpu *ptr; 715 716 /* 717 * We want the lowest bit of offset available for in-use/free 718 * indicator, so force >= 16bit alignment and make size even. 719 */ 720 if (unlikely(align < 2)) 721 align = 2; 722 723 size = ALIGN(size, 2); 724 725 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { 726 WARN(true, "illegal size (%zu) or align (%zu) for " 727 "percpu allocation\n", size, align); 728 return NULL; 729 } 730 731 mutex_lock(&pcpu_alloc_mutex); 732 spin_lock_irqsave(&pcpu_lock, flags); 733 734 /* serve reserved allocations from the reserved chunk if available */ 735 if (reserved && pcpu_reserved_chunk) { 736 chunk = pcpu_reserved_chunk; 737 738 if (size > chunk->contig_hint) { 739 err = "alloc from reserved chunk failed"; 740 goto fail_unlock; 741 } 742 743 while ((new_alloc = pcpu_need_to_extend(chunk))) { 744 spin_unlock_irqrestore(&pcpu_lock, flags); 745 if (pcpu_extend_area_map(chunk, new_alloc) < 0) { 746 err = "failed to extend area map of reserved chunk"; 747 goto fail_unlock_mutex; 748 } 749 spin_lock_irqsave(&pcpu_lock, flags); 750 } 751 752 off = pcpu_alloc_area(chunk, size, align); 753 if (off >= 0) 754 goto area_found; 755 756 err = "alloc from reserved chunk failed"; 757 goto fail_unlock; 758 } 759 760 restart: 761 /* search through normal chunks */ 762 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { 763 list_for_each_entry(chunk, &pcpu_slot[slot], list) { 764 if (size > chunk->contig_hint) 765 continue; 766 767 new_alloc = pcpu_need_to_extend(chunk); 768 if (new_alloc) { 769 spin_unlock_irqrestore(&pcpu_lock, flags); 770 if (pcpu_extend_area_map(chunk, 771 new_alloc) < 0) { 772 err = "failed to extend area map"; 773 goto fail_unlock_mutex; 774 } 775 spin_lock_irqsave(&pcpu_lock, flags); 776 /* 777 * pcpu_lock has been dropped, need to 778 * restart cpu_slot list walking. 779 */ 780 goto restart; 781 } 782 783 off = pcpu_alloc_area(chunk, size, align); 784 if (off >= 0) 785 goto area_found; 786 } 787 } 788 789 /* hmmm... no space left, create a new chunk */ 790 spin_unlock_irqrestore(&pcpu_lock, flags); 791 792 chunk = pcpu_create_chunk(); 793 if (!chunk) { 794 err = "failed to allocate new chunk"; 795 goto fail_unlock_mutex; 796 } 797 798 spin_lock_irqsave(&pcpu_lock, flags); 799 pcpu_chunk_relocate(chunk, -1); 800 goto restart; 801 802 area_found: 803 spin_unlock_irqrestore(&pcpu_lock, flags); 804 805 /* populate, map and clear the area */ 806 if (pcpu_populate_chunk(chunk, off, size)) { 807 spin_lock_irqsave(&pcpu_lock, flags); 808 pcpu_free_area(chunk, off); 809 err = "failed to populate"; 810 goto fail_unlock; 811 } 812 813 mutex_unlock(&pcpu_alloc_mutex); 814 815 /* return address relative to base address */ 816 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off); 817 kmemleak_alloc_percpu(ptr, size); 818 return ptr; 819 820 fail_unlock: 821 spin_unlock_irqrestore(&pcpu_lock, flags); 822 fail_unlock_mutex: 823 mutex_unlock(&pcpu_alloc_mutex); 824 if (warn_limit) { 825 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, " 826 "%s\n", size, align, err); 827 dump_stack(); 828 if (!--warn_limit) 829 pr_info("PERCPU: limit reached, disable warning\n"); 830 } 831 return NULL; 832 } 833 834 /** 835 * __alloc_percpu - allocate dynamic percpu area 836 * @size: size of area to allocate in bytes 837 * @align: alignment of area (max PAGE_SIZE) 838 * 839 * Allocate zero-filled percpu area of @size bytes aligned at @align. 840 * Might sleep. Might trigger writeouts. 841 * 842 * CONTEXT: 843 * Does GFP_KERNEL allocation. 844 * 845 * RETURNS: 846 * Percpu pointer to the allocated area on success, NULL on failure. 847 */ 848 void __percpu *__alloc_percpu(size_t size, size_t align) 849 { 850 return pcpu_alloc(size, align, false); 851 } 852 EXPORT_SYMBOL_GPL(__alloc_percpu); 853 854 /** 855 * __alloc_reserved_percpu - allocate reserved percpu area 856 * @size: size of area to allocate in bytes 857 * @align: alignment of area (max PAGE_SIZE) 858 * 859 * Allocate zero-filled percpu area of @size bytes aligned at @align 860 * from reserved percpu area if arch has set it up; otherwise, 861 * allocation is served from the same dynamic area. Might sleep. 862 * Might trigger writeouts. 863 * 864 * CONTEXT: 865 * Does GFP_KERNEL allocation. 866 * 867 * RETURNS: 868 * Percpu pointer to the allocated area on success, NULL on failure. 869 */ 870 void __percpu *__alloc_reserved_percpu(size_t size, size_t align) 871 { 872 return pcpu_alloc(size, align, true); 873 } 874 875 /** 876 * pcpu_reclaim - reclaim fully free chunks, workqueue function 877 * @work: unused 878 * 879 * Reclaim all fully free chunks except for the first one. 880 * 881 * CONTEXT: 882 * workqueue context. 883 */ 884 static void pcpu_reclaim(struct work_struct *work) 885 { 886 LIST_HEAD(todo); 887 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1]; 888 struct pcpu_chunk *chunk, *next; 889 890 mutex_lock(&pcpu_alloc_mutex); 891 spin_lock_irq(&pcpu_lock); 892 893 list_for_each_entry_safe(chunk, next, head, list) { 894 WARN_ON(chunk->immutable); 895 896 /* spare the first one */ 897 if (chunk == list_first_entry(head, struct pcpu_chunk, list)) 898 continue; 899 900 list_move(&chunk->list, &todo); 901 } 902 903 spin_unlock_irq(&pcpu_lock); 904 905 list_for_each_entry_safe(chunk, next, &todo, list) { 906 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size); 907 pcpu_destroy_chunk(chunk); 908 } 909 910 mutex_unlock(&pcpu_alloc_mutex); 911 } 912 913 /** 914 * free_percpu - free percpu area 915 * @ptr: pointer to area to free 916 * 917 * Free percpu area @ptr. 918 * 919 * CONTEXT: 920 * Can be called from atomic context. 921 */ 922 void free_percpu(void __percpu *ptr) 923 { 924 void *addr; 925 struct pcpu_chunk *chunk; 926 unsigned long flags; 927 int off; 928 929 if (!ptr) 930 return; 931 932 kmemleak_free_percpu(ptr); 933 934 addr = __pcpu_ptr_to_addr(ptr); 935 936 spin_lock_irqsave(&pcpu_lock, flags); 937 938 chunk = pcpu_chunk_addr_search(addr); 939 off = addr - chunk->base_addr; 940 941 pcpu_free_area(chunk, off); 942 943 /* if there are more than one fully free chunks, wake up grim reaper */ 944 if (chunk->free_size == pcpu_unit_size) { 945 struct pcpu_chunk *pos; 946 947 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list) 948 if (pos != chunk) { 949 schedule_work(&pcpu_reclaim_work); 950 break; 951 } 952 } 953 954 spin_unlock_irqrestore(&pcpu_lock, flags); 955 } 956 EXPORT_SYMBOL_GPL(free_percpu); 957 958 /** 959 * is_kernel_percpu_address - test whether address is from static percpu area 960 * @addr: address to test 961 * 962 * Test whether @addr belongs to in-kernel static percpu area. Module 963 * static percpu areas are not considered. For those, use 964 * is_module_percpu_address(). 965 * 966 * RETURNS: 967 * %true if @addr is from in-kernel static percpu area, %false otherwise. 968 */ 969 bool is_kernel_percpu_address(unsigned long addr) 970 { 971 #ifdef CONFIG_SMP 972 const size_t static_size = __per_cpu_end - __per_cpu_start; 973 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 974 unsigned int cpu; 975 976 for_each_possible_cpu(cpu) { 977 void *start = per_cpu_ptr(base, cpu); 978 979 if ((void *)addr >= start && (void *)addr < start + static_size) 980 return true; 981 } 982 #endif 983 /* on UP, can't distinguish from other static vars, always false */ 984 return false; 985 } 986 987 /** 988 * per_cpu_ptr_to_phys - convert translated percpu address to physical address 989 * @addr: the address to be converted to physical address 990 * 991 * Given @addr which is dereferenceable address obtained via one of 992 * percpu access macros, this function translates it into its physical 993 * address. The caller is responsible for ensuring @addr stays valid 994 * until this function finishes. 995 * 996 * percpu allocator has special setup for the first chunk, which currently 997 * supports either embedding in linear address space or vmalloc mapping, 998 * and, from the second one, the backing allocator (currently either vm or 999 * km) provides translation. 1000 * 1001 * The addr can be tranlated simply without checking if it falls into the 1002 * first chunk. But the current code reflects better how percpu allocator 1003 * actually works, and the verification can discover both bugs in percpu 1004 * allocator itself and per_cpu_ptr_to_phys() callers. So we keep current 1005 * code. 1006 * 1007 * RETURNS: 1008 * The physical address for @addr. 1009 */ 1010 phys_addr_t per_cpu_ptr_to_phys(void *addr) 1011 { 1012 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); 1013 bool in_first_chunk = false; 1014 unsigned long first_low, first_high; 1015 unsigned int cpu; 1016 1017 /* 1018 * The following test on unit_low/high isn't strictly 1019 * necessary but will speed up lookups of addresses which 1020 * aren't in the first chunk. 1021 */ 1022 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0); 1023 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu, 1024 pcpu_unit_pages); 1025 if ((unsigned long)addr >= first_low && 1026 (unsigned long)addr < first_high) { 1027 for_each_possible_cpu(cpu) { 1028 void *start = per_cpu_ptr(base, cpu); 1029 1030 if (addr >= start && addr < start + pcpu_unit_size) { 1031 in_first_chunk = true; 1032 break; 1033 } 1034 } 1035 } 1036 1037 if (in_first_chunk) { 1038 if (!is_vmalloc_addr(addr)) 1039 return __pa(addr); 1040 else 1041 return page_to_phys(vmalloc_to_page(addr)) + 1042 offset_in_page(addr); 1043 } else 1044 return page_to_phys(pcpu_addr_to_page(addr)) + 1045 offset_in_page(addr); 1046 } 1047 1048 /** 1049 * pcpu_alloc_alloc_info - allocate percpu allocation info 1050 * @nr_groups: the number of groups 1051 * @nr_units: the number of units 1052 * 1053 * Allocate ai which is large enough for @nr_groups groups containing 1054 * @nr_units units. The returned ai's groups[0].cpu_map points to the 1055 * cpu_map array which is long enough for @nr_units and filled with 1056 * NR_CPUS. It's the caller's responsibility to initialize cpu_map 1057 * pointer of other groups. 1058 * 1059 * RETURNS: 1060 * Pointer to the allocated pcpu_alloc_info on success, NULL on 1061 * failure. 1062 */ 1063 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, 1064 int nr_units) 1065 { 1066 struct pcpu_alloc_info *ai; 1067 size_t base_size, ai_size; 1068 void *ptr; 1069 int unit; 1070 1071 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]), 1072 __alignof__(ai->groups[0].cpu_map[0])); 1073 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); 1074 1075 ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0); 1076 if (!ptr) 1077 return NULL; 1078 ai = ptr; 1079 ptr += base_size; 1080 1081 ai->groups[0].cpu_map = ptr; 1082 1083 for (unit = 0; unit < nr_units; unit++) 1084 ai->groups[0].cpu_map[unit] = NR_CPUS; 1085 1086 ai->nr_groups = nr_groups; 1087 ai->__ai_size = PFN_ALIGN(ai_size); 1088 1089 return ai; 1090 } 1091 1092 /** 1093 * pcpu_free_alloc_info - free percpu allocation info 1094 * @ai: pcpu_alloc_info to free 1095 * 1096 * Free @ai which was allocated by pcpu_alloc_alloc_info(). 1097 */ 1098 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) 1099 { 1100 memblock_free_early(__pa(ai), ai->__ai_size); 1101 } 1102 1103 /** 1104 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info 1105 * @lvl: loglevel 1106 * @ai: allocation info to dump 1107 * 1108 * Print out information about @ai using loglevel @lvl. 1109 */ 1110 static void pcpu_dump_alloc_info(const char *lvl, 1111 const struct pcpu_alloc_info *ai) 1112 { 1113 int group_width = 1, cpu_width = 1, width; 1114 char empty_str[] = "--------"; 1115 int alloc = 0, alloc_end = 0; 1116 int group, v; 1117 int upa, apl; /* units per alloc, allocs per line */ 1118 1119 v = ai->nr_groups; 1120 while (v /= 10) 1121 group_width++; 1122 1123 v = num_possible_cpus(); 1124 while (v /= 10) 1125 cpu_width++; 1126 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0'; 1127 1128 upa = ai->alloc_size / ai->unit_size; 1129 width = upa * (cpu_width + 1) + group_width + 3; 1130 apl = rounddown_pow_of_two(max(60 / width, 1)); 1131 1132 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu", 1133 lvl, ai->static_size, ai->reserved_size, ai->dyn_size, 1134 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size); 1135 1136 for (group = 0; group < ai->nr_groups; group++) { 1137 const struct pcpu_group_info *gi = &ai->groups[group]; 1138 int unit = 0, unit_end = 0; 1139 1140 BUG_ON(gi->nr_units % upa); 1141 for (alloc_end += gi->nr_units / upa; 1142 alloc < alloc_end; alloc++) { 1143 if (!(alloc % apl)) { 1144 printk(KERN_CONT "\n"); 1145 printk("%spcpu-alloc: ", lvl); 1146 } 1147 printk(KERN_CONT "[%0*d] ", group_width, group); 1148 1149 for (unit_end += upa; unit < unit_end; unit++) 1150 if (gi->cpu_map[unit] != NR_CPUS) 1151 printk(KERN_CONT "%0*d ", cpu_width, 1152 gi->cpu_map[unit]); 1153 else 1154 printk(KERN_CONT "%s ", empty_str); 1155 } 1156 } 1157 printk(KERN_CONT "\n"); 1158 } 1159 1160 /** 1161 * pcpu_setup_first_chunk - initialize the first percpu chunk 1162 * @ai: pcpu_alloc_info describing how to percpu area is shaped 1163 * @base_addr: mapped address 1164 * 1165 * Initialize the first percpu chunk which contains the kernel static 1166 * perpcu area. This function is to be called from arch percpu area 1167 * setup path. 1168 * 1169 * @ai contains all information necessary to initialize the first 1170 * chunk and prime the dynamic percpu allocator. 1171 * 1172 * @ai->static_size is the size of static percpu area. 1173 * 1174 * @ai->reserved_size, if non-zero, specifies the amount of bytes to 1175 * reserve after the static area in the first chunk. This reserves 1176 * the first chunk such that it's available only through reserved 1177 * percpu allocation. This is primarily used to serve module percpu 1178 * static areas on architectures where the addressing model has 1179 * limited offset range for symbol relocations to guarantee module 1180 * percpu symbols fall inside the relocatable range. 1181 * 1182 * @ai->dyn_size determines the number of bytes available for dynamic 1183 * allocation in the first chunk. The area between @ai->static_size + 1184 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused. 1185 * 1186 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE 1187 * and equal to or larger than @ai->static_size + @ai->reserved_size + 1188 * @ai->dyn_size. 1189 * 1190 * @ai->atom_size is the allocation atom size and used as alignment 1191 * for vm areas. 1192 * 1193 * @ai->alloc_size is the allocation size and always multiple of 1194 * @ai->atom_size. This is larger than @ai->atom_size if 1195 * @ai->unit_size is larger than @ai->atom_size. 1196 * 1197 * @ai->nr_groups and @ai->groups describe virtual memory layout of 1198 * percpu areas. Units which should be colocated are put into the 1199 * same group. Dynamic VM areas will be allocated according to these 1200 * groupings. If @ai->nr_groups is zero, a single group containing 1201 * all units is assumed. 1202 * 1203 * The caller should have mapped the first chunk at @base_addr and 1204 * copied static data to each unit. 1205 * 1206 * If the first chunk ends up with both reserved and dynamic areas, it 1207 * is served by two chunks - one to serve the core static and reserved 1208 * areas and the other for the dynamic area. They share the same vm 1209 * and page map but uses different area allocation map to stay away 1210 * from each other. The latter chunk is circulated in the chunk slots 1211 * and available for dynamic allocation like any other chunks. 1212 * 1213 * RETURNS: 1214 * 0 on success, -errno on failure. 1215 */ 1216 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, 1217 void *base_addr) 1218 { 1219 static char cpus_buf[4096] __initdata; 1220 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1221 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata; 1222 size_t dyn_size = ai->dyn_size; 1223 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size; 1224 struct pcpu_chunk *schunk, *dchunk = NULL; 1225 unsigned long *group_offsets; 1226 size_t *group_sizes; 1227 unsigned long *unit_off; 1228 unsigned int cpu; 1229 int *unit_map; 1230 int group, unit, i; 1231 1232 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask); 1233 1234 #define PCPU_SETUP_BUG_ON(cond) do { \ 1235 if (unlikely(cond)) { \ 1236 pr_emerg("PERCPU: failed to initialize, %s", #cond); \ 1237 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \ 1238 pcpu_dump_alloc_info(KERN_EMERG, ai); \ 1239 BUG(); \ 1240 } \ 1241 } while (0) 1242 1243 /* sanity checks */ 1244 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); 1245 #ifdef CONFIG_SMP 1246 PCPU_SETUP_BUG_ON(!ai->static_size); 1247 PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK); 1248 #endif 1249 PCPU_SETUP_BUG_ON(!base_addr); 1250 PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK); 1251 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); 1252 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); 1253 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE); 1254 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE); 1255 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); 1256 1257 /* process group information and build config tables accordingly */ 1258 group_offsets = memblock_virt_alloc(ai->nr_groups * 1259 sizeof(group_offsets[0]), 0); 1260 group_sizes = memblock_virt_alloc(ai->nr_groups * 1261 sizeof(group_sizes[0]), 0); 1262 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0); 1263 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0); 1264 1265 for (cpu = 0; cpu < nr_cpu_ids; cpu++) 1266 unit_map[cpu] = UINT_MAX; 1267 1268 pcpu_low_unit_cpu = NR_CPUS; 1269 pcpu_high_unit_cpu = NR_CPUS; 1270 1271 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) { 1272 const struct pcpu_group_info *gi = &ai->groups[group]; 1273 1274 group_offsets[group] = gi->base_offset; 1275 group_sizes[group] = gi->nr_units * ai->unit_size; 1276 1277 for (i = 0; i < gi->nr_units; i++) { 1278 cpu = gi->cpu_map[i]; 1279 if (cpu == NR_CPUS) 1280 continue; 1281 1282 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1283 PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1284 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1285 1286 unit_map[cpu] = unit + i; 1287 unit_off[cpu] = gi->base_offset + i * ai->unit_size; 1288 1289 /* determine low/high unit_cpu */ 1290 if (pcpu_low_unit_cpu == NR_CPUS || 1291 unit_off[cpu] < unit_off[pcpu_low_unit_cpu]) 1292 pcpu_low_unit_cpu = cpu; 1293 if (pcpu_high_unit_cpu == NR_CPUS || 1294 unit_off[cpu] > unit_off[pcpu_high_unit_cpu]) 1295 pcpu_high_unit_cpu = cpu; 1296 } 1297 } 1298 pcpu_nr_units = unit; 1299 1300 for_each_possible_cpu(cpu) 1301 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX); 1302 1303 /* we're done parsing the input, undefine BUG macro and dump config */ 1304 #undef PCPU_SETUP_BUG_ON 1305 pcpu_dump_alloc_info(KERN_DEBUG, ai); 1306 1307 pcpu_nr_groups = ai->nr_groups; 1308 pcpu_group_offsets = group_offsets; 1309 pcpu_group_sizes = group_sizes; 1310 pcpu_unit_map = unit_map; 1311 pcpu_unit_offsets = unit_off; 1312 1313 /* determine basic parameters */ 1314 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT; 1315 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; 1316 pcpu_atom_size = ai->atom_size; 1317 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + 1318 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); 1319 1320 /* 1321 * Allocate chunk slots. The additional last slot is for 1322 * empty chunks. 1323 */ 1324 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; 1325 pcpu_slot = memblock_virt_alloc( 1326 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0); 1327 for (i = 0; i < pcpu_nr_slots; i++) 1328 INIT_LIST_HEAD(&pcpu_slot[i]); 1329 1330 /* 1331 * Initialize static chunk. If reserved_size is zero, the 1332 * static chunk covers static area + dynamic allocation area 1333 * in the first chunk. If reserved_size is not zero, it 1334 * covers static area + reserved area (mostly used for module 1335 * static percpu allocation). 1336 */ 1337 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1338 INIT_LIST_HEAD(&schunk->list); 1339 schunk->base_addr = base_addr; 1340 schunk->map = smap; 1341 schunk->map_alloc = ARRAY_SIZE(smap); 1342 schunk->immutable = true; 1343 bitmap_fill(schunk->populated, pcpu_unit_pages); 1344 1345 if (ai->reserved_size) { 1346 schunk->free_size = ai->reserved_size; 1347 pcpu_reserved_chunk = schunk; 1348 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size; 1349 } else { 1350 schunk->free_size = dyn_size; 1351 dyn_size = 0; /* dynamic area covered */ 1352 } 1353 schunk->contig_hint = schunk->free_size; 1354 1355 schunk->map[0] = 1; 1356 schunk->map[1] = ai->static_size; 1357 schunk->map_used = 1; 1358 if (schunk->free_size) 1359 schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size); 1360 else 1361 schunk->map[1] |= 1; 1362 1363 /* init dynamic chunk if necessary */ 1364 if (dyn_size) { 1365 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); 1366 INIT_LIST_HEAD(&dchunk->list); 1367 dchunk->base_addr = base_addr; 1368 dchunk->map = dmap; 1369 dchunk->map_alloc = ARRAY_SIZE(dmap); 1370 dchunk->immutable = true; 1371 bitmap_fill(dchunk->populated, pcpu_unit_pages); 1372 1373 dchunk->contig_hint = dchunk->free_size = dyn_size; 1374 dchunk->map[0] = 1; 1375 dchunk->map[1] = pcpu_reserved_chunk_limit; 1376 dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1; 1377 dchunk->map_used = 2; 1378 } 1379 1380 /* link the first chunk in */ 1381 pcpu_first_chunk = dchunk ?: schunk; 1382 pcpu_chunk_relocate(pcpu_first_chunk, -1); 1383 1384 /* we're done */ 1385 pcpu_base_addr = base_addr; 1386 return 0; 1387 } 1388 1389 #ifdef CONFIG_SMP 1390 1391 const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = { 1392 [PCPU_FC_AUTO] = "auto", 1393 [PCPU_FC_EMBED] = "embed", 1394 [PCPU_FC_PAGE] = "page", 1395 }; 1396 1397 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO; 1398 1399 static int __init percpu_alloc_setup(char *str) 1400 { 1401 if (!str) 1402 return -EINVAL; 1403 1404 if (0) 1405 /* nada */; 1406 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK 1407 else if (!strcmp(str, "embed")) 1408 pcpu_chosen_fc = PCPU_FC_EMBED; 1409 #endif 1410 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1411 else if (!strcmp(str, "page")) 1412 pcpu_chosen_fc = PCPU_FC_PAGE; 1413 #endif 1414 else 1415 pr_warning("PERCPU: unknown allocator %s specified\n", str); 1416 1417 return 0; 1418 } 1419 early_param("percpu_alloc", percpu_alloc_setup); 1420 1421 /* 1422 * pcpu_embed_first_chunk() is used by the generic percpu setup. 1423 * Build it if needed by the arch config or the generic setup is going 1424 * to be used. 1425 */ 1426 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ 1427 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) 1428 #define BUILD_EMBED_FIRST_CHUNK 1429 #endif 1430 1431 /* build pcpu_page_first_chunk() iff needed by the arch config */ 1432 #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) 1433 #define BUILD_PAGE_FIRST_CHUNK 1434 #endif 1435 1436 /* pcpu_build_alloc_info() is used by both embed and page first chunk */ 1437 #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) 1438 /** 1439 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs 1440 * @reserved_size: the size of reserved percpu area in bytes 1441 * @dyn_size: minimum free size for dynamic allocation in bytes 1442 * @atom_size: allocation atom size 1443 * @cpu_distance_fn: callback to determine distance between cpus, optional 1444 * 1445 * This function determines grouping of units, their mappings to cpus 1446 * and other parameters considering needed percpu size, allocation 1447 * atom size and distances between CPUs. 1448 * 1449 * Groups are always mutliples of atom size and CPUs which are of 1450 * LOCAL_DISTANCE both ways are grouped together and share space for 1451 * units in the same group. The returned configuration is guaranteed 1452 * to have CPUs on different nodes on different groups and >=75% usage 1453 * of allocated virtual address space. 1454 * 1455 * RETURNS: 1456 * On success, pointer to the new allocation_info is returned. On 1457 * failure, ERR_PTR value is returned. 1458 */ 1459 static struct pcpu_alloc_info * __init pcpu_build_alloc_info( 1460 size_t reserved_size, size_t dyn_size, 1461 size_t atom_size, 1462 pcpu_fc_cpu_distance_fn_t cpu_distance_fn) 1463 { 1464 static int group_map[NR_CPUS] __initdata; 1465 static int group_cnt[NR_CPUS] __initdata; 1466 const size_t static_size = __per_cpu_end - __per_cpu_start; 1467 int nr_groups = 1, nr_units = 0; 1468 size_t size_sum, min_unit_size, alloc_size; 1469 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ 1470 int last_allocs, group, unit; 1471 unsigned int cpu, tcpu; 1472 struct pcpu_alloc_info *ai; 1473 unsigned int *cpu_map; 1474 1475 /* this function may be called multiple times */ 1476 memset(group_map, 0, sizeof(group_map)); 1477 memset(group_cnt, 0, sizeof(group_cnt)); 1478 1479 /* calculate size_sum and ensure dyn_size is enough for early alloc */ 1480 size_sum = PFN_ALIGN(static_size + reserved_size + 1481 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); 1482 dyn_size = size_sum - static_size - reserved_size; 1483 1484 /* 1485 * Determine min_unit_size, alloc_size and max_upa such that 1486 * alloc_size is multiple of atom_size and is the smallest 1487 * which can accommodate 4k aligned segments which are equal to 1488 * or larger than min_unit_size. 1489 */ 1490 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); 1491 1492 alloc_size = roundup(min_unit_size, atom_size); 1493 upa = alloc_size / min_unit_size; 1494 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1495 upa--; 1496 max_upa = upa; 1497 1498 /* group cpus according to their proximity */ 1499 for_each_possible_cpu(cpu) { 1500 group = 0; 1501 next_group: 1502 for_each_possible_cpu(tcpu) { 1503 if (cpu == tcpu) 1504 break; 1505 if (group_map[tcpu] == group && cpu_distance_fn && 1506 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || 1507 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { 1508 group++; 1509 nr_groups = max(nr_groups, group + 1); 1510 goto next_group; 1511 } 1512 } 1513 group_map[cpu] = group; 1514 group_cnt[group]++; 1515 } 1516 1517 /* 1518 * Expand unit size until address space usage goes over 75% 1519 * and then as much as possible without using more address 1520 * space. 1521 */ 1522 last_allocs = INT_MAX; 1523 for (upa = max_upa; upa; upa--) { 1524 int allocs = 0, wasted = 0; 1525 1526 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) 1527 continue; 1528 1529 for (group = 0; group < nr_groups; group++) { 1530 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); 1531 allocs += this_allocs; 1532 wasted += this_allocs * upa - group_cnt[group]; 1533 } 1534 1535 /* 1536 * Don't accept if wastage is over 1/3. The 1537 * greater-than comparison ensures upa==1 always 1538 * passes the following check. 1539 */ 1540 if (wasted > num_possible_cpus() / 3) 1541 continue; 1542 1543 /* and then don't consume more memory */ 1544 if (allocs > last_allocs) 1545 break; 1546 last_allocs = allocs; 1547 best_upa = upa; 1548 } 1549 upa = best_upa; 1550 1551 /* allocate and fill alloc_info */ 1552 for (group = 0; group < nr_groups; group++) 1553 nr_units += roundup(group_cnt[group], upa); 1554 1555 ai = pcpu_alloc_alloc_info(nr_groups, nr_units); 1556 if (!ai) 1557 return ERR_PTR(-ENOMEM); 1558 cpu_map = ai->groups[0].cpu_map; 1559 1560 for (group = 0; group < nr_groups; group++) { 1561 ai->groups[group].cpu_map = cpu_map; 1562 cpu_map += roundup(group_cnt[group], upa); 1563 } 1564 1565 ai->static_size = static_size; 1566 ai->reserved_size = reserved_size; 1567 ai->dyn_size = dyn_size; 1568 ai->unit_size = alloc_size / upa; 1569 ai->atom_size = atom_size; 1570 ai->alloc_size = alloc_size; 1571 1572 for (group = 0, unit = 0; group_cnt[group]; group++) { 1573 struct pcpu_group_info *gi = &ai->groups[group]; 1574 1575 /* 1576 * Initialize base_offset as if all groups are located 1577 * back-to-back. The caller should update this to 1578 * reflect actual allocation. 1579 */ 1580 gi->base_offset = unit * ai->unit_size; 1581 1582 for_each_possible_cpu(cpu) 1583 if (group_map[cpu] == group) 1584 gi->cpu_map[gi->nr_units++] = cpu; 1585 gi->nr_units = roundup(gi->nr_units, upa); 1586 unit += gi->nr_units; 1587 } 1588 BUG_ON(unit != nr_units); 1589 1590 return ai; 1591 } 1592 #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ 1593 1594 #if defined(BUILD_EMBED_FIRST_CHUNK) 1595 /** 1596 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem 1597 * @reserved_size: the size of reserved percpu area in bytes 1598 * @dyn_size: minimum free size for dynamic allocation in bytes 1599 * @atom_size: allocation atom size 1600 * @cpu_distance_fn: callback to determine distance between cpus, optional 1601 * @alloc_fn: function to allocate percpu page 1602 * @free_fn: function to free percpu page 1603 * 1604 * This is a helper to ease setting up embedded first percpu chunk and 1605 * can be called where pcpu_setup_first_chunk() is expected. 1606 * 1607 * If this function is used to setup the first chunk, it is allocated 1608 * by calling @alloc_fn and used as-is without being mapped into 1609 * vmalloc area. Allocations are always whole multiples of @atom_size 1610 * aligned to @atom_size. 1611 * 1612 * This enables the first chunk to piggy back on the linear physical 1613 * mapping which often uses larger page size. Please note that this 1614 * can result in very sparse cpu->unit mapping on NUMA machines thus 1615 * requiring large vmalloc address space. Don't use this allocator if 1616 * vmalloc space is not orders of magnitude larger than distances 1617 * between node memory addresses (ie. 32bit NUMA machines). 1618 * 1619 * @dyn_size specifies the minimum dynamic area size. 1620 * 1621 * If the needed size is smaller than the minimum or specified unit 1622 * size, the leftover is returned using @free_fn. 1623 * 1624 * RETURNS: 1625 * 0 on success, -errno on failure. 1626 */ 1627 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, 1628 size_t atom_size, 1629 pcpu_fc_cpu_distance_fn_t cpu_distance_fn, 1630 pcpu_fc_alloc_fn_t alloc_fn, 1631 pcpu_fc_free_fn_t free_fn) 1632 { 1633 void *base = (void *)ULONG_MAX; 1634 void **areas = NULL; 1635 struct pcpu_alloc_info *ai; 1636 size_t size_sum, areas_size, max_distance; 1637 int group, i, rc; 1638 1639 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, 1640 cpu_distance_fn); 1641 if (IS_ERR(ai)) 1642 return PTR_ERR(ai); 1643 1644 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; 1645 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); 1646 1647 areas = memblock_virt_alloc_nopanic(areas_size, 0); 1648 if (!areas) { 1649 rc = -ENOMEM; 1650 goto out_free; 1651 } 1652 1653 /* allocate, copy and determine base address */ 1654 for (group = 0; group < ai->nr_groups; group++) { 1655 struct pcpu_group_info *gi = &ai->groups[group]; 1656 unsigned int cpu = NR_CPUS; 1657 void *ptr; 1658 1659 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++) 1660 cpu = gi->cpu_map[i]; 1661 BUG_ON(cpu == NR_CPUS); 1662 1663 /* allocate space for the whole group */ 1664 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size); 1665 if (!ptr) { 1666 rc = -ENOMEM; 1667 goto out_free_areas; 1668 } 1669 /* kmemleak tracks the percpu allocations separately */ 1670 kmemleak_free(ptr); 1671 areas[group] = ptr; 1672 1673 base = min(ptr, base); 1674 } 1675 1676 /* 1677 * Copy data and free unused parts. This should happen after all 1678 * allocations are complete; otherwise, we may end up with 1679 * overlapping groups. 1680 */ 1681 for (group = 0; group < ai->nr_groups; group++) { 1682 struct pcpu_group_info *gi = &ai->groups[group]; 1683 void *ptr = areas[group]; 1684 1685 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { 1686 if (gi->cpu_map[i] == NR_CPUS) { 1687 /* unused unit, free whole */ 1688 free_fn(ptr, ai->unit_size); 1689 continue; 1690 } 1691 /* copy and return the unused part */ 1692 memcpy(ptr, __per_cpu_load, ai->static_size); 1693 free_fn(ptr + size_sum, ai->unit_size - size_sum); 1694 } 1695 } 1696 1697 /* base address is now known, determine group base offsets */ 1698 max_distance = 0; 1699 for (group = 0; group < ai->nr_groups; group++) { 1700 ai->groups[group].base_offset = areas[group] - base; 1701 max_distance = max_t(size_t, max_distance, 1702 ai->groups[group].base_offset); 1703 } 1704 max_distance += ai->unit_size; 1705 1706 /* warn if maximum distance is further than 75% of vmalloc space */ 1707 if (max_distance > VMALLOC_TOTAL * 3 / 4) { 1708 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc " 1709 "space 0x%lx\n", max_distance, 1710 VMALLOC_TOTAL); 1711 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK 1712 /* and fail if we have fallback */ 1713 rc = -EINVAL; 1714 goto out_free; 1715 #endif 1716 } 1717 1718 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n", 1719 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size, 1720 ai->dyn_size, ai->unit_size); 1721 1722 rc = pcpu_setup_first_chunk(ai, base); 1723 goto out_free; 1724 1725 out_free_areas: 1726 for (group = 0; group < ai->nr_groups; group++) 1727 if (areas[group]) 1728 free_fn(areas[group], 1729 ai->groups[group].nr_units * ai->unit_size); 1730 out_free: 1731 pcpu_free_alloc_info(ai); 1732 if (areas) 1733 memblock_free_early(__pa(areas), areas_size); 1734 return rc; 1735 } 1736 #endif /* BUILD_EMBED_FIRST_CHUNK */ 1737 1738 #ifdef BUILD_PAGE_FIRST_CHUNK 1739 /** 1740 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages 1741 * @reserved_size: the size of reserved percpu area in bytes 1742 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE 1743 * @free_fn: function to free percpu page, always called with PAGE_SIZE 1744 * @populate_pte_fn: function to populate pte 1745 * 1746 * This is a helper to ease setting up page-remapped first percpu 1747 * chunk and can be called where pcpu_setup_first_chunk() is expected. 1748 * 1749 * This is the basic allocator. Static percpu area is allocated 1750 * page-by-page into vmalloc area. 1751 * 1752 * RETURNS: 1753 * 0 on success, -errno on failure. 1754 */ 1755 int __init pcpu_page_first_chunk(size_t reserved_size, 1756 pcpu_fc_alloc_fn_t alloc_fn, 1757 pcpu_fc_free_fn_t free_fn, 1758 pcpu_fc_populate_pte_fn_t populate_pte_fn) 1759 { 1760 static struct vm_struct vm; 1761 struct pcpu_alloc_info *ai; 1762 char psize_str[16]; 1763 int unit_pages; 1764 size_t pages_size; 1765 struct page **pages; 1766 int unit, i, j, rc; 1767 1768 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); 1769 1770 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL); 1771 if (IS_ERR(ai)) 1772 return PTR_ERR(ai); 1773 BUG_ON(ai->nr_groups != 1); 1774 BUG_ON(ai->groups[0].nr_units != num_possible_cpus()); 1775 1776 unit_pages = ai->unit_size >> PAGE_SHIFT; 1777 1778 /* unaligned allocations can't be freed, round up to page size */ 1779 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * 1780 sizeof(pages[0])); 1781 pages = memblock_virt_alloc(pages_size, 0); 1782 1783 /* allocate pages */ 1784 j = 0; 1785 for (unit = 0; unit < num_possible_cpus(); unit++) 1786 for (i = 0; i < unit_pages; i++) { 1787 unsigned int cpu = ai->groups[0].cpu_map[unit]; 1788 void *ptr; 1789 1790 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE); 1791 if (!ptr) { 1792 pr_warning("PERCPU: failed to allocate %s page " 1793 "for cpu%u\n", psize_str, cpu); 1794 goto enomem; 1795 } 1796 /* kmemleak tracks the percpu allocations separately */ 1797 kmemleak_free(ptr); 1798 pages[j++] = virt_to_page(ptr); 1799 } 1800 1801 /* allocate vm area, map the pages and copy static data */ 1802 vm.flags = VM_ALLOC; 1803 vm.size = num_possible_cpus() * ai->unit_size; 1804 vm_area_register_early(&vm, PAGE_SIZE); 1805 1806 for (unit = 0; unit < num_possible_cpus(); unit++) { 1807 unsigned long unit_addr = 1808 (unsigned long)vm.addr + unit * ai->unit_size; 1809 1810 for (i = 0; i < unit_pages; i++) 1811 populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); 1812 1813 /* pte already populated, the following shouldn't fail */ 1814 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages], 1815 unit_pages); 1816 if (rc < 0) 1817 panic("failed to map percpu area, err=%d\n", rc); 1818 1819 /* 1820 * FIXME: Archs with virtual cache should flush local 1821 * cache for the linear mapping here - something 1822 * equivalent to flush_cache_vmap() on the local cpu. 1823 * flush_cache_vmap() can't be used as most supporting 1824 * data structures are not set up yet. 1825 */ 1826 1827 /* copy static data */ 1828 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size); 1829 } 1830 1831 /* we're ready, commit */ 1832 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n", 1833 unit_pages, psize_str, vm.addr, ai->static_size, 1834 ai->reserved_size, ai->dyn_size); 1835 1836 rc = pcpu_setup_first_chunk(ai, vm.addr); 1837 goto out_free_ar; 1838 1839 enomem: 1840 while (--j >= 0) 1841 free_fn(page_address(pages[j]), PAGE_SIZE); 1842 rc = -ENOMEM; 1843 out_free_ar: 1844 memblock_free_early(__pa(pages), pages_size); 1845 pcpu_free_alloc_info(ai); 1846 return rc; 1847 } 1848 #endif /* BUILD_PAGE_FIRST_CHUNK */ 1849 1850 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 1851 /* 1852 * Generic SMP percpu area setup. 1853 * 1854 * The embedding helper is used because its behavior closely resembles 1855 * the original non-dynamic generic percpu area setup. This is 1856 * important because many archs have addressing restrictions and might 1857 * fail if the percpu area is located far away from the previous 1858 * location. As an added bonus, in non-NUMA cases, embedding is 1859 * generally a good idea TLB-wise because percpu area can piggy back 1860 * on the physical linear memory mapping which uses large page 1861 * mappings on applicable archs. 1862 */ 1863 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; 1864 EXPORT_SYMBOL(__per_cpu_offset); 1865 1866 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, 1867 size_t align) 1868 { 1869 return memblock_virt_alloc_from_nopanic( 1870 size, align, __pa(MAX_DMA_ADDRESS)); 1871 } 1872 1873 static void __init pcpu_dfl_fc_free(void *ptr, size_t size) 1874 { 1875 memblock_free_early(__pa(ptr), size); 1876 } 1877 1878 void __init setup_per_cpu_areas(void) 1879 { 1880 unsigned long delta; 1881 unsigned int cpu; 1882 int rc; 1883 1884 /* 1885 * Always reserve area for module percpu variables. That's 1886 * what the legacy allocator did. 1887 */ 1888 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, 1889 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, 1890 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); 1891 if (rc < 0) 1892 panic("Failed to initialize percpu areas."); 1893 1894 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1895 for_each_possible_cpu(cpu) 1896 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 1897 } 1898 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ 1899 1900 #else /* CONFIG_SMP */ 1901 1902 /* 1903 * UP percpu area setup. 1904 * 1905 * UP always uses km-based percpu allocator with identity mapping. 1906 * Static percpu variables are indistinguishable from the usual static 1907 * variables and don't require any special preparation. 1908 */ 1909 void __init setup_per_cpu_areas(void) 1910 { 1911 const size_t unit_size = 1912 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, 1913 PERCPU_DYNAMIC_RESERVE)); 1914 struct pcpu_alloc_info *ai; 1915 void *fc; 1916 1917 ai = pcpu_alloc_alloc_info(1, 1); 1918 fc = memblock_virt_alloc_from_nopanic(unit_size, 1919 PAGE_SIZE, 1920 __pa(MAX_DMA_ADDRESS)); 1921 if (!ai || !fc) 1922 panic("Failed to allocate memory for percpu areas."); 1923 /* kmemleak tracks the percpu allocations separately */ 1924 kmemleak_free(fc); 1925 1926 ai->dyn_size = unit_size; 1927 ai->unit_size = unit_size; 1928 ai->atom_size = unit_size; 1929 ai->alloc_size = unit_size; 1930 ai->groups[0].nr_units = 1; 1931 ai->groups[0].cpu_map[0] = 0; 1932 1933 if (pcpu_setup_first_chunk(ai, fc) < 0) 1934 panic("Failed to initialize percpu areas."); 1935 } 1936 1937 #endif /* CONFIG_SMP */ 1938 1939 /* 1940 * First and reserved chunks are initialized with temporary allocation 1941 * map in initdata so that they can be used before slab is online. 1942 * This function is called after slab is brought up and replaces those 1943 * with properly allocated maps. 1944 */ 1945 void __init percpu_init_late(void) 1946 { 1947 struct pcpu_chunk *target_chunks[] = 1948 { pcpu_first_chunk, pcpu_reserved_chunk, NULL }; 1949 struct pcpu_chunk *chunk; 1950 unsigned long flags; 1951 int i; 1952 1953 for (i = 0; (chunk = target_chunks[i]); i++) { 1954 int *map; 1955 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]); 1956 1957 BUILD_BUG_ON(size > PAGE_SIZE); 1958 1959 map = pcpu_mem_zalloc(size); 1960 BUG_ON(!map); 1961 1962 spin_lock_irqsave(&pcpu_lock, flags); 1963 memcpy(map, chunk->map, size); 1964 chunk->map = map; 1965 spin_unlock_irqrestore(&pcpu_lock, flags); 1966 } 1967 } 1968