/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in vmalloc area.  Each
 * chunk consists of a boot-time determined number of units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated, ie. in the
 * vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
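
/*
 * Allocation map example (illustrative note, not part of the original
 * header): for a 4k unit from which 128 bytes were allocated at offset
 * 0 and 512 bytes at offset 640, chunk->map would contain
 *
 *	{ -128, 512, -512, 2944 }
 *
 * ie. 128 bytes allocated, 512 bytes free, 512 bytes allocated and
 * 2944 bytes free.  Offsets are implicit - each entry starts at the
 * running sum of the absolute values of the preceding entries.
 */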

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct vm_struct	**vms;		/* mapped vmalloc regions */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures, so
 * it can be safely called from atomic context.  When memory needs to
 * be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from circulation
 * as the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
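
/*
 * Slot arithmetic, worked example (illustrative note added for clarity,
 * not part of the original source): with PCPU_SLOT_BASE_SHIFT == 5, a
 * chunk with 2000 bytes of free space has fls(2000) == 11 and sits in
 * slot max(11 - 5 + 2, 1) == 8, while a request for 300 bytes starts
 * scanning at slot max(9 - 5 + 2, 1) == 6.  Chunks with little free
 * space are therefore never looked at for larger requests, and a
 * completely free chunk always sits in the last slot.
 */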

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is equal to or smaller than
 * PAGE_SIZE, kzalloc() is used; otherwise, vmalloc() is used.  The
 * returned memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * The new slot according to the changed state is determined and @chunk
 * is moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether the area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend the area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_alloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using the static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		old = chunk->map;

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * an IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a block of @head bytes is inserted before block @i, moving
 * it to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a block of @tail
 * bytes is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate a @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
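
/*
 * Worked example for pcpu_alloc_area() (illustrative, not part of the
 * original source): scanning hits a free entry of 1024 bytes at offset
 * 512 while looking for 200 bytes with 64 byte alignment.  head is 0
 * (512 is already aligned) and tail is 1024 - 200 == 824, so the entry
 * is split into 200/824, the 200 byte part is negated to mark it
 * allocated and 512 is returned.  Had the alignment produced a head of
 * 1-3 bytes, it would have been merged into the neighbouring entry
 * instead of wasting a map slot on it.
 */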

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate or
 * unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
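
/*
 * Worked example for pcpu_free_area() (illustrative, not part of the
 * original source): with a map of { -512, -200, 824 }, freeing offset
 * 512 flips the middle entry to 200; the previous entry is still
 * allocated so nothing happens there, but the following entry is free,
 * so the two are merged and the map becomes { -512, 1024 }.
 */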

/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the flush is done at once for the
 * whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			return;
		break;
	}

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
		if (rs == page_start && re == page_end)
			goto clear;
		break;
	}

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vms)
		pcpu_free_vm_areas(chunk->vms, pcpu_nr_groups);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				       pcpu_nr_groups, pcpu_atom_size,
				       GFP_KERNEL);
	if (!chunk->vms) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;
	chunk->base_addr = chunk->vms[0]->addr - pcpu_group_offsets[0];

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart pcpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = alloc_pcpu_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}
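
/*
 * Usage sketch (illustrative, not part of the original source; the
 * struct and variable names below are hypothetical):
 *
 *	struct foo_stats { unsigned long events; };
 *	struct foo_stats *stats;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (stats) {
 *		for_each_possible_cpu(cpu)
 *			per_cpu_ptr(stats, cpu)->events = 0;
 *		free_percpu(stats);
 *	}
 *
 * alloc_percpu() expands to __alloc_percpu() with the type's size and
 * alignment, which is why the wrappers below stay this thin.
 */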

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from the
 * reserved percpu area if arch has set it up; otherwise, allocation is
 * served from the same dynamic area.  Might sleep.  Might trigger
 * writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		free_pcpu_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * the percpu access macros, this function translates it into its
 * physical address.  The caller is responsible for ensuring @addr
 * stays valid until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	if ((unsigned long)addr < VMALLOC_START ||
	    (unsigned long)addr >= VMALLOC_END)
		return __pa(addr);
	else
		return page_to_phys(vmalloc_to_page(addr));
}
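
/*
 * Example (illustrative, not part of the original source; 'p' and 'cpu'
 * are hypothetical): for a dynamically allocated percpu pointer, the
 * physical address backing one CPU's copy can be obtained with
 *
 *	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 *
 * The first chunk may sit in the linear mapping rather than in vmalloc
 * space, which is why both the __pa() and vmalloc_to_page() paths
 * above are needed.
 */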

static inline size_t pcpu_calc_fc_sizes(size_t static_size,
					size_t reserved_size,
					ssize_t *dyn_sizep)
{
	size_t size_sum;

	size_sum = PFN_ALIGN(static_size + reserved_size +
			     (*dyn_sizep >= 0 ? *dyn_sizep : 0));
	if (*dyn_sizep != 0)
		*dyn_sizep = size_sum - static_size - reserved_size;

	return size_sum;
}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, ssize_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int group_cnt_max = 0, nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is a multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;
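
	/*
	 * Worked example (illustrative, not part of the original
	 * source): if size_sum rounds up to 44k (and is above
	 * PCPU_MIN_UNIT_SIZE) and atom_size is 2M, then alloc_size is
	 * 2M and the starting upa is 2M / 44k == 46.  46 doesn't divide
	 * 2M evenly, so upa is walked down until both conditions hold,
	 * ending at max_upa == 32, ie. a 64k unit size.
	 */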

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
		group_cnt_max = max(group_cnt_max, group_cnt[group]);
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 25%.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk("%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk("%s ", empty_str);
		}
	}
	printk("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from the arch percpu
 * area setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of the static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always a multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[2], dmap[2];
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;
	pcpu_first_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			if (pcpu_first_unit_cpu == NR_CPUS)
				pcpu_first_unit_cpu = cpu;
		}
	}
	pcpu_last_unit_cpu = cpu;
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_INFO, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk. If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk. If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO] = "auto",
	[PCPU_FC_EMBED] = "embed",
	[PCPU_FC_PAGE] = "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);

#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
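 *
 * (For a concrete caller, see the generic setup_per_cpu_areas()
 * implementation near the end of this file, which invokes this helper
 * with bootmem-backed @alloc_fn/@free_fn callbacks.)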
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area. Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size. Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space. Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * When @dyn_size is positive, dynamic area might be larger than
 * specified to fill page alignment. When @dyn_size is auto,
 * @dyn_size is just big enough to fill page alignment after static
 * and reserved areas.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		areas[group] = ptr;

		base = min(ptr, base);

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n",
			   max_distance, VMALLOC_END - VMALLOC_START);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have a fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
	  !CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator. Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup. This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location. As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because percpu area can piggy back
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
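/*
 * Illustrative sketch only, not used by the allocator: once
 * setup_per_cpu_areas() above has published __per_cpu_offset[], a
 * given CPU's copy of a percpu object is reached by adding that CPU's
 * offset to the percpu pointer. This is roughly what the generic
 * per_cpu_ptr()/SHIFT_PERCPU_PTR() machinery boils down to; the
 * helper name below is hypothetical and the block is not compiled.
 */
#if 0
static inline void *pcpu_example_cpu_addr(void *pcpu_ptr, unsigned int cpu)
{
	/* cpu's copy = percpu pointer + that cpu's published offset */
	return (void *)((unsigned long)pcpu_ptr + __per_cpu_offset[cpu]);
}
#endif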