// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *t, *llnode;

	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
		__vunmap((void *)llnode, 1);
}

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
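
		/*
		 * On architectures that support huge vmap mappings, a single
		 * entry at this level may cover more than PAGE_SIZE (e.g.
		 * contiguous PTEs); arch_vmap_pte_range_map_size() below
		 * reports how much, and returns PAGE_SIZE otherwise.
		 */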
#ifdef CONFIG_HUGETLB_PAGE
		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
		if (size != PAGE_SIZE) {
			pte_t entry = pfn_pte(pfn, prot);

			entry = arch_make_huge_pte(entry, ilog2(size), 0);
			set_huge_pte_at(&init_mm, addr, pte, entry);
			pfn += PFN_DOWN(size);
			continue;
		}
#endif
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte += PFN_DOWN(size), addr += size, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PMD_SHIFT)
		return 0;

	if (!arch_vmap_pmd_supported(prot))
		return 0;

	if ((end - addr) != PMD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PMD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
		return 0;

	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
		return 0;

	return pmd_set_huge(pmd, phys_addr, prot);
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PMD_MODIFIED;
			continue;
		}

		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
			return -ENOMEM;
	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < PUD_SHIFT)
		return 0;

	if (!arch_vmap_pud_supported(prot))
		return 0;

	if ((end - addr) != PUD_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, PUD_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
		return 0;

	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
		return 0;

	return pud_set_huge(pud, phys_addr, prot);
}

static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_PUD_MODIFIED;
			continue;
		}

		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	if (max_page_shift < P4D_SHIFT)
		return 0;

	if (!arch_vmap_p4d_supported(prot))
		return 0;

	if ((end - addr) != P4D_SIZE)
		return 0;

	if (!IS_ALIGNED(addr, P4D_SIZE))
		return 0;

	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
		return 0;

	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
		return 0;

	return p4d_set_huge(p4d, phys_addr, prot);
}

static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
					max_page_shift)) {
			*mask |= PGTBL_P4D_MODIFIED;
			continue;
		}

		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
					max_page_shift, mask))
			return -ENOMEM;
	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
	return 0;
}

static int vmap_range_noflush(unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;
	pgtbl_mod_mask mask = 0;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
					max_page_shift, &mask);
		if (err)
			break;
	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return err;
}

int ioremap_page_range(unsigned long addr, unsigned long end,
		phys_addr_t phys_addr, pgprot_t prot)
{
	int err;

	err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
				 ioremap_max_page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;
	int cleared;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);

		cleared = pmd_clear_huge(pmd);
		if (cleared || pmd_bad(*pmd))
			*mask |= PGTBL_PMD_MODIFIED;

		if (cleared)
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next, mask);

		cond_resched();
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;
	int cleared;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);

		cleared = pud_clear_huge(pud);
		if (cleared || pud_bad(*pud))
			*mask |= PGTBL_PUD_MODIFIED;

		if (cleared)
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next, mask);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			     pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
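
		/*
		 * As of this writing no architecture sets up huge P4D
		 * mappings for vmap, so p4d_clear_huge() is effectively a
		 * no-op; it is called here to keep the teardown symmetric
		 * with the PUD/PMD levels.
		 */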
		p4d_clear_huge(p4d);
		if (p4d_bad(*p4d))
			*mask |= PGTBL_P4D_MODIFIED;

		if (p4d_none_or_clear_bad(p4d))
			continue;
		vunmap_pud_range(p4d, addr, next, mask);
	} while (p4d++, addr = next, addr != end);
}

/*
 * vunmap_range_noflush is similar to vunmap_range, but does not
 * flush caches or TLBs.
 *
 * The caller is responsible for calling flush_cache_vunmap() before calling
 * this function, and flush_tlb_kernel_range after it has returned
 * successfully (and before the addresses are expected to cause a page fault
 * or be re-mapped for something else, if TLB flushes are being delayed or
 * coalesced).
 *
 * This is an internal function only. Do not use outside mm/.
 */
void vunmap_range_noflush(unsigned long start, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;
	unsigned long addr = start;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_p4d_range(pgd, addr, next, &mask);
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);
}

/**
 * vunmap_range - unmap kernel virtual addresses
 * @addr: start of the VM area to unmap
 * @end: end of the VM area to unmap (non-inclusive)
 *
 * Clears any present PTEs in the virtual address range, flushes TLBs and
 * caches. Any subsequent access to the address before it has been re-mapped
 * is a kernel bug.
 */
void vunmap_range(unsigned long addr, unsigned long end)
{
	flush_cache_vunmap(addr, end);
	vunmap_range_noflush(addr, end);
	flush_tlb_kernel_range(addr, end);
}
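
/*
 * Example (illustrative only): a PAGE_SIZE-granular mapping of @nr pages at
 * a previously reserved kernel virtual range is set up and torn down as
 *
 *	vmap_pages_range(addr, addr + nr * PAGE_SIZE, PAGE_KERNEL,
 *			 pages, PAGE_SHIFT);
 *	...access the mapping...
 *	vunmap_range(addr, addr + nr * PAGE_SIZE);
 *
 * Most callers do not use these helpers directly but go through
 * vmap()/vmalloc() and vfree(), which combine them with vmap_area
 * management further below.
 */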

static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		if (WARN_ON(!pfn_valid(page_to_pfn(page))))
			return -EINVAL;

		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	*mask |= PGTBL_PTE_MODIFIED;
	return 0;
}

static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
		pgtbl_mod_mask *mask)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages)
{
	unsigned long start = addr;
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;
	pgtbl_mod_mask mask = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_bad(*pgd))
			mask |= PGTBL_PGD_MODIFIED;
		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
		arch_sync_kernel_mappings(start, end);

	return 0;
}
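
/*
 * Worked example (illustrative): with 4K base pages and
 * page_shift == PMD_SHIFT (21), vmap_pages_range_noflush() below consumes
 * pages[] in groups of 512 entries. Each group should describe a physically
 * contiguous, 2MB-aligned chunk (see the vmap_pages_range() kerneldoc), and
 * only pages[i], the first page of a group, is looked at to derive the
 * physical address of the whole chunk.
 */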

/*
 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 * flush caches.
 *
 * The caller is responsible for calling flush_cache_vmap() after this
 * function returns successfully and before the addresses are accessed.
 *
 * This is an internal function only. Do not use outside mm/.
 */
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;

	WARN_ON(page_shift < PAGE_SHIFT);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);

	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					page_to_phys(pages[i]), prot,
					page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}

	return 0;
}

/**
 * vmap_pages_range - map pages to a kernel virtual address
 * @addr: start of the VM area to map
 * @end: end of the VM area to map (non-inclusive)
 * @prot: page protection flags to use
 * @pages: pages to map (always PAGE_SIZE pages)
 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 * be aligned and contiguous up to at least this shift.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int vmap_pages_range(unsigned long addr, unsigned long end,
		pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	int err;

	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
	flush_cache_vmap(addr, end);
	return err;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)kasan_reset_tag(x);
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 * return the tail page that corresponds to the base page address, which
 * matches small vmap mappings.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (pgd_none(*pgd))
		return NULL;
	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
		return NULL; /* XXX: no allowance for huge pgd */
	if (WARN_ON_ONCE(pgd_bad(*pgd)))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	if (p4d_leaf(*p4d))
		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(p4d_bad(*p4d)))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (pud_leaf(*pud))
		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pud_bad(*pud)))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_leaf(*pmd))
		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (WARN_ON_ONCE(pmd_bad(*pmd)))
		return NULL;

	ptep = pte_offset_map(pmd, addr);
	pte = *ptep;
	if (pte_present(pte))
		page = pte_page(pte);
	pte_unmap(ptep);

	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0


static DEFINE_SPINLOCK(vmap_area_lock);
static DEFINE_SPINLOCK(free_vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;
static bool vmap_initialized __read_mostly;

static struct rb_root purge_vmap_area_root = RB_ROOT;
static LIST_HEAD(purge_vmap_area_list);
static DEFINE_SPINLOCK(purge_vmap_area_lock);

/*
 * This kmem_cache is used for vmap_area objects. Instead of
 * allocating from slab we reuse an object from this cache to
 * make things faster. Especially in "no edge" splitting of
 * a free block.
 */
static struct kmem_cache *vmap_area_cachep;

/*
 * This linked list is used together with free_vmap_area_root.
 * It gives O(1) access to prev/next to perform fast coalescing.
 */
static LIST_HEAD(free_vmap_area_list);

/*
 * This augmented red-black tree represents the free vmap space.
 * All vmap_area objects in this tree are sorted by va->va_start
 * address. It is used for allocation and merging when a vmap
 * object is released.
 *
 * Each vmap_area node contains the maximum available free block
 * of its sub-tree, right or left. Therefore it is possible to
 * find the lowest-address match for a free area.
 */
static struct rb_root free_vmap_area_root = RB_ROOT;

/*
 * Preload a CPU with one object for the "no edge" split case. The
 * aim is to get rid of allocations from the atomic context, thus
 * to use more permissive allocation masks.
 */
static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
	return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
	struct vmap_area *va;

	va = rb_entry_safe(node, struct vmap_area, rb_node);
	return va ? va->subtree_max_size : 0;
}

RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)

static void purge_vmap_area_lazy(void);
static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
static void drain_vmap_area_work(struct work_struct *work);
static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work);

static atomic_long_t nr_vmalloc_pages;

unsigned long vmalloc_nr_pages(void)
{
	return atomic_long_read(&nr_vmalloc_pages);
}

/* Look up the first VA which satisfies addr < va_end, NULL if none. */
static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr)
{
	struct vmap_area *va = NULL;
	struct rb_node *n = vmap_area_root.rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *tmp;

		tmp = rb_entry(n, struct vmap_area, rb_node);
		if (tmp->va_end > addr) {
			va = tmp;
			if (tmp->va_start <= addr)
				break;

			n = n->rb_left;
		} else
			n = n->rb_right;
	}

	return va;
}

static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
	struct rb_node *n = root->rb_node;

	addr = (unsigned long)kasan_reset_tag((void *)addr);

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

/*
 * This function returns the address of the parent node and of its
 * left or right link for further processing.
 *
 * Otherwise NULL is returned. In that case all further steps
 * regarding the insertion of a conflicting/overlapping range have
 * to be declined and are actually considered a bug.
 */
static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
	struct rb_root *root, struct rb_node *from,
	struct rb_node **parent)
{
	struct vmap_area *tmp_va;
	struct rb_node **link;

	if (root) {
		link = &root->rb_node;
		if (unlikely(!*link)) {
			*parent = NULL;
			return link;
		}
	} else {
		link = &from;
	}

	/*
	 * Go to the bottom of the tree. When we hit the last point
	 * we end up with the parent rb_node and the correct direction;
	 * we name it "link", and that is where the new va->rb_node
	 * will be attached to.
	 */
	do {
		tmp_va = rb_entry(*link, struct vmap_area, rb_node);

		/*
		 * During the traversal we also do some sanity checks.
		 * Trigger the WARN() if there are side (left/right)
		 * or full overlaps.
		 */
		if (va->va_end <= tmp_va->va_start)
			link = &(*link)->rb_left;
		else if (va->va_start >= tmp_va->va_end)
			link = &(*link)->rb_right;
		else {
			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);

			return NULL;
		}
	} while (*link);

	*parent = &tmp_va->rb_node;
	return link;
}

static __always_inline struct list_head *
get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
{
	struct list_head *list;

	if (unlikely(!parent))
		/*
		 * The red-black tree where we try to find VA neighbors
		 * before merging or inserting is empty, i.e. there is
		 * no free vmap space. Normally it does not happen but
		 * we handle this case anyway.
		 */
		return NULL;

	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
	return (&parent->rb_right == link ? list->next : list);
}

static __always_inline void
__link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head, bool augment)
{
	/*
	 * VA is still not in the list, but we can
	 * identify its future previous list_head node.
	 */
	if (likely(parent)) {
		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
		if (&parent->rb_right != link)
			head = head->prev;
	}

	/* Insert to the rb-tree */
	rb_link_node(&va->rb_node, parent, link);
	if (augment) {
		/*
		 * Some explanation here. Just perform simple insertion
		 * to the tree. We do not set va->subtree_max_size to
		 * its current size before calling rb_insert_augmented().
		 * It is because we populate the tree from the bottom
		 * to parent levels when the node _is_ in the tree.
		 *
		 * Therefore we set subtree_max_size to zero after insertion,
		 * to let augment_tree_propagate_from() put everything in
		 * the correct order later on.
		 */
		rb_insert_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
		va->subtree_max_size = 0;
	} else {
		rb_insert_color(&va->rb_node, root);
	}

	/* Address-sort this list */
	list_add(&va->list, head);
}

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, false);
}

static __always_inline void
link_va_augment(struct vmap_area *va, struct rb_root *root,
	struct rb_node *parent, struct rb_node **link,
	struct list_head *head)
{
	__link_va(va, root, parent, link, head, true);
}

static __always_inline void
__unlink_va(struct vmap_area *va, struct rb_root *root, bool augment)
{
	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
		return;

	if (augment)
		rb_erase_augmented(&va->rb_node,
			root, &free_vmap_area_rb_augment_cb);
	else
		rb_erase(&va->rb_node, root);

	list_del_init(&va->list);
	RB_CLEAR_NODE(&va->rb_node);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, false);
}

static __always_inline void
unlink_va_augment(struct vmap_area *va, struct rb_root *root)
{
	__unlink_va(va, root, true);
}
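
/*
 * The *_augment variants above are used for the free_vmap_area_root tree,
 * which maintains the subtree_max_size metadata consumed by
 * find_vmap_lowest_match(). The plain variants are used for trees that do
 * not carry that metadata, such as the busy vmap_area_root and the purge
 * tree.
 */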

#if DEBUG_AUGMENT_PROPAGATE_CHECK
/*
 * Gets called when a node is removed and the tree is rotated.
 */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
	return max3(va_size(va),
		get_subtree_max_size(va->rb_node.rb_left),
		get_subtree_max_size(va->rb_node.rb_right));
}

static void
augment_tree_propagate_check(void)
{
	struct vmap_area *va;
	unsigned long computed_size;

	list_for_each_entry(va, &free_vmap_area_list, list) {
		computed_size = compute_subtree_max_size(va);
		if (computed_size != va->subtree_max_size)
			pr_emerg("tree is corrupted: %lu, %lu\n",
				va_size(va), va->subtree_max_size);
	}
}
#endif

/*
 * This function populates subtree_max_size from bottom to upper
 * levels starting from the VA point. The propagation must be done
 * when the VA size is modified by changing its va_start/va_end, or
 * when a new VA is inserted into the tree.
 *
 * It means that augment_tree_propagate_from() must be called:
 * - After VA has been inserted to the tree(free path);
 * - After VA has been shrunk(allocation path);
 * - After VA has been increased(merging path).
 *
 * Please note that, it does not mean that upper parent nodes
 * and their subtree_max_size are recalculated all the time up
 * to the root node.
 *
 *      4--8
 *        /\
 *       /  \
 *      /    \
 *    2--2  8--8
 *
 * For example if we modify the node 4, shrinking it to 2, then
 * no modification is required. If we shrink the node 2 to 1,
 * only its subtree_max_size is updated and set to 1. If we shrink
 * the node 8 to 6, then its subtree_max_size is set to 6 and the
 * parent node becomes 4--6.
 */
static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
	/*
	 * Populate the tree from bottom towards the root until
	 * the calculated maximum available size of checked node
	 * is equal to its current one.
	 */
	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);

#if DEBUG_AUGMENT_PROPAGATE_CHECK
	augment_tree_propagate_check();
#endif
}

static void
insert_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	link = find_va_links(va, root, NULL, &parent);
	if (link)
		link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
	struct rb_node *from, struct rb_root *root,
	struct list_head *head)
{
	struct rb_node **link;
	struct rb_node *parent;

	if (from)
		link = find_va_links(va, NULL, from, &parent);
	else
		link = find_va_links(va, root, NULL, &parent);

	if (link) {
		link_va_augment(va, root, parent, link, head);
		augment_tree_propagate_from(va);
	}
}
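
/*
 * Worked example (illustrative): if the free list currently contains
 * [4MB..8MB) and [12MB..16MB) and the range [8MB..12MB) is released, the
 * helper below first extends the "next" neighbour backwards and then the
 * "previous" neighbour forwards, leaving one free vmap_area [4MB..16MB)
 * and freeing the two vmap_area objects that became redundant.
 */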

/*
 * Merge a de-allocated chunk of VA memory with the previous
 * and next free blocks. If coalescing is not done, a new
 * free area is inserted. If VA has been merged, it is
 * freed.
 *
 * Please note, it can return NULL in case of overlapping
 * ranges, followed by a WARN() report. Although that is
 * buggy behaviour, the system can stay alive and keep
 * going.
 */
static __always_inline struct vmap_area *
__merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head, bool augment)
{
	struct vmap_area *sibling;
	struct list_head *next;
	struct rb_node **link;
	struct rb_node *parent;
	bool merged = false;

	/*
	 * Find a place in the tree where VA potentially will be
	 * inserted, unless it is merged with its sibling/siblings.
	 */
	link = find_va_links(va, root, NULL, &parent);
	if (!link)
		return NULL;

	/*
	 * Get next node of VA to check if merging can be done.
	 */
	next = get_va_next_sibling(parent, link);
	if (unlikely(next == NULL))
		goto insert;

	/*
	 * start            end
	 * |                |
	 * |<------VA------>|<-----Next----->|
	 *                  |                |
	 *                  start            end
	 */
	if (next != head) {
		sibling = list_entry(next, struct vmap_area, list);
		if (sibling->va_start == va->va_end) {
			sibling->va_start = va->va_start;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

	/*
	 * start            end
	 * |                |
	 * |<-----Prev----->|<------VA------>|
	 *                  |                |
	 *                  start            end
	 */
	if (next->prev != head) {
		sibling = list_entry(next->prev, struct vmap_area, list);
		if (sibling->va_end == va->va_start) {
			/*
			 * If both neighbors are coalesced, it is important
			 * to unlink the "next" node first, followed by merging
			 * with "previous" one. Otherwise the tree might not be
			 * fully populated if a sibling's augmented value is
			 * "normalized" because of rotation operations.
			 */
			if (merged)
				__unlink_va(va, root, augment);

			sibling->va_end = va->va_end;

			/* Free vmap_area object. */
			kmem_cache_free(vmap_area_cachep, va);

			/* Point to the new merged area. */
			va = sibling;
			merged = true;
		}
	}

insert:
	if (!merged)
		__link_va(va, root, parent, link, head, augment);

	return va;
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	return __merge_or_add_vmap_area(va, root, head, false);
}

static __always_inline struct vmap_area *
merge_or_add_vmap_area_augment(struct vmap_area *va,
	struct rb_root *root, struct list_head *head)
{
	va = __merge_or_add_vmap_area(va, root, head, true);
	if (va)
		augment_tree_propagate_from(va);

	return va;
}

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	unsigned long nva_start_addr;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Can be overflowed due to big size or alignment. */
	if (nva_start_addr + size < nva_start_addr ||
			nva_start_addr < vstart)
		return false;

	return (nva_start_addr + size <= va->va_end);
}

/*
 * Find the first free block (lowest start address) in the tree
 * that can satisfy the request corresponding to the passed-in
 * parameters. Please note, with an alignment bigger than PAGE_SIZE,
 * the search length is adjusted to account for the worst case
 * alignment overhead.
 */
static __always_inline struct vmap_area *
find_vmap_lowest_match(struct rb_root *root, unsigned long size,
	unsigned long align, unsigned long vstart, bool adjust_search_size)
{
	struct vmap_area *va;
	struct rb_node *node;
	unsigned long length;

	/* Start from the root. */
	node = root->rb_node;

	/* Adjust the search size for alignment overhead. */
	length = adjust_search_size ? size + align - 1 : size;

	while (node) {
		va = rb_entry(node, struct vmap_area, rb_node);

		if (get_subtree_max_size(node->rb_left) >= length &&
				vstart < va->va_start) {
			node = node->rb_left;
		} else {
			if (is_within_this_va(va, size, align, vstart))
				return va;

			/*
			 * Does not make sense to go deeper towards the right
			 * sub-tree if it does not have a free block that is
			 * equal to or bigger than the requested search length.
			 */
			if (get_subtree_max_size(node->rb_right) >= length) {
				node = node->rb_right;
				continue;
			}

			/*
			 * OK. We roll back and find the first right sub-tree
			 * that will satisfy the search criteria. It can happen
			 * due to a "vstart" restriction or an alignment overhead
			 * that is bigger than PAGE_SIZE.
			 */
			while ((node = rb_parent(node))) {
				va = rb_entry(node, struct vmap_area, rb_node);
				if (is_within_this_va(va, size, align, vstart))
					return va;

				if (get_subtree_max_size(node->rb_right) >= length &&
						vstart <= va->va_start) {
					/*
					 * Shift the vstart forward. Please note, we update it with
					 * the parent's start address adding "1" because we do not want
					 * to enter the same sub-tree after it has already been checked
					 * and no suitable free block was found there.
					 */
					vstart = va->va_start + 1;
					node = node->rb_right;
					break;
				}
			}
		}
	}

	return NULL;
}

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
#include <linux/random.h>

static struct vmap_area *
find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
	unsigned long align, unsigned long vstart)
{
	struct vmap_area *va;

	list_for_each_entry(va, head, list) {
		if (!is_within_this_va(va, size, align, vstart))
			continue;

		return va;
	}

	return NULL;
}

static void
find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
			     unsigned long size, unsigned long align)
{
	struct vmap_area *va_1, *va_2;
	unsigned long vstart;
	unsigned int rnd;

	get_random_bytes(&rnd, sizeof(rnd));
	vstart = VMALLOC_START + rnd;

	va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
	va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);

	if (va_1 != va_2)
		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
			va_1, va_2, vstart);
}
#endif

enum fit_type {
	NOTHING_FIT = 0,
	FL_FIT_TYPE = 1,	/* full fit */
	LE_FIT_TYPE = 2,	/* left edge fit */
	RE_FIT_TYPE = 3,	/* right edge fit */
	NE_FIT_TYPE = 4		/* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
	unsigned long nva_start_addr, unsigned long size)
{
	enum fit_type type;

	/* Check if it is within VA. */
	if (nva_start_addr < va->va_start ||
			nva_start_addr + size > va->va_end)
		return NOTHING_FIT;

	/* Now classify. */
	if (va->va_start == nva_start_addr) {
		if (va->va_end == nva_start_addr + size)
			type = FL_FIT_TYPE;
		else
			type = LE_FIT_TYPE;
	} else if (va->va_end == nva_start_addr + size) {
		type = RE_FIT_TYPE;
	} else {
		type = NE_FIT_TYPE;
	}

	return type;
}

static __always_inline int
adjust_va_to_fit_type(struct rb_root *root, struct list_head *head,
		      struct vmap_area *va, unsigned long nva_start_addr,
		      unsigned long size)
{
	struct vmap_area *lva = NULL;
	enum fit_type type = classify_va_fit_type(va, nva_start_addr, size);

	if (type == FL_FIT_TYPE) {
		/*
		 * No need to split VA, it fully fits.
		 *
		 * |               |
		 * V      NVA      V
		 * |---------------|
		 */
		unlink_va_augment(va, root);
		kmem_cache_free(vmap_area_cachep, va);
	} else if (type == LE_FIT_TYPE) {
		/*
		 * Split left edge of fit VA.
		 *
		 * |       |
		 * V  NVA  V   R
		 * |-------|-------|
		 */
		va->va_start += size;
	} else if (type == RE_FIT_TYPE) {
		/*
		 * Split right edge of fit VA.
		 *
		 *         |       |
		 *     L   V  NVA  V
		 * |-------|-------|
		 */
		va->va_end = nva_start_addr;
	} else if (type == NE_FIT_TYPE) {
		/*
		 * Split no edge of fit VA.
		 *
		 *     |       |
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
		if (unlikely(!lva)) {
			/*
			 * For the percpu allocator we do not do any pre-allocation
			 * and leave it as is. The reason is that it most likely
			 * never ends up with NE_FIT_TYPE splitting. In the case of
			 * percpu allocations, offsets and sizes are aligned to
			 * a fixed alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
			 * are its main fitting cases.
			 *
			 * There are a few exceptions though: for example, the
			 * first allocation (early boot), when we have one big
			 * free space that has to be split.
			 *
			 * Also we can hit this path in case of regular "vmap"
			 * allocations, if "this" current CPU was not preloaded.
			 * See the comment in alloc_vmap_area() for why. If so,
			 * GFP_NOWAIT is used instead to get an extra object for
			 * split purposes. That is rare and most of the time
			 * does not occur.
			 *
			 * What happens if an allocation fails? Basically, an
			 * "overflow" path is triggered to purge lazily freed
			 * areas to free some memory, then the "retry" path is
			 * triggered to repeat it one more time. See more details
			 * in the alloc_vmap_area() function.
			 */
			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
			if (!lva)
				return -1;
		}

		/*
		 * Build the remainder.
		 */
		lva->va_start = va->va_start;
		lva->va_end = nva_start_addr;

		/*
		 * Shrink this VA to remaining size.
		 */
		va->va_start = nva_start_addr + size;
	} else {
		return -1;
	}

	if (type != FL_FIT_TYPE) {
		augment_tree_propagate_from(va);

		if (lva)	/* type == NE_FIT_TYPE */
			insert_vmap_area_augment(lva, &va->rb_node, root, head);
	}

	return 0;
}

/*
 * Returns the start address of the newly allocated area on success.
 * Otherwise, vend is returned to indicate failure.
 */
static __always_inline unsigned long
__alloc_vmap_area(struct rb_root *root, struct list_head *head,
	unsigned long size, unsigned long align,
	unsigned long vstart, unsigned long vend)
{
	bool adjust_search_size = true;
	unsigned long nva_start_addr;
	struct vmap_area *va;
	int ret;

	/*
	 * Do not adjust when:
	 *   a) align <= PAGE_SIZE, because it does not make any sense.
	 *      All blocks(their start addresses) are at least PAGE_SIZE
	 *      aligned anyway;
	 *   b) a short range where a requested size corresponds to exactly
	 *      specified [vstart:vend] interval and an alignment > PAGE_SIZE.
	 *      With adjusted search length an allocation would not succeed.
	 */
	if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size))
		adjust_search_size = false;

	va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size);
	if (unlikely(!va))
		return vend;

	if (va->va_start > vstart)
		nva_start_addr = ALIGN(va->va_start, align);
	else
		nva_start_addr = ALIGN(vstart, align);

	/* Check the "vend" restriction. */
	if (nva_start_addr + size > vend)
		return vend;

	/* Update the free vmap_area. */
	ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size);
	if (WARN_ON_ONCE(ret))
		return vend;

#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
	find_vmap_lowest_match_check(root, head, size, align);
#endif

	return nva_start_addr;
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	/*
	 * Remove from the busy tree/list.
	 */
	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	/*
	 * Insert/Merge it back to the free tree/list.
	 */
	spin_lock(&free_vmap_area_lock);
	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
	spin_unlock(&free_vmap_area_lock);
}

static inline void
preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
{
	struct vmap_area *va = NULL;

	/*
	 * Preload this CPU with one extra vmap_area object. It is used
	 * when fit type of free area is NE_FIT_TYPE. It guarantees that
	 * a CPU that does an allocation is preloaded.
	 *
	 * We do it in non-atomic context, thus it allows us to use more
	 * permissive allocation masks to be more stable under low memory
	 * condition and high memory pressure.
	 */
	if (!this_cpu_read(ne_fit_preload_node))
		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);

	spin_lock(lock);

	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
		kmem_cache_free(vmap_area_cachep, va);
}

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	unsigned long freed;
	unsigned long addr;
	int purged = 0;
	int ret;

	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
	BUG_ON(!is_power_of_2(align));

	if (unlikely(!vmap_initialized))
		return ERR_PTR(-EBUSY);

	might_sleep();
	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;

	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);

retry:
	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
		size, align, vstart, vend);
	spin_unlock(&free_vmap_area_lock);

	/*
	 * If an allocation fails, the "vend" address is
	 * returned. Therefore trigger the overflow path.
	 */
	if (unlikely(addr == vend))
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->vm = NULL;

	spin_lock(&vmap_area_lock);
	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
	spin_unlock(&vmap_area_lock);

	BUG_ON(!IS_ALIGNED(va->va_start, align));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	ret = kasan_populate_vmalloc(addr, size);
	if (ret) {
		free_vmap_area(va);
		return ERR_PTR(ret);
	}

	return va;

overflow:
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}

	freed = 0;
	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);

	if (freed > 0) {
		purged = 0;
		goto retry;
	}

	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
			size);

	kmem_cache_free(vmap_area_cachep, va);
	return ERR_PTR(-EBUSY);
}

int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);

int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
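
/*
 * Worked example (illustrative): with 16 CPUs online, fls(16) = 5, so
 * lazy_max_pages() above allows 5 * 32MB = 160MB worth of lazily freed
 * address space to accumulate before a purge and its global TLB flush
 * are triggered.
 */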

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	unsigned long resched_threshold;
	struct list_head local_purge_list;
	struct vmap_area *va, *n_va;

	lockdep_assert_held(&vmap_purge_lock);

	spin_lock(&purge_vmap_area_lock);
	purge_vmap_area_root = RB_ROOT;
	list_replace_init(&purge_vmap_area_list, &local_purge_list);
	spin_unlock(&purge_vmap_area_lock);

	if (unlikely(list_empty(&local_purge_list)))
		return false;

	start = min(start,
		list_first_entry(&local_purge_list,
			struct vmap_area, list)->va_start);

	end = max(end,
		list_last_entry(&local_purge_list,
			struct vmap_area, list)->va_end);

	flush_tlb_kernel_range(start, end);
	resched_threshold = lazy_max_pages() << 1;

	spin_lock(&free_vmap_area_lock);
	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;

		/*
		 * Finally insert or merge lazily-freed area. It is
		 * detached and there is no need to "unlink" it from
		 * anything.
		 */
		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
				&free_vmap_area_list);

		if (!va)
			continue;

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);

		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
			cond_resched_lock(&free_vmap_area_lock);
	}
	spin_unlock(&free_vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

static void drain_vmap_area_work(struct work_struct *work)
{
	unsigned long nr_lazy;

	do {
		mutex_lock(&vmap_purge_lock);
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);

		/* Recheck if further work is required. */
		nr_lazy = atomic_long_read(&vmap_lazy_nr);
	} while (nr_lazy > lazy_max_pages());
}

/*
 * Free a vmap area; the caller ensures that the area has been unmapped
 * and that flush_cache_vunmap() has been called for the correct range
 * beforehand.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	unsigned long nr_lazy;

	spin_lock(&vmap_area_lock);
	unlink_va(va, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
				PAGE_SHIFT, &vmap_lazy_nr);

	/*
	 * Merge or place it to the purge tree/list.
	 */
	spin_lock(&purge_vmap_area_lock);
	merge_or_add_vmap_area(va,
		&purge_vmap_area_root, &purge_vmap_area_list);
	spin_unlock(&purge_vmap_area_lock);

	/* After this point, we may free va at any time */
	if (unlikely(nr_lazy > lazy_max_pages()))
		schedule_work(&drain_vmap_work);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	vunmap_range_noflush(va->va_start, va->va_end);
	if (debug_pagealloc_enabled_static())
		flush_tlb_kernel_range(va->va_start, va->va_end);

	free_vmap_area_noflush(va);
}

struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr, &vmap_area_root);
	spin_unlock(&vmap_area_lock);

	return va;
}

/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_XARRAY(vmap_blocks);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
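
/*
 * Worked example (illustrative): on a 64-bit kernel with 4K pages and
 * NR_CPUS = 64, VMALLOC_PAGES / 64 / 16 = 32768 bits, which is clamped
 * to VMAP_BBMAP_BITS_MAX = 1024, so each vmap block covers
 * VMAP_BLOCK_SIZE = 1024 * 4K = 4MB of virtual space.
 */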

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
 *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
 * @order:    how many 2^order pages should be occupied in the newly allocated block
 * @gfp_mask: flags for the page level allocator
 *
 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
	if (err) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vbq = raw_cpu_ptr(&vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;

	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
} 2025 } 2026 2027 static void purge_fragmented_blocks_allcpus(void) 2028 { 2029 int cpu; 2030 2031 for_each_possible_cpu(cpu) 2032 purge_fragmented_blocks(cpu); 2033 } 2034 2035 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 2036 { 2037 struct vmap_block_queue *vbq; 2038 struct vmap_block *vb; 2039 void *vaddr = NULL; 2040 unsigned int order; 2041 2042 BUG_ON(offset_in_page(size)); 2043 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2044 if (WARN_ON(size == 0)) { 2045 /* 2046 * Allocating 0 bytes isn't what caller wants since 2047 * get_order(0) returns funny result. Just warn and terminate 2048 * early. 2049 */ 2050 return NULL; 2051 } 2052 order = get_order(size); 2053 2054 rcu_read_lock(); 2055 vbq = raw_cpu_ptr(&vmap_block_queue); 2056 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2057 unsigned long pages_off; 2058 2059 spin_lock(&vb->lock); 2060 if (vb->free < (1UL << order)) { 2061 spin_unlock(&vb->lock); 2062 continue; 2063 } 2064 2065 pages_off = VMAP_BBMAP_BITS - vb->free; 2066 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 2067 vb->free -= 1UL << order; 2068 if (vb->free == 0) { 2069 spin_lock(&vbq->lock); 2070 list_del_rcu(&vb->free_list); 2071 spin_unlock(&vbq->lock); 2072 } 2073 2074 spin_unlock(&vb->lock); 2075 break; 2076 } 2077 2078 rcu_read_unlock(); 2079 2080 /* Allocate new block if nothing was found */ 2081 if (!vaddr) 2082 vaddr = new_vmap_block(order, gfp_mask); 2083 2084 return vaddr; 2085 } 2086 2087 static void vb_free(unsigned long addr, unsigned long size) 2088 { 2089 unsigned long offset; 2090 unsigned int order; 2091 struct vmap_block *vb; 2092 2093 BUG_ON(offset_in_page(size)); 2094 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 2095 2096 flush_cache_vunmap(addr, addr + size); 2097 2098 order = get_order(size); 2099 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; 2100 vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr)); 2101 2102 vunmap_range_noflush(addr, addr + size); 2103 2104 if (debug_pagealloc_enabled_static()) 2105 flush_tlb_kernel_range(addr, addr + size); 2106 2107 spin_lock(&vb->lock); 2108 2109 /* Expand dirty range */ 2110 vb->dirty_min = min(vb->dirty_min, offset); 2111 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 2112 2113 vb->dirty += 1UL << order; 2114 if (vb->dirty == VMAP_BBMAP_BITS) { 2115 BUG_ON(vb->free); 2116 spin_unlock(&vb->lock); 2117 free_vmap_block(vb); 2118 } else 2119 spin_unlock(&vb->lock); 2120 } 2121 2122 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) 2123 { 2124 int cpu; 2125 2126 if (unlikely(!vmap_initialized)) 2127 return; 2128 2129 might_sleep(); 2130 2131 for_each_possible_cpu(cpu) { 2132 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 2133 struct vmap_block *vb; 2134 2135 rcu_read_lock(); 2136 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 2137 spin_lock(&vb->lock); 2138 if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { 2139 unsigned long va_start = vb->va->va_start; 2140 unsigned long s, e; 2141 2142 s = va_start + (vb->dirty_min << PAGE_SHIFT); 2143 e = va_start + (vb->dirty_max << PAGE_SHIFT); 2144 2145 start = min(s, start); 2146 end = max(e, end); 2147 2148 flush = 1; 2149 } 2150 spin_unlock(&vb->lock); 2151 } 2152 rcu_read_unlock(); 2153 } 2154 2155 mutex_lock(&vmap_purge_lock); 2156 purge_fragmented_blocks_allcpus(); 2157 if (!__purge_vmap_area_lazy(start, end) && flush) 2158 flush_tlb_kernel_range(start, end); 2159 mutex_unlock(&vmap_purge_lock); 2160 } 2161 2162 /** 2163 * vm_unmap_aliases - unmap outstanding lazy aliases in 
the vmap layer 2164 * 2165 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 2166 * to amortize TLB flushing overheads. What this means is that any page you 2167 * have now, may, in a former life, have been mapped into kernel virtual 2168 * address by the vmap layer and so there might be some CPUs with TLB entries 2169 * still referencing that page (additional to the regular 1:1 kernel mapping). 2170 * 2171 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 2172 * be sure that none of the pages we have control over will have any aliases 2173 * from the vmap layer. 2174 */ 2175 void vm_unmap_aliases(void) 2176 { 2177 unsigned long start = ULONG_MAX, end = 0; 2178 int flush = 0; 2179 2180 _vm_unmap_aliases(start, end, flush); 2181 } 2182 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 2183 2184 /** 2185 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 2186 * @mem: the pointer returned by vm_map_ram 2187 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 2188 */ 2189 void vm_unmap_ram(const void *mem, unsigned int count) 2190 { 2191 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2192 unsigned long addr = (unsigned long)kasan_reset_tag(mem); 2193 struct vmap_area *va; 2194 2195 might_sleep(); 2196 BUG_ON(!addr); 2197 BUG_ON(addr < VMALLOC_START); 2198 BUG_ON(addr > VMALLOC_END); 2199 BUG_ON(!PAGE_ALIGNED(addr)); 2200 2201 kasan_poison_vmalloc(mem, size); 2202 2203 if (likely(count <= VMAP_MAX_ALLOC)) { 2204 debug_check_no_locks_freed(mem, size); 2205 vb_free(addr, size); 2206 return; 2207 } 2208 2209 va = find_vmap_area(addr); 2210 BUG_ON(!va); 2211 debug_check_no_locks_freed((void *)va->va_start, 2212 (va->va_end - va->va_start)); 2213 free_unmap_vmap_area(va); 2214 } 2215 EXPORT_SYMBOL(vm_unmap_ram); 2216 2217 /** 2218 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 2219 * @pages: an array of pointers to the pages to be mapped 2220 * @count: number of pages 2221 * @node: prefer to allocate data structures on this node 2222 * 2223 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 2224 * faster than vmap so it's good. But if you mix long-life and short-life 2225 * objects with vm_map_ram(), it could consume lots of address space through 2226 * fragmentation (especially on a 32bit machine). You could see failures in 2227 * the end. Please use this function for short-lived objects. 2228 * 2229 * Returns: a pointer to the address that has been mapped, or %NULL on failure 2230 */ 2231 void *vm_map_ram(struct page **pages, unsigned int count, int node) 2232 { 2233 unsigned long size = (unsigned long)count << PAGE_SHIFT; 2234 unsigned long addr; 2235 void *mem; 2236 2237 if (likely(count <= VMAP_MAX_ALLOC)) { 2238 mem = vb_alloc(size, GFP_KERNEL); 2239 if (IS_ERR(mem)) 2240 return NULL; 2241 addr = (unsigned long)mem; 2242 } else { 2243 struct vmap_area *va; 2244 va = alloc_vmap_area(size, PAGE_SIZE, 2245 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 2246 if (IS_ERR(va)) 2247 return NULL; 2248 2249 addr = va->va_start; 2250 mem = (void *)addr; 2251 } 2252 2253 if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, 2254 pages, PAGE_SHIFT) < 0) { 2255 vm_unmap_ram(mem, count); 2256 return NULL; 2257 } 2258 2259 /* 2260 * Mark the pages as accessible, now that they are mapped. 2261 * With hardware tag-based KASAN, marking is skipped for 2262 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 
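	 * KASAN_VMALLOC_PROT_NORMAL is used here because vm_map_ram()
	 * always maps with PAGE_KERNEL (see the vmap_pages_range() call
	 * above), i.e. the mapping is a normal, non-executable one.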
2263 */ 2264 mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); 2265 2266 return mem; 2267 } 2268 EXPORT_SYMBOL(vm_map_ram); 2269 2270 static struct vm_struct *vmlist __initdata; 2271 2272 static inline unsigned int vm_area_page_order(struct vm_struct *vm) 2273 { 2274 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2275 return vm->page_order; 2276 #else 2277 return 0; 2278 #endif 2279 } 2280 2281 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) 2282 { 2283 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC 2284 vm->page_order = order; 2285 #else 2286 BUG_ON(order != 0); 2287 #endif 2288 } 2289 2290 /** 2291 * vm_area_add_early - add vmap area early during boot 2292 * @vm: vm_struct to add 2293 * 2294 * This function is used to add fixed kernel vm area to vmlist before 2295 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 2296 * should contain proper values and the other fields should be zero. 2297 * 2298 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 2299 */ 2300 void __init vm_area_add_early(struct vm_struct *vm) 2301 { 2302 struct vm_struct *tmp, **p; 2303 2304 BUG_ON(vmap_initialized); 2305 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 2306 if (tmp->addr >= vm->addr) { 2307 BUG_ON(tmp->addr < vm->addr + vm->size); 2308 break; 2309 } else 2310 BUG_ON(tmp->addr + tmp->size > vm->addr); 2311 } 2312 vm->next = *p; 2313 *p = vm; 2314 } 2315 2316 /** 2317 * vm_area_register_early - register vmap area early during boot 2318 * @vm: vm_struct to register 2319 * @align: requested alignment 2320 * 2321 * This function is used to register kernel vm area before 2322 * vmalloc_init() is called. @vm->size and @vm->flags should contain 2323 * proper values on entry and other fields should be zero. On return, 2324 * vm->addr contains the allocated address. 2325 * 2326 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
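 *
 * Illustrative early-boot use (hypothetical, not taken from this file),
 * with all other vm_struct fields left zeroed:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = SZ_2M;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *
 * On return, early_vm.addr holds the reserved, @align-aligned address.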
2327 */ 2328 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 2329 { 2330 unsigned long addr = ALIGN(VMALLOC_START, align); 2331 struct vm_struct *cur, **p; 2332 2333 BUG_ON(vmap_initialized); 2334 2335 for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { 2336 if ((unsigned long)cur->addr - addr >= vm->size) 2337 break; 2338 addr = ALIGN((unsigned long)cur->addr + cur->size, align); 2339 } 2340 2341 BUG_ON(addr > VMALLOC_END - vm->size); 2342 vm->addr = (void *)addr; 2343 vm->next = *p; 2344 *p = vm; 2345 kasan_populate_early_vm_area_shadow(vm->addr, vm->size); 2346 } 2347 2348 static void vmap_init_free_space(void) 2349 { 2350 unsigned long vmap_start = 1; 2351 const unsigned long vmap_end = ULONG_MAX; 2352 struct vmap_area *busy, *free; 2353 2354 /* 2355 * B F B B B F 2356 * -|-----|.....|-----|-----|-----|.....|- 2357 * | The KVA space | 2358 * |<--------------------------------->| 2359 */ 2360 list_for_each_entry(busy, &vmap_area_list, list) { 2361 if (busy->va_start - vmap_start > 0) { 2362 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2363 if (!WARN_ON_ONCE(!free)) { 2364 free->va_start = vmap_start; 2365 free->va_end = busy->va_start; 2366 2367 insert_vmap_area_augment(free, NULL, 2368 &free_vmap_area_root, 2369 &free_vmap_area_list); 2370 } 2371 } 2372 2373 vmap_start = busy->va_end; 2374 } 2375 2376 if (vmap_end - vmap_start > 0) { 2377 free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2378 if (!WARN_ON_ONCE(!free)) { 2379 free->va_start = vmap_start; 2380 free->va_end = vmap_end; 2381 2382 insert_vmap_area_augment(free, NULL, 2383 &free_vmap_area_root, 2384 &free_vmap_area_list); 2385 } 2386 } 2387 } 2388 2389 void __init vmalloc_init(void) 2390 { 2391 struct vmap_area *va; 2392 struct vm_struct *tmp; 2393 int i; 2394 2395 /* 2396 * Create the cache for vmap_area objects. 2397 */ 2398 vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); 2399 2400 for_each_possible_cpu(i) { 2401 struct vmap_block_queue *vbq; 2402 struct vfree_deferred *p; 2403 2404 vbq = &per_cpu(vmap_block_queue, i); 2405 spin_lock_init(&vbq->lock); 2406 INIT_LIST_HEAD(&vbq->free); 2407 p = &per_cpu(vfree_deferred, i); 2408 init_llist_head(&p->list); 2409 INIT_WORK(&p->wq, free_work); 2410 } 2411 2412 /* Import existing vmlist entries. */ 2413 for (tmp = vmlist; tmp; tmp = tmp->next) { 2414 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); 2415 if (WARN_ON_ONCE(!va)) 2416 continue; 2417 2418 va->va_start = (unsigned long)tmp->addr; 2419 va->va_end = va->va_start + tmp->size; 2420 va->vm = tmp; 2421 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); 2422 } 2423 2424 /* 2425 * Now we can initialize a free vmap space. 2426 */ 2427 vmap_init_free_space(); 2428 vmap_initialized = true; 2429 } 2430 2431 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, 2432 struct vmap_area *va, unsigned long flags, const void *caller) 2433 { 2434 vm->flags = flags; 2435 vm->addr = (void *)va->va_start; 2436 vm->size = va->va_end - va->va_start; 2437 vm->caller = caller; 2438 va->vm = vm; 2439 } 2440 2441 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 2442 unsigned long flags, const void *caller) 2443 { 2444 spin_lock(&vmap_area_lock); 2445 setup_vmalloc_vm_locked(vm, va, flags, caller); 2446 spin_unlock(&vmap_area_lock); 2447 } 2448 2449 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 2450 { 2451 /* 2452 * Before removing VM_UNINITIALIZED, 2453 * we should make sure that vm has proper values. 
2454 * Pair with smp_rmb() in show_numa_info(). 2455 */ 2456 smp_wmb(); 2457 vm->flags &= ~VM_UNINITIALIZED; 2458 } 2459 2460 static struct vm_struct *__get_vm_area_node(unsigned long size, 2461 unsigned long align, unsigned long shift, unsigned long flags, 2462 unsigned long start, unsigned long end, int node, 2463 gfp_t gfp_mask, const void *caller) 2464 { 2465 struct vmap_area *va; 2466 struct vm_struct *area; 2467 unsigned long requested_size = size; 2468 2469 BUG_ON(in_interrupt()); 2470 size = ALIGN(size, 1ul << shift); 2471 if (unlikely(!size)) 2472 return NULL; 2473 2474 if (flags & VM_IOREMAP) 2475 align = 1ul << clamp_t(int, get_count_order_long(size), 2476 PAGE_SHIFT, IOREMAP_MAX_ORDER); 2477 2478 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); 2479 if (unlikely(!area)) 2480 return NULL; 2481 2482 if (!(flags & VM_NO_GUARD)) 2483 size += PAGE_SIZE; 2484 2485 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); 2486 if (IS_ERR(va)) { 2487 kfree(area); 2488 return NULL; 2489 } 2490 2491 setup_vmalloc_vm(area, va, flags, caller); 2492 2493 /* 2494 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a 2495 * best-effort approach, as they can be mapped outside of vmalloc code. 2496 * For VM_ALLOC mappings, the pages are marked as accessible after 2497 * getting mapped in __vmalloc_node_range(). 2498 * With hardware tag-based KASAN, marking is skipped for 2499 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). 2500 */ 2501 if (!(flags & VM_ALLOC)) 2502 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, 2503 KASAN_VMALLOC_PROT_NORMAL); 2504 2505 return area; 2506 } 2507 2508 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, 2509 unsigned long start, unsigned long end, 2510 const void *caller) 2511 { 2512 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, 2513 NUMA_NO_NODE, GFP_KERNEL, caller); 2514 } 2515 2516 /** 2517 * get_vm_area - reserve a contiguous kernel virtual area 2518 * @size: size of the area 2519 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC 2520 * 2521 * Search an area of @size in the kernel virtual mapping area, 2522 * and reserved it for out purposes. Returns the area descriptor 2523 * on success or %NULL on failure. 2524 * 2525 * Return: the area descriptor on success or %NULL on failure. 2526 */ 2527 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) 2528 { 2529 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 2530 VMALLOC_START, VMALLOC_END, 2531 NUMA_NO_NODE, GFP_KERNEL, 2532 __builtin_return_address(0)); 2533 } 2534 2535 struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, 2536 const void *caller) 2537 { 2538 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, 2539 VMALLOC_START, VMALLOC_END, 2540 NUMA_NO_NODE, GFP_KERNEL, caller); 2541 } 2542 2543 /** 2544 * find_vm_area - find a continuous kernel virtual area 2545 * @addr: base address 2546 * 2547 * Search for the kernel VM area starting at @addr, and return it. 2548 * It is up to the caller to do all required locking to keep the returned 2549 * pointer valid. 2550 * 2551 * Return: the area descriptor on success or %NULL on failure. 
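 *
 * Illustrative lookup (not taken from this file); note that vm->size
 * includes the guard page, if any:
 *
 *	struct vm_struct *vm = find_vm_area(ptr);
 *
 *	if (vm)
 *		pr_debug("area %p spans %lu bytes\n", vm->addr, vm->size);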
2552 */ 2553 struct vm_struct *find_vm_area(const void *addr) 2554 { 2555 struct vmap_area *va; 2556 2557 va = find_vmap_area((unsigned long)addr); 2558 if (!va) 2559 return NULL; 2560 2561 return va->vm; 2562 } 2563 2564 /** 2565 * remove_vm_area - find and remove a continuous kernel virtual area 2566 * @addr: base address 2567 * 2568 * Search for the kernel VM area starting at @addr, and remove it. 2569 * This function returns the found VM area, but using it is NOT safe 2570 * on SMP machines, except for its size or flags. 2571 * 2572 * Return: the area descriptor on success or %NULL on failure. 2573 */ 2574 struct vm_struct *remove_vm_area(const void *addr) 2575 { 2576 struct vmap_area *va; 2577 2578 might_sleep(); 2579 2580 spin_lock(&vmap_area_lock); 2581 va = __find_vmap_area((unsigned long)addr, &vmap_area_root); 2582 if (va && va->vm) { 2583 struct vm_struct *vm = va->vm; 2584 2585 va->vm = NULL; 2586 spin_unlock(&vmap_area_lock); 2587 2588 kasan_free_module_shadow(vm); 2589 free_unmap_vmap_area(va); 2590 2591 return vm; 2592 } 2593 2594 spin_unlock(&vmap_area_lock); 2595 return NULL; 2596 } 2597 2598 static inline void set_area_direct_map(const struct vm_struct *area, 2599 int (*set_direct_map)(struct page *page)) 2600 { 2601 int i; 2602 2603 /* HUGE_VMALLOC passes small pages to set_direct_map */ 2604 for (i = 0; i < area->nr_pages; i++) 2605 if (page_address(area->pages[i])) 2606 set_direct_map(area->pages[i]); 2607 } 2608 2609 /* Handle removing and resetting vm mappings related to the vm_struct. */ 2610 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2611 { 2612 unsigned long start = ULONG_MAX, end = 0; 2613 unsigned int page_order = vm_area_page_order(area); 2614 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2615 int flush_dmap = 0; 2616 int i; 2617 2618 remove_vm_area(area->addr); 2619 2620 /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */ 2621 if (!flush_reset) 2622 return; 2623 2624 /* 2625 * If not deallocating pages, just do the flush of the VM area and 2626 * return. 2627 */ 2628 if (!deallocate_pages) { 2629 vm_unmap_aliases(); 2630 return; 2631 } 2632 2633 /* 2634 * If execution gets here, flush the vm mapping and reset the direct 2635 * map. Find the start and end range of the direct mappings to make sure 2636 * the vm_unmap_aliases() flush includes the direct map. 2637 */ 2638 for (i = 0; i < area->nr_pages; i += 1U << page_order) { 2639 unsigned long addr = (unsigned long)page_address(area->pages[i]); 2640 if (addr) { 2641 unsigned long page_size; 2642 2643 page_size = PAGE_SIZE << page_order; 2644 start = min(addr, start); 2645 end = max(addr + page_size, end); 2646 flush_dmap = 1; 2647 } 2648 } 2649 2650 /* 2651 * Set direct map to something invalid so that it won't be cached if 2652 * there are any accesses after the TLB flush, then flush the TLB and 2653 * reset the direct map permissions to the default. 
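	 * That is exactly the invalid -> flush -> default sequence of the
	 * three calls below; the _vm_unmap_aliases() flush covers both the
	 * vmalloc alias and the direct map range computed above.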
2654 */ 2655 set_area_direct_map(area, set_direct_map_invalid_noflush); 2656 _vm_unmap_aliases(start, end, flush_dmap); 2657 set_area_direct_map(area, set_direct_map_default_noflush); 2658 } 2659 2660 static void __vunmap(const void *addr, int deallocate_pages) 2661 { 2662 struct vm_struct *area; 2663 2664 if (!addr) 2665 return; 2666 2667 if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", 2668 addr)) 2669 return; 2670 2671 area = find_vm_area(addr); 2672 if (unlikely(!area)) { 2673 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", 2674 addr); 2675 return; 2676 } 2677 2678 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); 2679 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); 2680 2681 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); 2682 2683 vm_remove_mappings(area, deallocate_pages); 2684 2685 if (deallocate_pages) { 2686 int i; 2687 2688 for (i = 0; i < area->nr_pages; i++) { 2689 struct page *page = area->pages[i]; 2690 2691 BUG_ON(!page); 2692 mod_memcg_page_state(page, MEMCG_VMALLOC, -1); 2693 /* 2694 * High-order allocs for huge vmallocs are split, so 2695 * can be freed as an array of order-0 allocations 2696 */ 2697 __free_pages(page, 0); 2698 cond_resched(); 2699 } 2700 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); 2701 2702 kvfree(area->pages); 2703 } 2704 2705 kfree(area); 2706 } 2707 2708 static inline void __vfree_deferred(const void *addr) 2709 { 2710 /* 2711 * Use raw_cpu_ptr() because this can be called from preemptible 2712 * context. Preemption is absolutely fine here, because the llist_add() 2713 * implementation is lockless, so it works even if we are adding to 2714 * another cpu's list. schedule_work() should be fine with this too. 2715 */ 2716 struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); 2717 2718 if (llist_add((struct llist_node *)addr, &p->list)) 2719 schedule_work(&p->wq); 2720 } 2721 2722 /** 2723 * vfree_atomic - release memory allocated by vmalloc() 2724 * @addr: memory base address 2725 * 2726 * This one is just like vfree() but can be called in any atomic context 2727 * except NMIs. 2728 */ 2729 void vfree_atomic(const void *addr) 2730 { 2731 BUG_ON(in_nmi()); 2732 2733 kmemleak_free(addr); 2734 2735 if (!addr) 2736 return; 2737 __vfree_deferred(addr); 2738 } 2739 2740 static void __vfree(const void *addr) 2741 { 2742 if (unlikely(in_interrupt())) 2743 __vfree_deferred(addr); 2744 else 2745 __vunmap(addr, 1); 2746 } 2747 2748 /** 2749 * vfree - Release memory allocated by vmalloc() 2750 * @addr: Memory base address 2751 * 2752 * Free the virtually continuous memory area starting at @addr, as obtained 2753 * from one of the vmalloc() family of APIs. This will usually also free the 2754 * physical memory underlying the virtual allocation, but that memory is 2755 * reference counted, so it will not be freed until the last user goes away. 2756 * 2757 * If @addr is NULL, no operation is performed. 2758 * 2759 * Context: 2760 * May sleep if called *not* from interrupt context. 2761 * Must not be called in NMI context (strictly speaking, it could be 2762 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling 2763 * conventions for vfree() arch-dependent would be a really bad idea). 
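 *
 * Because a NULL @addr is ignored, error paths may free unconditionally
 * (illustrative sketch, not taken from this file):
 *
 *	p = vmalloc(a_size);
 *	q = vmalloc(b_size);
 *	if (!p || !q) {
 *		vfree(q);
 *		vfree(p);
 *		return -ENOMEM;
 *	}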
2764 */ 2765 void vfree(const void *addr) 2766 { 2767 BUG_ON(in_nmi()); 2768 2769 kmemleak_free(addr); 2770 2771 might_sleep_if(!in_interrupt()); 2772 2773 if (!addr) 2774 return; 2775 2776 __vfree(addr); 2777 } 2778 EXPORT_SYMBOL(vfree); 2779 2780 /** 2781 * vunmap - release virtual mapping obtained by vmap() 2782 * @addr: memory base address 2783 * 2784 * Free the virtually contiguous memory area starting at @addr, 2785 * which was created from the page array passed to vmap(). 2786 * 2787 * Must not be called in interrupt context. 2788 */ 2789 void vunmap(const void *addr) 2790 { 2791 BUG_ON(in_interrupt()); 2792 might_sleep(); 2793 if (addr) 2794 __vunmap(addr, 0); 2795 } 2796 EXPORT_SYMBOL(vunmap); 2797 2798 /** 2799 * vmap - map an array of pages into virtually contiguous space 2800 * @pages: array of page pointers 2801 * @count: number of pages to map 2802 * @flags: vm_area->flags 2803 * @prot: page protection for the mapping 2804 * 2805 * Maps @count pages from @pages into contiguous kernel virtual space. 2806 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself 2807 * (which must be kmalloc or vmalloc memory) and one reference per pages in it 2808 * are transferred from the caller to vmap(), and will be freed / dropped when 2809 * vfree() is called on the return value. 2810 * 2811 * Return: the address of the area or %NULL on failure 2812 */ 2813 void *vmap(struct page **pages, unsigned int count, 2814 unsigned long flags, pgprot_t prot) 2815 { 2816 struct vm_struct *area; 2817 unsigned long addr; 2818 unsigned long size; /* In bytes */ 2819 2820 might_sleep(); 2821 2822 /* 2823 * Your top guard is someone else's bottom guard. Not having a top 2824 * guard compromises someone else's mappings too. 2825 */ 2826 if (WARN_ON_ONCE(flags & VM_NO_GUARD)) 2827 flags &= ~VM_NO_GUARD; 2828 2829 if (count > totalram_pages()) 2830 return NULL; 2831 2832 size = (unsigned long)count << PAGE_SHIFT; 2833 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 2834 if (!area) 2835 return NULL; 2836 2837 addr = (unsigned long)area->addr; 2838 if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), 2839 pages, PAGE_SHIFT) < 0) { 2840 vunmap(area->addr); 2841 return NULL; 2842 } 2843 2844 if (flags & VM_MAP_PUT_PAGES) { 2845 area->pages = pages; 2846 area->nr_pages = count; 2847 } 2848 return area->addr; 2849 } 2850 EXPORT_SYMBOL(vmap); 2851 2852 #ifdef CONFIG_VMAP_PFN 2853 struct vmap_pfn_data { 2854 unsigned long *pfns; 2855 pgprot_t prot; 2856 unsigned int idx; 2857 }; 2858 2859 static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) 2860 { 2861 struct vmap_pfn_data *data = private; 2862 2863 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) 2864 return -EINVAL; 2865 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); 2866 return 0; 2867 } 2868 2869 /** 2870 * vmap_pfn - map an array of PFNs into virtually contiguous space 2871 * @pfns: array of PFNs 2872 * @count: number of pages to map 2873 * @prot: page protection for the mapping 2874 * 2875 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns 2876 * the start address of the mapping. 
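 *
 * The PFNs must not have a struct page behind them (pfn_valid() must be
 * false), e.g. PFNs of device memory. Illustrative use (hypothetical
 * driver code, not taken from this file):
 *
 *	void *va = vmap_pfn(pfns, nr_pfns, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *
 * Return: the address of the mapped area or %NULL on failure.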
2877 */ 2878 void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) 2879 { 2880 struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; 2881 struct vm_struct *area; 2882 2883 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, 2884 __builtin_return_address(0)); 2885 if (!area) 2886 return NULL; 2887 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2888 count * PAGE_SIZE, vmap_pfn_apply, &data)) { 2889 free_vm_area(area); 2890 return NULL; 2891 } 2892 return area->addr; 2893 } 2894 EXPORT_SYMBOL_GPL(vmap_pfn); 2895 #endif /* CONFIG_VMAP_PFN */ 2896 2897 static inline unsigned int 2898 vm_area_alloc_pages(gfp_t gfp, int nid, 2899 unsigned int order, unsigned int nr_pages, struct page **pages) 2900 { 2901 unsigned int nr_allocated = 0; 2902 struct page *page; 2903 int i; 2904 2905 /* 2906 * For order-0 pages we make use of bulk allocator, if 2907 * the page array is partly or not at all populated due 2908 * to fails, fallback to a single page allocator that is 2909 * more permissive. 2910 */ 2911 if (!order) { 2912 gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; 2913 2914 while (nr_allocated < nr_pages) { 2915 unsigned int nr, nr_pages_request; 2916 2917 /* 2918 * A maximum allowed request is hard-coded and is 100 2919 * pages per call. That is done in order to prevent a 2920 * long preemption off scenario in the bulk-allocator 2921 * so the range is [1:100]. 2922 */ 2923 nr_pages_request = min(100U, nr_pages - nr_allocated); 2924 2925 /* memory allocation should consider mempolicy, we can't 2926 * wrongly use nearest node when nid == NUMA_NO_NODE, 2927 * otherwise memory may be allocated in only one node, 2928 * but mempolicy wants to alloc memory by interleaving. 2929 */ 2930 if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) 2931 nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, 2932 nr_pages_request, 2933 pages + nr_allocated); 2934 2935 else 2936 nr = alloc_pages_bulk_array_node(bulk_gfp, nid, 2937 nr_pages_request, 2938 pages + nr_allocated); 2939 2940 nr_allocated += nr; 2941 cond_resched(); 2942 2943 /* 2944 * If zero or pages were obtained partly, 2945 * fallback to a single page allocator. 2946 */ 2947 if (nr != nr_pages_request) 2948 break; 2949 } 2950 } 2951 2952 /* High-order pages or fallback path if "bulk" fails. */ 2953 2954 while (nr_allocated < nr_pages) { 2955 if (fatal_signal_pending(current)) 2956 break; 2957 2958 if (nid == NUMA_NO_NODE) 2959 page = alloc_pages(gfp, order); 2960 else 2961 page = alloc_pages_node(nid, gfp, order); 2962 if (unlikely(!page)) 2963 break; 2964 /* 2965 * Higher order allocations must be able to be treated as 2966 * indepdenent small pages by callers (as they can with 2967 * small-page vmallocs). Some drivers do their own refcounting 2968 * on vmalloc_to_page() pages, some use page->mapping, 2969 * page->lru, etc. 2970 */ 2971 if (order) 2972 split_page(page, order); 2973 2974 /* 2975 * Careful, we allocate and map page-order pages, but 2976 * tracking is done per PAGE_SIZE page so as to keep the 2977 * vm_struct APIs independent of the physical/mapped size. 
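		 * For example, a single order-2 page allocated here adds four
		 * consecutive entries to the pages[] array.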
2978 */ 2979 for (i = 0; i < (1U << order); i++) 2980 pages[nr_allocated + i] = page + i; 2981 2982 cond_resched(); 2983 nr_allocated += 1U << order; 2984 } 2985 2986 return nr_allocated; 2987 } 2988 2989 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 2990 pgprot_t prot, unsigned int page_shift, 2991 int node) 2992 { 2993 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 2994 bool nofail = gfp_mask & __GFP_NOFAIL; 2995 unsigned long addr = (unsigned long)area->addr; 2996 unsigned long size = get_vm_area_size(area); 2997 unsigned long array_size; 2998 unsigned int nr_small_pages = size >> PAGE_SHIFT; 2999 unsigned int page_order; 3000 unsigned int flags; 3001 int ret; 3002 3003 array_size = (unsigned long)nr_small_pages * sizeof(struct page *); 3004 gfp_mask |= __GFP_NOWARN; 3005 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) 3006 gfp_mask |= __GFP_HIGHMEM; 3007 3008 /* Please note that the recursion is strictly bounded. */ 3009 if (array_size > PAGE_SIZE) { 3010 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, 3011 area->caller); 3012 } else { 3013 area->pages = kmalloc_node(array_size, nested_gfp, node); 3014 } 3015 3016 if (!area->pages) { 3017 warn_alloc(gfp_mask, NULL, 3018 "vmalloc error: size %lu, failed to allocated page array size %lu", 3019 nr_small_pages * PAGE_SIZE, array_size); 3020 free_vm_area(area); 3021 return NULL; 3022 } 3023 3024 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); 3025 page_order = vm_area_page_order(area); 3026 3027 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, 3028 node, page_order, nr_small_pages, area->pages); 3029 3030 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); 3031 if (gfp_mask & __GFP_ACCOUNT) { 3032 int i; 3033 3034 for (i = 0; i < area->nr_pages; i++) 3035 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); 3036 } 3037 3038 /* 3039 * If not enough pages were obtained to accomplish an 3040 * allocation request, free them via __vfree() if any. 3041 */ 3042 if (area->nr_pages != nr_small_pages) { 3043 warn_alloc(gfp_mask, NULL, 3044 "vmalloc error: size %lu, page order %u, failed to allocate pages", 3045 area->nr_pages * PAGE_SIZE, page_order); 3046 goto fail; 3047 } 3048 3049 /* 3050 * page tables allocations ignore external gfp mask, enforce it 3051 * by the scope API 3052 */ 3053 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3054 flags = memalloc_nofs_save(); 3055 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3056 flags = memalloc_noio_save(); 3057 3058 do { 3059 ret = vmap_pages_range(addr, addr + size, prot, area->pages, 3060 page_shift); 3061 if (nofail && (ret < 0)) 3062 schedule_timeout_uninterruptible(1); 3063 } while (nofail && (ret < 0)); 3064 3065 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) 3066 memalloc_nofs_restore(flags); 3067 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) 3068 memalloc_noio_restore(flags); 3069 3070 if (ret < 0) { 3071 warn_alloc(gfp_mask, NULL, 3072 "vmalloc error: size %lu, failed to map pages", 3073 area->nr_pages * PAGE_SIZE); 3074 goto fail; 3075 } 3076 3077 return area->addr; 3078 3079 fail: 3080 __vfree(area->addr); 3081 return NULL; 3082 } 3083 3084 /** 3085 * __vmalloc_node_range - allocate virtually contiguous memory 3086 * @size: allocation size 3087 * @align: desired alignment 3088 * @start: vm area range start 3089 * @end: vm area range end 3090 * @gfp_mask: flags for the page level allocator 3091 * @prot: protection mask for the allocated pages 3092 * @vm_flags: additional vm area flags (e.g. 
%VM_NO_GUARD) 3093 * @node: node to use for allocation or NUMA_NO_NODE 3094 * @caller: caller's return address 3095 * 3096 * Allocate enough pages to cover @size from the page level 3097 * allocator with @gfp_mask flags. Please note that the full set of gfp 3098 * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all 3099 * supported. 3100 * Zone modifiers are not supported. From the reclaim modifiers 3101 * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) 3102 * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and 3103 * __GFP_RETRY_MAYFAIL are not supported). 3104 * 3105 * __GFP_NOWARN can be used to suppress failures messages. 3106 * 3107 * Map them into contiguous kernel virtual space, using a pagetable 3108 * protection of @prot. 3109 * 3110 * Return: the address of the area or %NULL on failure 3111 */ 3112 void *__vmalloc_node_range(unsigned long size, unsigned long align, 3113 unsigned long start, unsigned long end, gfp_t gfp_mask, 3114 pgprot_t prot, unsigned long vm_flags, int node, 3115 const void *caller) 3116 { 3117 struct vm_struct *area; 3118 void *ret; 3119 kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; 3120 unsigned long real_size = size; 3121 unsigned long real_align = align; 3122 unsigned int shift = PAGE_SHIFT; 3123 3124 if (WARN_ON_ONCE(!size)) 3125 return NULL; 3126 3127 if ((size >> PAGE_SHIFT) > totalram_pages()) { 3128 warn_alloc(gfp_mask, NULL, 3129 "vmalloc error: size %lu, exceeds total pages", 3130 real_size); 3131 return NULL; 3132 } 3133 3134 if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { 3135 unsigned long size_per_node; 3136 3137 /* 3138 * Try huge pages. Only try for PAGE_KERNEL allocations, 3139 * others like modules don't yet expect huge pages in 3140 * their allocations due to apply_to_page_range not 3141 * supporting them. 3142 */ 3143 3144 size_per_node = size; 3145 if (node == NUMA_NO_NODE) 3146 size_per_node /= num_online_nodes(); 3147 if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) 3148 shift = PMD_SHIFT; 3149 else 3150 shift = arch_vmap_pte_supported_shift(size_per_node); 3151 3152 align = max(real_align, 1UL << shift); 3153 size = ALIGN(real_size, 1UL << shift); 3154 } 3155 3156 again: 3157 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | 3158 VM_UNINITIALIZED | vm_flags, start, end, node, 3159 gfp_mask, caller); 3160 if (!area) { 3161 bool nofail = gfp_mask & __GFP_NOFAIL; 3162 warn_alloc(gfp_mask, NULL, 3163 "vmalloc error: size %lu, vm_struct allocation failed%s", 3164 real_size, (nofail) ? ". Retrying." : ""); 3165 if (nofail) { 3166 schedule_timeout_uninterruptible(1); 3167 goto again; 3168 } 3169 goto fail; 3170 } 3171 3172 /* 3173 * Prepare arguments for __vmalloc_area_node() and 3174 * kasan_unpoison_vmalloc(). 3175 */ 3176 if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { 3177 if (kasan_hw_tags_enabled()) { 3178 /* 3179 * Modify protection bits to allow tagging. 3180 * This must be done before mapping. 3181 */ 3182 prot = arch_vmap_pgprot_tagged(prot); 3183 3184 /* 3185 * Skip page_alloc poisoning and zeroing for physical 3186 * pages backing VM_ALLOC mapping. Memory is instead 3187 * poisoned and zeroed by kasan_unpoison_vmalloc(). 3188 */ 3189 gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; 3190 } 3191 3192 /* Take note that the mapping is PAGE_KERNEL. */ 3193 kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; 3194 } 3195 3196 /* Allocate physical pages and map them into vmalloc space. 
*/ 3197 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); 3198 if (!ret) 3199 goto fail; 3200 3201 /* 3202 * Mark the pages as accessible, now that they are mapped. 3203 * The condition for setting KASAN_VMALLOC_INIT should complement the 3204 * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check 3205 * to make sure that memory is initialized under the same conditions. 3206 * Tag-based KASAN modes only assign tags to normal non-executable 3207 * allocations, see __kasan_unpoison_vmalloc(). 3208 */ 3209 kasan_flags |= KASAN_VMALLOC_VM_ALLOC; 3210 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && 3211 (gfp_mask & __GFP_SKIP_ZERO)) 3212 kasan_flags |= KASAN_VMALLOC_INIT; 3213 /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ 3214 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); 3215 3216 /* 3217 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 3218 * flag. It means that vm_struct is not fully initialized. 3219 * Now, it is fully initialized, so remove this flag here. 3220 */ 3221 clear_vm_uninitialized_flag(area); 3222 3223 size = PAGE_ALIGN(size); 3224 if (!(vm_flags & VM_DEFER_KMEMLEAK)) 3225 kmemleak_vmalloc(area, size, gfp_mask); 3226 3227 return area->addr; 3228 3229 fail: 3230 if (shift > PAGE_SHIFT) { 3231 shift = PAGE_SHIFT; 3232 align = real_align; 3233 size = real_size; 3234 goto again; 3235 } 3236 3237 return NULL; 3238 } 3239 3240 /** 3241 * __vmalloc_node - allocate virtually contiguous memory 3242 * @size: allocation size 3243 * @align: desired alignment 3244 * @gfp_mask: flags for the page level allocator 3245 * @node: node to use for allocation or NUMA_NO_NODE 3246 * @caller: caller's return address 3247 * 3248 * Allocate enough pages to cover @size from the page level allocator with 3249 * @gfp_mask flags. Map them into contiguous kernel virtual space. 3250 * 3251 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL 3252 * and __GFP_NOFAIL are not supported 3253 * 3254 * Any use of gfp flags outside of GFP_KERNEL should be consulted 3255 * with mm people. 3256 * 3257 * Return: pointer to the allocated memory or %NULL on error 3258 */ 3259 void *__vmalloc_node(unsigned long size, unsigned long align, 3260 gfp_t gfp_mask, int node, const void *caller) 3261 { 3262 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 3263 gfp_mask, PAGE_KERNEL, 0, node, caller); 3264 } 3265 /* 3266 * This is only for performance analysis of vmalloc and stress purpose. 3267 * It is required by vmalloc test module, therefore do not use it other 3268 * than that. 3269 */ 3270 #ifdef CONFIG_TEST_VMALLOC_MODULE 3271 EXPORT_SYMBOL_GPL(__vmalloc_node); 3272 #endif 3273 3274 void *__vmalloc(unsigned long size, gfp_t gfp_mask) 3275 { 3276 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, 3277 __builtin_return_address(0)); 3278 } 3279 EXPORT_SYMBOL(__vmalloc); 3280 3281 /** 3282 * vmalloc - allocate virtually contiguous memory 3283 * @size: allocation size 3284 * 3285 * Allocate enough pages to cover @size from the page level 3286 * allocator and map them into contiguous kernel virtual space. 3287 * 3288 * For tight control over page level allocator and protection flags 3289 * use __vmalloc() instead. 
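 *
 * The memory is contiguous only in kernel virtual space, not physically,
 * which makes vmalloc() suitable for large buffers that the page
 * allocator could not provide as one physically contiguous block
 * (illustrative sketch, not taken from this file):
 *
 *	struct entry *table = vmalloc(array_size(nr_entries, sizeof(*table)));
 *
 *	if (!table)
 *		return -ENOMEM;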
3290 * 3291 * Return: pointer to the allocated memory or %NULL on error 3292 */ 3293 void *vmalloc(unsigned long size) 3294 { 3295 return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, 3296 __builtin_return_address(0)); 3297 } 3298 EXPORT_SYMBOL(vmalloc); 3299 3300 /** 3301 * vmalloc_huge - allocate virtually contiguous memory, allow huge pages 3302 * @size: allocation size 3303 * @gfp_mask: flags for the page level allocator 3304 * 3305 * Allocate enough pages to cover @size from the page level 3306 * allocator and map them into contiguous kernel virtual space. 3307 * If @size is greater than or equal to PMD_SIZE, allow using 3308 * huge pages for the memory 3309 * 3310 * Return: pointer to the allocated memory or %NULL on error 3311 */ 3312 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) 3313 { 3314 return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, 3315 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, 3316 NUMA_NO_NODE, __builtin_return_address(0)); 3317 } 3318 EXPORT_SYMBOL_GPL(vmalloc_huge); 3319 3320 /** 3321 * vzalloc - allocate virtually contiguous memory with zero fill 3322 * @size: allocation size 3323 * 3324 * Allocate enough pages to cover @size from the page level 3325 * allocator and map them into contiguous kernel virtual space. 3326 * The memory allocated is set to zero. 3327 * 3328 * For tight control over page level allocator and protection flags 3329 * use __vmalloc() instead. 3330 * 3331 * Return: pointer to the allocated memory or %NULL on error 3332 */ 3333 void *vzalloc(unsigned long size) 3334 { 3335 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 3336 __builtin_return_address(0)); 3337 } 3338 EXPORT_SYMBOL(vzalloc); 3339 3340 /** 3341 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 3342 * @size: allocation size 3343 * 3344 * The resulting memory area is zeroed so it can be mapped to userspace 3345 * without leaking data. 3346 * 3347 * Return: pointer to the allocated memory or %NULL on error 3348 */ 3349 void *vmalloc_user(unsigned long size) 3350 { 3351 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3352 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 3353 VM_USERMAP, NUMA_NO_NODE, 3354 __builtin_return_address(0)); 3355 } 3356 EXPORT_SYMBOL(vmalloc_user); 3357 3358 /** 3359 * vmalloc_node - allocate memory on a specific node 3360 * @size: allocation size 3361 * @node: numa node 3362 * 3363 * Allocate enough pages to cover @size from the page level 3364 * allocator and map them into contiguous kernel virtual space. 3365 * 3366 * For tight control over page level allocator and protection flags 3367 * use __vmalloc() instead. 3368 * 3369 * Return: pointer to the allocated memory or %NULL on error 3370 */ 3371 void *vmalloc_node(unsigned long size, int node) 3372 { 3373 return __vmalloc_node(size, 1, GFP_KERNEL, node, 3374 __builtin_return_address(0)); 3375 } 3376 EXPORT_SYMBOL(vmalloc_node); 3377 3378 /** 3379 * vzalloc_node - allocate memory on a specific node with zero fill 3380 * @size: allocation size 3381 * @node: numa node 3382 * 3383 * Allocate enough pages to cover @size from the page level 3384 * allocator and map them into contiguous kernel virtual space. 3385 * The memory allocated is set to zero. 
3386 * 3387 * Return: pointer to the allocated memory or %NULL on error 3388 */ 3389 void *vzalloc_node(unsigned long size, int node) 3390 { 3391 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, 3392 __builtin_return_address(0)); 3393 } 3394 EXPORT_SYMBOL(vzalloc_node); 3395 3396 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 3397 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 3398 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 3399 #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) 3400 #else 3401 /* 3402 * 64b systems should always have either DMA or DMA32 zones. For others 3403 * GFP_DMA32 should do the right thing and use the normal zone. 3404 */ 3405 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) 3406 #endif 3407 3408 /** 3409 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 3410 * @size: allocation size 3411 * 3412 * Allocate enough 32bit PA addressable pages to cover @size from the 3413 * page level allocator and map them into contiguous kernel virtual space. 3414 * 3415 * Return: pointer to the allocated memory or %NULL on error 3416 */ 3417 void *vmalloc_32(unsigned long size) 3418 { 3419 return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, 3420 __builtin_return_address(0)); 3421 } 3422 EXPORT_SYMBOL(vmalloc_32); 3423 3424 /** 3425 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 3426 * @size: allocation size 3427 * 3428 * The resulting memory area is 32bit addressable and zeroed so it can be 3429 * mapped to userspace without leaking data. 3430 * 3431 * Return: pointer to the allocated memory or %NULL on error 3432 */ 3433 void *vmalloc_32_user(unsigned long size) 3434 { 3435 return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, 3436 GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 3437 VM_USERMAP, NUMA_NO_NODE, 3438 __builtin_return_address(0)); 3439 } 3440 EXPORT_SYMBOL(vmalloc_32_user); 3441 3442 /* 3443 * small helper routine , copy contents to buf from addr. 3444 * If the page is not present, fill zero. 3445 */ 3446 3447 static int aligned_vread(char *buf, char *addr, unsigned long count) 3448 { 3449 struct page *p; 3450 int copied = 0; 3451 3452 while (count) { 3453 unsigned long offset, length; 3454 3455 offset = offset_in_page(addr); 3456 length = PAGE_SIZE - offset; 3457 if (length > count) 3458 length = count; 3459 p = vmalloc_to_page(addr); 3460 /* 3461 * To do safe access to this _mapped_ area, we need 3462 * lock. But adding lock here means that we need to add 3463 * overhead of vmalloc()/vfree() calls for this _debug_ 3464 * interface, rarely used. Instead of that, we'll use 3465 * kmap() and get small overhead in this access function. 3466 */ 3467 if (p) { 3468 /* We can expect USER0 is not used -- see vread() */ 3469 void *map = kmap_atomic(p); 3470 memcpy(buf, map + offset, length); 3471 kunmap_atomic(map); 3472 } else 3473 memset(buf, 0, length); 3474 3475 addr += length; 3476 buf += length; 3477 copied += length; 3478 count -= length; 3479 } 3480 return copied; 3481 } 3482 3483 /** 3484 * vread() - read vmalloc area in a safe way. 3485 * @buf: buffer for reading data 3486 * @addr: vm address. 3487 * @count: number of bytes to be read. 3488 * 3489 * This function checks that addr is a valid vmalloc'ed area, and 3490 * copy data from that area to a given buffer. If the given memory range 3491 * of [addr...addr+count) includes some valid address, data is copied to 3492 * proper area of @buf. If there are memory holes, they'll be zero-filled. 
3493 * IOREMAP area is treated as memory hole and no copy is done. 3494 * 3495 * If [addr...addr+count) doesn't includes any intersects with alive 3496 * vm_struct area, returns 0. @buf should be kernel's buffer. 3497 * 3498 * Note: In usual ops, vread() is never necessary because the caller 3499 * should know vmalloc() area is valid and can use memcpy(). 3500 * This is for routines which have to access vmalloc area without 3501 * any information, as /proc/kcore. 3502 * 3503 * Return: number of bytes for which addr and buf should be increased 3504 * (same number as @count) or %0 if [addr...addr+count) doesn't 3505 * include any intersection with valid vmalloc area 3506 */ 3507 long vread(char *buf, char *addr, unsigned long count) 3508 { 3509 struct vmap_area *va; 3510 struct vm_struct *vm; 3511 char *vaddr, *buf_start = buf; 3512 unsigned long buflen = count; 3513 unsigned long n; 3514 3515 addr = kasan_reset_tag(addr); 3516 3517 /* Don't allow overflow */ 3518 if ((unsigned long) addr + count < count) 3519 count = -(unsigned long) addr; 3520 3521 spin_lock(&vmap_area_lock); 3522 va = find_vmap_area_exceed_addr((unsigned long)addr); 3523 if (!va) 3524 goto finished; 3525 3526 /* no intersects with alive vmap_area */ 3527 if ((unsigned long)addr + count <= va->va_start) 3528 goto finished; 3529 3530 list_for_each_entry_from(va, &vmap_area_list, list) { 3531 if (!count) 3532 break; 3533 3534 if (!va->vm) 3535 continue; 3536 3537 vm = va->vm; 3538 vaddr = (char *) vm->addr; 3539 if (addr >= vaddr + get_vm_area_size(vm)) 3540 continue; 3541 while (addr < vaddr) { 3542 if (count == 0) 3543 goto finished; 3544 *buf = '\0'; 3545 buf++; 3546 addr++; 3547 count--; 3548 } 3549 n = vaddr + get_vm_area_size(vm) - addr; 3550 if (n > count) 3551 n = count; 3552 if (!(vm->flags & VM_IOREMAP)) 3553 aligned_vread(buf, addr, n); 3554 else /* IOREMAP area is treated as memory hole */ 3555 memset(buf, 0, n); 3556 buf += n; 3557 addr += n; 3558 count -= n; 3559 } 3560 finished: 3561 spin_unlock(&vmap_area_lock); 3562 3563 if (buf == buf_start) 3564 return 0; 3565 /* zero-fill memory holes */ 3566 if (buf != buf_start + buflen) 3567 memset(buf, 0, buflen - (buf - buf_start)); 3568 3569 return buflen; 3570 } 3571 3572 /** 3573 * remap_vmalloc_range_partial - map vmalloc pages to userspace 3574 * @vma: vma to cover 3575 * @uaddr: target user address to start at 3576 * @kaddr: virtual address of vmalloc kernel memory 3577 * @pgoff: offset from @kaddr to start at 3578 * @size: size of map area 3579 * 3580 * Returns: 0 for success, -Exxx on failure 3581 * 3582 * This function checks that @kaddr is a valid vmalloc'ed area, 3583 * and that it is big enough to cover the range starting at 3584 * @uaddr in @vma. Will return failure if that criteria isn't 3585 * met. 
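 *
 * Illustrative use from a hypothetical driver's mmap() handler, where
 * buf was allocated with vmalloc_user() so its area has VM_USERMAP set
 * (not taken from this file):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range_partial(vma, vma->vm_start, buf,
 *						   vma->vm_pgoff,
 *						   vma->vm_end - vma->vm_start);
 *	}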
3586 * 3587 * Similar to remap_pfn_range() (see mm/memory.c) 3588 */ 3589 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 3590 void *kaddr, unsigned long pgoff, 3591 unsigned long size) 3592 { 3593 struct vm_struct *area; 3594 unsigned long off; 3595 unsigned long end_index; 3596 3597 if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) 3598 return -EINVAL; 3599 3600 size = PAGE_ALIGN(size); 3601 3602 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 3603 return -EINVAL; 3604 3605 area = find_vm_area(kaddr); 3606 if (!area) 3607 return -EINVAL; 3608 3609 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) 3610 return -EINVAL; 3611 3612 if (check_add_overflow(size, off, &end_index) || 3613 end_index > get_vm_area_size(area)) 3614 return -EINVAL; 3615 kaddr += off; 3616 3617 do { 3618 struct page *page = vmalloc_to_page(kaddr); 3619 int ret; 3620 3621 ret = vm_insert_page(vma, uaddr, page); 3622 if (ret) 3623 return ret; 3624 3625 uaddr += PAGE_SIZE; 3626 kaddr += PAGE_SIZE; 3627 size -= PAGE_SIZE; 3628 } while (size > 0); 3629 3630 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 3631 3632 return 0; 3633 } 3634 3635 /** 3636 * remap_vmalloc_range - map vmalloc pages to userspace 3637 * @vma: vma to cover (map full range of vma) 3638 * @addr: vmalloc memory 3639 * @pgoff: number of pages into addr before first page to map 3640 * 3641 * Returns: 0 for success, -Exxx on failure 3642 * 3643 * This function checks that addr is a valid vmalloc'ed area, and 3644 * that it is big enough to cover the vma. Will return failure if 3645 * that criteria isn't met. 3646 * 3647 * Similar to remap_pfn_range() (see mm/memory.c) 3648 */ 3649 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 3650 unsigned long pgoff) 3651 { 3652 return remap_vmalloc_range_partial(vma, vma->vm_start, 3653 addr, pgoff, 3654 vma->vm_end - vma->vm_start); 3655 } 3656 EXPORT_SYMBOL(remap_vmalloc_range); 3657 3658 void free_vm_area(struct vm_struct *area) 3659 { 3660 struct vm_struct *ret; 3661 ret = remove_vm_area(area->addr); 3662 BUG_ON(ret != area); 3663 kfree(area); 3664 } 3665 EXPORT_SYMBOL_GPL(free_vm_area); 3666 3667 #ifdef CONFIG_SMP 3668 static struct vmap_area *node_to_va(struct rb_node *n) 3669 { 3670 return rb_entry_safe(n, struct vmap_area, rb_node); 3671 } 3672 3673 /** 3674 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to 3675 * @addr: target address 3676 * 3677 * Returns: vmap_area if it is found. If there is no such area 3678 * the first highest(reverse order) vmap_area is returned 3679 * i.e. va->va_start < addr && va->va_end < addr or NULL 3680 * if there are no any areas before @addr. 3681 */ 3682 static struct vmap_area * 3683 pvm_find_va_enclose_addr(unsigned long addr) 3684 { 3685 struct vmap_area *va, *tmp; 3686 struct rb_node *n; 3687 3688 n = free_vmap_area_root.rb_node; 3689 va = NULL; 3690 3691 while (n) { 3692 tmp = rb_entry(n, struct vmap_area, rb_node); 3693 if (tmp->va_start <= addr) { 3694 va = tmp; 3695 if (tmp->va_end >= addr) 3696 break; 3697 3698 n = n->rb_right; 3699 } else { 3700 n = n->rb_left; 3701 } 3702 } 3703 3704 return va; 3705 } 3706 3707 /** 3708 * pvm_determine_end_from_reverse - find the highest aligned address 3709 * of free block below VMALLOC_END 3710 * @va: 3711 * in - the VA we start the search(reverse order); 3712 * out - the VA with the highest aligned end address. 
3713 * @align: alignment for required highest address 3714 * 3715 * Returns: determined end address within vmap_area 3716 */ 3717 static unsigned long 3718 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) 3719 { 3720 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3721 unsigned long addr; 3722 3723 if (likely(*va)) { 3724 list_for_each_entry_from_reverse((*va), 3725 &free_vmap_area_list, list) { 3726 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); 3727 if ((*va)->va_start < addr) 3728 return addr; 3729 } 3730 } 3731 3732 return 0; 3733 } 3734 3735 /** 3736 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 3737 * @offsets: array containing offset of each area 3738 * @sizes: array containing size of each area 3739 * @nr_vms: the number of areas to allocate 3740 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 3741 * 3742 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 3743 * vm_structs on success, %NULL on failure 3744 * 3745 * Percpu allocator wants to use congruent vm areas so that it can 3746 * maintain the offsets among percpu areas. This function allocates 3747 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 3748 * be scattered pretty far, distance between two areas easily going up 3749 * to gigabytes. To avoid interacting with regular vmallocs, these 3750 * areas are allocated from top. 3751 * 3752 * Despite its complicated look, this allocator is rather simple. It 3753 * does everything top-down and scans free blocks from the end looking 3754 * for matching base. While scanning, if any of the areas do not fit the 3755 * base address is pulled down to fit the area. Scanning is repeated till 3756 * all the areas fit and then all necessary data structures are inserted 3757 * and the result is returned. 3758 */ 3759 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 3760 const size_t *sizes, int nr_vms, 3761 size_t align) 3762 { 3763 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 3764 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 3765 struct vmap_area **vas, *va; 3766 struct vm_struct **vms; 3767 int area, area2, last_area, term_area; 3768 unsigned long base, start, size, end, last_end, orig_start, orig_end; 3769 bool purged = false; 3770 3771 /* verify parameters and allocate data structures */ 3772 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 3773 for (last_area = 0, area = 0; area < nr_vms; area++) { 3774 start = offsets[area]; 3775 end = start + sizes[area]; 3776 3777 /* is everything aligned properly? 
*/ 3778 BUG_ON(!IS_ALIGNED(offsets[area], align)); 3779 BUG_ON(!IS_ALIGNED(sizes[area], align)); 3780 3781 /* detect the area with the highest address */ 3782 if (start > offsets[last_area]) 3783 last_area = area; 3784 3785 for (area2 = area + 1; area2 < nr_vms; area2++) { 3786 unsigned long start2 = offsets[area2]; 3787 unsigned long end2 = start2 + sizes[area2]; 3788 3789 BUG_ON(start2 < end && start < end2); 3790 } 3791 } 3792 last_end = offsets[last_area] + sizes[last_area]; 3793 3794 if (vmalloc_end - vmalloc_start < last_end) { 3795 WARN_ON(true); 3796 return NULL; 3797 } 3798 3799 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 3800 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 3801 if (!vas || !vms) 3802 goto err_free2; 3803 3804 for (area = 0; area < nr_vms; area++) { 3805 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); 3806 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 3807 if (!vas[area] || !vms[area]) 3808 goto err_free; 3809 } 3810 retry: 3811 spin_lock(&free_vmap_area_lock); 3812 3813 /* start scanning - we scan from the top, begin with the last area */ 3814 area = term_area = last_area; 3815 start = offsets[area]; 3816 end = start + sizes[area]; 3817 3818 va = pvm_find_va_enclose_addr(vmalloc_end); 3819 base = pvm_determine_end_from_reverse(&va, align) - end; 3820 3821 while (true) { 3822 /* 3823 * base might have underflowed, add last_end before 3824 * comparing. 3825 */ 3826 if (base + last_end < vmalloc_start + last_end) 3827 goto overflow; 3828 3829 /* 3830 * Fitting base has not been found. 3831 */ 3832 if (va == NULL) 3833 goto overflow; 3834 3835 /* 3836 * If required width exceeds current VA block, move 3837 * base downwards and then recheck. 3838 */ 3839 if (base + end > va->va_end) { 3840 base = pvm_determine_end_from_reverse(&va, align) - end; 3841 term_area = area; 3842 continue; 3843 } 3844 3845 /* 3846 * If this VA does not fit, move base downwards and recheck. 3847 */ 3848 if (base + start < va->va_start) { 3849 va = node_to_va(rb_prev(&va->rb_node)); 3850 base = pvm_determine_end_from_reverse(&va, align) - end; 3851 term_area = area; 3852 continue; 3853 } 3854 3855 /* 3856 * This area fits, move on to the previous one. If 3857 * the previous one is the terminal one, we're done. 3858 */ 3859 area = (area + nr_vms - 1) % nr_vms; 3860 if (area == term_area) 3861 break; 3862 3863 start = offsets[area]; 3864 end = start + sizes[area]; 3865 va = pvm_find_va_enclose_addr(base + end); 3866 } 3867 3868 /* we've found a fitting base, insert all va's */ 3869 for (area = 0; area < nr_vms; area++) { 3870 int ret; 3871 3872 start = base + offsets[area]; 3873 size = sizes[area]; 3874 3875 va = pvm_find_va_enclose_addr(start); 3876 if (WARN_ON_ONCE(va == NULL)) 3877 /* It is a BUG(), but trigger recovery instead. */ 3878 goto recovery; 3879 3880 ret = adjust_va_to_fit_type(&free_vmap_area_root, 3881 &free_vmap_area_list, 3882 va, start, size); 3883 if (WARN_ON_ONCE(unlikely(ret))) 3884 /* It is a BUG(), but trigger recovery instead. */ 3885 goto recovery; 3886 3887 /* Allocated area. 
		va = vas[area];
		va->va_start = start;
		va->va_end = start + size;
	}

	spin_unlock(&free_vmap_area_lock);

	/* populate the kasan shadow space */
	for (area = 0; area < nr_vms; area++) {
		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
			goto err_free_shadow;
	}

	/* insert all vm's */
	spin_lock(&vmap_area_lock);
	for (area = 0; area < nr_vms; area++) {
		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);

		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
					pcpu_get_vm_areas);
	}
	spin_unlock(&vmap_area_lock);

	/*
	 * Mark allocated areas as accessible.  Do it now as a best-effort
	 * approach, as they can be mapped outside of the vmalloc code.
	 * With hardware tag-based KASAN, marking is skipped for
	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
	 */
	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

	kfree(vas);
	return vms;

recovery:
	/*
	 * Remove previously allocated areas.  There is no need to remove
	 * them from the busy tree, because they are inserted only on the
	 * final step and only if pcpu_get_vm_areas() succeeds.
	 */
	while (area--) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
						    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
	}

overflow:
	spin_unlock(&free_vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = true;

		/* Before "retry", check if we recover. */
		for (area = 0; area < nr_vms; area++) {
			if (vas[area])
				continue;

			vas[area] = kmem_cache_zalloc(
				vmap_area_cachep, GFP_KERNEL);
			if (!vas[area])
				goto err_free;
		}

		goto retry;
	}

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas[area])
			kmem_cache_free(vmap_area_cachep, vas[area]);

		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;

err_free_shadow:
	spin_lock(&free_vmap_area_lock);
	/*
	 * We release all the vmalloc shadows, even the ones for regions that
	 * hadn't been successfully added.  This relies on kasan_release_vmalloc
	 * being able to tolerate this case.
	 */
	for (area = 0; area < nr_vms; area++) {
		orig_start = vas[area]->va_start;
		orig_end = vas[area]->va_end;
		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
						    &free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
		vas[area] = NULL;
		kfree(vms[area]);
	}
	spin_unlock(&free_vmap_area_lock);
	kfree(vas);
	kfree(vms);
	return NULL;
}
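/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * such as the percpu allocator might reserve two congruent areas with
 * pcpu_get_vm_areas() and release them again with pcpu_free_vm_areas().
 * The helper name, offsets and sizes below are made up purely to show the
 * calling convention; real users derive them from their chunk layout and
 * map backing pages into the areas themselves (see mm/percpu-vm.c).
 */
static int __maybe_unused pcpu_vm_areas_example(void)
{
	/* Two areas, 3 * PMD_SIZE apart, each PMD_SIZE long. */
	const unsigned long offsets[] = { 0, 3 * PMD_SIZE };
	const size_t sizes[] = { PMD_SIZE, PMD_SIZE };
	struct vm_struct **vms;

	/* Every offset and size must be a multiple of @align. */
	vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), PMD_SIZE);
	if (!vms)
		return -ENOMEM;

	/*
	 * The areas are congruent: vms[1]->addr - vms[0]->addr equals
	 * offsets[1] - offsets[0].  A real caller would map pages into
	 * them here before handing the ranges out.
	 */

	pcpu_free_vm_areas(vms, ARRAY_SIZE(sizes));
	return 0;
}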
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
	struct vm_struct *vm;
	void *objp = (void *)PAGE_ALIGN((unsigned long)object);

	vm = find_vm_area(objp);
	if (!vm)
		return false;
	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
	return true;
}
#endif

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_purge_lock)
	__acquires(&vmap_area_lock)
{
	mutex_lock(&vmap_purge_lock);
	spin_lock(&vmap_area_lock);

	return seq_list_start(&vmap_area_list, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &vmap_area_list, pos);
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
	__releases(&vmap_purge_lock)
{
	spin_unlock(&vmap_area_lock);
	mutex_unlock(&vmap_purge_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;
		unsigned int step = 1U << vm_area_page_order(v);

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr += step)
			counters[page_to_nid(v->pages[nr])] += step;
		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static void show_purge_info(struct seq_file *m)
{
	struct vmap_area *va;

	spin_lock(&purge_vmap_area_lock);
	list_for_each_entry(va, &purge_vmap_area_list, list) {
		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);
	}
	spin_unlock(&purge_vmap_area_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va;
	struct vm_struct *v;

	va = list_entry(p, struct vmap_area, list);

	/*
	 * s_show can race with remove_vm_area(): a vmap area whose ->vm is
	 * NULL is either being torn down or is a vm_map_ram allocation.
	 */
	if (!va->vm) {
		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
			(void *)va->va_start, (void *)va->va_end,
			va->va_end - va->va_start);

		goto final;
	}

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%pa", &v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_DMA_COHERENT)
		seq_puts(m, " dma-coherent");

	if (is_vmalloc_addr(v->pages))
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');

	/*
	 * As a final step, dump "unpurged" areas.
	 */
final:
	if (list_is_last(&va->list, &vmap_area_list))
		show_purge_info(m);

	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int __init proc_vmalloc_init(void)
{
	if (IS_ENABLED(CONFIG_NUMA))
		proc_create_seq_private("vmallocinfo", 0400, NULL,
				&vmalloc_op,
				nr_node_ids * sizeof(unsigned int), NULL);
	else
		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
	return 0;
}
module_init(proc_vmalloc_init);

#endif
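/*
 * For reference, a /proc/vmallocinfo line emitted by s_show() above has the
 * following shape (bracketed fields appear only when the corresponding data
 * is set; addresses are %pK-formatted, so they may be hashed or zeroed
 * depending on kptr_restrict):
 *
 *   0x<start>-0x<end>  <size> [<caller>] [pages=<n>] [phys=<addr>]
 *       [ioremap] [vmalloc] [vmap] [user] [dma-coherent] [vpages] [N<node>=<pages> ...]
 *
 * After the last busy area, show_purge_info() appends one "unpurged vm_area"
 * line per area still waiting to be purged.
 */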