1 /* 2 * linux/mm/vmalloc.c 3 * 4 * Copyright (C) 1993 Linus Torvalds 5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000 7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002 8 * Numa awareness, Christoph Lameter, SGI, June 2005 9 */ 10 11 #include <linux/vmalloc.h> 12 #include <linux/mm.h> 13 #include <linux/module.h> 14 #include <linux/highmem.h> 15 #include <linux/sched/signal.h> 16 #include <linux/slab.h> 17 #include <linux/spinlock.h> 18 #include <linux/interrupt.h> 19 #include <linux/proc_fs.h> 20 #include <linux/seq_file.h> 21 #include <linux/debugobjects.h> 22 #include <linux/kallsyms.h> 23 #include <linux/list.h> 24 #include <linux/notifier.h> 25 #include <linux/rbtree.h> 26 #include <linux/radix-tree.h> 27 #include <linux/rcupdate.h> 28 #include <linux/pfn.h> 29 #include <linux/kmemleak.h> 30 #include <linux/atomic.h> 31 #include <linux/compiler.h> 32 #include <linux/llist.h> 33 #include <linux/bitops.h> 34 35 #include <linux/uaccess.h> 36 #include <asm/tlbflush.h> 37 #include <asm/shmparam.h> 38 39 #include "internal.h" 40 41 struct vfree_deferred { 42 struct llist_head list; 43 struct work_struct wq; 44 }; 45 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred); 46 47 static void __vunmap(const void *, int); 48 49 static void free_work(struct work_struct *w) 50 { 51 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); 52 struct llist_node *llnode = llist_del_all(&p->list); 53 while (llnode) { 54 void *p = llnode; 55 llnode = llist_next(llnode); 56 __vunmap(p, 1); 57 } 58 } 59 60 /*** Page table manipulation functions ***/ 61 62 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) 63 { 64 pte_t *pte; 65 66 pte = pte_offset_kernel(pmd, addr); 67 do { 68 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); 69 WARN_ON(!pte_none(ptent) && !pte_present(ptent)); 70 } while (pte++, addr += PAGE_SIZE, addr != end); 71 } 72 73 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) 74 { 75 pmd_t *pmd; 76 unsigned long next; 77 78 pmd = pmd_offset(pud, addr); 79 do { 80 next = pmd_addr_end(addr, end); 81 if (pmd_clear_huge(pmd)) 82 continue; 83 if (pmd_none_or_clear_bad(pmd)) 84 continue; 85 vunmap_pte_range(pmd, addr, next); 86 } while (pmd++, addr = next, addr != end); 87 } 88 89 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end) 90 { 91 pud_t *pud; 92 unsigned long next; 93 94 pud = pud_offset(p4d, addr); 95 do { 96 next = pud_addr_end(addr, end); 97 if (pud_clear_huge(pud)) 98 continue; 99 if (pud_none_or_clear_bad(pud)) 100 continue; 101 vunmap_pmd_range(pud, addr, next); 102 } while (pud++, addr = next, addr != end); 103 } 104 105 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end) 106 { 107 p4d_t *p4d; 108 unsigned long next; 109 110 p4d = p4d_offset(pgd, addr); 111 do { 112 next = p4d_addr_end(addr, end); 113 if (p4d_clear_huge(p4d)) 114 continue; 115 if (p4d_none_or_clear_bad(p4d)) 116 continue; 117 vunmap_pud_range(p4d, addr, next); 118 } while (p4d++, addr = next, addr != end); 119 } 120 121 static void vunmap_page_range(unsigned long addr, unsigned long end) 122 { 123 pgd_t *pgd; 124 unsigned long next; 125 126 BUG_ON(addr >= end); 127 pgd = pgd_offset_k(addr); 128 do { 129 next = pgd_addr_end(addr, end); 130 if (pgd_none_or_clear_bad(pgd)) 131 continue; 132 vunmap_p4d_range(pgd, addr, next); 133 } while (pgd++, addr = 
next, addr != end); 134 } 135 136 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, 137 unsigned long end, pgprot_t prot, struct page **pages, int *nr) 138 { 139 pte_t *pte; 140 141 /* 142 * nr is a running index into the array which helps higher level 143 * callers keep track of where we're up to. 144 */ 145 146 pte = pte_alloc_kernel(pmd, addr); 147 if (!pte) 148 return -ENOMEM; 149 do { 150 struct page *page = pages[*nr]; 151 152 if (WARN_ON(!pte_none(*pte))) 153 return -EBUSY; 154 if (WARN_ON(!page)) 155 return -ENOMEM; 156 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); 157 (*nr)++; 158 } while (pte++, addr += PAGE_SIZE, addr != end); 159 return 0; 160 } 161 162 static int vmap_pmd_range(pud_t *pud, unsigned long addr, 163 unsigned long end, pgprot_t prot, struct page **pages, int *nr) 164 { 165 pmd_t *pmd; 166 unsigned long next; 167 168 pmd = pmd_alloc(&init_mm, pud, addr); 169 if (!pmd) 170 return -ENOMEM; 171 do { 172 next = pmd_addr_end(addr, end); 173 if (vmap_pte_range(pmd, addr, next, prot, pages, nr)) 174 return -ENOMEM; 175 } while (pmd++, addr = next, addr != end); 176 return 0; 177 } 178 179 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, 180 unsigned long end, pgprot_t prot, struct page **pages, int *nr) 181 { 182 pud_t *pud; 183 unsigned long next; 184 185 pud = pud_alloc(&init_mm, p4d, addr); 186 if (!pud) 187 return -ENOMEM; 188 do { 189 next = pud_addr_end(addr, end); 190 if (vmap_pmd_range(pud, addr, next, prot, pages, nr)) 191 return -ENOMEM; 192 } while (pud++, addr = next, addr != end); 193 return 0; 194 } 195 196 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, 197 unsigned long end, pgprot_t prot, struct page **pages, int *nr) 198 { 199 p4d_t *p4d; 200 unsigned long next; 201 202 p4d = p4d_alloc(&init_mm, pgd, addr); 203 if (!p4d) 204 return -ENOMEM; 205 do { 206 next = p4d_addr_end(addr, end); 207 if (vmap_pud_range(p4d, addr, next, prot, pages, nr)) 208 return -ENOMEM; 209 } while (p4d++, addr = next, addr != end); 210 return 0; 211 } 212 213 /* 214 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and 215 * will have pfns corresponding to the "pages" array. 216 * 217 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] 218 */ 219 static int vmap_page_range_noflush(unsigned long start, unsigned long end, 220 pgprot_t prot, struct page **pages) 221 { 222 pgd_t *pgd; 223 unsigned long next; 224 unsigned long addr = start; 225 int err = 0; 226 int nr = 0; 227 228 BUG_ON(addr >= end); 229 pgd = pgd_offset_k(addr); 230 do { 231 next = pgd_addr_end(addr, end); 232 err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr); 233 if (err) 234 return err; 235 } while (pgd++, addr = next, addr != end); 236 237 return nr; 238 } 239 240 static int vmap_page_range(unsigned long start, unsigned long end, 241 pgprot_t prot, struct page **pages) 242 { 243 int ret; 244 245 ret = vmap_page_range_noflush(start, end, prot, pages); 246 flush_cache_vmap(start, end); 247 return ret; 248 } 249 250 int is_vmalloc_or_module_addr(const void *x) 251 { 252 /* 253 * ARM, x86-64 and sparc64 put modules in a special place, 254 * and fall back on vmalloc() if that fails. Others 255 * just put it in the vmalloc space. 256 */ 257 #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 258 unsigned long addr = (unsigned long)x; 259 if (addr >= MODULES_VADDR && addr < MODULES_END) 260 return 1; 261 #endif 262 return is_vmalloc_addr(x); 263 } 264 265 /* 266 * Walk a vmap address to the struct page it maps. 
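 *
 * Illustrative example (a sketch; "vaddr" is a placeholder for an address
 * inside a live vmalloc()/vmap() mapping):
 *
 *	struct page *page = vmalloc_to_page(vaddr);
 *	unsigned long pfn = vmalloc_to_pfn(vaddr);
 *
 * Both helpers walk the kernel page tables down to the PTE level, as done
 * in vmalloc_to_page() below.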
267 */ 268 struct page *vmalloc_to_page(const void *vmalloc_addr) 269 { 270 unsigned long addr = (unsigned long) vmalloc_addr; 271 struct page *page = NULL; 272 pgd_t *pgd = pgd_offset_k(addr); 273 p4d_t *p4d; 274 pud_t *pud; 275 pmd_t *pmd; 276 pte_t *ptep, pte; 277 278 /* 279 * XXX we might need to change this if we add VIRTUAL_BUG_ON for 280 * architectures that do not vmalloc module space 281 */ 282 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); 283 284 if (pgd_none(*pgd)) 285 return NULL; 286 p4d = p4d_offset(pgd, addr); 287 if (p4d_none(*p4d)) 288 return NULL; 289 pud = pud_offset(p4d, addr); 290 if (pud_none(*pud)) 291 return NULL; 292 pmd = pmd_offset(pud, addr); 293 if (pmd_none(*pmd)) 294 return NULL; 295 296 ptep = pte_offset_map(pmd, addr); 297 pte = *ptep; 298 if (pte_present(pte)) 299 page = pte_page(pte); 300 pte_unmap(ptep); 301 return page; 302 } 303 EXPORT_SYMBOL(vmalloc_to_page); 304 305 /* 306 * Map a vmalloc()-space virtual address to the physical page frame number. 307 */ 308 unsigned long vmalloc_to_pfn(const void *vmalloc_addr) 309 { 310 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 311 } 312 EXPORT_SYMBOL(vmalloc_to_pfn); 313 314 315 /*** Global kva allocator ***/ 316 317 #define VM_VM_AREA 0x04 318 319 static DEFINE_SPINLOCK(vmap_area_lock); 320 /* Export for kexec only */ 321 LIST_HEAD(vmap_area_list); 322 static LLIST_HEAD(vmap_purge_list); 323 static struct rb_root vmap_area_root = RB_ROOT; 324 325 /* The vmap cache globals are protected by vmap_area_lock */ 326 static struct rb_node *free_vmap_cache; 327 static unsigned long cached_hole_size; 328 static unsigned long cached_vstart; 329 static unsigned long cached_align; 330 331 static unsigned long vmap_area_pcpu_hole; 332 333 static struct vmap_area *__find_vmap_area(unsigned long addr) 334 { 335 struct rb_node *n = vmap_area_root.rb_node; 336 337 while (n) { 338 struct vmap_area *va; 339 340 va = rb_entry(n, struct vmap_area, rb_node); 341 if (addr < va->va_start) 342 n = n->rb_left; 343 else if (addr >= va->va_end) 344 n = n->rb_right; 345 else 346 return va; 347 } 348 349 return NULL; 350 } 351 352 static void __insert_vmap_area(struct vmap_area *va) 353 { 354 struct rb_node **p = &vmap_area_root.rb_node; 355 struct rb_node *parent = NULL; 356 struct rb_node *tmp; 357 358 while (*p) { 359 struct vmap_area *tmp_va; 360 361 parent = *p; 362 tmp_va = rb_entry(parent, struct vmap_area, rb_node); 363 if (va->va_start < tmp_va->va_end) 364 p = &(*p)->rb_left; 365 else if (va->va_end > tmp_va->va_start) 366 p = &(*p)->rb_right; 367 else 368 BUG(); 369 } 370 371 rb_link_node(&va->rb_node, parent, p); 372 rb_insert_color(&va->rb_node, &vmap_area_root); 373 374 /* address-sort this list */ 375 tmp = rb_prev(&va->rb_node); 376 if (tmp) { 377 struct vmap_area *prev; 378 prev = rb_entry(tmp, struct vmap_area, rb_node); 379 list_add_rcu(&va->list, &prev->list); 380 } else 381 list_add_rcu(&va->list, &vmap_area_list); 382 } 383 384 static void purge_vmap_area_lazy(void); 385 386 static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); 387 388 /* 389 * Allocate a region of KVA of the specified size and alignment, within the 390 * vstart and vend. 
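 *
 * Illustrative internal usage (a sketch; this mirrors what vm_map_ram()
 * and new_vmap_block() further down actually do):
 *
 *	va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
 *			     node, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return NULL;
 *	...
 *	free_vmap_area(va);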
391 */ 392 static struct vmap_area *alloc_vmap_area(unsigned long size, 393 unsigned long align, 394 unsigned long vstart, unsigned long vend, 395 int node, gfp_t gfp_mask) 396 { 397 struct vmap_area *va; 398 struct rb_node *n; 399 unsigned long addr; 400 int purged = 0; 401 struct vmap_area *first; 402 403 BUG_ON(!size); 404 BUG_ON(offset_in_page(size)); 405 BUG_ON(!is_power_of_2(align)); 406 407 might_sleep(); 408 409 va = kmalloc_node(sizeof(struct vmap_area), 410 gfp_mask & GFP_RECLAIM_MASK, node); 411 if (unlikely(!va)) 412 return ERR_PTR(-ENOMEM); 413 414 /* 415 * Only scan the relevant parts containing pointers to other objects 416 * to avoid false negatives. 417 */ 418 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); 419 420 retry: 421 spin_lock(&vmap_area_lock); 422 /* 423 * Invalidate cache if we have more permissive parameters. 424 * cached_hole_size notes the largest hole noticed _below_ 425 * the vmap_area cached in free_vmap_cache: if size fits 426 * into that hole, we want to scan from vstart to reuse 427 * the hole instead of allocating above free_vmap_cache. 428 * Note that __free_vmap_area may update free_vmap_cache 429 * without updating cached_hole_size or cached_align. 430 */ 431 if (!free_vmap_cache || 432 size < cached_hole_size || 433 vstart < cached_vstart || 434 align < cached_align) { 435 nocache: 436 cached_hole_size = 0; 437 free_vmap_cache = NULL; 438 } 439 /* record if we encounter less permissive parameters */ 440 cached_vstart = vstart; 441 cached_align = align; 442 443 /* find starting point for our search */ 444 if (free_vmap_cache) { 445 first = rb_entry(free_vmap_cache, struct vmap_area, rb_node); 446 addr = ALIGN(first->va_end, align); 447 if (addr < vstart) 448 goto nocache; 449 if (addr + size < addr) 450 goto overflow; 451 452 } else { 453 addr = ALIGN(vstart, align); 454 if (addr + size < addr) 455 goto overflow; 456 457 n = vmap_area_root.rb_node; 458 first = NULL; 459 460 while (n) { 461 struct vmap_area *tmp; 462 tmp = rb_entry(n, struct vmap_area, rb_node); 463 if (tmp->va_end >= addr) { 464 first = tmp; 465 if (tmp->va_start <= addr) 466 break; 467 n = n->rb_left; 468 } else 469 n = n->rb_right; 470 } 471 472 if (!first) 473 goto found; 474 } 475 476 /* from the starting point, walk areas until a suitable hole is found */ 477 while (addr + size > first->va_start && addr + size <= vend) { 478 if (addr + cached_hole_size < first->va_start) 479 cached_hole_size = first->va_start - addr; 480 addr = ALIGN(first->va_end, align); 481 if (addr + size < addr) 482 goto overflow; 483 484 if (list_is_last(&first->list, &vmap_area_list)) 485 goto found; 486 487 first = list_next_entry(first, list); 488 } 489 490 found: 491 if (addr + size > vend) 492 goto overflow; 493 494 va->va_start = addr; 495 va->va_end = addr + size; 496 va->flags = 0; 497 __insert_vmap_area(va); 498 free_vmap_cache = &va->rb_node; 499 spin_unlock(&vmap_area_lock); 500 501 BUG_ON(!IS_ALIGNED(va->va_start, align)); 502 BUG_ON(va->va_start < vstart); 503 BUG_ON(va->va_end > vend); 504 505 return va; 506 507 overflow: 508 spin_unlock(&vmap_area_lock); 509 if (!purged) { 510 purge_vmap_area_lazy(); 511 purged = 1; 512 goto retry; 513 } 514 515 if (gfpflags_allow_blocking(gfp_mask)) { 516 unsigned long freed = 0; 517 blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); 518 if (freed > 0) { 519 purged = 0; 520 goto retry; 521 } 522 } 523 524 if (printk_ratelimit()) 525 pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", 526 
size); 527 kfree(va); 528 return ERR_PTR(-EBUSY); 529 } 530 531 int register_vmap_purge_notifier(struct notifier_block *nb) 532 { 533 return blocking_notifier_chain_register(&vmap_notify_list, nb); 534 } 535 EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); 536 537 int unregister_vmap_purge_notifier(struct notifier_block *nb) 538 { 539 return blocking_notifier_chain_unregister(&vmap_notify_list, nb); 540 } 541 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); 542 543 static void __free_vmap_area(struct vmap_area *va) 544 { 545 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); 546 547 if (free_vmap_cache) { 548 if (va->va_end < cached_vstart) { 549 free_vmap_cache = NULL; 550 } else { 551 struct vmap_area *cache; 552 cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node); 553 if (va->va_start <= cache->va_start) { 554 free_vmap_cache = rb_prev(&va->rb_node); 555 /* 556 * We don't try to update cached_hole_size or 557 * cached_align, but it won't go very wrong. 558 */ 559 } 560 } 561 } 562 rb_erase(&va->rb_node, &vmap_area_root); 563 RB_CLEAR_NODE(&va->rb_node); 564 list_del_rcu(&va->list); 565 566 /* 567 * Track the highest possible candidate for pcpu area 568 * allocation. Areas outside of vmalloc area can be returned 569 * here too, consider only end addresses which fall inside 570 * vmalloc area proper. 571 */ 572 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) 573 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); 574 575 kfree_rcu(va, rcu_head); 576 } 577 578 /* 579 * Free a region of KVA allocated by alloc_vmap_area 580 */ 581 static void free_vmap_area(struct vmap_area *va) 582 { 583 spin_lock(&vmap_area_lock); 584 __free_vmap_area(va); 585 spin_unlock(&vmap_area_lock); 586 } 587 588 /* 589 * Clear the pagetable entries of a given vmap_area 590 */ 591 static void unmap_vmap_area(struct vmap_area *va) 592 { 593 vunmap_page_range(va->va_start, va->va_end); 594 } 595 596 static void vmap_debug_free_range(unsigned long start, unsigned long end) 597 { 598 /* 599 * Unmap page tables and force a TLB flush immediately if pagealloc 600 * debugging is enabled. This catches use after free bugs similarly to 601 * those in linear kernel virtual address space after a page has been 602 * freed. 603 * 604 * All the lazy freeing logic is still retained, in order to minimise 605 * intrusiveness of this debugging feature. 606 * 607 * This is going to be *slow* (linear kernel virtual address debugging 608 * doesn't do a broadcast TLB flush so it is a lot faster). 609 */ 610 if (debug_pagealloc_enabled()) { 611 vunmap_page_range(start, end); 612 flush_tlb_kernel_range(start, end); 613 } 614 } 615 616 /* 617 * lazy_max_pages is the maximum amount of virtual address space we gather up 618 * before attempting to purge with a TLB flush. 619 * 620 * There is a tradeoff here: a larger number will cover more kernel page tables 621 * and take slightly longer to purge, but it will linearly reduce the number of 622 * global TLB flushes that must be performed. It would seem natural to scale 623 * this number up linearly with the number of CPUs (because vmapping activity 624 * could also scale linearly with the number of CPUs), however it is likely 625 * that in practice, workloads might be constrained in other ways that mean 626 * vmap activity will not scale linearly with CPUs. Also, I want to be 627 * conservative and not introduce a big latency on huge systems, so go with 628 * a less aggressive log scale. 
It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Serialize vmap purging. There is no actual critical section protected
 * by this lock, but we want to avoid concurrent calls for performance
 * reasons and to make the pcpu_get_vm_areas more deterministic.
 */
static DEFINE_MUTEX(vmap_purge_lock);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 */
static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
	struct llist_node *valist;
	struct vmap_area *va;
	struct vmap_area *n_va;
	bool do_free = false;

	lockdep_assert_held(&vmap_purge_lock);

	valist = llist_del_all(&vmap_purge_list);
	llist_for_each_entry(va, valist, purge_list) {
		if (va->va_start < start)
			start = va->va_start;
		if (va->va_end > end)
			end = va->va_end;
		do_free = true;
	}

	if (!do_free)
		return false;

	flush_tlb_kernel_range(start, end);

	spin_lock(&vmap_area_lock);
	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
		int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;

		__free_vmap_area(va);
		atomic_sub(nr, &vmap_lazy_nr);
		cond_resched_lock(&vmap_area_lock);
	}
	spin_unlock(&vmap_area_lock);
	return true;
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	if (mutex_trylock(&vmap_purge_lock)) {
		__purge_vmap_area_lazy(ULONG_MAX, 0);
		mutex_unlock(&vmap_purge_lock);
	}
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	mutex_lock(&vmap_purge_lock);
	purge_fragmented_blocks_allcpus();
	__purge_vmap_area_lazy(ULONG_MAX, 0);
	mutex_unlock(&vmap_purge_lock);
}

/*
 * Free a vmap area; the caller must ensure that the area has been unmapped
 * and that flush_cache_vunmap() has been called for the correct range
 * previously.
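 *
 * The area is only queued for lazy freeing here; once the number of
 * lazily freed pages exceeds lazy_max_pages(), a purge is kicked off.
 * Illustrative arithmetic (assuming 4 KiB pages and 4 online CPUs):
 * fls(4) == 3, so the threshold is 3 * (32 MiB / 4 KiB) = 24576 pages,
 * roughly 96 MiB of outstanding lazily freed address space.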
727 */ 728 static void free_vmap_area_noflush(struct vmap_area *va) 729 { 730 int nr_lazy; 731 732 nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, 733 &vmap_lazy_nr); 734 735 /* After this point, we may free va at any time */ 736 llist_add(&va->purge_list, &vmap_purge_list); 737 738 if (unlikely(nr_lazy > lazy_max_pages())) 739 try_purge_vmap_area_lazy(); 740 } 741 742 /* 743 * Free and unmap a vmap area 744 */ 745 static void free_unmap_vmap_area(struct vmap_area *va) 746 { 747 flush_cache_vunmap(va->va_start, va->va_end); 748 unmap_vmap_area(va); 749 free_vmap_area_noflush(va); 750 } 751 752 static struct vmap_area *find_vmap_area(unsigned long addr) 753 { 754 struct vmap_area *va; 755 756 spin_lock(&vmap_area_lock); 757 va = __find_vmap_area(addr); 758 spin_unlock(&vmap_area_lock); 759 760 return va; 761 } 762 763 /*** Per cpu kva allocator ***/ 764 765 /* 766 * vmap space is limited especially on 32 bit architectures. Ensure there is 767 * room for at least 16 percpu vmap blocks per CPU. 768 */ 769 /* 770 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able 771 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess 772 * instead (we just need a rough idea) 773 */ 774 #if BITS_PER_LONG == 32 775 #define VMALLOC_SPACE (128UL*1024*1024) 776 #else 777 #define VMALLOC_SPACE (128UL*1024*1024*1024) 778 #endif 779 780 #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) 781 #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ 782 #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ 783 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) 784 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ 785 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ 786 #define VMAP_BBMAP_BITS \ 787 VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ 788 VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ 789 VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) 790 791 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) 792 793 static bool vmap_initialized __read_mostly = false; 794 795 struct vmap_block_queue { 796 spinlock_t lock; 797 struct list_head free; 798 }; 799 800 struct vmap_block { 801 spinlock_t lock; 802 struct vmap_area *va; 803 unsigned long free, dirty; 804 unsigned long dirty_min, dirty_max; /*< dirty range */ 805 struct list_head free_list; 806 struct rcu_head rcu_head; 807 struct list_head purge; 808 }; 809 810 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ 811 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); 812 813 /* 814 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block 815 * in the free path. Could get rid of this if we change the API to return a 816 * "cookie" from alloc, to be passed to free. But no big deal yet. 817 */ 818 static DEFINE_SPINLOCK(vmap_block_tree_lock); 819 static RADIX_TREE(vmap_block_tree, GFP_ATOMIC); 820 821 /* 822 * We should probably have a fallback mechanism to allocate virtual memory 823 * out of partially filled vmap blocks. However vmap block sizing should be 824 * fairly reasonable according to the vmalloc size, so it shouldn't be a 825 * big problem. 
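 *
 * Illustrative sizing arithmetic (assuming 64-bit, 4 KiB pages and
 * NR_CPUS = 16): VMAP_MAX_ALLOC = 64 pages (256 KiB per vb_alloc()),
 * VMALLOC_PAGES = 128 GiB / 4 KiB = 33554432, so the unclamped value
 * 33554432 / 16 / 16 = 131072 is clamped to VMAP_BBMAP_BITS_MAX = 1024,
 * giving VMAP_BLOCK_SIZE = 1024 * 4 KiB = 4 MiB per vmap block.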
826 */ 827 828 static unsigned long addr_to_vb_idx(unsigned long addr) 829 { 830 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); 831 addr /= VMAP_BLOCK_SIZE; 832 return addr; 833 } 834 835 static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) 836 { 837 unsigned long addr; 838 839 addr = va_start + (pages_off << PAGE_SHIFT); 840 BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); 841 return (void *)addr; 842 } 843 844 /** 845 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this 846 * block. Of course pages number can't exceed VMAP_BBMAP_BITS 847 * @order: how many 2^order pages should be occupied in newly allocated block 848 * @gfp_mask: flags for the page level allocator 849 * 850 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno) 851 */ 852 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) 853 { 854 struct vmap_block_queue *vbq; 855 struct vmap_block *vb; 856 struct vmap_area *va; 857 unsigned long vb_idx; 858 int node, err; 859 void *vaddr; 860 861 node = numa_node_id(); 862 863 vb = kmalloc_node(sizeof(struct vmap_block), 864 gfp_mask & GFP_RECLAIM_MASK, node); 865 if (unlikely(!vb)) 866 return ERR_PTR(-ENOMEM); 867 868 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, 869 VMALLOC_START, VMALLOC_END, 870 node, gfp_mask); 871 if (IS_ERR(va)) { 872 kfree(vb); 873 return ERR_CAST(va); 874 } 875 876 err = radix_tree_preload(gfp_mask); 877 if (unlikely(err)) { 878 kfree(vb); 879 free_vmap_area(va); 880 return ERR_PTR(err); 881 } 882 883 vaddr = vmap_block_vaddr(va->va_start, 0); 884 spin_lock_init(&vb->lock); 885 vb->va = va; 886 /* At least something should be left free */ 887 BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); 888 vb->free = VMAP_BBMAP_BITS - (1UL << order); 889 vb->dirty = 0; 890 vb->dirty_min = VMAP_BBMAP_BITS; 891 vb->dirty_max = 0; 892 INIT_LIST_HEAD(&vb->free_list); 893 894 vb_idx = addr_to_vb_idx(va->va_start); 895 spin_lock(&vmap_block_tree_lock); 896 err = radix_tree_insert(&vmap_block_tree, vb_idx, vb); 897 spin_unlock(&vmap_block_tree_lock); 898 BUG_ON(err); 899 radix_tree_preload_end(); 900 901 vbq = &get_cpu_var(vmap_block_queue); 902 spin_lock(&vbq->lock); 903 list_add_tail_rcu(&vb->free_list, &vbq->free); 904 spin_unlock(&vbq->lock); 905 put_cpu_var(vmap_block_queue); 906 907 return vaddr; 908 } 909 910 static void free_vmap_block(struct vmap_block *vb) 911 { 912 struct vmap_block *tmp; 913 unsigned long vb_idx; 914 915 vb_idx = addr_to_vb_idx(vb->va->va_start); 916 spin_lock(&vmap_block_tree_lock); 917 tmp = radix_tree_delete(&vmap_block_tree, vb_idx); 918 spin_unlock(&vmap_block_tree_lock); 919 BUG_ON(tmp != vb); 920 921 free_vmap_area_noflush(vb->va); 922 kfree_rcu(vb, rcu_head); 923 } 924 925 static void purge_fragmented_blocks(int cpu) 926 { 927 LIST_HEAD(purge); 928 struct vmap_block *vb; 929 struct vmap_block *n_vb; 930 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 931 932 rcu_read_lock(); 933 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 934 935 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) 936 continue; 937 938 spin_lock(&vb->lock); 939 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { 940 vb->free = 0; /* prevent further allocs after releasing lock */ 941 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ 942 vb->dirty_min = 0; 943 vb->dirty_max = VMAP_BBMAP_BITS; 944 spin_lock(&vbq->lock); 945 list_del_rcu(&vb->free_list); 946 spin_unlock(&vbq->lock); 947 
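			/*
			 * At this point the block can no longer satisfy
			 * allocations (vb->free == 0) and has been taken off
			 * this CPU's free list; queue it locally so it can be
			 * freed after rcu_read_unlock(), outside the RCU
			 * read-side critical section.
			 */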
spin_unlock(&vb->lock); 948 list_add_tail(&vb->purge, &purge); 949 } else 950 spin_unlock(&vb->lock); 951 } 952 rcu_read_unlock(); 953 954 list_for_each_entry_safe(vb, n_vb, &purge, purge) { 955 list_del(&vb->purge); 956 free_vmap_block(vb); 957 } 958 } 959 960 static void purge_fragmented_blocks_allcpus(void) 961 { 962 int cpu; 963 964 for_each_possible_cpu(cpu) 965 purge_fragmented_blocks(cpu); 966 } 967 968 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) 969 { 970 struct vmap_block_queue *vbq; 971 struct vmap_block *vb; 972 void *vaddr = NULL; 973 unsigned int order; 974 975 BUG_ON(offset_in_page(size)); 976 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 977 if (WARN_ON(size == 0)) { 978 /* 979 * Allocating 0 bytes isn't what caller wants since 980 * get_order(0) returns funny result. Just warn and terminate 981 * early. 982 */ 983 return NULL; 984 } 985 order = get_order(size); 986 987 rcu_read_lock(); 988 vbq = &get_cpu_var(vmap_block_queue); 989 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 990 unsigned long pages_off; 991 992 spin_lock(&vb->lock); 993 if (vb->free < (1UL << order)) { 994 spin_unlock(&vb->lock); 995 continue; 996 } 997 998 pages_off = VMAP_BBMAP_BITS - vb->free; 999 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); 1000 vb->free -= 1UL << order; 1001 if (vb->free == 0) { 1002 spin_lock(&vbq->lock); 1003 list_del_rcu(&vb->free_list); 1004 spin_unlock(&vbq->lock); 1005 } 1006 1007 spin_unlock(&vb->lock); 1008 break; 1009 } 1010 1011 put_cpu_var(vmap_block_queue); 1012 rcu_read_unlock(); 1013 1014 /* Allocate new block if nothing was found */ 1015 if (!vaddr) 1016 vaddr = new_vmap_block(order, gfp_mask); 1017 1018 return vaddr; 1019 } 1020 1021 static void vb_free(const void *addr, unsigned long size) 1022 { 1023 unsigned long offset; 1024 unsigned long vb_idx; 1025 unsigned int order; 1026 struct vmap_block *vb; 1027 1028 BUG_ON(offset_in_page(size)); 1029 BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); 1030 1031 flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size); 1032 1033 order = get_order(size); 1034 1035 offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1); 1036 offset >>= PAGE_SHIFT; 1037 1038 vb_idx = addr_to_vb_idx((unsigned long)addr); 1039 rcu_read_lock(); 1040 vb = radix_tree_lookup(&vmap_block_tree, vb_idx); 1041 rcu_read_unlock(); 1042 BUG_ON(!vb); 1043 1044 vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); 1045 1046 spin_lock(&vb->lock); 1047 1048 /* Expand dirty range */ 1049 vb->dirty_min = min(vb->dirty_min, offset); 1050 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); 1051 1052 vb->dirty += 1UL << order; 1053 if (vb->dirty == VMAP_BBMAP_BITS) { 1054 BUG_ON(vb->free); 1055 spin_unlock(&vb->lock); 1056 free_vmap_block(vb); 1057 } else 1058 spin_unlock(&vb->lock); 1059 } 1060 1061 /** 1062 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer 1063 * 1064 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily 1065 * to amortize TLB flushing overheads. What this means is that any page you 1066 * have now, may, in a former life, have been mapped into kernel virtual 1067 * address by the vmap layer and so there might be some CPUs with TLB entries 1068 * still referencing that page (additional to the regular 1:1 kernel mapping). 1069 * 1070 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can 1071 * be sure that none of the pages we have control over will have any aliases 1072 * from the vmap layer. 
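 *
 * Illustrative use (a sketch; exact call sites are architecture and driver
 * specific): code that is about to change the attributes of, or hand over
 * to hardware, pages that may previously have been mapped through the vmap
 * layer can call vm_unmap_aliases() first so that no stale lazy TLB
 * entries for those pages remain.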
1073 */ 1074 void vm_unmap_aliases(void) 1075 { 1076 unsigned long start = ULONG_MAX, end = 0; 1077 int cpu; 1078 int flush = 0; 1079 1080 if (unlikely(!vmap_initialized)) 1081 return; 1082 1083 might_sleep(); 1084 1085 for_each_possible_cpu(cpu) { 1086 struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); 1087 struct vmap_block *vb; 1088 1089 rcu_read_lock(); 1090 list_for_each_entry_rcu(vb, &vbq->free, free_list) { 1091 spin_lock(&vb->lock); 1092 if (vb->dirty) { 1093 unsigned long va_start = vb->va->va_start; 1094 unsigned long s, e; 1095 1096 s = va_start + (vb->dirty_min << PAGE_SHIFT); 1097 e = va_start + (vb->dirty_max << PAGE_SHIFT); 1098 1099 start = min(s, start); 1100 end = max(e, end); 1101 1102 flush = 1; 1103 } 1104 spin_unlock(&vb->lock); 1105 } 1106 rcu_read_unlock(); 1107 } 1108 1109 mutex_lock(&vmap_purge_lock); 1110 purge_fragmented_blocks_allcpus(); 1111 if (!__purge_vmap_area_lazy(start, end) && flush) 1112 flush_tlb_kernel_range(start, end); 1113 mutex_unlock(&vmap_purge_lock); 1114 } 1115 EXPORT_SYMBOL_GPL(vm_unmap_aliases); 1116 1117 /** 1118 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram 1119 * @mem: the pointer returned by vm_map_ram 1120 * @count: the count passed to that vm_map_ram call (cannot unmap partial) 1121 */ 1122 void vm_unmap_ram(const void *mem, unsigned int count) 1123 { 1124 unsigned long size = (unsigned long)count << PAGE_SHIFT; 1125 unsigned long addr = (unsigned long)mem; 1126 struct vmap_area *va; 1127 1128 might_sleep(); 1129 BUG_ON(!addr); 1130 BUG_ON(addr < VMALLOC_START); 1131 BUG_ON(addr > VMALLOC_END); 1132 BUG_ON(!PAGE_ALIGNED(addr)); 1133 1134 debug_check_no_locks_freed(mem, size); 1135 vmap_debug_free_range(addr, addr+size); 1136 1137 if (likely(count <= VMAP_MAX_ALLOC)) { 1138 vb_free(mem, size); 1139 return; 1140 } 1141 1142 va = find_vmap_area(addr); 1143 BUG_ON(!va); 1144 free_unmap_vmap_area(va); 1145 } 1146 EXPORT_SYMBOL(vm_unmap_ram); 1147 1148 /** 1149 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) 1150 * @pages: an array of pointers to the pages to be mapped 1151 * @count: number of pages 1152 * @node: prefer to allocate data structures on this node 1153 * @prot: memory protection to use. PAGE_KERNEL for regular RAM 1154 * 1155 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be 1156 * faster than vmap so it's good. But if you mix long-life and short-life 1157 * objects with vm_map_ram(), it could consume lots of address space through 1158 * fragmentation (especially on a 32bit machine). You could see failures in 1159 * the end. Please use this function for short-lived objects. 
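 *
 * Illustrative example (a sketch; "pages", "nr" and the error handling are
 * placeholders):
 *
 *	void *mem = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (!mem)
 *		return -ENOMEM;
 *	... access the nr * PAGE_SIZE bytes at mem ...
 *	vm_unmap_ram(mem, nr);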
1160 * 1161 * Returns: a pointer to the address that has been mapped, or %NULL on failure 1162 */ 1163 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) 1164 { 1165 unsigned long size = (unsigned long)count << PAGE_SHIFT; 1166 unsigned long addr; 1167 void *mem; 1168 1169 if (likely(count <= VMAP_MAX_ALLOC)) { 1170 mem = vb_alloc(size, GFP_KERNEL); 1171 if (IS_ERR(mem)) 1172 return NULL; 1173 addr = (unsigned long)mem; 1174 } else { 1175 struct vmap_area *va; 1176 va = alloc_vmap_area(size, PAGE_SIZE, 1177 VMALLOC_START, VMALLOC_END, node, GFP_KERNEL); 1178 if (IS_ERR(va)) 1179 return NULL; 1180 1181 addr = va->va_start; 1182 mem = (void *)addr; 1183 } 1184 if (vmap_page_range(addr, addr + size, prot, pages) < 0) { 1185 vm_unmap_ram(mem, count); 1186 return NULL; 1187 } 1188 return mem; 1189 } 1190 EXPORT_SYMBOL(vm_map_ram); 1191 1192 static struct vm_struct *vmlist __initdata; 1193 /** 1194 * vm_area_add_early - add vmap area early during boot 1195 * @vm: vm_struct to add 1196 * 1197 * This function is used to add fixed kernel vm area to vmlist before 1198 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags 1199 * should contain proper values and the other fields should be zero. 1200 * 1201 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1202 */ 1203 void __init vm_area_add_early(struct vm_struct *vm) 1204 { 1205 struct vm_struct *tmp, **p; 1206 1207 BUG_ON(vmap_initialized); 1208 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { 1209 if (tmp->addr >= vm->addr) { 1210 BUG_ON(tmp->addr < vm->addr + vm->size); 1211 break; 1212 } else 1213 BUG_ON(tmp->addr + tmp->size > vm->addr); 1214 } 1215 vm->next = *p; 1216 *p = vm; 1217 } 1218 1219 /** 1220 * vm_area_register_early - register vmap area early during boot 1221 * @vm: vm_struct to register 1222 * @align: requested alignment 1223 * 1224 * This function is used to register kernel vm area before 1225 * vmalloc_init() is called. @vm->size and @vm->flags should contain 1226 * proper values on entry and other fields should be zero. On return, 1227 * vm->addr contains the allocated address. 1228 * 1229 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 1230 */ 1231 void __init vm_area_register_early(struct vm_struct *vm, size_t align) 1232 { 1233 static size_t vm_init_off __initdata; 1234 unsigned long addr; 1235 1236 addr = ALIGN(VMALLOC_START + vm_init_off, align); 1237 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; 1238 1239 vm->addr = (void *)addr; 1240 1241 vm_area_add_early(vm); 1242 } 1243 1244 void __init vmalloc_init(void) 1245 { 1246 struct vmap_area *va; 1247 struct vm_struct *tmp; 1248 int i; 1249 1250 for_each_possible_cpu(i) { 1251 struct vmap_block_queue *vbq; 1252 struct vfree_deferred *p; 1253 1254 vbq = &per_cpu(vmap_block_queue, i); 1255 spin_lock_init(&vbq->lock); 1256 INIT_LIST_HEAD(&vbq->free); 1257 p = &per_cpu(vfree_deferred, i); 1258 init_llist_head(&p->list); 1259 INIT_WORK(&p->wq, free_work); 1260 } 1261 1262 /* Import existing vmlist entries. 
*/ 1263 for (tmp = vmlist; tmp; tmp = tmp->next) { 1264 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); 1265 va->flags = VM_VM_AREA; 1266 va->va_start = (unsigned long)tmp->addr; 1267 va->va_end = va->va_start + tmp->size; 1268 va->vm = tmp; 1269 __insert_vmap_area(va); 1270 } 1271 1272 vmap_area_pcpu_hole = VMALLOC_END; 1273 1274 vmap_initialized = true; 1275 } 1276 1277 /** 1278 * map_kernel_range_noflush - map kernel VM area with the specified pages 1279 * @addr: start of the VM area to map 1280 * @size: size of the VM area to map 1281 * @prot: page protection flags to use 1282 * @pages: pages to map 1283 * 1284 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size 1285 * specify should have been allocated using get_vm_area() and its 1286 * friends. 1287 * 1288 * NOTE: 1289 * This function does NOT do any cache flushing. The caller is 1290 * responsible for calling flush_cache_vmap() on to-be-mapped areas 1291 * before calling this function. 1292 * 1293 * RETURNS: 1294 * The number of pages mapped on success, -errno on failure. 1295 */ 1296 int map_kernel_range_noflush(unsigned long addr, unsigned long size, 1297 pgprot_t prot, struct page **pages) 1298 { 1299 return vmap_page_range_noflush(addr, addr + size, prot, pages); 1300 } 1301 1302 /** 1303 * unmap_kernel_range_noflush - unmap kernel VM area 1304 * @addr: start of the VM area to unmap 1305 * @size: size of the VM area to unmap 1306 * 1307 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size 1308 * specify should have been allocated using get_vm_area() and its 1309 * friends. 1310 * 1311 * NOTE: 1312 * This function does NOT do any cache flushing. The caller is 1313 * responsible for calling flush_cache_vunmap() on to-be-mapped areas 1314 * before calling this function and flush_tlb_kernel_range() after. 1315 */ 1316 void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 1317 { 1318 vunmap_page_range(addr, addr + size); 1319 } 1320 EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 1321 1322 /** 1323 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB 1324 * @addr: start of the VM area to unmap 1325 * @size: size of the VM area to unmap 1326 * 1327 * Similar to unmap_kernel_range_noflush() but flushes vcache before 1328 * the unmapping and tlb after. 1329 */ 1330 void unmap_kernel_range(unsigned long addr, unsigned long size) 1331 { 1332 unsigned long end = addr + size; 1333 1334 flush_cache_vunmap(addr, end); 1335 vunmap_page_range(addr, end); 1336 flush_tlb_kernel_range(addr, end); 1337 } 1338 EXPORT_SYMBOL_GPL(unmap_kernel_range); 1339 1340 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) 1341 { 1342 unsigned long addr = (unsigned long)area->addr; 1343 unsigned long end = addr + get_vm_area_size(area); 1344 int err; 1345 1346 err = vmap_page_range(addr, end, prot, pages); 1347 1348 return err > 0 ? 0 : err; 1349 } 1350 EXPORT_SYMBOL_GPL(map_vm_area); 1351 1352 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, 1353 unsigned long flags, const void *caller) 1354 { 1355 spin_lock(&vmap_area_lock); 1356 vm->flags = flags; 1357 vm->addr = (void *)va->va_start; 1358 vm->size = va->va_end - va->va_start; 1359 vm->caller = caller; 1360 va->vm = vm; 1361 va->flags |= VM_VM_AREA; 1362 spin_unlock(&vmap_area_lock); 1363 } 1364 1365 static void clear_vm_uninitialized_flag(struct vm_struct *vm) 1366 { 1367 /* 1368 * Before removing VM_UNINITIALIZED, 1369 * we should make sure that vm has proper values. 
 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, get_count_order_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area - find a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	might_sleep();

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, get_vm_area_size(area));
	debug_check_no_obj_freed(addr, get_vm_area_size(area));

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_pages(page, 0);
		}

		kvfree(area->pages);
	}

	kfree(area);
	return;
}

static inline void __vfree_deferred(const void *addr)
{
	/*
	 * Use raw_cpu_ptr() because this can be called from preemptible
	 * context. Preemption is absolutely fine here, because the llist_add()
	 * implementation is lockless, so it works even if we are adding to
	 * another cpu's list. schedule_work() should be fine with this too.
	 */
	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);

	if (llist_add((struct llist_node *)addr, &p->list))
		schedule_work(&p->wq);
}

/**
 * vfree_atomic - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * This one is just like vfree() but can be called in any atomic context
 * except NMIs.
 */
void vfree_atomic(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	__vfree_deferred(addr);
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt()))
		__vfree_deferred(addr);
	else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
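 *
 * Illustrative pairing with vmap() (a sketch; "pages" and "nr" are
 * placeholders):
 *
 *	void *addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	vunmap(addr);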
1607 */ 1608 void vunmap(const void *addr) 1609 { 1610 BUG_ON(in_interrupt()); 1611 might_sleep(); 1612 if (addr) 1613 __vunmap(addr, 0); 1614 } 1615 EXPORT_SYMBOL(vunmap); 1616 1617 /** 1618 * vmap - map an array of pages into virtually contiguous space 1619 * @pages: array of page pointers 1620 * @count: number of pages to map 1621 * @flags: vm_area->flags 1622 * @prot: page protection for the mapping 1623 * 1624 * Maps @count pages from @pages into contiguous kernel virtual 1625 * space. 1626 */ 1627 void *vmap(struct page **pages, unsigned int count, 1628 unsigned long flags, pgprot_t prot) 1629 { 1630 struct vm_struct *area; 1631 unsigned long size; /* In bytes */ 1632 1633 might_sleep(); 1634 1635 if (count > totalram_pages) 1636 return NULL; 1637 1638 size = (unsigned long)count << PAGE_SHIFT; 1639 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); 1640 if (!area) 1641 return NULL; 1642 1643 if (map_vm_area(area, prot, pages)) { 1644 vunmap(area->addr); 1645 return NULL; 1646 } 1647 1648 return area->addr; 1649 } 1650 EXPORT_SYMBOL(vmap); 1651 1652 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, 1653 pgprot_t prot, int node) 1654 { 1655 struct page **pages; 1656 unsigned int nr_pages, array_size, i; 1657 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; 1658 const gfp_t alloc_mask = gfp_mask | __GFP_HIGHMEM | __GFP_NOWARN; 1659 1660 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; 1661 array_size = (nr_pages * sizeof(struct page *)); 1662 1663 area->nr_pages = nr_pages; 1664 /* Please note that the recursion is strictly bounded. */ 1665 if (array_size > PAGE_SIZE) { 1666 pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM, 1667 PAGE_KERNEL, node, area->caller); 1668 } else { 1669 pages = kmalloc_node(array_size, nested_gfp, node); 1670 } 1671 area->pages = pages; 1672 if (!area->pages) { 1673 remove_vm_area(area->addr); 1674 kfree(area); 1675 return NULL; 1676 } 1677 1678 for (i = 0; i < area->nr_pages; i++) { 1679 struct page *page; 1680 1681 if (fatal_signal_pending(current)) { 1682 area->nr_pages = i; 1683 goto fail_no_warn; 1684 } 1685 1686 if (node == NUMA_NO_NODE) 1687 page = alloc_page(alloc_mask); 1688 else 1689 page = alloc_pages_node(node, alloc_mask, 0); 1690 1691 if (unlikely(!page)) { 1692 /* Successfully allocated i pages, free them in __vunmap() */ 1693 area->nr_pages = i; 1694 goto fail; 1695 } 1696 area->pages[i] = page; 1697 if (gfpflags_allow_blocking(gfp_mask)) 1698 cond_resched(); 1699 } 1700 1701 if (map_vm_area(area, prot, pages)) 1702 goto fail; 1703 return area->addr; 1704 1705 fail: 1706 warn_alloc(gfp_mask, NULL, 1707 "vmalloc: allocation failure, allocated %ld of %ld bytes", 1708 (area->nr_pages*PAGE_SIZE), area->size); 1709 fail_no_warn: 1710 vfree(area->addr); 1711 return NULL; 1712 } 1713 1714 /** 1715 * __vmalloc_node_range - allocate virtually contiguous memory 1716 * @size: allocation size 1717 * @align: desired alignment 1718 * @start: vm area range start 1719 * @end: vm area range end 1720 * @gfp_mask: flags for the page level allocator 1721 * @prot: protection mask for the allocated pages 1722 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) 1723 * @node: node to use for allocation or NUMA_NO_NODE 1724 * @caller: caller's return address 1725 * 1726 * Allocate enough pages to cover @size from the page level 1727 * allocator with @gfp_mask flags. Map them into contiguous 1728 * kernel virtual space, using a pagetable protection of @prot. 
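 *
 * Illustrative example (a sketch; module_alloc()-style callers on
 * architectures with a dedicated module area use a pattern along these
 * lines, with per-architecture details):
 *
 *	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 *				 GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
 *				 NUMA_NO_NODE, __builtin_return_address(0));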
1729 */ 1730 void *__vmalloc_node_range(unsigned long size, unsigned long align, 1731 unsigned long start, unsigned long end, gfp_t gfp_mask, 1732 pgprot_t prot, unsigned long vm_flags, int node, 1733 const void *caller) 1734 { 1735 struct vm_struct *area; 1736 void *addr; 1737 unsigned long real_size = size; 1738 1739 size = PAGE_ALIGN(size); 1740 if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1741 goto fail; 1742 1743 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | 1744 vm_flags, start, end, node, gfp_mask, caller); 1745 if (!area) 1746 goto fail; 1747 1748 addr = __vmalloc_area_node(area, gfp_mask, prot, node); 1749 if (!addr) 1750 return NULL; 1751 1752 /* 1753 * In this function, newly allocated vm_struct has VM_UNINITIALIZED 1754 * flag. It means that vm_struct is not fully initialized. 1755 * Now, it is fully initialized, so remove this flag here. 1756 */ 1757 clear_vm_uninitialized_flag(area); 1758 1759 /* 1760 * A ref_count = 2 is needed because vm_struct allocated in 1761 * __get_vm_area_node() contains a reference to the virtual address of 1762 * the vmalloc'ed block. 1763 */ 1764 kmemleak_alloc(addr, real_size, 2, gfp_mask); 1765 1766 return addr; 1767 1768 fail: 1769 warn_alloc(gfp_mask, NULL, 1770 "vmalloc: allocation failure: %lu bytes", real_size); 1771 return NULL; 1772 } 1773 1774 /** 1775 * __vmalloc_node - allocate virtually contiguous memory 1776 * @size: allocation size 1777 * @align: desired alignment 1778 * @gfp_mask: flags for the page level allocator 1779 * @prot: protection mask for the allocated pages 1780 * @node: node to use for allocation or NUMA_NO_NODE 1781 * @caller: caller's return address 1782 * 1783 * Allocate enough pages to cover @size from the page level 1784 * allocator with @gfp_mask flags. Map them into contiguous 1785 * kernel virtual space, using a pagetable protection of @prot. 1786 * 1787 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_REPEAT 1788 * and __GFP_NOFAIL are not supported 1789 * 1790 * Any use of gfp flags outside of GFP_KERNEL should be consulted 1791 * with mm people. 1792 * 1793 */ 1794 void *__vmalloc_node(unsigned long size, unsigned long align, 1795 gfp_t gfp_mask, pgprot_t prot, 1796 int node, const void *caller) 1797 { 1798 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1799 gfp_mask, prot, 0, node, caller); 1800 } 1801 1802 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1803 { 1804 return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE, 1805 __builtin_return_address(0)); 1806 } 1807 EXPORT_SYMBOL(__vmalloc); 1808 1809 /** 1810 * vmalloc - allocate virtually contiguous memory 1811 * @size: allocation size 1812 * Allocate enough pages to cover @size from the page level 1813 * allocator and map them into contiguous kernel virtual space. 1814 * 1815 * For tight control over page level allocator and protection flags 1816 * use __vmalloc() instead. 1817 */ 1818 void *vmalloc(unsigned long size) 1819 { 1820 return __vmalloc_node_flags(size, NUMA_NO_NODE, 1821 GFP_KERNEL); 1822 } 1823 EXPORT_SYMBOL(vmalloc); 1824 1825 /** 1826 * vzalloc - allocate virtually contiguous memory with zero fill 1827 * @size: allocation size 1828 * Allocate enough pages to cover @size from the page level 1829 * allocator and map them into contiguous kernel virtual space. 1830 * The memory allocated is set to zero. 1831 * 1832 * For tight control over page level allocator and protection flags 1833 * use __vmalloc() instead. 
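 *
 * Illustrative example (a sketch; "tbl" and "nentries" are placeholders):
 *
 *	struct entry *tbl = vzalloc(nentries * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);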
1834 */ 1835 void *vzalloc(unsigned long size) 1836 { 1837 return __vmalloc_node_flags(size, NUMA_NO_NODE, 1838 GFP_KERNEL | __GFP_ZERO); 1839 } 1840 EXPORT_SYMBOL(vzalloc); 1841 1842 /** 1843 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace 1844 * @size: allocation size 1845 * 1846 * The resulting memory area is zeroed so it can be mapped to userspace 1847 * without leaking data. 1848 */ 1849 void *vmalloc_user(unsigned long size) 1850 { 1851 struct vm_struct *area; 1852 void *ret; 1853 1854 ret = __vmalloc_node(size, SHMLBA, 1855 GFP_KERNEL | __GFP_ZERO, 1856 PAGE_KERNEL, NUMA_NO_NODE, 1857 __builtin_return_address(0)); 1858 if (ret) { 1859 area = find_vm_area(ret); 1860 area->flags |= VM_USERMAP; 1861 } 1862 return ret; 1863 } 1864 EXPORT_SYMBOL(vmalloc_user); 1865 1866 /** 1867 * vmalloc_node - allocate memory on a specific node 1868 * @size: allocation size 1869 * @node: numa node 1870 * 1871 * Allocate enough pages to cover @size from the page level 1872 * allocator and map them into contiguous kernel virtual space. 1873 * 1874 * For tight control over page level allocator and protection flags 1875 * use __vmalloc() instead. 1876 */ 1877 void *vmalloc_node(unsigned long size, int node) 1878 { 1879 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL, 1880 node, __builtin_return_address(0)); 1881 } 1882 EXPORT_SYMBOL(vmalloc_node); 1883 1884 /** 1885 * vzalloc_node - allocate memory on a specific node with zero fill 1886 * @size: allocation size 1887 * @node: numa node 1888 * 1889 * Allocate enough pages to cover @size from the page level 1890 * allocator and map them into contiguous kernel virtual space. 1891 * The memory allocated is set to zero. 1892 * 1893 * For tight control over page level allocator and protection flags 1894 * use __vmalloc_node() instead. 1895 */ 1896 void *vzalloc_node(unsigned long size, int node) 1897 { 1898 return __vmalloc_node_flags(size, node, 1899 GFP_KERNEL | __GFP_ZERO); 1900 } 1901 EXPORT_SYMBOL(vzalloc_node); 1902 1903 #ifndef PAGE_KERNEL_EXEC 1904 # define PAGE_KERNEL_EXEC PAGE_KERNEL 1905 #endif 1906 1907 /** 1908 * vmalloc_exec - allocate virtually contiguous, executable memory 1909 * @size: allocation size 1910 * 1911 * Kernel-internal function to allocate enough pages to cover @size 1912 * the page level allocator and map them into contiguous and 1913 * executable kernel virtual space. 1914 * 1915 * For tight control over page level allocator and protection flags 1916 * use __vmalloc() instead. 1917 */ 1918 1919 void *vmalloc_exec(unsigned long size) 1920 { 1921 return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC, 1922 NUMA_NO_NODE, __builtin_return_address(0)); 1923 } 1924 1925 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) 1926 #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL 1927 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) 1928 #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL 1929 #else 1930 #define GFP_VMALLOC32 GFP_KERNEL 1931 #endif 1932 1933 /** 1934 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) 1935 * @size: allocation size 1936 * 1937 * Allocate enough 32bit PA addressable pages to cover @size from the 1938 * page level allocator and map them into contiguous kernel virtual space. 
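 *
 * Illustrative note (a sketch): the *_user variants (vmalloc_user(),
 * vmalloc_32_user()) additionally zero the area and set VM_USERMAP so the
 * buffer can later be handed to remap_vmalloc_range() from a driver's
 * mmap handler, e.g.:
 *
 *	buf = vmalloc_32_user(len);
 *	...
 *	err = remap_vmalloc_range(vma, buf, 0);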
1939 */ 1940 void *vmalloc_32(unsigned long size) 1941 { 1942 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, 1943 NUMA_NO_NODE, __builtin_return_address(0)); 1944 } 1945 EXPORT_SYMBOL(vmalloc_32); 1946 1947 /** 1948 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory 1949 * @size: allocation size 1950 * 1951 * The resulting memory area is 32bit addressable and zeroed so it can be 1952 * mapped to userspace without leaking data. 1953 */ 1954 void *vmalloc_32_user(unsigned long size) 1955 { 1956 struct vm_struct *area; 1957 void *ret; 1958 1959 ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, 1960 NUMA_NO_NODE, __builtin_return_address(0)); 1961 if (ret) { 1962 area = find_vm_area(ret); 1963 area->flags |= VM_USERMAP; 1964 } 1965 return ret; 1966 } 1967 EXPORT_SYMBOL(vmalloc_32_user); 1968 1969 /* 1970 * small helper routine , copy contents to buf from addr. 1971 * If the page is not present, fill zero. 1972 */ 1973 1974 static int aligned_vread(char *buf, char *addr, unsigned long count) 1975 { 1976 struct page *p; 1977 int copied = 0; 1978 1979 while (count) { 1980 unsigned long offset, length; 1981 1982 offset = offset_in_page(addr); 1983 length = PAGE_SIZE - offset; 1984 if (length > count) 1985 length = count; 1986 p = vmalloc_to_page(addr); 1987 /* 1988 * To do safe access to this _mapped_ area, we need 1989 * lock. But adding lock here means that we need to add 1990 * overhead of vmalloc()/vfree() calles for this _debug_ 1991 * interface, rarely used. Instead of that, we'll use 1992 * kmap() and get small overhead in this access function. 1993 */ 1994 if (p) { 1995 /* 1996 * we can expect USER0 is not used (see vread/vwrite's 1997 * function description) 1998 */ 1999 void *map = kmap_atomic(p); 2000 memcpy(buf, map + offset, length); 2001 kunmap_atomic(map); 2002 } else 2003 memset(buf, 0, length); 2004 2005 addr += length; 2006 buf += length; 2007 copied += length; 2008 count -= length; 2009 } 2010 return copied; 2011 } 2012 2013 static int aligned_vwrite(char *buf, char *addr, unsigned long count) 2014 { 2015 struct page *p; 2016 int copied = 0; 2017 2018 while (count) { 2019 unsigned long offset, length; 2020 2021 offset = offset_in_page(addr); 2022 length = PAGE_SIZE - offset; 2023 if (length > count) 2024 length = count; 2025 p = vmalloc_to_page(addr); 2026 /* 2027 * To do safe access to this _mapped_ area, we need 2028 * lock. But adding lock here means that we need to add 2029 * overhead of vmalloc()/vfree() calles for this _debug_ 2030 * interface, rarely used. Instead of that, we'll use 2031 * kmap() and get small overhead in this access function. 2032 */ 2033 if (p) { 2034 /* 2035 * we can expect USER0 is not used (see vread/vwrite's 2036 * function description) 2037 */ 2038 void *map = kmap_atomic(p); 2039 memcpy(map + offset, buf, length); 2040 kunmap_atomic(map); 2041 } 2042 addr += length; 2043 buf += length; 2044 copied += length; 2045 count -= length; 2046 } 2047 return copied; 2048 } 2049 2050 /** 2051 * vread() - read vmalloc area in a safe way. 2052 * @buf: buffer for reading data 2053 * @addr: vm address. 2054 * @count: number of bytes to be read. 2055 * 2056 * Returns # of bytes which addr and buf should be increased. 2057 * (same number to @count). Returns 0 if [addr...addr+count) doesn't 2058 * includes any intersect with alive vmalloc area. 2059 * 2060 * This function checks that addr is a valid vmalloc'ed area, and 2061 * copy data from that area to a given buffer. 
If the given memory range 2062 * of [addr...addr+count) includes some valid address, data is copied to 2063 * the proper area of @buf. If there are memory holes, they'll be zero-filled. 2064 * IOREMAP areas are treated as memory holes and no copy is done. 2065 * 2066 * If [addr...addr+count) doesn't intersect any live 2067 * vm_struct area, returns 0. @buf should be a kernel buffer. 2068 * 2069 * Note: In usual ops, vread() is never necessary because the caller 2070 * should know the vmalloc() area is valid and can use memcpy(). 2071 * This is for routines which have to access the vmalloc area without 2072 * any information, such as /dev/kmem. 2073 * 2074 */ 2075 2076 long vread(char *buf, char *addr, unsigned long count) 2077 { 2078 struct vmap_area *va; 2079 struct vm_struct *vm; 2080 char *vaddr, *buf_start = buf; 2081 unsigned long buflen = count; 2082 unsigned long n; 2083 2084 /* Don't allow overflow */ 2085 if ((unsigned long) addr + count < count) 2086 count = -(unsigned long) addr; 2087 2088 spin_lock(&vmap_area_lock); 2089 list_for_each_entry(va, &vmap_area_list, list) { 2090 if (!count) 2091 break; 2092 2093 if (!(va->flags & VM_VM_AREA)) 2094 continue; 2095 2096 vm = va->vm; 2097 vaddr = (char *) vm->addr; 2098 if (addr >= vaddr + get_vm_area_size(vm)) 2099 continue; 2100 while (addr < vaddr) { 2101 if (count == 0) 2102 goto finished; 2103 *buf = '\0'; 2104 buf++; 2105 addr++; 2106 count--; 2107 } 2108 n = vaddr + get_vm_area_size(vm) - addr; 2109 if (n > count) 2110 n = count; 2111 if (!(vm->flags & VM_IOREMAP)) 2112 aligned_vread(buf, addr, n); 2113 else /* IOREMAP area is treated as memory hole */ 2114 memset(buf, 0, n); 2115 buf += n; 2116 addr += n; 2117 count -= n; 2118 } 2119 finished: 2120 spin_unlock(&vmap_area_lock); 2121 2122 if (buf == buf_start) 2123 return 0; 2124 /* zero-fill memory holes */ 2125 if (buf != buf_start + buflen) 2126 memset(buf, 0, buflen - (buf - buf_start)); 2127 2128 return buflen; 2129 } 2130 2131 /** 2132 * vwrite() - write vmalloc area in a safe way. 2133 * @buf: buffer for source data 2134 * @addr: vm address. 2135 * @count: number of bytes to be written. 2136 * 2137 * Returns the number of bytes by which addr and buf should be increased 2138 * (same number as @count). 2139 * If [addr...addr+count) doesn't intersect any valid 2140 * vmalloc area, returns 0. 2141 * 2142 * This function checks that addr is a valid vmalloc'ed area, and 2143 * copies data from a buffer to the given addr. If the specified range of 2144 * [addr...addr+count) includes some valid address, data is copied from 2145 * the proper area of @buf. If there are memory holes, nothing is copied into them. 2146 * IOREMAP areas are treated as memory holes and no copy is done. 2147 * 2148 * If [addr...addr+count) doesn't intersect any live 2149 * vm_struct area, returns 0. @buf should be a kernel buffer. 2150 * 2151 * Note: In usual ops, vwrite() is never necessary because the caller 2152 * should know the vmalloc() area is valid and can use memcpy(). 2153 * This is for routines which have to access the vmalloc area without 2154 * any information, such as /dev/kmem.
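 *
 * Illustrative sketch (not part of the original documentation): a
 * debug-style reader would typically copy a vmalloc range into a
 * temporary kernel buffer before pushing it elsewhere. "vm_addr" and
 * "len" are hypothetical.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *	long n;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	n = vread(kbuf, (char *)vm_addr, len);
 *	if (!n) {
 *		kfree(kbuf);
 *		return -ENXIO;	(the range hit no live vmalloc area)
 *	}
 *	...
 *	kfree(kbuf);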
2155 */ 2156 2157 long vwrite(char *buf, char *addr, unsigned long count) 2158 { 2159 struct vmap_area *va; 2160 struct vm_struct *vm; 2161 char *vaddr; 2162 unsigned long n, buflen; 2163 int copied = 0; 2164 2165 /* Don't allow overflow */ 2166 if ((unsigned long) addr + count < count) 2167 count = -(unsigned long) addr; 2168 buflen = count; 2169 2170 spin_lock(&vmap_area_lock); 2171 list_for_each_entry(va, &vmap_area_list, list) { 2172 if (!count) 2173 break; 2174 2175 if (!(va->flags & VM_VM_AREA)) 2176 continue; 2177 2178 vm = va->vm; 2179 vaddr = (char *) vm->addr; 2180 if (addr >= vaddr + get_vm_area_size(vm)) 2181 continue; 2182 while (addr < vaddr) { 2183 if (count == 0) 2184 goto finished; 2185 buf++; 2186 addr++; 2187 count--; 2188 } 2189 n = vaddr + get_vm_area_size(vm) - addr; 2190 if (n > count) 2191 n = count; 2192 if (!(vm->flags & VM_IOREMAP)) { 2193 aligned_vwrite(buf, addr, n); 2194 copied++; 2195 } 2196 buf += n; 2197 addr += n; 2198 count -= n; 2199 } 2200 finished: 2201 spin_unlock(&vmap_area_lock); 2202 if (!copied) 2203 return 0; 2204 return buflen; 2205 } 2206 2207 /** 2208 * remap_vmalloc_range_partial - map vmalloc pages to userspace 2209 * @vma: vma to cover 2210 * @uaddr: target user address to start at 2211 * @kaddr: virtual address of vmalloc kernel memory 2212 * @size: size of map area 2213 * 2214 * Returns: 0 for success, -Exxx on failure 2215 * 2216 * This function checks that @kaddr is a valid vmalloc'ed area, 2217 * and that it is big enough to cover the range starting at 2218 * @uaddr in @vma. Will return failure if those criteria aren't 2219 * met. 2220 * 2221 * Similar to remap_pfn_range() (see mm/memory.c) 2222 */ 2223 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, 2224 void *kaddr, unsigned long size) 2225 { 2226 struct vm_struct *area; 2227 2228 size = PAGE_ALIGN(size); 2229 2230 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) 2231 return -EINVAL; 2232 2233 area = find_vm_area(kaddr); 2234 if (!area) 2235 return -EINVAL; 2236 2237 if (!(area->flags & VM_USERMAP)) 2238 return -EINVAL; 2239 2240 if (kaddr + size > area->addr + area->size) 2241 return -EINVAL; 2242 2243 do { 2244 struct page *page = vmalloc_to_page(kaddr); 2245 int ret; 2246 2247 ret = vm_insert_page(vma, uaddr, page); 2248 if (ret) 2249 return ret; 2250 2251 uaddr += PAGE_SIZE; 2252 kaddr += PAGE_SIZE; 2253 size -= PAGE_SIZE; 2254 } while (size > 0); 2255 2256 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 2257 2258 return 0; 2259 } 2260 EXPORT_SYMBOL(remap_vmalloc_range_partial); 2261 2262 /** 2263 * remap_vmalloc_range - map vmalloc pages to userspace 2264 * @vma: vma to cover (map full range of vma) 2265 * @addr: vmalloc memory 2266 * @pgoff: number of pages into addr before first page to map 2267 * 2268 * Returns: 0 for success, -Exxx on failure 2269 * 2270 * This function checks that addr is a valid vmalloc'ed area, and 2271 * that it is big enough to cover the vma. Will return failure if 2272 * those criteria aren't met. 2273 * 2274 * Similar to remap_pfn_range() (see mm/memory.c) 2275 */ 2276 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, 2277 unsigned long pgoff) 2278 { 2279 return remap_vmalloc_range_partial(vma, vma->vm_start, 2280 addr + (pgoff << PAGE_SHIFT), 2281 vma->vm_end - vma->vm_start); 2282 } 2283 EXPORT_SYMBOL(remap_vmalloc_range); 2284 2285 /* 2286 * Implement a stub for vmalloc_sync_all() if the architecture chose not to 2287 * have one.
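 * Because the stub below is declared __weak, an architecture can
 * override it simply by providing its own strong definition; the linker
 * then picks the arch version. A minimal sketch (the helper name
 * arch_sync_kernel_mappings_for_vmalloc() is hypothetical, standing in
 * for whatever the architecture uses to propagate init_mm page-table
 * updates):
 *
 *	void vmalloc_sync_all(void)
 *	{
 *		arch_sync_kernel_mappings_for_vmalloc(VMALLOC_START, VMALLOC_END);
 *	}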
2288 */ 2289 void __weak vmalloc_sync_all(void) 2290 { 2291 } 2292 2293 2294 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 2295 { 2296 pte_t ***p = data; 2297 2298 if (p) { 2299 *(*p) = pte; 2300 (*p)++; 2301 } 2302 return 0; 2303 } 2304 2305 /** 2306 * alloc_vm_area - allocate a range of kernel address space 2307 * @size: size of the area 2308 * @ptes: returns the PTEs for the address space 2309 * 2310 * Returns: NULL on failure, vm_struct on success 2311 * 2312 * This function reserves a range of kernel address space, and 2313 * allocates pagetables to map that range. No actual mappings 2314 * are created. 2315 * 2316 * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2317 * allocated for the VM area are returned. 2318 */ 2319 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 2320 { 2321 struct vm_struct *area; 2322 2323 area = get_vm_area_caller(size, VM_IOREMAP, 2324 __builtin_return_address(0)); 2325 if (area == NULL) 2326 return NULL; 2327 2328 /* 2329 * This ensures that page tables are constructed for this region 2330 * of kernel virtual address space and mapped into init_mm. 2331 */ 2332 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2333 size, f, ptes ? &ptes : NULL)) { 2334 free_vm_area(area); 2335 return NULL; 2336 } 2337 2338 return area; 2339 } 2340 EXPORT_SYMBOL_GPL(alloc_vm_area); 2341 2342 void free_vm_area(struct vm_struct *area) 2343 { 2344 struct vm_struct *ret; 2345 ret = remove_vm_area(area->addr); 2346 BUG_ON(ret != area); 2347 kfree(area); 2348 } 2349 EXPORT_SYMBOL_GPL(free_vm_area); 2350 2351 #ifdef CONFIG_SMP 2352 static struct vmap_area *node_to_va(struct rb_node *n) 2353 { 2354 return rb_entry_safe(n, struct vmap_area, rb_node); 2355 } 2356 2357 /** 2358 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2359 * @end: target address 2360 * @pnext: out arg for the next vmap_area 2361 * @pprev: out arg for the previous vmap_area 2362 * 2363 * Returns: %true if either or both of next and prev are found, 2364 * %false if no vmap_area exists 2365 * 2366 * Find the vmap_areas whose end addresses enclose @end, i.e. if not 2367 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. 2368 */ 2369 static bool pvm_find_next_prev(unsigned long end, 2370 struct vmap_area **pnext, 2371 struct vmap_area **pprev) 2372 { 2373 struct rb_node *n = vmap_area_root.rb_node; 2374 struct vmap_area *va = NULL; 2375 2376 while (n) { 2377 va = rb_entry(n, struct vmap_area, rb_node); 2378 if (end < va->va_end) 2379 n = n->rb_left; 2380 else if (end > va->va_end) 2381 n = n->rb_right; 2382 else 2383 break; 2384 } 2385 2386 if (!va) 2387 return false; 2388 2389 if (va->va_end > end) { 2390 *pnext = va; 2391 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2392 } else { 2393 *pprev = va; 2394 *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2395 } 2396 return true; 2397 } 2398 2399 /** 2400 * pvm_determine_end - find the highest aligned address between two vmap_areas 2401 * @pnext: in/out arg for the next vmap_area 2402 * @pprev: in/out arg for the previous vmap_area 2403 * @align: alignment 2404 * 2405 * Returns: determined end address 2406 * 2407 * Find the highest aligned address between *@pnext and *@pprev below 2408 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned-down 2409 * address is between the end addresses of the two vmap_areas. 2410 * 2411 * Please note that the address returned by this function may fall 2412 * inside the *@pnext vmap_area.
The caller is responsible for checking 2413 * that. 2414 */ 2415 static unsigned long pvm_determine_end(struct vmap_area **pnext, 2416 struct vmap_area **pprev, 2417 unsigned long align) 2418 { 2419 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2420 unsigned long addr; 2421 2422 if (*pnext) 2423 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); 2424 else 2425 addr = vmalloc_end; 2426 2427 while (*pprev && (*pprev)->va_end > addr) { 2428 *pnext = *pprev; 2429 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2430 } 2431 2432 return addr; 2433 } 2434 2435 /** 2436 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 2437 * @offsets: array containing offset of each area 2438 * @sizes: array containing size of each area 2439 * @nr_vms: the number of areas to allocate 2440 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 2441 * 2442 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 2443 * vm_structs on success, %NULL on failure 2444 * 2445 * The percpu allocator wants to use congruent vm areas so that it can 2446 * maintain the offsets among percpu areas. This function allocates 2447 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 2448 * be scattered pretty far apart, with the distance between two areas 2449 * easily going up to gigabytes. To avoid interacting with regular 2450 * vmallocs, these areas are allocated from the top. 2451 * 2452 * Despite its complicated look, this allocator is rather simple. It 2453 * does everything top-down and scans areas from the end looking for a 2454 * matching slot. While scanning, if any of the areas overlaps with an 2455 * existing vmap_area, the base address is pulled down to fit the 2456 * area. Scanning is repeated until all the areas fit, and then all 2457 * necessary data structures are inserted and the result is returned. 2458 */ 2459 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 2460 const size_t *sizes, int nr_vms, 2461 size_t align) 2462 { 2463 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 2464 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2465 struct vmap_area **vas, *prev, *next; 2466 struct vm_struct **vms; 2467 int area, area2, last_area, term_area; 2468 unsigned long base, start, end, last_end; 2469 bool purged = false; 2470 2471 /* verify parameters and allocate data structures */ 2472 BUG_ON(offset_in_page(align) || !is_power_of_2(align)); 2473 for (last_area = 0, area = 0; area < nr_vms; area++) { 2474 start = offsets[area]; 2475 end = start + sizes[area]; 2476 2477 /* is everything aligned properly?
*/ 2478 BUG_ON(!IS_ALIGNED(offsets[area], align)); 2479 BUG_ON(!IS_ALIGNED(sizes[area], align)); 2480 2481 /* detect the area with the highest address */ 2482 if (start > offsets[last_area]) 2483 last_area = area; 2484 2485 for (area2 = 0; area2 < nr_vms; area2++) { 2486 unsigned long start2 = offsets[area2]; 2487 unsigned long end2 = start2 + sizes[area2]; 2488 2489 if (area2 == area) 2490 continue; 2491 2492 BUG_ON(start2 >= start && start2 < end); 2493 BUG_ON(end2 <= end && end2 > start); 2494 } 2495 } 2496 last_end = offsets[last_area] + sizes[last_area]; 2497 2498 if (vmalloc_end - vmalloc_start < last_end) { 2499 WARN_ON(true); 2500 return NULL; 2501 } 2502 2503 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 2504 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 2505 if (!vas || !vms) 2506 goto err_free2; 2507 2508 for (area = 0; area < nr_vms; area++) { 2509 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); 2510 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 2511 if (!vas[area] || !vms[area]) 2512 goto err_free; 2513 } 2514 retry: 2515 spin_lock(&vmap_area_lock); 2516 2517 /* start scanning - we scan from the top, begin with the last area */ 2518 area = term_area = last_area; 2519 start = offsets[area]; 2520 end = start + sizes[area]; 2521 2522 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2523 base = vmalloc_end - last_end; 2524 goto found; 2525 } 2526 base = pvm_determine_end(&next, &prev, align) - end; 2527 2528 while (true) { 2529 BUG_ON(next && next->va_end <= base + end); 2530 BUG_ON(prev && prev->va_end > base + end); 2531 2532 /* 2533 * base might have underflowed, add last_end before 2534 * comparing. 2535 */ 2536 if (base + last_end < vmalloc_start + last_end) { 2537 spin_unlock(&vmap_area_lock); 2538 if (!purged) { 2539 purge_vmap_area_lazy(); 2540 purged = true; 2541 goto retry; 2542 } 2543 goto err_free; 2544 } 2545 2546 /* 2547 * If next overlaps, move base downwards so that it's 2548 * right below next and then recheck. 2549 */ 2550 if (next && next->va_start < base + end) { 2551 base = pvm_determine_end(&next, &prev, align) - end; 2552 term_area = area; 2553 continue; 2554 } 2555 2556 /* 2557 * If prev overlaps, shift down next and prev and move 2558 * base so that it's right below new next and then 2559 * recheck. 2560 */ 2561 if (prev && prev->va_end > base + start) { 2562 next = prev; 2563 prev = node_to_va(rb_prev(&next->rb_node)); 2564 base = pvm_determine_end(&next, &prev, align) - end; 2565 term_area = area; 2566 continue; 2567 } 2568 2569 /* 2570 * This area fits, move on to the previous one. If 2571 * the previous one is the terminal one, we're done. 
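 * The modulo arithmetic below walks the areas in decreasing index
 * order, wrapping from index 0 back to nr_vms - 1; term_area remembers
 * the area at which the current base was last adjusted, so arriving
 * back at it means every area fits at this base.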
2572 */ 2573 area = (area + nr_vms - 1) % nr_vms; 2574 if (area == term_area) 2575 break; 2576 start = offsets[area]; 2577 end = start + sizes[area]; 2578 pvm_find_next_prev(base + end, &next, &prev); 2579 } 2580 found: 2581 /* we've found a fitting base, insert all va's */ 2582 for (area = 0; area < nr_vms; area++) { 2583 struct vmap_area *va = vas[area]; 2584 2585 va->va_start = base + offsets[area]; 2586 va->va_end = va->va_start + sizes[area]; 2587 __insert_vmap_area(va); 2588 } 2589 2590 vmap_area_pcpu_hole = base + offsets[last_area]; 2591 2592 spin_unlock(&vmap_area_lock); 2593 2594 /* insert all vm's */ 2595 for (area = 0; area < nr_vms; area++) 2596 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 2597 pcpu_get_vm_areas); 2598 2599 kfree(vas); 2600 return vms; 2601 2602 err_free: 2603 for (area = 0; area < nr_vms; area++) { 2604 kfree(vas[area]); 2605 kfree(vms[area]); 2606 } 2607 err_free2: 2608 kfree(vas); 2609 kfree(vms); 2610 return NULL; 2611 } 2612 2613 /** 2614 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 2615 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 2616 * @nr_vms: the number of allocated areas 2617 * 2618 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 2619 */ 2620 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 2621 { 2622 int i; 2623 2624 for (i = 0; i < nr_vms; i++) 2625 free_vm_area(vms[i]); 2626 kfree(vms); 2627 } 2628 #endif /* CONFIG_SMP */ 2629 2630 #ifdef CONFIG_PROC_FS 2631 static void *s_start(struct seq_file *m, loff_t *pos) 2632 __acquires(&vmap_area_lock) 2633 { 2634 spin_lock(&vmap_area_lock); 2635 return seq_list_start(&vmap_area_list, *pos); 2636 } 2637 2638 static void *s_next(struct seq_file *m, void *p, loff_t *pos) 2639 { 2640 return seq_list_next(p, &vmap_area_list, pos); 2641 } 2642 2643 static void s_stop(struct seq_file *m, void *p) 2644 __releases(&vmap_area_lock) 2645 { 2646 spin_unlock(&vmap_area_lock); 2647 } 2648 2649 static void show_numa_info(struct seq_file *m, struct vm_struct *v) 2650 { 2651 if (IS_ENABLED(CONFIG_NUMA)) { 2652 unsigned int nr, *counters = m->private; 2653 2654 if (!counters) 2655 return; 2656 2657 if (v->flags & VM_UNINITIALIZED) 2658 return; 2659 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 2660 smp_rmb(); 2661 2662 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 2663 2664 for (nr = 0; nr < v->nr_pages; nr++) 2665 counters[page_to_nid(v->pages[nr])]++; 2666 2667 for_each_node_state(nr, N_HIGH_MEMORY) 2668 if (counters[nr]) 2669 seq_printf(m, " N%u=%u", nr, counters[nr]); 2670 } 2671 } 2672 2673 static int s_show(struct seq_file *m, void *p) 2674 { 2675 struct vmap_area *va; 2676 struct vm_struct *v; 2677 2678 va = list_entry(p, struct vmap_area, list); 2679 2680 /* 2681 * s_show can race with remove_vm_area(): if VM_VM_AREA is clear, the 2682 * vmap area is being torn down or belongs to a vm_map_ram() allocation.
2683 */ 2684 if (!(va->flags & VM_VM_AREA)) 2685 return 0; 2686 2687 v = va->vm; 2688 2689 seq_printf(m, "0x%pK-0x%pK %7ld", 2690 v->addr, v->addr + v->size, v->size); 2691 2692 if (v->caller) 2693 seq_printf(m, " %pS", v->caller); 2694 2695 if (v->nr_pages) 2696 seq_printf(m, " pages=%d", v->nr_pages); 2697 2698 if (v->phys_addr) 2699 seq_printf(m, " phys=%pa", &v->phys_addr); 2700 2701 if (v->flags & VM_IOREMAP) 2702 seq_puts(m, " ioremap"); 2703 2704 if (v->flags & VM_ALLOC) 2705 seq_puts(m, " vmalloc"); 2706 2707 if (v->flags & VM_MAP) 2708 seq_puts(m, " vmap"); 2709 2710 if (v->flags & VM_USERMAP) 2711 seq_puts(m, " user"); 2712 2713 if (is_vmalloc_addr(v->pages)) 2714 seq_puts(m, " vpages"); 2715 2716 show_numa_info(m, v); 2717 seq_putc(m, '\n'); 2718 return 0; 2719 } 2720 2721 static const struct seq_operations vmalloc_op = { 2722 .start = s_start, 2723 .next = s_next, 2724 .stop = s_stop, 2725 .show = s_show, 2726 }; 2727 2728 static int vmalloc_open(struct inode *inode, struct file *file) 2729 { 2730 if (IS_ENABLED(CONFIG_NUMA)) 2731 return seq_open_private(file, &vmalloc_op, 2732 nr_node_ids * sizeof(unsigned int)); 2733 else 2734 return seq_open(file, &vmalloc_op); 2735 } 2736 2737 static const struct file_operations proc_vmalloc_operations = { 2738 .open = vmalloc_open, 2739 .read = seq_read, 2740 .llseek = seq_lseek, 2741 .release = seq_release_private, 2742 }; 2743 2744 static int __init proc_vmalloc_init(void) 2745 { 2746 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 2747 return 0; 2748 } 2749 module_init(proc_vmalloc_init); 2750 2751 #endif 2752 2753
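/*
 * Usage sketch (illustrative only; "struct my_dev", "my_mmap" and the
 * field names are hypothetical): the usual way to hand a vmalloc_user()
 * buffer to userspace is from a character device's ->mmap() handler,
 * letting remap_vmalloc_range() perform the VM_USERMAP and size checks.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 *
 * dev->buf would have been allocated with vmalloc_user(), which sets
 * VM_USERMAP so that remap_vmalloc_range() accepts it; the handler then
 * maps the whole vma, offset by vma->vm_pgoff pages into the buffer.
 */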