/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/llist.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		printk(KERN_WARNING
			"vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}
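/*
 * Example (editor's illustrative sketch, not part of the original file):
 * callers in this file, e.g. new_vmap_block() below, pair alloc_vmap_area()
 * with free_vmap_area() and must check for an ERR_PTR() return:
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END,
 *			     NUMA_NO_NODE, GFP_KERNEL);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	...
 *	free_vmap_area(va);	(returns the KVA range to the allocator)
 */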
static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
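/*
 * Editor's note (worked example, not in the original file): with 4 KiB pages
 * and, say, 16 online CPUs, fls(16) == 5, so lazy_max_pages() allows
 * 5 * 32 MiB = 160 MiB (40960 pages) of lazily freed vmap space to
 * accumulate before try_purge_vmap_area_lazy() forces a purge.
 */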
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */
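/*
 * Editor's note (worked example, not in the original file): on a 64-bit
 * configuration with 4 KiB pages and roundup_pow_of_two(NR_CPUS) == 64,
 * VMALLOC_PAGES is 128 GiB / 4 KiB = 32M pages, so
 * VMALLOC_PAGES / 64 / 16 = 32768 bits, which VMAP_MIN() clamps down to
 * VMAP_BBMAP_BITS_MAX = 1024.  VMAP_BLOCK_SIZE is then 1024 * 4 KiB = 4 MiB,
 * and addr_to_vb_idx() below simply returns the block number of an address,
 * counted in VMAP_BLOCK_SIZE units relative to VMALLOC_START rounded down
 * to a block boundary.
 */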
static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = VMAP_BBMAP_BITS - vb->free;
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into a kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i, j;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			if (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;

				j = find_last_bit(vb->dirty_map,
							VMAP_BBMAP_BITS);
				j = j + 1; /* need exclusive index */

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
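/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a typical transient mapping of an existing page array; "pages", "nr"
 * and "src" are hypothetical:
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */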
static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
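/*
 * Example (editor's illustrative sketch, not part of the original file):
 * mapping a hypothetical page array "pages" of "nr" pages into an area
 * reserved with get_vm_area(); the cache flushing described in the NOTE
 * above remains the caller's responsibility and is omitted here:
 *
 *	struct vm_struct *area = get_vm_area(nr * PAGE_SIZE, VM_MAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	if (map_kernel_range_noflush((unsigned long)area->addr,
 *				     nr * PAGE_SIZE, PAGE_KERNEL, pages) < 0) {
 *		free_vm_area(area);
 *		return -ENOMEM;
 *	}
 */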
/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 * get_vm_area  -  reserve a contiguous kernel virtual area
 * @size:	 size of the area
 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search for an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 * find_vm_area  -  find a contiguous kernel virtual area
 * @addr:	  base address
 *
 * Search for the kernel VM area starting at @addr, and return it.
 * It is up to the caller to do all required locking to keep the returned
 * pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}
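/*
 * Example (editor's illustrative sketch, not part of the original file):
 * looking up the vm_struct behind a vmalloc'ed pointer "p" (hypothetical),
 * e.g. to learn the size of the underlying area:
 *
 *	struct vm_struct *area = find_vm_area(p);
 *
 *	if (area)
 *		pr_debug("%p backs %lu bytes at %p\n",
 *			 p, area->size, area->addr);
 */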
/**
 * remove_vm_area  -  find and remove a contiguous kernel virtual area
 * @addr:	    base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in NMI context (strictly speaking, only if we don't
 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 * conventions for vfree() arch-dependent would be a really bad idea)
 *
 * NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr:	memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages:	array of page pointers
 * @count:	number of pages to map
 * @flags:	vm_area->flags
 * @prot:	page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
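/*
 * Example (editor's illustrative sketch, not part of the original file):
 * mapping two already-allocated pages contiguously and tearing the mapping
 * down again; note that vunmap() does not free the pages themselves:
 *
 *	struct page *pg[2] = { alloc_page(GFP_KERNEL), alloc_page(GFP_KERNEL) };
 *	void *va = vmap(pg, 2, VM_MAP, PAGE_KERNEL);
 *
 *	if (va) {
 *		memset(va, 0, 2 * PAGE_SIZE);
 *		vunmap(va);
 *	}
 *	__free_page(pg[0]);
 *	__free_page(pg[1]);
 */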
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;
		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

		if (node == NUMA_NO_NODE)
			page = alloc_page(tmp_mask);
		else
			page = alloc_pages_node(node, tmp_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size:	allocation size
 * @align:	desired alignment
 * @start:	vm area range start
 * @end:	vm area range end
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @node:	node to use for allocation or NUMA_NO_NODE
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
				  start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size:	allocation size
 * @align:	desired alignment
 * @gfp_mask:	flags for the page level allocator
 * @prot:	protection mask for the allocated pages
 * @node:	node to use for allocation or NUMA_NO_NODE
 * @caller:	caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size:	allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
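/*
 * Example (editor's illustrative sketch, not part of the original file):
 * the common allocate/use/free pattern for a large, physically
 * non-contiguous buffer; "nbytes" is hypothetical:
 *
 *	void *buf = vmalloc(nbytes);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */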
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size:	allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine: copy contents to buf from addr.
 * If the page is not present, fill with zeroes.
 */

static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means that we need to add the
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a lock.
		 * But adding a lock here means that we need to add the
		 * overhead of vmalloc()/vfree() calls for this _debug_
		 * interface, rarely used. Instead of that, we'll use
		 * kmap() and get small overhead in this access function.
		 */
		if (p) {
			/*
			 * we can expect USER0 is not used (see vread/vwrite's
			 * function description)
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 * vread() - read vmalloc area in a safe way.
 * @buf:	buffer for reading data
 * @addr:	vm address.
 * @count:	number of bytes to be read.
 *
 * Returns # of bytes by which addr and buf should be increased
 * (same number as @count).  Returns 0 if [addr...addr+count) doesn't
 * include any intersection with an alive vmalloc area.
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * copies data from that area to a given buffer.  If the given memory range
 * of [addr...addr+count) includes some valid address, data is copied to
 * the proper area of @buf.  If there are memory holes, they'll be zero-filled.
 * IOREMAP area is treated as memory hole and no copy is done.
 *
 * If [addr...addr+count) doesn't include any intersection with an alive
 * vm_struct area, returns 0.  @buf should be a kernel buffer.
 *
 * Note: In usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 */

long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
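/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a /dev/kmem style reader copies through an intermediate kernel buffer,
 * since vread() zero-fills holes rather than faulting; "kbuf", "kaddr",
 * "uptr" and "len" are hypothetical:
 *
 *	long n = vread(kbuf, kaddr, len);
 *
 *	if (n && copy_to_user(uptr, kbuf, n))
 *		return -EFAULT;
 */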
2026 /**
2027  * vwrite() - write vmalloc area in a safe way.
2028  * @buf: buffer for source data
2029  * @addr: vm address.
2030  * @count: number of bytes to be written.
2031  *
2032  * Returns the number of bytes by which @addr and @buf should be increased
2033  * (same as @count).
2034  * If [addr...addr+count) does not intersect any valid
2035  * vmalloc area, returns 0.
2036  *
2037  * This function checks that @addr is a valid vmalloc'ed area and
2038  * copies data from the buffer to the given address. If the specified range
2039  * of [addr...addr+count) includes some valid address, data is copied from
2040  * the proper area of @buf. If there are memory holes, nothing is copied
2041  * to them. An IOREMAP area is treated as a memory hole and no copy is done.
2042  *
2043  * If [addr...addr+count) does not intersect any live
2044  * vm_struct area, returns 0. @buf should be a kernel buffer.
2045  *
2046  * Note: In usual ops, vwrite() is never necessary because the caller
2047  * should know the vmalloc() area is valid and can use memcpy().
2048  * This is for routines which have to access the vmalloc area without
2049  * any information, such as /dev/kmem.
2050  */
2051 
2052 long vwrite(char *buf, char *addr, unsigned long count)
2053 {
2054 	struct vmap_area *va;
2055 	struct vm_struct *vm;
2056 	char *vaddr;
2057 	unsigned long n, buflen;
2058 	int copied = 0;
2059 
2060 	/* Don't allow overflow */
2061 	if ((unsigned long) addr + count < count)
2062 		count = -(unsigned long) addr;
2063 	buflen = count;
2064 
2065 	spin_lock(&vmap_area_lock);
2066 	list_for_each_entry(va, &vmap_area_list, list) {
2067 		if (!count)
2068 			break;
2069 
2070 		if (!(va->flags & VM_VM_AREA))
2071 			continue;
2072 
2073 		vm = va->vm;
2074 		vaddr = (char *) vm->addr;
2075 		if (addr >= vaddr + get_vm_area_size(vm))
2076 			continue;
2077 		while (addr < vaddr) {
2078 			if (count == 0)
2079 				goto finished;
2080 			buf++;
2081 			addr++;
2082 			count--;
2083 		}
2084 		n = vaddr + get_vm_area_size(vm) - addr;
2085 		if (n > count)
2086 			n = count;
2087 		if (!(vm->flags & VM_IOREMAP)) {
2088 			aligned_vwrite(buf, addr, n);
2089 			copied++;
2090 		}
2091 		buf += n;
2092 		addr += n;
2093 		count -= n;
2094 	}
2095 finished:
2096 	spin_unlock(&vmap_area_lock);
2097 	if (!copied)
2098 		return 0;
2099 	return buflen;
2100 }
2101 
2102 /**
2103  * remap_vmalloc_range_partial - map vmalloc pages to userspace
2104  * @vma: vma to cover
2105  * @uaddr: target user address to start at
2106  * @kaddr: virtual address of vmalloc kernel memory
2107  * @size: size of map area
2108  *
2109  * Returns: 0 for success, -Exxx on failure
2110  *
2111  * This function checks that @kaddr is a valid vmalloc'ed area,
2112  * and that it is big enough to cover the range starting at
2113  * @uaddr in @vma. Will return failure if that criterion isn't
2114  * met.
2115  *
2116  * Similar to remap_pfn_range() (see mm/memory.c)
2117  */
2118 int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2119 				void *kaddr, unsigned long size)
2120 {
2121 	struct vm_struct *area;
2122 
2123 	size = PAGE_ALIGN(size);
2124 
2125 	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2126 		return -EINVAL;
2127 
2128 	area = find_vm_area(kaddr);
2129 	if (!area)
2130 		return -EINVAL;
2131 
2132 	if (!(area->flags & VM_USERMAP))
2133 		return -EINVAL;
2134 
2135 	if (kaddr + size > area->addr + area->size)
2136 		return -EINVAL;
2137 
2138 	do {
2139 		struct page *page = vmalloc_to_page(kaddr);
2140 		int ret;
2141 
2142 		ret = vm_insert_page(vma, uaddr, page);
2143 		if (ret)
2144 			return ret;
2145 
2146 		uaddr += PAGE_SIZE;
2147 		kaddr += PAGE_SIZE;
2148 		size -= PAGE_SIZE;
2149 	} while (size > 0);
2150 
2151 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2152 
2153 	return 0;
2154 }
2155 EXPORT_SYMBOL(remap_vmalloc_range_partial);
2156 
2157 /**
2158  * remap_vmalloc_range - map vmalloc pages to userspace
2159  * @vma: vma to cover (map full range of vma)
2160  * @addr: vmalloc memory
2161  * @pgoff: number of pages into addr before first page to map
2162  *
2163  * Returns: 0 for success, -Exxx on failure
2164  *
2165  * This function checks that @addr is a valid vmalloc'ed area, and
2166  * that it is big enough to cover the vma. Will return failure if
2167  * that criterion isn't met.
2168  *
2169  * Similar to remap_pfn_range() (see mm/memory.c)
2170  */
2171 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2172 			unsigned long pgoff)
2173 {
2174 	return remap_vmalloc_range_partial(vma, vma->vm_start,
2175 					   addr + (pgoff << PAGE_SHIFT),
2176 					   vma->vm_end - vma->vm_start);
2177 }
2178 EXPORT_SYMBOL(remap_vmalloc_range);
2179 
2180 /*
2181  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2182  * have one.
2183 */ 2184 void __attribute__((weak)) vmalloc_sync_all(void) 2185 { 2186 } 2187 2188 2189 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) 2190 { 2191 pte_t ***p = data; 2192 2193 if (p) { 2194 *(*p) = pte; 2195 (*p)++; 2196 } 2197 return 0; 2198 } 2199 2200 /** 2201 * alloc_vm_area - allocate a range of kernel address space 2202 * @size: size of the area 2203 * @ptes: returns the PTEs for the address space 2204 * 2205 * Returns: NULL on failure, vm_struct on success 2206 * 2207 * This function reserves a range of kernel address space, and 2208 * allocates pagetables to map that range. No actual mappings 2209 * are created. 2210 * 2211 * If @ptes is non-NULL, pointers to the PTEs (in init_mm) 2212 * allocated for the VM area are returned. 2213 */ 2214 struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) 2215 { 2216 struct vm_struct *area; 2217 2218 area = get_vm_area_caller(size, VM_IOREMAP, 2219 __builtin_return_address(0)); 2220 if (area == NULL) 2221 return NULL; 2222 2223 /* 2224 * This ensures that page tables are constructed for this region 2225 * of kernel virtual address space and mapped into init_mm. 2226 */ 2227 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, 2228 size, f, ptes ? &ptes : NULL)) { 2229 free_vm_area(area); 2230 return NULL; 2231 } 2232 2233 return area; 2234 } 2235 EXPORT_SYMBOL_GPL(alloc_vm_area); 2236 2237 void free_vm_area(struct vm_struct *area) 2238 { 2239 struct vm_struct *ret; 2240 ret = remove_vm_area(area->addr); 2241 BUG_ON(ret != area); 2242 kfree(area); 2243 } 2244 EXPORT_SYMBOL_GPL(free_vm_area); 2245 2246 #ifdef CONFIG_SMP 2247 static struct vmap_area *node_to_va(struct rb_node *n) 2248 { 2249 return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; 2250 } 2251 2252 /** 2253 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end 2254 * @end: target address 2255 * @pnext: out arg for the next vmap_area 2256 * @pprev: out arg for the previous vmap_area 2257 * 2258 * Returns: %true if either or both of next and prev are found, 2259 * %false if no vmap_area exists 2260 * 2261 * Find vmap_areas end addresses of which enclose @end. ie. if not 2262 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end. 2263 */ 2264 static bool pvm_find_next_prev(unsigned long end, 2265 struct vmap_area **pnext, 2266 struct vmap_area **pprev) 2267 { 2268 struct rb_node *n = vmap_area_root.rb_node; 2269 struct vmap_area *va = NULL; 2270 2271 while (n) { 2272 va = rb_entry(n, struct vmap_area, rb_node); 2273 if (end < va->va_end) 2274 n = n->rb_left; 2275 else if (end > va->va_end) 2276 n = n->rb_right; 2277 else 2278 break; 2279 } 2280 2281 if (!va) 2282 return false; 2283 2284 if (va->va_end > end) { 2285 *pnext = va; 2286 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2287 } else { 2288 *pprev = va; 2289 *pnext = node_to_va(rb_next(&(*pprev)->rb_node)); 2290 } 2291 return true; 2292 } 2293 2294 /** 2295 * pvm_determine_end - find the highest aligned address between two vmap_areas 2296 * @pnext: in/out arg for the next vmap_area 2297 * @pprev: in/out arg for the previous vmap_area 2298 * @align: alignment 2299 * 2300 * Returns: determined end address 2301 * 2302 * Find the highest aligned address between *@pnext and *@pprev below 2303 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned 2304 * down address is between the end addresses of the two vmap_areas. 2305 * 2306 * Please note that the address returned by this function may fall 2307 * inside *@pnext vmap_area. 
The caller is responsible for checking 2308 * that. 2309 */ 2310 static unsigned long pvm_determine_end(struct vmap_area **pnext, 2311 struct vmap_area **pprev, 2312 unsigned long align) 2313 { 2314 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2315 unsigned long addr; 2316 2317 if (*pnext) 2318 addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end); 2319 else 2320 addr = vmalloc_end; 2321 2322 while (*pprev && (*pprev)->va_end > addr) { 2323 *pnext = *pprev; 2324 *pprev = node_to_va(rb_prev(&(*pnext)->rb_node)); 2325 } 2326 2327 return addr; 2328 } 2329 2330 /** 2331 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator 2332 * @offsets: array containing offset of each area 2333 * @sizes: array containing size of each area 2334 * @nr_vms: the number of areas to allocate 2335 * @align: alignment, all entries in @offsets and @sizes must be aligned to this 2336 * 2337 * Returns: kmalloc'd vm_struct pointer array pointing to allocated 2338 * vm_structs on success, %NULL on failure 2339 * 2340 * Percpu allocator wants to use congruent vm areas so that it can 2341 * maintain the offsets among percpu areas. This function allocates 2342 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to 2343 * be scattered pretty far, distance between two areas easily going up 2344 * to gigabytes. To avoid interacting with regular vmallocs, these 2345 * areas are allocated from top. 2346 * 2347 * Despite its complicated look, this allocator is rather simple. It 2348 * does everything top-down and scans areas from the end looking for 2349 * matching slot. While scanning, if any of the areas overlaps with 2350 * existing vmap_area, the base address is pulled down to fit the 2351 * area. Scanning is repeated till all the areas fit and then all 2352 * necessary data structres are inserted and the result is returned. 2353 */ 2354 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 2355 const size_t *sizes, int nr_vms, 2356 size_t align) 2357 { 2358 const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); 2359 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); 2360 struct vmap_area **vas, *prev, *next; 2361 struct vm_struct **vms; 2362 int area, area2, last_area, term_area; 2363 unsigned long base, start, end, last_end; 2364 bool purged = false; 2365 2366 /* verify parameters and allocate data structures */ 2367 BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align)); 2368 for (last_area = 0, area = 0; area < nr_vms; area++) { 2369 start = offsets[area]; 2370 end = start + sizes[area]; 2371 2372 /* is everything aligned properly? 
*/ 2373 BUG_ON(!IS_ALIGNED(offsets[area], align)); 2374 BUG_ON(!IS_ALIGNED(sizes[area], align)); 2375 2376 /* detect the area with the highest address */ 2377 if (start > offsets[last_area]) 2378 last_area = area; 2379 2380 for (area2 = 0; area2 < nr_vms; area2++) { 2381 unsigned long start2 = offsets[area2]; 2382 unsigned long end2 = start2 + sizes[area2]; 2383 2384 if (area2 == area) 2385 continue; 2386 2387 BUG_ON(start2 >= start && start2 < end); 2388 BUG_ON(end2 <= end && end2 > start); 2389 } 2390 } 2391 last_end = offsets[last_area] + sizes[last_area]; 2392 2393 if (vmalloc_end - vmalloc_start < last_end) { 2394 WARN_ON(true); 2395 return NULL; 2396 } 2397 2398 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); 2399 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); 2400 if (!vas || !vms) 2401 goto err_free2; 2402 2403 for (area = 0; area < nr_vms; area++) { 2404 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); 2405 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); 2406 if (!vas[area] || !vms[area]) 2407 goto err_free; 2408 } 2409 retry: 2410 spin_lock(&vmap_area_lock); 2411 2412 /* start scanning - we scan from the top, begin with the last area */ 2413 area = term_area = last_area; 2414 start = offsets[area]; 2415 end = start + sizes[area]; 2416 2417 if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) { 2418 base = vmalloc_end - last_end; 2419 goto found; 2420 } 2421 base = pvm_determine_end(&next, &prev, align) - end; 2422 2423 while (true) { 2424 BUG_ON(next && next->va_end <= base + end); 2425 BUG_ON(prev && prev->va_end > base + end); 2426 2427 /* 2428 * base might have underflowed, add last_end before 2429 * comparing. 2430 */ 2431 if (base + last_end < vmalloc_start + last_end) { 2432 spin_unlock(&vmap_area_lock); 2433 if (!purged) { 2434 purge_vmap_area_lazy(); 2435 purged = true; 2436 goto retry; 2437 } 2438 goto err_free; 2439 } 2440 2441 /* 2442 * If next overlaps, move base downwards so that it's 2443 * right below next and then recheck. 2444 */ 2445 if (next && next->va_start < base + end) { 2446 base = pvm_determine_end(&next, &prev, align) - end; 2447 term_area = area; 2448 continue; 2449 } 2450 2451 /* 2452 * If prev overlaps, shift down next and prev and move 2453 * base so that it's right below new next and then 2454 * recheck. 2455 */ 2456 if (prev && prev->va_end > base + start) { 2457 next = prev; 2458 prev = node_to_va(rb_prev(&next->rb_node)); 2459 base = pvm_determine_end(&next, &prev, align) - end; 2460 term_area = area; 2461 continue; 2462 } 2463 2464 /* 2465 * This area fits, move on to the previous one. If 2466 * the previous one is the terminal one, we're done. 
2467 */ 2468 area = (area + nr_vms - 1) % nr_vms; 2469 if (area == term_area) 2470 break; 2471 start = offsets[area]; 2472 end = start + sizes[area]; 2473 pvm_find_next_prev(base + end, &next, &prev); 2474 } 2475 found: 2476 /* we've found a fitting base, insert all va's */ 2477 for (area = 0; area < nr_vms; area++) { 2478 struct vmap_area *va = vas[area]; 2479 2480 va->va_start = base + offsets[area]; 2481 va->va_end = va->va_start + sizes[area]; 2482 __insert_vmap_area(va); 2483 } 2484 2485 vmap_area_pcpu_hole = base + offsets[last_area]; 2486 2487 spin_unlock(&vmap_area_lock); 2488 2489 /* insert all vm's */ 2490 for (area = 0; area < nr_vms; area++) 2491 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, 2492 pcpu_get_vm_areas); 2493 2494 kfree(vas); 2495 return vms; 2496 2497 err_free: 2498 for (area = 0; area < nr_vms; area++) { 2499 kfree(vas[area]); 2500 kfree(vms[area]); 2501 } 2502 err_free2: 2503 kfree(vas); 2504 kfree(vms); 2505 return NULL; 2506 } 2507 2508 /** 2509 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator 2510 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() 2511 * @nr_vms: the number of allocated areas 2512 * 2513 * Free vm_structs and the array allocated by pcpu_get_vm_areas(). 2514 */ 2515 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 2516 { 2517 int i; 2518 2519 for (i = 0; i < nr_vms; i++) 2520 free_vm_area(vms[i]); 2521 kfree(vms); 2522 } 2523 #endif /* CONFIG_SMP */ 2524 2525 #ifdef CONFIG_PROC_FS 2526 static void *s_start(struct seq_file *m, loff_t *pos) 2527 __acquires(&vmap_area_lock) 2528 { 2529 loff_t n = *pos; 2530 struct vmap_area *va; 2531 2532 spin_lock(&vmap_area_lock); 2533 va = list_entry((&vmap_area_list)->next, typeof(*va), list); 2534 while (n > 0 && &va->list != &vmap_area_list) { 2535 n--; 2536 va = list_entry(va->list.next, typeof(*va), list); 2537 } 2538 if (!n && &va->list != &vmap_area_list) 2539 return va; 2540 2541 return NULL; 2542 2543 } 2544 2545 static void *s_next(struct seq_file *m, void *p, loff_t *pos) 2546 { 2547 struct vmap_area *va = p, *next; 2548 2549 ++*pos; 2550 next = list_entry(va->list.next, typeof(*va), list); 2551 if (&next->list != &vmap_area_list) 2552 return next; 2553 2554 return NULL; 2555 } 2556 2557 static void s_stop(struct seq_file *m, void *p) 2558 __releases(&vmap_area_lock) 2559 { 2560 spin_unlock(&vmap_area_lock); 2561 } 2562 2563 static void show_numa_info(struct seq_file *m, struct vm_struct *v) 2564 { 2565 if (IS_ENABLED(CONFIG_NUMA)) { 2566 unsigned int nr, *counters = m->private; 2567 2568 if (!counters) 2569 return; 2570 2571 /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ 2572 smp_rmb(); 2573 if (v->flags & VM_UNINITIALIZED) 2574 return; 2575 2576 memset(counters, 0, nr_node_ids * sizeof(unsigned int)); 2577 2578 for (nr = 0; nr < v->nr_pages; nr++) 2579 counters[page_to_nid(v->pages[nr])]++; 2580 2581 for_each_node_state(nr, N_HIGH_MEMORY) 2582 if (counters[nr]) 2583 seq_printf(m, " N%u=%u", nr, counters[nr]); 2584 } 2585 } 2586 2587 static int s_show(struct seq_file *m, void *p) 2588 { 2589 struct vmap_area *va = p; 2590 struct vm_struct *v; 2591 2592 /* 2593 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on 2594 * behalf of vmap area is being tear down or vm_map_ram allocation. 
2595 */ 2596 if (!(va->flags & VM_VM_AREA)) 2597 return 0; 2598 2599 v = va->vm; 2600 2601 seq_printf(m, "0x%pK-0x%pK %7ld", 2602 v->addr, v->addr + v->size, v->size); 2603 2604 if (v->caller) 2605 seq_printf(m, " %pS", v->caller); 2606 2607 if (v->nr_pages) 2608 seq_printf(m, " pages=%d", v->nr_pages); 2609 2610 if (v->phys_addr) 2611 seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr); 2612 2613 if (v->flags & VM_IOREMAP) 2614 seq_printf(m, " ioremap"); 2615 2616 if (v->flags & VM_ALLOC) 2617 seq_printf(m, " vmalloc"); 2618 2619 if (v->flags & VM_MAP) 2620 seq_printf(m, " vmap"); 2621 2622 if (v->flags & VM_USERMAP) 2623 seq_printf(m, " user"); 2624 2625 if (v->flags & VM_VPAGES) 2626 seq_printf(m, " vpages"); 2627 2628 show_numa_info(m, v); 2629 seq_putc(m, '\n'); 2630 return 0; 2631 } 2632 2633 static const struct seq_operations vmalloc_op = { 2634 .start = s_start, 2635 .next = s_next, 2636 .stop = s_stop, 2637 .show = s_show, 2638 }; 2639 2640 static int vmalloc_open(struct inode *inode, struct file *file) 2641 { 2642 unsigned int *ptr = NULL; 2643 int ret; 2644 2645 if (IS_ENABLED(CONFIG_NUMA)) { 2646 ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL); 2647 if (ptr == NULL) 2648 return -ENOMEM; 2649 } 2650 ret = seq_open(file, &vmalloc_op); 2651 if (!ret) { 2652 struct seq_file *m = file->private_data; 2653 m->private = ptr; 2654 } else 2655 kfree(ptr); 2656 return ret; 2657 } 2658 2659 static const struct file_operations proc_vmalloc_operations = { 2660 .open = vmalloc_open, 2661 .read = seq_read, 2662 .llseek = seq_lseek, 2663 .release = seq_release_private, 2664 }; 2665 2666 static int __init proc_vmalloc_init(void) 2667 { 2668 proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); 2669 return 0; 2670 } 2671 module_init(proc_vmalloc_init); 2672 2673 void get_vmalloc_info(struct vmalloc_info *vmi) 2674 { 2675 struct vmap_area *va; 2676 unsigned long free_area_size; 2677 unsigned long prev_end; 2678 2679 vmi->used = 0; 2680 vmi->largest_chunk = 0; 2681 2682 prev_end = VMALLOC_START; 2683 2684 spin_lock(&vmap_area_lock); 2685 2686 if (list_empty(&vmap_area_list)) { 2687 vmi->largest_chunk = VMALLOC_TOTAL; 2688 goto out; 2689 } 2690 2691 list_for_each_entry(va, &vmap_area_list, list) { 2692 unsigned long addr = va->va_start; 2693 2694 /* 2695 * Some archs keep another range for modules in vmalloc space 2696 */ 2697 if (addr < VMALLOC_START) 2698 continue; 2699 if (addr >= VMALLOC_END) 2700 break; 2701 2702 if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING)) 2703 continue; 2704 2705 vmi->used += (va->va_end - va->va_start); 2706 2707 free_area_size = addr - prev_end; 2708 if (vmi->largest_chunk < free_area_size) 2709 vmi->largest_chunk = free_area_size; 2710 2711 prev_end = va->va_end; 2712 } 2713 2714 if (VMALLOC_END - prev_end > vmi->largest_chunk) 2715 vmi->largest_chunk = VMALLOC_END - prev_end; 2716 2717 out: 2718 spin_unlock(&vmap_area_lock); 2719 } 2720 #endif 2721 2722
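/*
 * Illustrative sketch (editorial addition, placed after the file for
 * reference): get_vmalloc_info() is the hook that lets callers such as the
 * /proc/meminfo code report VmallocUsed and VmallocChunk without walking
 * vmap_area_list themselves. Only the fields written above (used,
 * largest_chunk) are assumed; the reporting function below is hypothetical.
 */
#if 0	/* example only */
static void example_report_vmalloc(struct seq_file *m)
{
	struct vmalloc_info vmi;

	get_vmalloc_info(&vmi);
	seq_printf(m, "VmallocTotal: %8lu kB\n", VMALLOC_TOTAL >> 10);
	seq_printf(m, "VmallocUsed:  %8lu kB\n", vmi.used >> 10);
	seq_printf(m, "VmallocChunk: %8lu kB\n", vmi.largest_chunk >> 10);
}
#endif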