/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
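/*
 * Illustrative sketch (not a contract beyond what the callers below rely
 * on): vunmap_page_range() only clears page table entries; it never
 * flushes TLBs. A caller tearing down a two-page mapping is expected to
 * pair it with a TLB flush, as unmap_kernel_range() does later in this
 * file:
 *
 *	vunmap_page_range(addr, addr + 2 * PAGE_SIZE);
 *	flush_tlb_kernel_range(addr, addr + 2 * PAGE_SIZE);
 */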
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	if (unlikely(err))
		return err;
	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}
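/*
 * Usage sketch (illustrative; "addr" and "pages" are hypothetical locals):
 * on success vmap_page_range() returns the number of pages it mapped,
 * which callers such as map_vm_area() below use to advance through a
 * larger page array:
 *
 *	int nr = vmap_page_range(addr, addr + (4 << PAGE_SHIFT),
 *				 PAGE_KERNEL, pages);
 *	if (nr > 0)
 *		pages += nr;	(pages[0..3] are now mapped at addr)
 */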
/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);
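/*
 * Worked example of the ordering above (illustrative addresses): areas
 * never overlap, so comparing the new area's start against a node's end
 * yields a strict order. With an existing area [0x1000, 0x3000), inserting
 * [0x3000, 0x4000) sees va_start (0x3000) not below va_end (0x3000) and
 * va_end (0x4000) above va_start (0x1000), so it descends right, ending up
 * after the existing area in both the rbtree and the sorted list.
 */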
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);	/* don't leak the vmap_area on failure */
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}
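/*
 * Minimal pairing sketch (illustrative): reserve one page of kva in the
 * vmalloc window, then give it back. Callers that also mapped pages into
 * the region use free_unmap_vmap_area() below instead, so the page table
 * entries are cleared and the TLB flush is batched lazily.
 *
 *	struct vmap_area *va;
 *
 *	va = alloc_vmap_area(PAGE_SIZE, PAGE_SIZE,
 *			     VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL);
 *	if (!IS_ERR(va)) {
 *		...
 *		free_vmap_area(va);
 *	}
 */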
static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}
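/*
 * How the (sync, force_flush) pairs are used by the callers in this file,
 * plus a worked example of the purge threshold (illustrative CPU count):
 *
 *	try_purge_vmap_area_lazy():  (0, 0)      opportunistic, may bail out
 *	purge_vmap_area_lazy():      (1, 0)      always waits for the purge
 *	vm_unmap_aliases():          (1, flush)  may force the TLB flush
 *
 * With 4 CPUs online, fls(4) == 3, so lazy_max_pages() allows
 * 3 * 32MB / PAGE_SIZE pages (24576 with 4K pages, i.e. 96MB) of lazily
 * freed va to accumulate before try_purge_vmap_area_lazy() kicks in.
 */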
/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct list_head free_list;
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
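/*
 * Worked example of the sizing above (illustrative config): on 32 bit with
 * 4K pages and NR_CPUS == 4, VMALLOC_PAGES is 128MB / 4K = 32768, so
 * VMALLOC_PAGES / NR_CPUS / 16 = 512. That is clamped between
 * VMAP_BBMAP_BITS_MIN (64) and VMAP_BBMAP_BITS_MAX (1024), giving
 * VMAP_BBMAP_BITS = 512 and VMAP_BLOCK_SIZE = 2MB, i.e. at least 16 blocks
 * per CPU fit in the guessed 128MB of vmalloc space.
 */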
/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	BUG_ON(!list_empty(&vb->free_list));

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}
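/*
 * Worked example for addr_to_vb_idx() (illustrative numbers): with
 * VMAP_BLOCK_SIZE == 4MB, VMALLOC_START rounded down to a 4MB boundary is
 * subtracted first, so every address inside one block maps to the same
 * index. For VMALLOC_START == 0xf8000000, both 0xf8400000 and 0xf87ff000
 * yield (addr - 0xf8000000) / 4MB == 1, which is the radix tree slot the
 * owning vmap_block was inserted under in new_vmap_block().
 */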
static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
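/*
 * Typical use (illustrative; set_memory_uc() is an arch-specific example,
 * not something this file provides): a caller about to change the
 * attributes of pages it owns first kills any lazily kept vmap aliases so
 * no stale TLB entry with the old attributes can remain:
 *
 *	vm_unmap_aliases();
 *	set_memory_uc((unsigned long)addr, nr_pages);
 */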
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
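/*
 * Usage sketch (illustrative; "pages" is a hypothetical, already allocated
 * array of page pointers and "data" a hypothetical source buffer): small
 * counts take the per-cpu vmap block fast path, larger ones fall back to
 * the global allocator. The unmap must cover the same count as the map:
 *
 *	void *buf = vm_map_ram(pages, 8, -1, PAGE_KERNEL);
 *	if (buf) {
 *		memcpy(buf, data, 8 * PAGE_SIZE);
 *		vm_unmap_ram(buf, 8);
 *	}
 */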
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-mapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
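/*
 * Pairing sketch for the noflush variants (illustrative; "addr", "size"
 * and "pages" are hypothetical): the caller owns all cache and TLB
 * flushing, which is what lets users batch those operations across many
 * mappings:
 *
 *	flush_cache_vmap(addr, addr + size);	(if the architecture needs it)
 *	map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	...
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */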
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}
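/*
 * Reservation sketch (illustrative): get_vm_area() only reserves virtual
 * address space (plus the guard page); nothing is mapped yet. An
 * ioremap-style user would then install its own page table entries and
 * later drop the reservation:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (area) {
 *		... map something at area->addr ...
 *		remove_vm_area(area->addr);
 *		kfree(area);
 *	}
 */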
/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);
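/*
 * The two entry points above differ only in the deallocate_pages flag
 * passed to __vunmap(). A minimal contrast (illustrative; "pages" is a
 * hypothetical caller-owned array):
 *
 *	p = vmalloc(sz);	...;	vfree(p);	(pages freed here)
 *	p = vmap(pages, n, VM_MAP, PAGE_KERNEL);
 *				...;	vunmap(p);	(pages stay with caller)
 */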
/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}
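/*
 * Worked example of the bounded recursion above (illustrative sizes): on
 * 64 bit with 4K pages and 8-byte pointers, a vmalloc of more than 2MB
 * needs nr_pages > 512, so the pages array itself exceeds PAGE_SIZE and
 * is obtained with __vmalloc_node() (marked VM_VPAGES). Each level shrinks
 * the allocation by a factor of PAGE_SIZE / sizeof(struct page *), so the
 * recursion bottoms out in a kmalloc almost immediately.
 */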
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
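/*
 * Usage sketch (illustrative; "struct foo" and "nentries" are
 * hypothetical): vmalloc() trades some TLB pressure for the ability to
 * satisfy large, virtually contiguous allocations from physically
 * scattered pages, so it suits big, long-lived buffers:
 *
 *	struct foo *tbl = vmalloc(nentries * sizeof(struct foo));
 *	if (tbl) {
 *		...
 *		vfree(tbl);
 *	}
 */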
/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
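/*
 * Sketch of a driver mmap handler built on the above (illustrative;
 * "my_buf" is a hypothetical buffer that was allocated with
 * vmalloc_user(), which sets the required VM_USERMAP flag):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
 *	}
 */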
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[KSYM_SYMBOL_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif