/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>
#include <linux/printk.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
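
/*
 * Illustrative sketch only: a balloon driver on such a hypervisor might
 * periodically sample this metric and report it to the host, e.g.
 * converted from pages to kilobytes:
 *
 *	static unsigned long sample_commit_kb(void)
 *	{
 *		return vm_memory_committed() << (PAGE_SHIFT - 10);
 *	}
 */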

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
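
/*
 * Illustrative sketch: a caller with a hypothetical user address "uaddr"
 * might pin one writable page like so; on !MMU the "pinned" page is
 * simply the physical page that already backs the address:
 *
 *	struct page *page;
 *	long n;
 *
 *	n = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
 *			   1, 0, &page, NULL);
 *	(the fifth and sixth arguments are the write and force flags)
 *
 * A return of 1 means the page was found and its refcount raised; it is
 * dropped again with page_cache_release(page).
 */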

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 * vmalloc - allocate virtually contiguous memory
 *
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
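
/*
 * Note that on !MMU the memory vmalloc() hands back is really kmalloc()
 * memory: physically contiguous, and released via vfree()/kfree(). An
 * illustrative consequence:
 *
 *	void *p = vmalloc(8192);
 *	struct page *pg = vmalloc_to_page(p);
 *	(pg here is simply virt_to_page(p))
 *	vfree(p);
 */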

/*
 * vzalloc - allocate virtually contiguous memory with zero fill
 *
 * @size:	allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);
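
/*
 * Illustrative sketch of the VM_USERMAP contract described above, with a
 * hypothetical driver buffer "mydev_buf" allocated by vmalloc_user(): a
 * char device could hand it to userspace from its mmap handler:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, mydev_buf, 0);
 *	}
 *
 * remap_vmalloc_range() (defined later in this file) refuses VMAs that
 * lack VM_USERMAP.
 */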

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size:	size of the area
 *
 * Returns:	NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
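
/*
 * Worked example for sys_brk() above: with start_brk = 0x10000 and
 * context.end_brk = 0x18000 (values typically set up by the binary
 * loader), brk(0x14000) succeeds and merely records the new break;
 * nothing is allocated or freed since the brk area was set aside up
 * front. brk(0x20000) lies beyond end_brk and just returns the current
 * break instead.
 */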

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;
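
	/* worked example of the check above, on a 32-bit box with 4KiB
	 * pages: pgoff = 0xfffff000 plus an rlen of 16MiB (0x1000 pages)
	 * wraps around to 0, which compares less than pgoff, so the
	 * request is rejected */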

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	total = 1 << order;
	point = len >> PAGE_SHIFT;
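
	/* worked example: a 3-page (12KiB) request has order 2, so total
	 * is 4 pages while point is 3; with a sysctl_nr_trim_pages value
	 * of 1, total - point = 1 qualifies for trimming and the
	 * exact-size path below is taken, never allocating the 4th page */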

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		total = point;
		kdebug("try to alloc exact %lu pages", total);
		base = alloc_pages_exact(len, GFP_KERNEL);
	} else {
		base = (void *)__get_free_pages(GFP_KERNEL, order);
	}

	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
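	/* for example: an existing shared region covering file pages 0-15
	 * can back a new request for pages 4-7 outright (a strict subset),
	 * whereas a request for pages 12-19 overlaps only partially and
	 * must either be sited by the driver (BDI_CAP_MAP_DIRECT) or be
	 * rejected as a sharing violation */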
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */
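
/*
 * Worked example for old_mmap() above: a byte offset of 0x3000 with 4KiB
 * pages becomes pgoff 3, whereas an offset of 0x3800 is not page-aligned
 * and fails with -EINVAL before sys_mmap_pgoff() is ever reached.
 */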

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
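
/*
 * Worked example of the OVERCOMMIT_GUESS estimate above, with 4KiB pages
 * and made-up counts: 10000 free + 5000 file - 1000 shmem + 0 swap
 * + 2000 reclaimable slab = 16000 pages; subtracting a 1000-page total
 * reserve leaves 15000, and a non-root caller also loses the admin
 * reserve (8MB = 2048 pages), leaving 12952 pages that the request must
 * stay strictly below for the allocation to be allowed.
 */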

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					  (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond that so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	i_mmap_unlock_read(inode->i_mapping);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)
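
/*
 * Worked example of the two reserves above: a machine with 1GiB free at
 * boot has free_kbytes = 1048576, so free_kbytes / 32 = 32768KiB; the
 * user reserve becomes 32MiB (under its 128MiB cap) while the admin
 * reserve is clamped to its 8MiB cap, matching the "more than 256MB"
 * rule described above (256MiB / 32 is exactly 8MiB).
 */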