/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPUs that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 *
 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
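 *
 * On NOMMU there is no page table to walk: the PFN is simply @address
 * shifted down by PAGE_SHIFT.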
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 * vzalloc - allocate virtually contiguous memory with zero fill
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
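 *
 * On NOMMU the @node hint is ignored; this falls straight back to vmalloc().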
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size from
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range.  No actual mappings
 * are created.  If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(curr->mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}


	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
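		/* vm_pgoff counts pages; scale it up to a byte offset for the read */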
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

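	/* the resized mapping must still fit within the region originally
	 * allocated for it - nommu mappings cannot be moved or grown */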
	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					  (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond that so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
				  0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
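 *
 * The value is computed once at boot from the amount of memory free at
 * that time.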
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)