/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
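
/*
 * (Note: "norandmaps" is the boot-time switch; the same knob is also
 * exposed at run time as the kernel.randomize_va_space sysctl, i.e.
 * /proc/sys/kernel/randomize_va_space, where 0 likewise disables all
 * address-space randomization.)
 */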

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none. Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? No, end can't go down to 0 there.
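	 *
	 * (A worked example may help: with 4K pages and a 2MB PMD_SIZE,
	 * as on x86-64, suppose addr = 0x3ff000 and floor = 0x201000.
	 * "addr &= PMD_MASK" rounds addr down to 0x200000, which is
	 * below floor - freeing that pmd table would also drop ptes
	 * the region below floor still needs - so addr is bumped up
	 * to 0x400000 before any table is considered for freeing.)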
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) is
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}
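
/*
 * (The pattern above is worth naming: allocate the new pte page
 * without the lock held, then take page_table_lock, re-check that
 * no other thread populated the pmd meanwhile, and free our new
 * page again if we lost the race. __pte_alloc_kernel below repeats
 * the same pattern for the kernel page tables.)
 */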

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
			  unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

/* True for a private mapping that may be written: VM_MAYWRITE set, VM_SHARED clear. */
static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
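 *
 * (Concretely: remap_pfn_range() on a COWable vma stores the first pfn
 * in vma->vm_pgoff, so a pte that still maps its original frame
 * satisfies the linear rule above and is treated as special, while a
 * COWed replacement page - a freshly allocated frame whose pfn cannot
 * match that linear formula - is detected as normal.)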
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn;

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte))) {
			VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
			return pte_page(pte);
		}
		VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	VM_BUG_ON(!pfn_valid(pfn));

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 *
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read-only.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, vma, addr);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];	/* rss[0] accumulates file pages, rss[1] anon pages */

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int
copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}
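
/*
 * (For the common fork() case that early test is a big win: an
 * ordinary file-backed mapping with no anonymous pages - a shared
 * library text segment, say - has no anon_vma, so the whole vma is
 * skipped and the child simply faults its ptes in on first touch.)
 */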

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					SetPageReferenced(page);
				file_rss--;
			}
			page_remove_rmap(page, vma);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
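 *
 * (With 4K pages, ZAP_BLOCK_SIZE above works out to 32K per batch
 * under CONFIG_PREEMPT and 4M per batch otherwise.)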
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns. So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been set up, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
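 *
 * (flags is a FOLL_* mask: as the body below shows, FOLL_GET takes a
 * reference on the returned page, FOLL_TOUCH marks it accessed and
 * possibly dirty, FOLL_WRITE demands a writable pte, and FOLL_ANON
 * allows returning ZERO_PAGE for never-touched anonymous ranges.)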
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto bad_page;

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;
	/* Fall through to ZERO_PAGE handling */
no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(0);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

/* Can we do the FOLL_ANON optimization? */
static inline int use_zero_page(struct vm_area_struct *vma)
{
	/*
	 * We don't want to optimize FOLL_ANON for make_pages_present()
	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
	 * we want to get the page from the page tables to make sure
	 * that we serialize and update with any other user of that
	 * mapping.
	 */
	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
		return 0;
	/*
	 * And if we have a fault routine, it's not an anonymous region.
	 */
	return !vma->vm_ops || !vma->vm_ops->fault;
}

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	if (len <= 0)
		return 0;
	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i, write);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && use_zero_page(vma))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			/*
			 * If tsk is ooming, cut off its access to large memory
			 * allocations. It has a pending SIGKILL, but it can't
			 * be processed until returning to user space.
			 */
			if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
				return i ? i : -ENOMEM;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					else if (ret & VM_FAULT_SIGBUS)
						return i ? i : -EFAULT;
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
	if (retval)
		goto out;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out_uncharge;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out_uncharge;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(mm, file_rss);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out_uncharge:
	mem_cgroup_uncharge_page(page);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
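 *
 * (Illustrative use, assuming a driver-defined device_base physical
 * address - the names here are not kernel API: a fault-time helper
 * might do
 *
 *	pfn = (device_base >> PAGE_SHIFT) + offset_in_pages;
 *	vm_insert_pfn(vma, fault_address, pfn);
 *
 * on a vma that was set up with VM_PFNMAP and the desired
 * vm_page_prot.)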
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range). However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pfn);

int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number (physical address >> PAGE_SHIFT) of kernel memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page"
	 *	associated with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
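	 *
	 * (That is the same linearity rule vm_normal_page() documents:
	 * storing the first pfn in vm_pgoff below is what later lets
	 * the core VM tell an original, still-linear pfn mapping apart
	 * from a page that has since been COWed.)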
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte, token, addr, data);
		if (err)
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}

static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long addr, unsigned long end,
				     pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}

/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
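
/*
 * (A minimal sketch of a pte_fn_t callback - matching the call
 * fn(pte, token, addr, data) above; the names are illustrative only,
 * not kernel API:
 *
 *	static int count_present_pte(pte_t *pte, pgtable_t token,
 *				     unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			++*(unsigned long *)data;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(mm, start, size, count_present_pte, &count);
 *
 * A nonzero return from the callback stops the scan and is propagated
 * back to the caller of apply_to_page_range.)
 */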

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically. Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case, we always want
 * pte_mkwrite. But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int reuse = 0, ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page)) {
		if (!TestSetPageLocked(old_page)) {
			reuse = can_share_swap_page(old_page);
			unlock_page(old_page);
		}
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
				goto unwritable_page;

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it. If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			page_cache_release(old_page);
			if (!pte_same(*page_table, orig_pte))
				goto unlock;

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);
		reuse = 1;
	}

	if (reuse) {
reuse:
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry,1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto unlock;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	VM_BUG_ON(old_page == ZERO_PAGE(0));
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (!new_page)
		goto oom;
	cow_user_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter(mm, file_rss);
				inc_mm_counter(mm, anon_rss);
			}
		} else
			inc_mm_counter(mm, anon_rss);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush(vma, address, page_table);
		set_pte_at(mm, address, page_table, entry);
		update_mmu_cache(vma, address, entry);
		lru_cache_add_active(new_page);
		page_add_new_anon_rmap(new_page, vma, address);

		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page, vma);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
	if (old_page)
		page_cache_release(old_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	if (dirty_page) {
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		/*
		 * Yes, Virginia, this is actually required to prevent a race
		 * with clear_page_dirty_for_io() from clearing the page dirty
		 * bit after it clears all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
		 * do_no_page is protected similarly.
		 */
		wait_on_page_locked(dirty_page);
		set_page_dirty_balance(dirty_page, page_mkwrite);
		put_page(dirty_page);
	}
	return ret;
oom_free_new:
	page_cache_release(new_page);
oom:
	if (old_page)
		page_cache_release(old_page);
	return VM_FAULT_OOM;

unwritable_page:
	page_cache_release(old_page);
	return VM_FAULT_SIGBUS;
}

/*
 * Helper functions for unmap_mapping_range().
 *
 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
 *
 * We have to restart searching the prio_tree whenever we drop the lock,
 * since the iterator is only valid while the lock is held, and anyway
 * a later vma might be split and reinserted earlier while lock dropped.
 *
 * The list of nonlinear vmas could be handled more efficiently, using
 * a placeholder, but handle it in the same way until a need is shown.
 * It is important to search the prio_tree before nonlinear list: a vma
 * may become nonlinear and be shifted from prio_tree to nonlinear list
 * while the lock is dropped; but never shifted from list to prio_tree.
 *
 * In order to make forward progress despite restarting the search,
 * vm_truncate_count is used to mark a vma as now dealt with, so we can
 * quickly skip it next time around. Since the prio_tree search only
 * shows us those vmas affected by unmapping the range in question, we
 * can't efficiently keep all vmas in step with mapping->truncate_count:
 * so instead reset them all whenever it wraps back to 0 (then go to 1).
 * mapping->truncate_count and vma->vm_truncate_count are protected by
 * i_mmap_lock.
 *
 * In order to make forward progress despite repeatedly restarting some
 * large vma, note the restart_addr from unmap_vmas when it breaks out:
 * and restart from that address when we reach that vma again. It might
 * have been split or merged, shrunk or extended, but never shifted: so
 * restart_addr remains valid so long as it remains in the vma's range.
 * unmap_mapping_range forces truncate_count to leap over page-aligned
 * values so we can save vma's restart_addr in its truncate_count field.
 */
/* True when the value is page-aligned, i.e. a saved restart address. */
#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))

static void reset_vma_truncate_counts(struct address_space *mapping)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
		vma->vm_truncate_count = 0;
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_truncate_count = 0;
}
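
/*
 * (So vma->vm_truncate_count holds one of two kinds of value: a copy
 * of mapping->truncate_count, never page-aligned, meaning "this pass
 * is done with the vma"; or a page-aligned restart_addr, meaning
 * "resume unmapping this vma from here". is_restart_addr() above keys
 * on that alignment to tell the two apart.)
 */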
1972 */ 1973 1974 again: 1975 restart_addr = vma->vm_truncate_count; 1976 if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 1977 start_addr = restart_addr; 1978 if (start_addr >= end_addr) { 1979 /* Top of vma has been split off since last time */ 1980 vma->vm_truncate_count = details->truncate_count; 1981 return 0; 1982 } 1983 } 1984 1985 restart_addr = zap_page_range(vma, start_addr, 1986 end_addr - start_addr, details); 1987 need_break = need_resched() || spin_needbreak(details->i_mmap_lock); 1988 1989 if (restart_addr >= end_addr) { 1990 /* We have now completed this vma: mark it so */ 1991 vma->vm_truncate_count = details->truncate_count; 1992 if (!need_break) 1993 return 0; 1994 } else { 1995 /* Note restart_addr in vma's truncate_count field */ 1996 vma->vm_truncate_count = restart_addr; 1997 if (!need_break) 1998 goto again; 1999 } 2000 2001 spin_unlock(details->i_mmap_lock); 2002 cond_resched(); 2003 spin_lock(details->i_mmap_lock); 2004 return -EINTR; 2005 } 2006 2007 static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 2008 struct zap_details *details) 2009 { 2010 struct vm_area_struct *vma; 2011 struct prio_tree_iter iter; 2012 pgoff_t vba, vea, zba, zea; 2013 2014 restart: 2015 vma_prio_tree_foreach(vma, &iter, root, 2016 details->first_index, details->last_index) { 2017 /* Skip quickly over those we have already dealt with */ 2018 if (vma->vm_truncate_count == details->truncate_count) 2019 continue; 2020 2021 vba = vma->vm_pgoff; 2022 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 2023 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 2024 zba = details->first_index; 2025 if (zba < vba) 2026 zba = vba; 2027 zea = details->last_index; 2028 if (zea > vea) 2029 zea = vea; 2030 2031 if (unmap_mapping_range_vma(vma, 2032 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 2033 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 2034 details) < 0) 2035 goto restart; 2036 } 2037 } 2038 2039 static inline void unmap_mapping_range_list(struct list_head *head, 2040 struct zap_details *details) 2041 { 2042 struct vm_area_struct *vma; 2043 2044 /* 2045 * In nonlinear VMAs there is no correspondence between virtual address 2046 * offset and file offset. So we must perform an exhaustive search 2047 * across *all* the pages in each nonlinear VMA, not just the pages 2048 * whose virtual address lies outside the file truncation point. 2049 */ 2050 restart: 2051 list_for_each_entry(vma, head, shared.vm_set.list) { 2052 /* Skip quickly over those we have already dealt with */ 2053 if (vma->vm_truncate_count == details->truncate_count) 2054 continue; 2055 details->nonlinear_vma = vma; 2056 if (unmap_mapping_range_vma(vma, vma->vm_start, 2057 vma->vm_end, details) < 0) 2058 goto restart; 2059 } 2060 } 2061 2062 /** 2063 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. 2064 * @mapping: the address space containing mmaps to be unmapped. 2065 * @holebegin: byte in first page to unmap, relative to the start of 2066 * the underlying file. This will be rounded down to a PAGE_SIZE 2067 * boundary. Note that this is different from vmtruncate(), which 2068 * must keep the partial page. In contrast, we must get rid of 2069 * partial pages. 2070 * @holelen: size of prospective hole in bytes. This will be rounded 2071 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 2072 * end of the file. 
2073 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 2074 * but 0 when invalidating pagecache, don't throw away private data. 2075 */ 2076 void unmap_mapping_range(struct address_space *mapping, 2077 loff_t const holebegin, loff_t const holelen, int even_cows) 2078 { 2079 struct zap_details details; 2080 pgoff_t hba = holebegin >> PAGE_SHIFT; 2081 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2082 2083 /* Check for overflow. */ 2084 if (sizeof(holelen) > sizeof(hlen)) { 2085 long long holeend = 2086 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2087 if (holeend & ~(long long)ULONG_MAX) 2088 hlen = ULONG_MAX - hba + 1; 2089 } 2090 2091 details.check_mapping = even_cows? NULL: mapping; 2092 details.nonlinear_vma = NULL; 2093 details.first_index = hba; 2094 details.last_index = hba + hlen - 1; 2095 if (details.last_index < details.first_index) 2096 details.last_index = ULONG_MAX; 2097 details.i_mmap_lock = &mapping->i_mmap_lock; 2098 2099 spin_lock(&mapping->i_mmap_lock); 2100 2101 /* Protect against endless unmapping loops */ 2102 mapping->truncate_count++; 2103 if (unlikely(is_restart_addr(mapping->truncate_count))) { 2104 if (mapping->truncate_count == 0) 2105 reset_vma_truncate_counts(mapping); 2106 mapping->truncate_count++; 2107 } 2108 details.truncate_count = mapping->truncate_count; 2109 2110 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 2111 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2112 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2113 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2114 spin_unlock(&mapping->i_mmap_lock); 2115 } 2116 EXPORT_SYMBOL(unmap_mapping_range); 2117 2118 /** 2119 * vmtruncate - unmap mappings "freed" by truncate() syscall 2120 * @inode: inode of the file used 2121 * @offset: file offset to start truncating 2122 * 2123 * NOTE! We have to be ready to update the memory sharing 2124 * between the file and the memory map for a potential last 2125 * incomplete page. Ugly, but necessary. 2126 */ 2127 int vmtruncate(struct inode * inode, loff_t offset) 2128 { 2129 if (inode->i_size < offset) { 2130 unsigned long limit; 2131 2132 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 2133 if (limit != RLIM_INFINITY && offset > limit) 2134 goto out_sig; 2135 if (offset > inode->i_sb->s_maxbytes) 2136 goto out_big; 2137 i_size_write(inode, offset); 2138 } else { 2139 struct address_space *mapping = inode->i_mapping; 2140 2141 /* 2142 * truncation of in-use swapfiles is disallowed - it would 2143 * cause subsequent swapout to scribble on the now-freed 2144 * blocks. 2145 */ 2146 if (IS_SWAPFILE(inode)) 2147 return -ETXTBSY; 2148 i_size_write(inode, offset); 2149 2150 /* 2151 * unmap_mapping_range is called twice, first simply for 2152 * efficiency so that truncate_inode_pages does fewer 2153 * single-page unmaps. However after this first call, and 2154 * before truncate_inode_pages finishes, it is possible for 2155 * private pages to be COWed, which remain after 2156 * truncate_inode_pages finishes, hence the second 2157 * unmap_mapping_range call must be made for correctness. 
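*
* An illustrative interleaving (A truncating, B write-faulting a
* private mapping of the same file):
*
*	A: unmap_mapping_range()	zaps all ptes past 'offset'
*	B: write fault			re-faults the old page and
*					COWs a private copy of it
*	A: truncate_inode_pages()	frees the pagecache pages,
*					but not B's anonymous copy
*	A: unmap_mapping_range()	zaps B's copy too (even_cows)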
2158 */ 2159 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 2160 truncate_inode_pages(mapping, offset); 2161 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 2162 } 2163 2164 if (inode->i_op && inode->i_op->truncate) 2165 inode->i_op->truncate(inode); 2166 return 0; 2167 2168 out_sig: 2169 send_sig(SIGXFSZ, current, 0); 2170 out_big: 2171 return -EFBIG; 2172 } 2173 EXPORT_SYMBOL(vmtruncate); 2174 2175 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) 2176 { 2177 struct address_space *mapping = inode->i_mapping; 2178 2179 /* 2180 * If the underlying filesystem is not going to provide 2181 * a way to truncate a range of blocks (punch a hole) - 2182 * we should return failure right now. 2183 */ 2184 if (!inode->i_op || !inode->i_op->truncate_range) 2185 return -ENOSYS; 2186 2187 mutex_lock(&inode->i_mutex); 2188 down_write(&inode->i_alloc_sem); 2189 unmap_mapping_range(mapping, offset, (end - offset), 1); 2190 truncate_inode_pages_range(mapping, offset, end); 2191 unmap_mapping_range(mapping, offset, (end - offset), 1); 2192 inode->i_op->truncate_range(inode, offset, end); 2193 up_write(&inode->i_alloc_sem); 2194 mutex_unlock(&inode->i_mutex); 2195 2196 return 0; 2197 } 2198 2199 /* 2200 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2201 * but allow concurrent faults), and pte mapped but not yet locked. 2202 * We return with mmap_sem still held, but pte unmapped and unlocked. 2203 */ 2204 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 2205 unsigned long address, pte_t *page_table, pmd_t *pmd, 2206 int write_access, pte_t orig_pte) 2207 { 2208 spinlock_t *ptl; 2209 struct page *page; 2210 swp_entry_t entry; 2211 pte_t pte; 2212 int ret = 0; 2213 2214 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2215 goto out; 2216 2217 entry = pte_to_swp_entry(orig_pte); 2218 if (is_migration_entry(entry)) { 2219 migration_entry_wait(mm, pmd, address); 2220 goto out; 2221 } 2222 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2223 page = lookup_swap_cache(entry); 2224 if (!page) { 2225 grab_swap_token(); /* Contend for token _before_ read-in */ 2226 page = swapin_readahead(entry, 2227 GFP_HIGHUSER_MOVABLE, vma, address); 2228 if (!page) { 2229 /* 2230 * Back out if somebody else faulted in this pte 2231 * while we released the pte lock. 2232 */ 2233 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2234 if (likely(pte_same(*page_table, orig_pte))) 2235 ret = VM_FAULT_OOM; 2236 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2237 goto unlock; 2238 } 2239 2240 /* Had to read the page from swap area: Major fault */ 2241 ret = VM_FAULT_MAJOR; 2242 count_vm_event(PGMAJFAULT); 2243 } 2244 2245 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2246 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2247 ret = VM_FAULT_OOM; 2248 goto out; 2249 } 2250 2251 mark_page_accessed(page); 2252 lock_page(page); 2253 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2254 2255 /* 2256 * Back out if somebody else already faulted in this pte. 2257 */ 2258 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2259 if (unlikely(!pte_same(*page_table, orig_pte))) 2260 goto out_nomap; 2261 2262 if (unlikely(!PageUptodate(page))) { 2263 ret = VM_FAULT_SIGBUS; 2264 goto out_nomap; 2265 } 2266 2267 /* The page isn't present yet, go ahead with the fault. 
*/ 2268 2269 inc_mm_counter(mm, anon_rss); 2270 pte = mk_pte(page, vma->vm_page_prot); 2271 if (write_access && can_share_swap_page(page)) { 2272 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2273 write_access = 0; 2274 } 2275 2276 flush_icache_page(vma, page); 2277 set_pte_at(mm, address, page_table, pte); 2278 page_add_anon_rmap(page, vma, address); 2279 2280 swap_free(entry); 2281 if (vm_swap_full()) 2282 remove_exclusive_swap_page(page); 2283 unlock_page(page); 2284 2285 if (write_access) { 2286 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2287 if (ret & VM_FAULT_ERROR) 2288 ret &= VM_FAULT_ERROR; 2289 goto out; 2290 } 2291 2292 /* No need to invalidate - it was non-present before */ 2293 update_mmu_cache(vma, address, pte); 2294 unlock: 2295 pte_unmap_unlock(page_table, ptl); 2296 out: 2297 return ret; 2298 out_nomap: 2299 mem_cgroup_uncharge_page(page); 2300 pte_unmap_unlock(page_table, ptl); 2301 unlock_page(page); 2302 page_cache_release(page); 2303 return ret; 2304 } 2305 2306 /* 2307 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2308 * but allow concurrent faults), and pte mapped but not yet locked. 2309 * We return with mmap_sem still held, but pte unmapped and unlocked. 2310 */ 2311 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 2312 unsigned long address, pte_t *page_table, pmd_t *pmd, 2313 int write_access) 2314 { 2315 struct page *page; 2316 spinlock_t *ptl; 2317 pte_t entry; 2318 2319 /* Allocate our own private page. */ 2320 pte_unmap(page_table); 2321 2322 if (unlikely(anon_vma_prepare(vma))) 2323 goto oom; 2324 page = alloc_zeroed_user_highpage_movable(vma, address); 2325 if (!page) 2326 goto oom; 2327 __SetPageUptodate(page); 2328 2329 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 2330 goto oom_free_page; 2331 2332 entry = mk_pte(page, vma->vm_page_prot); 2333 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2334 2335 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2336 if (!pte_none(*page_table)) 2337 goto release; 2338 inc_mm_counter(mm, anon_rss); 2339 lru_cache_add_active(page); 2340 page_add_new_anon_rmap(page, vma, address); 2341 set_pte_at(mm, address, page_table, entry); 2342 2343 /* No need to invalidate - it was non-present before */ 2344 update_mmu_cache(vma, address, entry); 2345 unlock: 2346 pte_unmap_unlock(page_table, ptl); 2347 return 0; 2348 release: 2349 mem_cgroup_uncharge_page(page); 2350 page_cache_release(page); 2351 goto unlock; 2352 oom_free_page: 2353 page_cache_release(page); 2354 oom: 2355 return VM_FAULT_OOM; 2356 } 2357 2358 /* 2359 * __do_fault() tries to create a new page mapping. It aggressively 2360 * tries to share with existing pages, but makes a separate copy if 2361 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid 2362 * the next page fault. 2363 * 2364 * As this is called only for pages that do not currently exist, we 2365 * do not need to flush old virtual caches or the TLB. 2366 * 2367 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2368 * but allow concurrent faults), and pte neither mapped nor locked. 2369 * We return with mmap_sem still held, but pte unmapped and unlocked. 
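*
* The ->fault invoked below is supplied by the vma's vm_ops; for
* ordinary files it is typically filemap_fault, wired up roughly as
* follows (illustrative sketch; the real definition lives in
* mm/filemap.c):
*
*	struct vm_operations_struct generic_file_vm_ops = {
*		.fault = filemap_fault,
*	};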
2370 */ 2371 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2372 unsigned long address, pmd_t *pmd, 2373 pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 2374 { 2375 pte_t *page_table; 2376 spinlock_t *ptl; 2377 struct page *page; 2378 pte_t entry; 2379 int anon = 0; 2380 struct page *dirty_page = NULL; 2381 struct vm_fault vmf; 2382 int ret; 2383 int page_mkwrite = 0; 2384 2385 vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2386 vmf.pgoff = pgoff; 2387 vmf.flags = flags; 2388 vmf.page = NULL; 2389 2390 ret = vma->vm_ops->fault(vma, &vmf); 2391 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2392 return ret; 2393 2394 /* 2395 * For consistency in subsequent calls, make the faulted page always 2396 * locked. 2397 */ 2398 if (unlikely(!(ret & VM_FAULT_LOCKED))) 2399 lock_page(vmf.page); 2400 else 2401 VM_BUG_ON(!PageLocked(vmf.page)); 2402 2403 /* 2404 * Should we do an early C-O-W break? 2405 */ 2406 page = vmf.page; 2407 if (flags & FAULT_FLAG_WRITE) { 2408 if (!(vma->vm_flags & VM_SHARED)) { 2409 anon = 1; 2410 if (unlikely(anon_vma_prepare(vma))) { 2411 ret = VM_FAULT_OOM; 2412 goto out; 2413 } 2414 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 2415 vma, address); 2416 if (!page) { 2417 ret = VM_FAULT_OOM; 2418 goto out; 2419 } 2420 copy_user_highpage(page, vmf.page, address, vma); 2421 __SetPageUptodate(page); 2422 } else { 2423 /* 2424 * If the page will be shareable, see if the backing 2425 * address space wants to know that the page is about 2426 * to become writable 2427 */ 2428 if (vma->vm_ops->page_mkwrite) { 2429 unlock_page(page); 2430 if (vma->vm_ops->page_mkwrite(vma, page) < 0) { 2431 ret = VM_FAULT_SIGBUS; 2432 anon = 1; /* no anon but release vmf.page */ 2433 goto out_unlocked; 2434 } 2435 lock_page(page); 2436 /* 2437 * XXX: this is not quite right (racy vs 2438 * invalidate) to unlock and relock the page 2439 * like this, however a better fix requires 2440 * reworking page_mkwrite locking API, which 2441 * is better done later. 2442 */ 2443 if (!page->mapping) { 2444 ret = 0; 2445 anon = 1; /* no anon but release vmf.page */ 2446 goto out; 2447 } 2448 page_mkwrite = 1; 2449 } 2450 } 2451 2452 } 2453 2454 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2455 ret = VM_FAULT_OOM; 2456 goto out; 2457 } 2458 2459 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2460 2461 /* 2462 * This silly early PAGE_DIRTY setting removes a race 2463 * due to the bad i386 page protection. But it's valid 2464 * for other architectures too. 2465 * 2466 * Note that if write_access is true, we either now have 2467 * an exclusive copy of the page, or this is a shared mapping, 2468 * so we can make it writable and dirty to avoid having to 2469 * handle that later. 2470 */ 2471 /* Only go through if we didn't race with anybody else... 
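*
* For orientation, the shape of that recheck (the usual idiom in
* this file after the pte lock has been dropped and retaken):
*
*	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
*	if (likely(pte_same(*page_table, orig_pte)))
*		install the new pte
*	else
*		back out, somebody else already handled the fault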
*/ 2472 if (likely(pte_same(*page_table, orig_pte))) { 2473 flush_icache_page(vma, page); 2474 entry = mk_pte(page, vma->vm_page_prot); 2475 if (flags & FAULT_FLAG_WRITE) 2476 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2477 set_pte_at(mm, address, page_table, entry); 2478 if (anon) { 2479 inc_mm_counter(mm, anon_rss); 2480 lru_cache_add_active(page); 2481 page_add_new_anon_rmap(page, vma, address); 2482 } else { 2483 inc_mm_counter(mm, file_rss); 2484 page_add_file_rmap(page); 2485 if (flags & FAULT_FLAG_WRITE) { 2486 dirty_page = page; 2487 get_page(dirty_page); 2488 } 2489 } 2490 2491 /* no need to invalidate: a not-present page won't be cached */ 2492 update_mmu_cache(vma, address, entry); 2493 } else { 2494 mem_cgroup_uncharge_page(page); 2495 if (anon) 2496 page_cache_release(page); 2497 else 2498 anon = 1; /* no anon but release faulted_page */ 2499 } 2500 2501 pte_unmap_unlock(page_table, ptl); 2502 2503 out: 2504 unlock_page(vmf.page); 2505 out_unlocked: 2506 if (anon) 2507 page_cache_release(vmf.page); 2508 else if (dirty_page) { 2509 if (vma->vm_file) 2510 file_update_time(vma->vm_file); 2511 2512 set_page_dirty_balance(dirty_page, page_mkwrite); 2513 put_page(dirty_page); 2514 } 2515 2516 return ret; 2517 } 2518 2519 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2520 unsigned long address, pte_t *page_table, pmd_t *pmd, 2521 int write_access, pte_t orig_pte) 2522 { 2523 pgoff_t pgoff = (((address & PAGE_MASK) 2524 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2525 unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); 2526 2527 pte_unmap(page_table); 2528 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2529 } 2530 2531 /* 2532 * Fault of a previously existing named mapping. Repopulate the pte 2533 * from the encoded file_pte if possible. This enables swappable 2534 * nonlinear vmas. 2535 * 2536 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2537 * but allow concurrent faults), and pte mapped but not yet locked. 2538 * We return with mmap_sem still held, but pte unmapped and unlocked. 2539 */ 2540 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2541 unsigned long address, pte_t *page_table, pmd_t *pmd, 2542 int write_access, pte_t orig_pte) 2543 { 2544 unsigned int flags = FAULT_FLAG_NONLINEAR | 2545 (write_access ? FAULT_FLAG_WRITE : 0); 2546 pgoff_t pgoff; 2547 2548 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2549 return 0; 2550 2551 if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || 2552 !(vma->vm_flags & VM_CAN_NONLINEAR))) { 2553 /* 2554 * Page table corrupted: show pte and kill process. 2555 */ 2556 print_bad_pte(vma, orig_pte, address); 2557 return VM_FAULT_OOM; 2558 } 2559 2560 pgoff = pte_to_pgoff(orig_pte); 2561 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2562 } 2563 2564 /* 2565 * These routines also need to handle stuff like marking pages dirty 2566 * and/or accessed for architectures that don't do it in hardware (most 2567 * RISC architectures). The early dirtying is also good on the i386. 2568 * 2569 * There is also a hook called "update_mmu_cache()" that architectures 2570 * with external mmu caches can use to update those (ie the Sparc or 2571 * PowerPC hashed page tables that act as extended TLBs). 2572 * 2573 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2574 * but allow concurrent faults), and pte mapped but not yet locked. 2575 * We return with mmap_sem still held, but pte unmapped and unlocked. 
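*
* For orientation, the dispatch implemented below:
*
*	pte not present, pte_none, vma has ->fault	-> do_linear_fault
*	pte not present, pte_none, anonymous		-> do_anonymous_page
*	pte not present, pte_file			-> do_nonlinear_fault
*	pte not present, otherwise (swap entry)		-> do_swap_page
*	pte present, write to read-only pte		-> do_wp_page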
2576 */ 2577 static inline int handle_pte_fault(struct mm_struct *mm, 2578 struct vm_area_struct *vma, unsigned long address, 2579 pte_t *pte, pmd_t *pmd, int write_access) 2580 { 2581 pte_t entry; 2582 spinlock_t *ptl; 2583 2584 entry = *pte; 2585 if (!pte_present(entry)) { 2586 if (pte_none(entry)) { 2587 if (vma->vm_ops) { 2588 if (likely(vma->vm_ops->fault)) 2589 return do_linear_fault(mm, vma, address, 2590 pte, pmd, write_access, entry); 2591 } 2592 return do_anonymous_page(mm, vma, address, 2593 pte, pmd, write_access); 2594 } 2595 if (pte_file(entry)) 2596 return do_nonlinear_fault(mm, vma, address, 2597 pte, pmd, write_access, entry); 2598 return do_swap_page(mm, vma, address, 2599 pte, pmd, write_access, entry); 2600 } 2601 2602 ptl = pte_lockptr(mm, pmd); 2603 spin_lock(ptl); 2604 if (unlikely(!pte_same(*pte, entry))) 2605 goto unlock; 2606 if (write_access) { 2607 if (!pte_write(entry)) 2608 return do_wp_page(mm, vma, address, 2609 pte, pmd, ptl, entry); 2610 entry = pte_mkdirty(entry); 2611 } 2612 entry = pte_mkyoung(entry); 2613 if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { 2614 update_mmu_cache(vma, address, entry); 2615 } else { 2616 /* 2617 * This is needed only for protection faults but the arch code 2618 * is not yet telling us if this is a protection fault or not. 2619 * This still avoids useless tlb flushes for .text page faults 2620 * with threads. 2621 */ 2622 if (write_access) 2623 flush_tlb_page(vma, address); 2624 } 2625 unlock: 2626 pte_unmap_unlock(pte, ptl); 2627 return 0; 2628 } 2629 2630 /* 2631 * By the time we get here, we already hold the mm semaphore 2632 */ 2633 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2634 unsigned long address, int write_access) 2635 { 2636 pgd_t *pgd; 2637 pud_t *pud; 2638 pmd_t *pmd; 2639 pte_t *pte; 2640 2641 __set_current_state(TASK_RUNNING); 2642 2643 count_vm_event(PGFAULT); 2644 2645 if (unlikely(is_vm_hugetlb_page(vma))) 2646 return hugetlb_fault(mm, vma, address, write_access); 2647 2648 pgd = pgd_offset(mm, address); 2649 pud = pud_alloc(mm, pgd, address); 2650 if (!pud) 2651 return VM_FAULT_OOM; 2652 pmd = pmd_alloc(mm, pud, address); 2653 if (!pmd) 2654 return VM_FAULT_OOM; 2655 pte = pte_alloc_map(mm, pmd, address); 2656 if (!pte) 2657 return VM_FAULT_OOM; 2658 2659 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2660 } 2661 2662 #ifndef __PAGETABLE_PUD_FOLDED 2663 /* 2664 * Allocate page upper directory. 2665 * We've already handled the fast-path in-line. 2666 */ 2667 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2668 { 2669 pud_t *new = pud_alloc_one(mm, address); 2670 if (!new) 2671 return -ENOMEM; 2672 2673 smp_wmb(); /* See comment in __pte_alloc */ 2674 2675 spin_lock(&mm->page_table_lock); 2676 if (pgd_present(*pgd)) /* Another has populated it */ 2677 pud_free(mm, new); 2678 else 2679 pgd_populate(mm, pgd, new); 2680 spin_unlock(&mm->page_table_lock); 2681 return 0; 2682 } 2683 #endif /* __PAGETABLE_PUD_FOLDED */ 2684 2685 #ifndef __PAGETABLE_PMD_FOLDED 2686 /* 2687 * Allocate page middle directory. 2688 * We've already handled the fast-path in-line. 
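*
* The slow path below repeats the idiom used by __pud_alloc above;
* its shape, as a sketch:
*
*	new = pmd_alloc_one(mm, address);	(may sleep, no locks held)
*	smp_wmb();				(zeroed table visible before
*						 it is linked in)
*	spin_lock(&mm->page_table_lock);
*	if (pud already populated)
*		pmd_free(mm, new);		(lost the race, no harm done)
*	else
*		pud_populate(mm, pud, new);
*	spin_unlock(&mm->page_table_lock);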
2689 */ 2690 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2691 { 2692 pmd_t *new = pmd_alloc_one(mm, address); 2693 if (!new) 2694 return -ENOMEM; 2695 2696 smp_wmb(); /* See comment in __pte_alloc */ 2697 2698 spin_lock(&mm->page_table_lock); 2699 #ifndef __ARCH_HAS_4LEVEL_HACK 2700 if (pud_present(*pud)) /* Another has populated it */ 2701 pmd_free(mm, new); 2702 else 2703 pud_populate(mm, pud, new); 2704 #else 2705 if (pgd_present(*pud)) /* Another has populated it */ 2706 pmd_free(mm, new); 2707 else 2708 pgd_populate(mm, pud, new); 2709 #endif /* __ARCH_HAS_4LEVEL_HACK */ 2710 spin_unlock(&mm->page_table_lock); 2711 return 0; 2712 } 2713 #endif /* __PAGETABLE_PMD_FOLDED */ 2714 2715 int make_pages_present(unsigned long addr, unsigned long end) 2716 { 2717 int ret, len, write; 2718 struct vm_area_struct * vma; 2719 2720 vma = find_vma(current->mm, addr); 2721 if (!vma) 2722 return -1; 2723 write = (vma->vm_flags & VM_WRITE) != 0; 2724 BUG_ON(addr >= end); 2725 BUG_ON(end > vma->vm_end); 2726 len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; 2727 ret = get_user_pages(current, current->mm, addr, 2728 len, write, 0, NULL, NULL); 2729 if (ret < 0) 2730 return ret; 2731 return ret == len ? 0 : -1; 2732 } 2733 2734 #if !defined(__HAVE_ARCH_GATE_AREA) 2735 2736 #if defined(AT_SYSINFO_EHDR) 2737 static struct vm_area_struct gate_vma; 2738 2739 static int __init gate_vma_init(void) 2740 { 2741 gate_vma.vm_mm = NULL; 2742 gate_vma.vm_start = FIXADDR_USER_START; 2743 gate_vma.vm_end = FIXADDR_USER_END; 2744 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 2745 gate_vma.vm_page_prot = __P101; 2746 /* 2747 * Make sure the vDSO gets into every core dump. 2748 * Dumping its contents makes post-mortem fully interpretable later 2749 * without matching up the same kernel and hardware config to see 2750 * what PC values meant. 2751 */ 2752 gate_vma.vm_flags |= VM_ALWAYSDUMP; 2753 return 0; 2754 } 2755 __initcall(gate_vma_init); 2756 #endif 2757 2758 struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 2759 { 2760 #ifdef AT_SYSINFO_EHDR 2761 return &gate_vma; 2762 #else 2763 return NULL; 2764 #endif 2765 } 2766 2767 int in_gate_area_no_task(unsigned long addr) 2768 { 2769 #ifdef AT_SYSINFO_EHDR 2770 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 2771 return 1; 2772 #endif 2773 return 0; 2774 } 2775 2776 #endif /* __HAVE_ARCH_GATE_AREA */ 2777 2778 #ifdef CONFIG_HAVE_IOREMAP_PROT 2779 static resource_size_t follow_phys(struct vm_area_struct *vma, 2780 unsigned long address, unsigned int flags, 2781 unsigned long *prot) 2782 { 2783 pgd_t *pgd; 2784 pud_t *pud; 2785 pmd_t *pmd; 2786 pte_t *ptep, pte; 2787 spinlock_t *ptl; 2788 resource_size_t phys_addr = 0; 2789 struct mm_struct *mm = vma->vm_mm; 2790 2791 VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); 2792 2793 pgd = pgd_offset(mm, address); 2794 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 2795 goto no_page_table; 2796 2797 pud = pud_offset(pgd, address); 2798 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 2799 goto no_page_table; 2800 2801 pmd = pmd_offset(pud, address); 2802 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 2803 goto no_page_table; 2804 2805 /* We cannot handle huge page PFN maps. Luckily they don't exist. 
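* (Per the VM_BUG_ON above, this function only runs on VM_IO or
* VM_PFNMAP vmas, and hugetlbfs mappings are never of that kind,
* so bailing out on pmd_huge() below loses nothing in practice.)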
*/ 2806 if (pmd_huge(*pmd)) 2807 goto no_page_table; 2808 2809 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 2810 if (!ptep) 2811 goto out; 2812 2813 pte = *ptep; 2814 if (!pte_present(pte)) 2815 goto unlock; 2816 if ((flags & FOLL_WRITE) && !pte_write(pte)) 2817 goto unlock; 2818 phys_addr = pte_pfn(pte); 2819 phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ 2820 2821 *prot = pgprot_val(pte_pgprot(pte)); 2822 2823 unlock: 2824 pte_unmap_unlock(ptep, ptl); 2825 out: 2826 return phys_addr; 2827 no_page_table: 2828 return 0; 2829 } 2830 2831 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 2832 void *buf, int len, int write) 2833 { 2834 resource_size_t phys_addr; 2835 unsigned long prot = 0; 2836 void *maddr; 2837 int offset = addr & (PAGE_SIZE-1); 2838 2839 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 2840 return -EINVAL; 2841 2842 phys_addr = follow_phys(vma, addr, write, &prot); 2843 2844 if (!phys_addr) 2845 return -EINVAL; 2846 2847 maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); 2848 if (write) 2849 memcpy_toio(maddr + offset, buf, len); 2850 else 2851 memcpy_fromio(buf, maddr + offset, len); 2852 iounmap(maddr); 2853 2854 return len; 2855 } 2856 #endif 2857 2858 /* 2859 * Access another process' address space. 2860 * Source/target buffer must be kernel space. 2861 * Do not walk the page table directly; use get_user_pages(). 2862 */ 2863 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 2864 { 2865 struct mm_struct *mm; 2866 struct vm_area_struct *vma; 2867 void *old_buf = buf; 2868 2869 mm = get_task_mm(tsk); 2870 if (!mm) 2871 return 0; 2872 2873 down_read(&mm->mmap_sem); 2874 /* ignore errors, just check how much was successfully transferred */ 2875 while (len) { 2876 int bytes, ret, offset; 2877 void *maddr; 2878 struct page *page = NULL; 2879 2880 ret = get_user_pages(tsk, mm, addr, 1, 2881 write, 1, &page, &vma); 2882 if (ret <= 0) { 2883 /* 2884 * Check if this is a VM_IO | VM_PFNMAP VMA, which 2885 * we can access using slightly different code. 2886 */ 2887 #ifdef CONFIG_HAVE_IOREMAP_PROT 2888 vma = find_vma(mm, addr); 2889 if (!vma) 2890 break; 2891 if (vma->vm_ops && vma->vm_ops->access) 2892 ret = vma->vm_ops->access(vma, addr, buf, 2893 len, write); 2894 if (ret <= 0) 2895 #endif 2896 break; 2897 bytes = ret; 2898 } else { 2899 bytes = len; 2900 offset = addr & (PAGE_SIZE-1); 2901 if (bytes > PAGE_SIZE-offset) 2902 bytes = PAGE_SIZE-offset; 2903 2904 maddr = kmap(page); 2905 if (write) { 2906 copy_to_user_page(vma, page, addr, 2907 maddr + offset, buf, bytes); 2908 set_page_dirty_lock(page); 2909 } else { 2910 copy_from_user_page(vma, page, addr, 2911 buf, maddr + offset, bytes); 2912 } 2913 kunmap(page); 2914 page_cache_release(page); 2915 } 2916 len -= bytes; 2917 buf += bytes; 2918 addr += bytes; 2919 } 2920 up_read(&mm->mmap_sem); 2921 mmput(mm); 2922 2923 return buf - old_buf; 2924 } 2925 2926 /* 2927 * Print the name of a VMA.
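*
* Given the printk format used below, the output looks like
* (illustrative example): prefix, then the basename of the backing
* file, then the vma start and length in hex, e.g.
*
*	<prefix>bash[400000+a0000]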
2928 */ 2929 void print_vma_addr(char *prefix, unsigned long ip) 2930 { 2931 struct mm_struct *mm = current->mm; 2932 struct vm_area_struct *vma; 2933 2934 /* 2935 * Do not print if we are in atomic 2936 * contexts (in exception stacks, etc.): 2937 */ 2938 if (preempt_count()) 2939 return; 2940 2941 down_read(&mm->mmap_sem); 2942 vma = find_vma(mm, ip); 2943 if (vma && vma->vm_file) { 2944 struct file *f = vma->vm_file; 2945 char *buf = (char *)__get_free_page(GFP_KERNEL); 2946 if (buf) { 2947 char *p, *s; 2948 2949 p = d_path(&f->f_path, buf, PAGE_SIZE); 2950 if (IS_ERR(p)) 2951 p = "?"; 2952 s = strrchr(p, '/'); 2953 if (s) 2954 p = s+1; 2955 printk("%s%s[%lx+%lx]", prefix, p, 2956 vma->vm_start, 2957 vma->vm_end - vma->vm_start); 2958 free_page((unsigned long)buf); 2959 } 2960 } 2961 up_read(&current->mm->mmap_sem); 2962 } 2963