// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void *high_memory;
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
	/*
	 * Transitioning a PTE from 'old' to 'young' can be expensive on
	 * some architectures, even if it's performed in hardware. By
	 * default, "false" means prefaulted entries will be 'young'.
	 */
	return false;
}
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);

unsigned long highest_memmap_pfn __read_mostly;

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
early_initcall(init_zero_pfn);

void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
{
	trace_rss_stat(mm, member, count);
}

#if defined(SPLIT_RSS_COUNTING)

void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */
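
/*
 * Illustrative summary of the scheme above (derived from the code, not
 * part of the original file): with SPLIT_RSS_COUNTING, each task
 * accumulates RSS deltas in task->rss_stat and folds them into the
 * shared mm counters roughly once per TASK_RSS_EVENTS_THRESH (64) page
 * faults via sync_mm_rss(), trading momentarily stale counters for less
 * cache-line contention on large SMP machines.
 */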

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= P4D_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= P4D_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	p4d_t *p4d;
	unsigned long next;
	unsigned long start;

	start = addr;
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	p4d = p4d_offset(pgd, start);
	pgd_clear(pgd);
	p4d_free_tlb(tlb, p4d, start);
}
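
/*
 * Illustrative example for the floor/ceiling checks above (a sketch
 * with assumed addresses): when unmapping a vma that ends at
 * 0x7f1200000000 while a neighbouring vma still starts at
 * 0x7f1200001000, the caller passes that neighbour's start as
 * "ceiling". After rounding "start" down to the table boundary, the
 * "start < floor" and "end - 1 > ceiling - 1" tests notice that the
 * surviving neighbour may still be using the same page-table page and
 * refuse to free it; the "- 1" form makes a ceiling of 0 (meaning "top
 * of address space") compare as the largest possible value.
 */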

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE
	 * (see pte_free_tlb()), so flush the tlb if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	}
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
		 */
		smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
		pmd_populate(mm, pmd, *pte);
		*pte = NULL;
	}
	spin_unlock(ptl);
}
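
/*
 * Sketch of the publication ordering pmd_install() relies on (an
 * illustration, not code from this file):
 *
 *	CPU 0 (pmd_install)		CPU 1 (lockless page-table walk)
 *	initialize pte page
 *	smp_wmb()
 *	pmd_populate()			observes the populated pmd
 *					dereferences into the pte page
 *
 * CPU 1's second access depends on the value it loaded from the pmd,
 * so every architecture except alpha orders the two loads for free; see
 * the comment in pmd_install() above.
 */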

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		smp_wmb(); /* See comment in pmd_install() */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
		 current->comm,
		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
		 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->read_folio : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (is_zero_pfn(pfn))
			return NULL;
		if (pte_devmap(pte))
			/*
			 * NOTE: New users of ZONE_DEVICE will not set pte_devmap()
			 * and will have refcounts incremented on their struct pages
			 * when they are inserted into PTEs, thus they are safe to
			 * return here. Legacy ZONE_DEVICE pages that set pte_devmap()
			 * do not have refcounts. Example of legacy ZONE_DEVICE is
			 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers.
			 */
			return NULL;

		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;

check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
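
/*
 * Worked example of the remap_pfn_range() linearity rule above, with
 * assumed illustrative values: a vma with vm_start = 0x7f0000000000 and
 * vm_pgoff = 0x100 maps pfn 0x100 at vm_start, pfn 0x101 one page up,
 * and so on. Any pte still satisfying
 *
 *	pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * is treated as special (no struct page), while a COWed replacement
 * page occupies a freshly allocated pfn, breaks the identity, and is
 * therefore recognized as a normal page.
 */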

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd)
{
	unsigned long pfn = pmd_pfn(pmd);

	/*
	 * There is no pmd_special() but there may be special pmds, e.g.
	 * in a direct-access (dax) mapping, so let's just replicate the
	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
	 */
	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (pmd_devmap(pmd))
		return NULL;
	if (is_huge_zero_pmd(pmd))
		return NULL;
	if (unlikely(pfn > highest_memmap_pfn))
		return NULL;

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
#endif

static void restore_exclusive_pte(struct vm_area_struct *vma,
				  struct page *page, unsigned long address,
				  pte_t *ptep)
{
	pte_t pte;
	swp_entry_t entry;

	pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);

	entry = pte_to_swp_entry(*ptep);
	if (pte_swp_uffd_wp(*ptep))
		pte = pte_mkuffd_wp(pte);
	else if (is_writable_device_exclusive_entry(entry))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));

	/*
	 * No need to take a page reference as one was already
	 * created when the swap entry was made.
	 */
	if (PageAnon(page))
		page_add_anon_rmap(page, vma, address, RMAP_NONE);
	else
		/*
		 * Currently device exclusive access only supports anonymous
		 * memory so the entry shouldn't point to a filebacked page.
		 */
		WARN_ON_ONCE(1);

	set_pte_at(vma->vm_mm, address, ptep, pte);

	/*
	 * No need to invalidate - it was non-present before. However
	 * secondary CPUs may have mappings that need invalidating.
	 */
	update_mmu_cache(vma, address, ptep);
}

/*
 * Tries to restore an exclusive pte if the page lock can be acquired without
 * sleeping.
 */
static int
try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
			unsigned long addr)
{
	swp_entry_t entry = pte_to_swp_entry(*src_pte);
	struct page *page = pfn_swap_entry_to_page(entry);

	if (trylock_page(page)) {
		restore_exclusive_pte(vma, page, addr, src_pte);
		unlock_page(page);
		return 0;
	}

	return -EBUSY;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(*src_pte)) {
			pte = pte_swp_clear_exclusive(*src_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		/* Cannot fail as these pages cannot get pinned. */
		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		/*
		 * Copying the pgtable here should only happen because
		 * dst_vma has uffd-wp enabled; do a sanity check.
		 */
		WARN_ON_ONCE(!userfaultfd_wp(dst_vma));
		set_pte_at(dst_mm, addr, dst_pte, pte);
		return 0;
	}
	if (!userfaultfd_wp(dst_vma))
		pte = pte_swp_clear_uffd_wp(pte);
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

/*
 * Copy a present and normal page.
 *
 * NOTE! The usual case is that this isn't required;
 * instead, the caller can just increase the page refcount
 * and re-use the pte the traditional way.
 *
 * And if we need a pre-allocated page but don't yet have
 * one, return a negative error to let the preallocation
 * code know so that it can do so outside the page table
 * lock.
 */
static inline int
copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		  struct page **prealloc, struct page *page)
{
	struct page *new_page;
	pte_t pte;

	new_page = *prealloc;
	if (!new_page)
		return -EAGAIN;

	/*
	 * We have a prealloc page, all good!  Take it
	 * over and copy the page & arm it.
	 */
	*prealloc = NULL;
	copy_user_highpage(new_page, page, addr, src_vma);
	__SetPageUptodate(new_page);
	page_add_new_anon_rmap(new_page, dst_vma, addr);
	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
	rss[mm_counter(new_page)]++;

	/* All done, just insert the new page copy in the child */
	pte = mk_pte(new_page, dst_vma->vm_page_prot);
	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
	if (userfaultfd_pte_wp(dst_vma, *src_pte))
		/* Uffd-wp needs to be delivered to dest pte as well */
		pte = pte_wrprotect(pte_mkuffd_wp(pte));
	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}
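
/*
 * Illustrative scenario for the eager copy below (a sketch, not from
 * the original file): if the parent has pinned an anonymous page for
 * DMA (e.g. via pin_user_pages()) and then forks, merely sharing the
 * page read-only could later let COW hand the parent a different page
 * while the device keeps writing to the old one. So when
 * page_try_dup_anon_rmap() reports the page as possibly pinned, the
 * child gets its own copy immediately instead of sharing the pte.
 */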

/*
 * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 * is required to copy this pte.
 */
static inline int
copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
		 struct page **prealloc)
{
	struct mm_struct *src_mm = src_vma->vm_mm;
	unsigned long vm_flags = src_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	page = vm_normal_page(src_vma, addr, pte);
	if (page && PageAnon(page)) {
		/*
		 * If this page may have been pinned by the parent process,
		 * copy the page immediately for the child so that we'll always
		 * guarantee the pinned page won't be randomly replaced in the
		 * future.
		 */
		get_page(page);
		if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
			/* Page may be pinned, we have to copy. */
			put_page(page);
			return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
						 addr, rss, prealloc, page);
		}
		rss[mm_counter(page)]++;
	} else if (page) {
		get_page(page);
		page_dup_file_rmap(page, false);
		rss[mm_counter(page)]++;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}
	VM_BUG_ON(page && PageAnon(page) && PageAnonExclusive(page));

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	if (!userfaultfd_wp(dst_vma))
		pte = pte_clear_uffd_wp(pte);

	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
	return 0;
}

static inline struct page *
page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
		   unsigned long addr)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
	if (!new_page)
		return NULL;

	if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) {
		put_page(new_page);
		return NULL;
	}
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	return new_page;
}

static int
copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress, ret = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};
	struct page *prealloc = NULL;

again:
	progress = 0;
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte) {
		ret = -ENOMEM;
		goto out;
	}
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		if (unlikely(!pte_present(*src_pte))) {
			ret = copy_nonpresent_pte(dst_mm, src_mm,
						  dst_pte, src_pte,
						  dst_vma, src_vma,
						  addr, rss);
			if (ret == -EIO) {
				entry = pte_to_swp_entry(*src_pte);
				break;
			} else if (ret == -EBUSY) {
				break;
			} else if (!ret) {
				progress += 8;
				continue;
			}

			/*
			 * Device exclusive entry restored, continue by copying
			 * the now present pte.
			 */
			WARN_ON_ONCE(ret != -ENOENT);
		}
		/* copy_present_pte() will clear `*prealloc' if consumed */
		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
				       addr, rss, &prealloc);
		/*
		 * If we need a pre-allocated page for this pte, drop the
		 * locks, allocate, and try again.
		 */
		if (unlikely(ret == -EAGAIN))
			break;
		if (unlikely(prealloc)) {
			/*
			 * pre-alloc page cannot be reused by next time so as
			 * to strictly follow mempolicy (e.g., alloc_page_vma()
			 * will allocate page according to address).  This
			 * could only happen if one pinned pte changed.
			 */
			put_page(prealloc);
			prealloc = NULL;
		}
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (ret == -EIO) {
		VM_WARN_ON_ONCE(!entry.val);
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
			ret = -ENOMEM;
			goto out;
		}
		entry.val = 0;
	} else if (ret == -EBUSY) {
		goto out;
	} else if (ret == -EAGAIN) {
		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
		if (!prealloc)
			return -ENOMEM;
	} else if (ret) {
		VM_WARN_ON_ONCE(1);
	}

	/* We've captured and resolved the error. Reset, try again. */
	ret = 0;

	if (addr != end)
		goto again;
out:
	if (unlikely(prealloc))
		put_page(prealloc);
	return ret;
}

static inline int
copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
			|| pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
					    addr, dst_vma, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
				   addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int
copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
			int err;

			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
			err = copy_huge_pud(dst_mm, src_mm,
					    dst_pud, src_pud, addr, src_vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
				   addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork().  Return
 * false when we can speed up fork() by allowing lazy page faults later until
 * when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem). Because when uffd-wp is enabled, pgtable
	 * contains uffd-wp protection information, that's something we can't
	 * retrieve from page cache, and skipping the copy would lose that
	 * information.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.  Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}
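
/*
 * Illustrative examples for vma_needs_copy() (a sketch): a large
 * read-only mapping of a shared library has no anon_vma and can be
 * skipped at fork(), letting the child fill it by faulting later; an
 * anonymous mapping the parent has written to (anon_vma set), a
 * VM_PFNMAP/VM_MIXEDMAP mapping, or a uffd-wp armed vma must be copied
 * eagerly because a later fault could not reconstruct those ptes.
 */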

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from higher level routine
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
		 */
		mmap_assert_write_locked(src_mm);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
					    addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}
	return ret;
}

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct folio *single_folio;	/* Locked folio to be unmapped */
	bool even_cows;			/* Zap COWed private pages too? */
	zap_flags_t zap_flags;		/* Extra flags for zapping */
};

/* Whether we should zap all COWed (private) pages too */
static inline bool should_zap_cows(struct zap_details *details)
{
	/* By default, zap all pages */
	if (!details)
		return true;

	/* Or, we zap COWed pages only if the caller wants to */
	return details->even_cows;
}

/* Decides whether we should zap this page with the page pointer specified */
static inline bool should_zap_page(struct zap_details *details, struct page *page)
{
	/* If we can make a decision without *page.. */
	if (should_zap_cows(details))
		return true;

	/* E.g. the caller passes NULL for the case of a zero page */
	if (!page)
		return true;

	/* Otherwise we should only zap non-anon pages */
	return !PageAnon(page);
}

static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
{
	if (!details)
		return false;

	return details->zap_flags & ZAP_FLAG_DROP_MARKER;
}
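
/*
 * Example of how callers drive zap_details (a sketch, not from the
 * original file): file truncation leaves even_cows false so a process's
 * private COWed copies of the file's pages survive, while unmap_vmas()
 * below sets even_cows = true (plus ZAP_FLAG_DROP_MARKER) to tear the
 * whole range down at exit or munmap() time.
 */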

/*
 * This function makes sure that we'll replace the none pte with an uffd-wp
 * swap special pte marker when necessary. Must be called with the pgtable
 * lock held.
 */
static inline void
zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *pte,
			      struct zap_details *details, pte_t pteval)
{
	if (zap_drop_file_uffd_wp(details))
		return;

	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
}

static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	int rss[NR_MM_COUNTERS];
	spinlock_t *ptl;
	pte_t *start_pte;
	pte_t *pte;
	swp_entry_t entry;

	tlb_change_page_size(tlb, PAGE_SIZE);
again:
	init_rss_vec(rss);
	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	pte = start_pte;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		struct page *page;

		if (pte_none(ptent))
			continue;

		if (need_resched())
			break;

		if (pte_present(ptent)) {
			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(!should_zap_page(details, page)))
				continue;
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			zap_install_uffd_wp_if_needed(vma, addr, pte, details,
						      ptent);
			if (unlikely(!page))
				continue;

			if (!PageAnon(page)) {
				if (pte_dirty(ptent)) {
					force_flush = 1;
					set_page_dirty(page);
				}
				if (pte_young(ptent) &&
				    likely(!(vma->vm_flags & VM_SEQ_READ)))
					mark_page_accessed(page);
			}
			rss[mm_counter(page)]--;
			page_remove_rmap(page, vma, false);
			if (unlikely(page_mapcount(page) < 0))
				print_bad_pte(vma, addr, ptent, page);
			if (unlikely(__tlb_remove_page(tlb, page))) {
				force_flush = 1;
				addr += PAGE_SIZE;
				break;
			}
			continue;
		}

		entry = pte_to_swp_entry(ptent);
		if (is_device_private_entry(entry) ||
		    is_device_exclusive_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			if (unlikely(!should_zap_page(details, page)))
				continue;
			/*
			 * Both device private/exclusive mappings should only
			 * work with anonymous pages so far, so we don't need
			 * to consider the uffd-wp bit when zapping. For more
			 * information, see zap_install_uffd_wp_if_needed().
			 */
			WARN_ON_ONCE(!vma_is_anonymous(vma));
			rss[mm_counter(page)]--;
			if (is_device_private_entry(entry))
				page_remove_rmap(page, vma, false);
			put_page(page);
		} else if (!non_swap_entry(entry)) {
			/* Genuine swap entry, hence a private anon page */
			if (!should_zap_cows(details))
				continue;
			rss[MM_SWAPENTS]--;
			if (unlikely(!free_swap_and_cache(entry)))
				print_bad_pte(vma, addr, ptent, NULL);
		} else if (is_migration_entry(entry)) {
			page = pfn_swap_entry_to_page(entry);
			if (!should_zap_page(details, page))
				continue;
			rss[mm_counter(page)]--;
		} else if (pte_marker_entry_uffd_wp(entry)) {
			/* Only drop the uffd-wp marker if explicitly requested */
			if (!zap_drop_file_uffd_wp(details))
				continue;
		} else if (is_hwpoison_entry(entry) ||
			   is_swapin_error_entry(entry)) {
			if (!should_zap_cows(details))
				continue;
		} else {
			/* We should have covered all the swap entry types */
			WARN_ON_ONCE(1);
		}
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
		zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
	} while (pte++, addr += PAGE_SIZE, addr != end);

	add_mm_rss_vec(mm, rss);
	arch_leave_lazy_mmu_mode();

	/* Do the actual TLB flush before dropping ptl */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
	pte_unmap_unlock(start_pte, ptl);

	/*
	 * If we forced a TLB flush (either due to running out of
	 * batch buffers or because we needed to flush dirty TLB
	 * entries before releasing the ptl), free the batched
	 * memory too. Restart if we didn't do everything.
	 */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
	}

	if (addr != end) {
		cond_resched();
		goto again;
	}

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else if (zap_huge_pmd(tlb, vma, pmd, addr))
				goto next;
			/* fall through */
		} else if (details && details->single_folio &&
			   folio_test_pmd_mappable(details->single_folio) &&
			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
			/*
			 * Take and drop THP pmd lock so that we cannot return
			 * prematurely, while zap_huge_pmd() has cleared *pmd,
			 * but not yet decremented compound_mapcount().
			 */
			spin_unlock(ptl);
		}

		/*
		 * Here there can be other concurrent MADV_DONTNEED or
		 * trans huge page faults running, and if the pmd is
		 * none or trans huge it can change under us. This is
		 * because MADV_DONTNEED holds the mmap_lock in read
		 * mode.
		 */
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto next;
		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, p4d_t *p4d,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
			if (next - addr != HPAGE_PUD_SIZE) {
				mmap_assert_locked(tlb->mm);
				split_huge_pud(vma, pud, addr);
			} else if (zap_huge_pud(tlb, vma, pud, addr))
				goto next;
			/* fall through */
		}
		if (pud_none_or_clear_bad(pud))
			continue;
		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
next:
		cond_resched();
	} while (pud++, addr = next, addr != end);

	return addr;
}

static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
	} while (p4d++, addr = next, addr != end);

	return addr;
}

void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
	} while (pgd++, addr = next, addr != end);
	tlb_end_vma(tlb, vma);
}


static void unmap_single_vma(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr,
		struct zap_details *details)
{
	unsigned long start = max(vma->vm_start, start_addr);
	unsigned long end;

	if (start >= vma->vm_end)
		return;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return;

	if (vma->vm_file)
		uprobe_munmap(vma, start, end);

	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn(vma, 0, 0);

	if (start != end) {
		if (unlikely(is_vm_hugetlb_page(vma))) {
			/*
			 * It is undesirable to test vma->vm_file as it
			 * should be non-null for valid hugetlb area.
			 * However, vm_file will be NULL in the error
			 * cleanup path of mmap_region. When
			 * hugetlbfs ->mmap method fails,
			 * mmap_region() nullifies vma->vm_file
			 * before calling this function to clean up.
			 * Since no pte has actually been setup, it is
			 * safe to do nothing in this case.
			 */
			if (vma->vm_file) {
				zap_flags_t zap_flags = details ?
					details->zap_flags : 0;
				i_mmap_lock_write(vma->vm_file->f_mapping);
				__unmap_hugepage_range_final(tlb, vma, start, end,
							     NULL, zap_flags);
				i_mmap_unlock_write(vma->vm_file->f_mapping);
			}
		} else
			unmap_page_range(tlb, vma, start, end, details);
	}
}

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlb: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 *
 * Unmap all pages in the vma list.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
void unmap_vmas(struct mmu_gather *tlb,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr)
{
	struct mmu_notifier_range range;
	struct zap_details details = {
		.zap_flags = ZAP_FLAG_DROP_MARKER,
		/* Careful - we need to zap private pages too! */
		.even_cows = true,
	};

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				start_addr, end_addr);
	mmu_notifier_invalidate_range_start(&range);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
	mmu_notifier_invalidate_range_end(&range);
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @start: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * Caller must protect the VMA list
 */
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
		    unsigned long size)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	lru_add_drain();
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, start + size);
	tlb_gather_mmu(&tlb, vma->vm_mm);
	update_hiwater_rss(vma->vm_mm);
	mmu_notifier_invalidate_range_start(&range);
	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
		unmap_single_vma(&tlb, vma, start, range.end, NULL);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}
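
/*
 * Illustrative caller (a sketch, not from this file): the
 * madvise(MADV_DONTNEED) path drops a range's ptes with a call of the
 * form
 *
 *	zap_page_range(vma, start, end - start);
 *
 * after which touching the range again simply refaults fresh pages.
 */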

/**
 * zap_page_range_single - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of shared cache invalidation
 *
 * The range must fit into one VMA.
 */
static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	lru_add_drain();
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, address + size);
	tlb_gather_mmu(&tlb, vma->vm_mm);
	update_hiwater_rss(vma->vm_mm);
	mmu_notifier_invalidate_range_start(&range);
	unmap_single_vma(&tlb, vma, address, range.end, details);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 */
void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (!range_in_vma(vma, address, address + size) ||
	    !(vma->vm_flags & VM_PFNMAP))
		return;

	zap_page_range_single(vma, address, size, NULL);
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
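
/*
 * Example usage of zap_vma_ptes() (illustrative; the scenario is
 * hypothetical): a driver that set up a VM_PFNMAP vma with
 * remap_pfn_range() can revoke user access to the whole window before
 * reprogramming its hardware:
 *
 *	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 *
 * Subsequent user accesses then fault and can be refused or remapped
 * by the driver's fault handler.
 */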

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	return pmd;
}

pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pmd_t *pmd = walk_to_pmd(mm, addr);

	if (!pmd)
		return NULL;
	return pte_alloc_map_lock(mm, pmd, addr, ptl);
}

static int validate_page_before_insert(struct page *page)
{
	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
		return -EINVAL;
	flush_dcache_page(page);
	return 0;
}

static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	if (!pte_none(*pte))
		return -EBUSY;
	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
	page_add_file_rmap(page, vma, false);
	set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
	return 0;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = validate_page_before_insert(page);
	if (retval)
		goto out;
	retval = -ENOMEM;
	pte = get_locked_pte(vma->vm_mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

#ifdef pte_index
static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
			unsigned long addr, struct page *page, pgprot_t prot)
{
	int err;

	if (!page_count(page))
		return -EINVAL;
	err = validate_page_before_insert(page);
	if (err)
		return err;
	return insert_page_into_pte_locked(vma, pte, addr, page, prot);
}

/* insert_pages() amortizes the cost of spinlock operations
 * when inserting pages in a loop. Arch *must* define pte_index.
 */
static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
			struct page **pages, unsigned long *num, pgprot_t prot)
{
	pmd_t *pmd = NULL;
	pte_t *start_pte, *pte;
	spinlock_t *pte_lock;
	struct mm_struct *const mm = vma->vm_mm;
	unsigned long curr_page_idx = 0;
	unsigned long remaining_pages_total = *num;
	unsigned long pages_to_write_in_pmd;
	int ret;
more:
	ret = -EFAULT;
	pmd = walk_to_pmd(mm, addr);
	if (!pmd)
		goto out;

	pages_to_write_in_pmd = min_t(unsigned long,
		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));

	/* Allocate the PTE if necessary; takes PMD lock once only. */
	ret = -ENOMEM;
	if (pte_alloc(mm, pmd))
		goto out;

	while (pages_to_write_in_pmd) {
		int pte_idx = 0;
		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);

		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
			int err = insert_page_in_batch_locked(vma, pte,
				addr, pages[curr_page_idx], prot);
			if (unlikely(err)) {
				pte_unmap_unlock(start_pte, pte_lock);
				ret = err;
				remaining_pages_total -= pte_idx;
				goto out;
			}
			addr += PAGE_SIZE;
			++curr_page_idx;
		}
		pte_unmap_unlock(start_pte, pte_lock);
		pages_to_write_in_pmd -= batch_size;
		remaining_pages_total -= batch_size;
	}
	if (remaining_pages_total)
		goto more;
	ret = 0;
out:
	*num = remaining_pages_total;
	return ret;
}
#endif /* ifdef pte_index */

/**
 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
 * @vma: user vma to map to
 * @addr: target start user address of these pages
 * @pages: source kernel pages
 * @num: in: number of pages to map. out: number of pages that were *not*
 * mapped. (0 means all pages were successfully mapped).
 *
 * Preferred over vm_insert_page() when inserting multiple pages.
 *
 * In case of error, we may have mapped a subset of the provided
 * pages. It is the caller's responsibility to account for this case.
 *
 * The same restrictions apply as in vm_insert_page().
1965 */ 1966 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 1967 struct page **pages, unsigned long *num) 1968 { 1969 #ifdef pte_index 1970 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 1971 1972 if (addr < vma->vm_start || end_addr >= vma->vm_end) 1973 return -EFAULT; 1974 if (!(vma->vm_flags & VM_MIXEDMAP)) { 1975 BUG_ON(mmap_read_trylock(vma->vm_mm)); 1976 BUG_ON(vma->vm_flags & VM_PFNMAP); 1977 vma->vm_flags |= VM_MIXEDMAP; 1978 } 1979 /* Defer page refcount checking till we're about to map that page. */ 1980 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 1981 #else 1982 unsigned long idx = 0, pgcount = *num; 1983 int err = -EINVAL; 1984 1985 for (; idx < pgcount; ++idx) { 1986 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); 1987 if (err) 1988 break; 1989 } 1990 *num = pgcount - idx; 1991 return err; 1992 #endif /* ifdef pte_index */ 1993 } 1994 EXPORT_SYMBOL(vm_insert_pages); 1995 1996 /** 1997 * vm_insert_page - insert single page into user vma 1998 * @vma: user vma to map to 1999 * @addr: target user address of this page 2000 * @page: source kernel page 2001 * 2002 * This allows drivers to insert individual pages they've allocated 2003 * into a user vma. 2004 * 2005 * The page has to be a nice clean _individual_ kernel allocation. 2006 * If you allocate a compound page, you need to have marked it as 2007 * such (__GFP_COMP), or manually just split the page up yourself 2008 * (see split_page()). 2009 * 2010 * NOTE! Traditionally this was done with "remap_pfn_range()" which 2011 * took an arbitrary page protection parameter. This doesn't allow 2012 * that. Your vma protection will have to be set up correctly, which 2013 * means that if you want a shared writable mapping, you'd better 2014 * ask for a shared writable mapping! 2015 * 2016 * The page does not need to be reserved. 2017 * 2018 * Usually this function is called from f_op->mmap() handler 2019 * under mm->mmap_lock write-lock, so it can change vma->vm_flags. 2020 * Caller must set VM_MIXEDMAP on vma if it wants to call this 2021 * function from other places, for example from page-fault handler. 2022 * 2023 * Return: %0 on success, negative error code otherwise. 2024 */ 2025 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 2026 struct page *page) 2027 { 2028 if (addr < vma->vm_start || addr >= vma->vm_end) 2029 return -EFAULT; 2030 if (!page_count(page)) 2031 return -EINVAL; 2032 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2033 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2034 BUG_ON(vma->vm_flags & VM_PFNMAP); 2035 vma->vm_flags |= VM_MIXEDMAP; 2036 } 2037 return insert_page(vma, addr, page, vma->vm_page_prot); 2038 } 2039 EXPORT_SYMBOL(vm_insert_page); 2040 2041 /* 2042 * __vm_map_pages - maps range of kernel pages into user vma 2043 * @vma: user vma to map to 2044 * @pages: pointer to array of source kernel pages 2045 * @num: number of pages in page array 2046 * @offset: user's requested vm_pgoff 2047 * 2048 * This allows drivers to map range of kernel pages into a user vma. 2049 * 2050 * Return: 0 on success and error code otherwise. 
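 *
 * A worked example of the checks below (illustrative numbers only):
 * with @num = 8 and @offset = 2, a vma spanning up to 6 pages is
 * accepted and maps pages[2]..pages[2 + vma_pages(vma) - 1]; anything
 * larger fails with -ENXIO.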
 */
static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num, unsigned long offset)
{
	unsigned long count = vma_pages(vma);
	unsigned long uaddr = vma->vm_start;
	int ret, i;

	/* Fail if the user requested offset is beyond the end of the object */
	if (offset >= num)
		return -ENXIO;

	/* Fail if the user requested size exceeds available object size */
	if (count > num - offset)
		return -ENXIO;

	for (i = 0; i < count; i++) {
		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
		if (ret < 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	return 0;
}

/**
 * vm_map_pages - map a range of kernel pages, starting at a non-zero offset
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 *
 * Maps an object consisting of @num pages, catering for the user's
 * requested vm_pgoff.
 *
 * If we fail to insert any page into the vma, the function will return
 * immediately leaving any previously inserted pages present. Callers
 * from the mmap handler may immediately return the error as their caller
 * will destroy the vma, removing any successfully inserted pages. Other
 * callers should make their own arrangements for calling unmap_region().
 *
 * Context: Process context. Called by mmap handlers.
 * Return: 0 on success and error code otherwise.
 */
int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
				unsigned long num)
{
	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
}
EXPORT_SYMBOL(vm_map_pages);

/**
 * vm_map_pages_zero - map a range of kernel pages, starting at a zero offset
 * @vma: user vma to map to
 * @pages: pointer to array of source kernel pages
 * @num: number of pages in page array
 *
 * Similar to vm_map_pages(), except that it explicitly sets the offset
 * to 0. This function is intended for drivers that do not consider
 * vm_pgoff.
 *
 * Context: Process context. Called by mmap handlers.
 * Return: 0 on success and error code otherwise.
 */
int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num)
{
	return __vm_map_pages(vma, pages, num, 0);
}
EXPORT_SYMBOL(vm_map_pages_zero);

static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn, pgprot_t prot, bool mkwrite)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, entry;
	spinlock_t *ptl;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		return VM_FAULT_OOM;
	if (!pte_none(*pte)) {
		if (mkwrite) {
			/*
			 * For read faults on private mappings the PFN passed
			 * in may not match the PFN we have mapped if the
			 * mapped PFN is a writeable COW page. In the mkwrite
			 * case we are creating a writable PTE for a shared
			 * mapping and we expect the PFNs to match. If they
			 * don't match, we are likely racing with block
			 * allocation and mapping invalidation so just skip the
			 * update.
2143 */ 2144 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) { 2145 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); 2146 goto out_unlock; 2147 } 2148 entry = pte_mkyoung(*pte); 2149 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2150 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) 2151 update_mmu_cache(vma, addr, pte); 2152 } 2153 goto out_unlock; 2154 } 2155 2156 /* Ok, finally just insert the thing.. */ 2157 if (pfn_t_devmap(pfn)) 2158 entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 2159 else 2160 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 2161 2162 if (mkwrite) { 2163 entry = pte_mkyoung(entry); 2164 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2165 } 2166 2167 set_pte_at(mm, addr, pte, entry); 2168 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ 2169 2170 out_unlock: 2171 pte_unmap_unlock(pte, ptl); 2172 return VM_FAULT_NOPAGE; 2173 } 2174 2175 /** 2176 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot 2177 * @vma: user vma to map to 2178 * @addr: target user address of this page 2179 * @pfn: source kernel pfn 2180 * @pgprot: pgprot flags for the inserted page 2181 * 2182 * This is exactly like vmf_insert_pfn(), except that it allows drivers 2183 * to override pgprot on a per-page basis. 2184 * 2185 * This only makes sense for IO mappings, and it makes no sense for 2186 * COW mappings. In general, using multiple vmas is preferable; 2187 * vmf_insert_pfn_prot should only be used if using multiple VMAs is 2188 * impractical. 2189 * 2190 * See vmf_insert_mixed_prot() for a discussion of the implication of using 2191 * a value of @pgprot different from that of @vma->vm_page_prot. 2192 * 2193 * Context: Process context. May allocate using %GFP_KERNEL. 2194 * Return: vm_fault_t value. 2195 */ 2196 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2197 unsigned long pfn, pgprot_t pgprot) 2198 { 2199 /* 2200 * Technically, architectures with pte_special can avoid all these 2201 * restrictions (same for remap_pfn_range). However we would like 2202 * consistency in testing and feature parity among all, so we should 2203 * try to keep these invariants in place for everybody. 2204 */ 2205 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2206 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2207 (VM_PFNMAP|VM_MIXEDMAP)); 2208 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2209 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2210 2211 if (addr < vma->vm_start || addr >= vma->vm_end) 2212 return VM_FAULT_SIGBUS; 2213 2214 if (!pfn_modify_allowed(pfn, pgprot)) 2215 return VM_FAULT_SIGBUS; 2216 2217 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 2218 2219 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 2220 false); 2221 } 2222 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2223 2224 /** 2225 * vmf_insert_pfn - insert single pfn into user vma 2226 * @vma: user vma to map to 2227 * @addr: target user address of this page 2228 * @pfn: source kernel pfn 2229 * 2230 * Similar to vm_insert_page, this allows drivers to insert individual pages 2231 * they've allocated into a user vma. Same comments apply. 2232 * 2233 * This function should only be called from a vm_ops->fault handler, and 2234 * in that case the handler should return the result of this function. 2235 * 2236 * vma cannot be a COW mapping. 2237 * 2238 * As this is called only for pages that do not currently exist, we 2239 * do not need to flush old virtual caches or the TLB. 2240 * 2241 * Context: Process context. 
May allocate using %GFP_KERNEL. 2242 * Return: vm_fault_t value. 2243 */ 2244 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2245 unsigned long pfn) 2246 { 2247 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 2248 } 2249 EXPORT_SYMBOL(vmf_insert_pfn); 2250 2251 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) 2252 { 2253 /* these checks mirror the abort conditions in vm_normal_page */ 2254 if (vma->vm_flags & VM_MIXEDMAP) 2255 return true; 2256 if (pfn_t_devmap(pfn)) 2257 return true; 2258 if (pfn_t_special(pfn)) 2259 return true; 2260 if (is_zero_pfn(pfn_t_to_pfn(pfn))) 2261 return true; 2262 return false; 2263 } 2264 2265 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, 2266 unsigned long addr, pfn_t pfn, pgprot_t pgprot, 2267 bool mkwrite) 2268 { 2269 int err; 2270 2271 BUG_ON(!vm_mixed_ok(vma, pfn)); 2272 2273 if (addr < vma->vm_start || addr >= vma->vm_end) 2274 return VM_FAULT_SIGBUS; 2275 2276 track_pfn_insert(vma, &pgprot, pfn); 2277 2278 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) 2279 return VM_FAULT_SIGBUS; 2280 2281 /* 2282 * If we don't have pte special, then we have to use the pfn_valid() 2283 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 2284 * refcount the page if pfn_valid is true (hence insert_page rather 2285 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 2286 * without pte special, it would there be refcounted as a normal page. 2287 */ 2288 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && 2289 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 2290 struct page *page; 2291 2292 /* 2293 * At this point we are committed to insert_page() 2294 * regardless of whether the caller specified flags that 2295 * result in pfn_t_has_page() == false. 2296 */ 2297 page = pfn_to_page(pfn_t_to_pfn(pfn)); 2298 err = insert_page(vma, addr, page, pgprot); 2299 } else { 2300 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 2301 } 2302 2303 if (err == -ENOMEM) 2304 return VM_FAULT_OOM; 2305 if (err < 0 && err != -EBUSY) 2306 return VM_FAULT_SIGBUS; 2307 2308 return VM_FAULT_NOPAGE; 2309 } 2310 2311 /** 2312 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot 2313 * @vma: user vma to map to 2314 * @addr: target user address of this page 2315 * @pfn: source kernel pfn 2316 * @pgprot: pgprot flags for the inserted page 2317 * 2318 * This is exactly like vmf_insert_mixed(), except that it allows drivers 2319 * to override pgprot on a per-page basis. 2320 * 2321 * Typically this function should be used by drivers to set caching- and 2322 * encryption bits different than those of @vma->vm_page_prot, because 2323 * the caching- or encryption mode may not be known at mmap() time. 2324 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2325 * to set caching and encryption bits for those vmas (except for COW pages). 2326 * This is ensured by core vm only modifying these page table entries using 2327 * functions that don't touch caching- or encryption bits, using pte_modify() 2328 * if needed. (See for example mprotect()). 2329 * Also when new page-table entries are created, this is only done using the 2330 * fault() callback, and never using the value of vma->vm_page_prot, 2331 * except for page-table entries that point to anonymous pages as the result 2332 * of COW. 2333 * 2334 * Context: Process context. May allocate using %GFP_KERNEL. 2335 * Return: vm_fault_t value. 
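 *
 * A minimal illustrative sketch (assumptions of this example: a ->fault
 * handler mapping one device pfn write-combined; "my_pfn" is a
 * hypothetical pfn owned by the driver):
 *
 *	pgprot_t prot = pgprot_writecombine(vma->vm_page_prot);
 *
 *	return vmf_insert_mixed_prot(vma, vmf->address,
 *				     __pfn_to_pfn_t(my_pfn, PFN_DEV), prot);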
 */
vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
			pfn_t pfn, pgprot_t pgprot)
{
	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
}
EXPORT_SYMBOL(vmf_insert_mixed_prot);

vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
		pfn_t pfn)
{
	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
}
EXPORT_SYMBOL(vmf_insert_mixed);

/*
 * If the insertion of PTE failed because someone else already added a
 * different entry in the mean time, we treat that as success as we assume
 * the same entry was actually inserted.
 */
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
		unsigned long addr, pfn_t pfn)
{
	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
}
EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;
	int err = 0;

	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		if (!pfn_modify_allowed(pfn, prot)) {
			err = -EACCES;
			break;
		}
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(mapped_pte, ptl);
	return err;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	do {
		next = pmd_addr_end(addr, end);
		err = remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			return err;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			return err;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;
	int err;

	pfn -= addr >> PAGE_SHIFT;
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);
		err = remap_pud_range(mm, p4d, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			return
err; 2457 } while (p4d++, addr = next, addr != end); 2458 return 0; 2459 } 2460 2461 /* 2462 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller 2463 * must have pre-validated the caching bits of the pgprot_t. 2464 */ 2465 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 2466 unsigned long pfn, unsigned long size, pgprot_t prot) 2467 { 2468 pgd_t *pgd; 2469 unsigned long next; 2470 unsigned long end = addr + PAGE_ALIGN(size); 2471 struct mm_struct *mm = vma->vm_mm; 2472 int err; 2473 2474 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2475 return -EINVAL; 2476 2477 /* 2478 * Physically remapped pages are special. Tell the 2479 * rest of the world about it: 2480 * VM_IO tells people not to look at these pages 2481 * (accesses can have side effects). 2482 * VM_PFNMAP tells the core MM that the base pages are just 2483 * raw PFN mappings, and do not have a "struct page" associated 2484 * with them. 2485 * VM_DONTEXPAND 2486 * Disable vma merging and expanding with mremap(). 2487 * VM_DONTDUMP 2488 * Omit vma from core dump, even when VM_IO turned off. 2489 * 2490 * There's a horrible special case to handle copy-on-write 2491 * behaviour that some programs depend on. We mark the "original" 2492 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2493 * See vm_normal_page() for details. 2494 */ 2495 if (is_cow_mapping(vma->vm_flags)) { 2496 if (addr != vma->vm_start || end != vma->vm_end) 2497 return -EINVAL; 2498 vma->vm_pgoff = pfn; 2499 } 2500 2501 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 2502 2503 BUG_ON(addr >= end); 2504 pfn -= addr >> PAGE_SHIFT; 2505 pgd = pgd_offset(mm, addr); 2506 flush_cache_range(vma, addr, end); 2507 do { 2508 next = pgd_addr_end(addr, end); 2509 err = remap_p4d_range(mm, pgd, addr, next, 2510 pfn + (addr >> PAGE_SHIFT), prot); 2511 if (err) 2512 return err; 2513 } while (pgd++, addr = next, addr != end); 2514 2515 return 0; 2516 } 2517 2518 /** 2519 * remap_pfn_range - remap kernel memory to userspace 2520 * @vma: user vma to map to 2521 * @addr: target page aligned user address to start at 2522 * @pfn: page frame number of kernel physical memory address 2523 * @size: size of mapping area 2524 * @prot: page protection flags for this mapping 2525 * 2526 * Note: this is only safe if the mm semaphore is held when called. 2527 * 2528 * Return: %0 on success, negative error code otherwise. 2529 */ 2530 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 2531 unsigned long pfn, unsigned long size, pgprot_t prot) 2532 { 2533 int err; 2534 2535 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); 2536 if (err) 2537 return -EINVAL; 2538 2539 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 2540 if (err) 2541 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); 2542 return err; 2543 } 2544 EXPORT_SYMBOL(remap_pfn_range); 2545 2546 /** 2547 * vm_iomap_memory - remap memory to userspace 2548 * @vma: user vma to map to 2549 * @start: start of the physical memory to be mapped 2550 * @len: size of area 2551 * 2552 * This is a simplified io_remap_pfn_range() for common driver use. The 2553 * driver just needs to give us the physical memory range to be mapped, 2554 * we'll figure out the rest from the vma information. 2555 * 2556 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2557 * whatever write-combining details or similar. 2558 * 2559 * Return: %0 on success, negative error code otherwise. 
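 *
 * A minimal illustrative sketch (hypothetical driver; "mydev" and its
 * "bar_start"/"bar_len" fields are assumptions of this example):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *mydev = file->private_data;
 *
 *		return vm_iomap_memory(vma, mydev->bar_start, mydev->bar_len);
 *	}
 *
 * vm_pgoff and the vma size are validated against the physical range
 * below, so the handler need not re-check them.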
2560 */ 2561 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2562 { 2563 unsigned long vm_len, pfn, pages; 2564 2565 /* Check that the physical memory area passed in looks valid */ 2566 if (start + len < start) 2567 return -EINVAL; 2568 /* 2569 * You *really* shouldn't map things that aren't page-aligned, 2570 * but we've historically allowed it because IO memory might 2571 * just have smaller alignment. 2572 */ 2573 len += start & ~PAGE_MASK; 2574 pfn = start >> PAGE_SHIFT; 2575 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2576 if (pfn + pages < pfn) 2577 return -EINVAL; 2578 2579 /* We start the mapping 'vm_pgoff' pages into the area */ 2580 if (vma->vm_pgoff > pages) 2581 return -EINVAL; 2582 pfn += vma->vm_pgoff; 2583 pages -= vma->vm_pgoff; 2584 2585 /* Can we fit all of the mapping? */ 2586 vm_len = vma->vm_end - vma->vm_start; 2587 if (vm_len >> PAGE_SHIFT > pages) 2588 return -EINVAL; 2589 2590 /* Ok, let it rip */ 2591 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2592 } 2593 EXPORT_SYMBOL(vm_iomap_memory); 2594 2595 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2596 unsigned long addr, unsigned long end, 2597 pte_fn_t fn, void *data, bool create, 2598 pgtbl_mod_mask *mask) 2599 { 2600 pte_t *pte, *mapped_pte; 2601 int err = 0; 2602 spinlock_t *ptl; 2603 2604 if (create) { 2605 mapped_pte = pte = (mm == &init_mm) ? 2606 pte_alloc_kernel_track(pmd, addr, mask) : 2607 pte_alloc_map_lock(mm, pmd, addr, &ptl); 2608 if (!pte) 2609 return -ENOMEM; 2610 } else { 2611 mapped_pte = pte = (mm == &init_mm) ? 2612 pte_offset_kernel(pmd, addr) : 2613 pte_offset_map_lock(mm, pmd, addr, &ptl); 2614 } 2615 2616 BUG_ON(pmd_huge(*pmd)); 2617 2618 arch_enter_lazy_mmu_mode(); 2619 2620 if (fn) { 2621 do { 2622 if (create || !pte_none(*pte)) { 2623 err = fn(pte++, addr, data); 2624 if (err) 2625 break; 2626 } 2627 } while (addr += PAGE_SIZE, addr != end); 2628 } 2629 *mask |= PGTBL_PTE_MODIFIED; 2630 2631 arch_leave_lazy_mmu_mode(); 2632 2633 if (mm != &init_mm) 2634 pte_unmap_unlock(mapped_pte, ptl); 2635 return err; 2636 } 2637 2638 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2639 unsigned long addr, unsigned long end, 2640 pte_fn_t fn, void *data, bool create, 2641 pgtbl_mod_mask *mask) 2642 { 2643 pmd_t *pmd; 2644 unsigned long next; 2645 int err = 0; 2646 2647 BUG_ON(pud_huge(*pud)); 2648 2649 if (create) { 2650 pmd = pmd_alloc_track(mm, pud, addr, mask); 2651 if (!pmd) 2652 return -ENOMEM; 2653 } else { 2654 pmd = pmd_offset(pud, addr); 2655 } 2656 do { 2657 next = pmd_addr_end(addr, end); 2658 if (pmd_none(*pmd) && !create) 2659 continue; 2660 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 2661 return -EINVAL; 2662 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 2663 if (!create) 2664 continue; 2665 pmd_clear_bad(pmd); 2666 } 2667 err = apply_to_pte_range(mm, pmd, addr, next, 2668 fn, data, create, mask); 2669 if (err) 2670 break; 2671 } while (pmd++, addr = next, addr != end); 2672 2673 return err; 2674 } 2675 2676 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2677 unsigned long addr, unsigned long end, 2678 pte_fn_t fn, void *data, bool create, 2679 pgtbl_mod_mask *mask) 2680 { 2681 pud_t *pud; 2682 unsigned long next; 2683 int err = 0; 2684 2685 if (create) { 2686 pud = pud_alloc_track(mm, p4d, addr, mask); 2687 if (!pud) 2688 return -ENOMEM; 2689 } else { 2690 pud = pud_offset(p4d, addr); 2691 } 2692 do { 2693 next = pud_addr_end(addr, end); 2694 if (pud_none(*pud) && 
!create) 2695 continue; 2696 if (WARN_ON_ONCE(pud_leaf(*pud))) 2697 return -EINVAL; 2698 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 2699 if (!create) 2700 continue; 2701 pud_clear_bad(pud); 2702 } 2703 err = apply_to_pmd_range(mm, pud, addr, next, 2704 fn, data, create, mask); 2705 if (err) 2706 break; 2707 } while (pud++, addr = next, addr != end); 2708 2709 return err; 2710 } 2711 2712 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2713 unsigned long addr, unsigned long end, 2714 pte_fn_t fn, void *data, bool create, 2715 pgtbl_mod_mask *mask) 2716 { 2717 p4d_t *p4d; 2718 unsigned long next; 2719 int err = 0; 2720 2721 if (create) { 2722 p4d = p4d_alloc_track(mm, pgd, addr, mask); 2723 if (!p4d) 2724 return -ENOMEM; 2725 } else { 2726 p4d = p4d_offset(pgd, addr); 2727 } 2728 do { 2729 next = p4d_addr_end(addr, end); 2730 if (p4d_none(*p4d) && !create) 2731 continue; 2732 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 2733 return -EINVAL; 2734 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 2735 if (!create) 2736 continue; 2737 p4d_clear_bad(p4d); 2738 } 2739 err = apply_to_pud_range(mm, p4d, addr, next, 2740 fn, data, create, mask); 2741 if (err) 2742 break; 2743 } while (p4d++, addr = next, addr != end); 2744 2745 return err; 2746 } 2747 2748 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2749 unsigned long size, pte_fn_t fn, 2750 void *data, bool create) 2751 { 2752 pgd_t *pgd; 2753 unsigned long start = addr, next; 2754 unsigned long end = addr + size; 2755 pgtbl_mod_mask mask = 0; 2756 int err = 0; 2757 2758 if (WARN_ON(addr >= end)) 2759 return -EINVAL; 2760 2761 pgd = pgd_offset(mm, addr); 2762 do { 2763 next = pgd_addr_end(addr, end); 2764 if (pgd_none(*pgd) && !create) 2765 continue; 2766 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 2767 return -EINVAL; 2768 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 2769 if (!create) 2770 continue; 2771 pgd_clear_bad(pgd); 2772 } 2773 err = apply_to_p4d_range(mm, pgd, addr, next, 2774 fn, data, create, &mask); 2775 if (err) 2776 break; 2777 } while (pgd++, addr = next, addr != end); 2778 2779 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 2780 arch_sync_kernel_mappings(start, start + size); 2781 2782 return err; 2783 } 2784 2785 /* 2786 * Scan a region of virtual memory, filling in page tables as necessary 2787 * and calling a provided function on each leaf page table. 2788 */ 2789 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2790 unsigned long size, pte_fn_t fn, void *data) 2791 { 2792 return __apply_to_page_range(mm, addr, size, fn, data, true); 2793 } 2794 EXPORT_SYMBOL_GPL(apply_to_page_range); 2795 2796 /* 2797 * Scan a region of virtual memory, calling a provided function on 2798 * each leaf page table where it exists. 2799 * 2800 * Unlike apply_to_page_range, this does _not_ fill in page tables 2801 * where they are absent. 2802 */ 2803 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 2804 unsigned long size, pte_fn_t fn, void *data) 2805 { 2806 return __apply_to_page_range(mm, addr, size, fn, data, false); 2807 } 2808 EXPORT_SYMBOL_GPL(apply_to_existing_page_range); 2809 2810 /* 2811 * handle_pte_fault chooses page fault handler according to an entry which was 2812 * read non-atomically. Before making any commitment, on those architectures 2813 * or configurations (e.g. 
i386 with PAE) which might give a mix of unmatched
 * parts, do_swap_page must check under lock before unmapping the pte and
 * proceeding (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page can safely check later on).
 */
static inline int pte_unmap_same(struct vm_fault *vmf)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
		spin_lock(ptl);
		same = pte_same(*vmf->pte, vmf->orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(vmf->pte);
	vmf->pte = NULL;
	return same;
}

static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
				       struct vm_fault *vmf)
{
	bool ret;
	void *kaddr;
	void __user *uaddr;
	bool locked = false;
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = vmf->address;

	if (likely(src)) {
		copy_user_highpage(dst, src, addr, vma);
		return true;
	}

	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	kaddr = kmap_atomic(dst);
	uaddr = (void __user *)(addr & PAGE_MASK);

	/*
	 * On architectures with software "accessed" bits, we would
	 * take a double page fault, so mark it accessed here.
	 */
	if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
		pte_t entry;

		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		locked = true;
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/*
			 * Another thread has already handled the fault;
			 * just update the local tlb.
			 */
			update_mmu_tlb(vma, addr, vmf->pte);
			ret = false;
			goto pte_unlock;
		}

		entry = pte_mkyoung(vmf->orig_pte);
		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
			update_mmu_cache(vma, addr, vmf->pte);
	}

	/*
	 * This really shouldn't fail, because the page is there
	 * in the page tables. But it might just be unreadable,
	 * in which case we just give up and fill the result with
	 * zeroes.
	 */
	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
		if (locked)
			goto warn;

		/* Re-validate under PTL if the page is still mapped */
		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		locked = true;
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/* The PTE changed under us, update the local tlb */
			update_mmu_tlb(vma, addr, vmf->pte);
			ret = false;
			goto pte_unlock;
		}

		/*
		 * The same page may have been mapped back in since the last
		 * copy attempt. Try to copy again under PTL.
2906 */ 2907 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 2908 /* 2909 * Give a warn in case there can be some obscure 2910 * use-case 2911 */ 2912 warn: 2913 WARN_ON_ONCE(1); 2914 clear_page(kaddr); 2915 } 2916 } 2917 2918 ret = true; 2919 2920 pte_unlock: 2921 if (locked) 2922 pte_unmap_unlock(vmf->pte, vmf->ptl); 2923 kunmap_atomic(kaddr); 2924 flush_dcache_page(dst); 2925 2926 return ret; 2927 } 2928 2929 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 2930 { 2931 struct file *vm_file = vma->vm_file; 2932 2933 if (vm_file) 2934 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 2935 2936 /* 2937 * Special mappings (e.g. VDSO) do not have any file so fake 2938 * a default GFP_KERNEL for them. 2939 */ 2940 return GFP_KERNEL; 2941 } 2942 2943 /* 2944 * Notify the address space that the page is about to become writable so that 2945 * it can prohibit this or wait for the page to get into an appropriate state. 2946 * 2947 * We do this without the lock held, so that it can sleep if it needs to. 2948 */ 2949 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) 2950 { 2951 vm_fault_t ret; 2952 struct page *page = vmf->page; 2953 unsigned int old_flags = vmf->flags; 2954 2955 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2956 2957 if (vmf->vma->vm_file && 2958 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) 2959 return VM_FAULT_SIGBUS; 2960 2961 ret = vmf->vma->vm_ops->page_mkwrite(vmf); 2962 /* Restore original flags so that caller is not surprised */ 2963 vmf->flags = old_flags; 2964 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2965 return ret; 2966 if (unlikely(!(ret & VM_FAULT_LOCKED))) { 2967 lock_page(page); 2968 if (!page->mapping) { 2969 unlock_page(page); 2970 return 0; /* retry */ 2971 } 2972 ret |= VM_FAULT_LOCKED; 2973 } else 2974 VM_BUG_ON_PAGE(!PageLocked(page), page); 2975 return ret; 2976 } 2977 2978 /* 2979 * Handle dirtying of a page in shared file mapping on a write fault. 2980 * 2981 * The function expects the page to be locked and unlocks it. 2982 */ 2983 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) 2984 { 2985 struct vm_area_struct *vma = vmf->vma; 2986 struct address_space *mapping; 2987 struct page *page = vmf->page; 2988 bool dirtied; 2989 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 2990 2991 dirtied = set_page_dirty(page); 2992 VM_BUG_ON_PAGE(PageAnon(page), page); 2993 /* 2994 * Take a local copy of the address_space - page.mapping may be zeroed 2995 * by truncate after unlock_page(). The address_space itself remains 2996 * pinned by vma->vm_file's reference. We rely on unlock_page()'s 2997 * release semantics to prevent the compiler from undoing this copying. 2998 */ 2999 mapping = page_rmapping(page); 3000 unlock_page(page); 3001 3002 if (!page_mkwrite) 3003 file_update_time(vma->vm_file); 3004 3005 /* 3006 * Throttle page dirtying rate down to writeback speed. 3007 * 3008 * mapping may be NULL here because some device drivers do not 3009 * set page.mapping but still dirty their pages 3010 * 3011 * Drop the mmap_lock before waiting on IO, if we can. The file 3012 * is pinning the mapping, as per above. 
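	 *
	 * If maybe_unlock_mmap_for_io() below did drop the mmap_lock, it
	 * returns a pinned struct file; in that case we must report
	 * VM_FAULT_COMPLETED rather than touch the vma again.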
 */
	if ((dirtied || page_mkwrite) && mapping) {
		struct file *fpin;

		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
		balance_dirty_pages_ratelimited(mapping);
		if (fpin) {
			fput(fpin);
			return VM_FAULT_COMPLETED;
		}
	}

	return 0;
}

/*
 * Handle write page faults for pages that can be reused in the current vma.
 *
 * This can happen either because the mapping was set up with the VM_SHARED
 * flag, or because we hold the last remaining reference to the page. In
 * either case, all we need to do here is to mark the page as writable and
 * update any related book-keeping.
 */
static inline void wp_page_reuse(struct vm_fault *vmf)
	__releases(vmf->ptl)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	pte_t entry;

	VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
	VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page));

	/*
	 * Clear the page's cpupid information as the existing
	 * information potentially belongs to a now completely
	 * unrelated process.
	 */
	if (page)
		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);

	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
	entry = pte_mkyoung(vmf->orig_pte);
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
		update_mmu_cache(vma, vmf->address, vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	count_vm_event(PGREUSE);
}

/*
 * Handle the case of a page which we actually need to copy to a new page,
 * either due to COW or unsharing.
 *
 * Called with mmap_lock locked and the old page referenced, but
 * without the ptl held.
 *
 * High level logic flow:
 *
 * - Allocate a page, copy the content of the old page to the new one.
 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
 * - Take the PTL. If the pte changed, bail out and release the allocated page
 * - If the pte is still the way we remember it, update the page table and all
 *   relevant references. This includes dropping the reference the page-table
 *   held to the old page, as well as updating the rmap.
 * - In any case, unlock the PTL and drop the reference we took to the old page.
 */
static vm_fault_t wp_page_copy(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct page *old_page = vmf->page;
	struct page *new_page = NULL;
	pte_t entry;
	int page_copied = 0;
	struct mmu_notifier_range range;

	delayacct_wpcopy_start();

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;

	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
		new_page = alloc_zeroed_user_highpage_movable(vma,
							      vmf->address);
		if (!new_page)
			goto oom;
	} else {
		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
				vmf->address);
		if (!new_page)
			goto oom;

		if (!__wp_page_copy_user(new_page, old_page, vmf)) {
			/*
			 * COW failed; if the fault was resolved by another
			 * thread, that's fine. If not, userspace will
			 * re-fault at the same address and we will handle
			 * the fault on the second attempt.
			 */
			put_page(new_page);
			if (old_page)
				put_page(old_page);

			delayacct_wpcopy_end();
			return 0;
		}
	}

	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
		goto oom_free_new;
	cgroup_throttle_swaprate(new_page, GFP_KERNEL);

	__SetPageUptodate(new_page);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				vmf->address & PAGE_MASK,
				(vmf->address & PAGE_MASK) + PAGE_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter_fast(mm,
						mm_counter_file(old_page));
				inc_mm_counter_fast(mm, MM_ANONPAGES);
			}
		} else {
			inc_mm_counter_fast(mm, MM_ANONPAGES);
		}
		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = pte_sw_mkyoung(entry);
		if (unlikely(unshare)) {
			if (pte_soft_dirty(vmf->orig_pte))
				entry = pte_mksoft_dirty(entry);
			if (pte_uffd_wp(vmf->orig_pte))
				entry = pte_mkuffd_wp(entry);
		} else {
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		}

		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry, to keep TLBs on different CPUs in
		 * sync. This code used to set the new PTE then flush TLBs, but
		 * that left a window where the new PTE could be loaded into
		 * some TLBs while the old PTE remains in others.
		 */
		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
		page_add_new_anon_rmap(new_page, vma, vmf->address);
		lru_cache_add_inactive_or_unevictable(new_page, vma);
		/*
		 * We call the notify macro here because, when using secondary
		 * mmu page tables (such as kvm shadow page tables), we want the
		 * new page to be mapped directly into the secondary page table.
		 */
		BUG_ON(unshare && pte_write(entry));
		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
		update_mmu_cache(vma, vmf->address, vmf->pte);
		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page, vma, false);
		}

		/* Free the old page..
*/ 3205 new_page = old_page; 3206 page_copied = 1; 3207 } else { 3208 update_mmu_tlb(vma, vmf->address, vmf->pte); 3209 } 3210 3211 if (new_page) 3212 put_page(new_page); 3213 3214 pte_unmap_unlock(vmf->pte, vmf->ptl); 3215 /* 3216 * No need to double call mmu_notifier->invalidate_range() callback as 3217 * the above ptep_clear_flush_notify() did already call it. 3218 */ 3219 mmu_notifier_invalidate_range_only_end(&range); 3220 if (old_page) { 3221 if (page_copied) 3222 free_swap_cache(old_page); 3223 put_page(old_page); 3224 } 3225 3226 delayacct_wpcopy_end(); 3227 return (page_copied && !unshare) ? VM_FAULT_WRITE : 0; 3228 oom_free_new: 3229 put_page(new_page); 3230 oom: 3231 if (old_page) 3232 put_page(old_page); 3233 3234 delayacct_wpcopy_end(); 3235 return VM_FAULT_OOM; 3236 } 3237 3238 /** 3239 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 3240 * writeable once the page is prepared 3241 * 3242 * @vmf: structure describing the fault 3243 * 3244 * This function handles all that is needed to finish a write page fault in a 3245 * shared mapping due to PTE being read-only once the mapped page is prepared. 3246 * It handles locking of PTE and modifying it. 3247 * 3248 * The function expects the page to be locked or other protection against 3249 * concurrent faults / writeback (such as DAX radix tree locks). 3250 * 3251 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before 3252 * we acquired PTE lock. 3253 */ 3254 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) 3255 { 3256 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 3257 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 3258 &vmf->ptl); 3259 /* 3260 * We might have raced with another page fault while we released the 3261 * pte_offset_map_lock. 
3262 */ 3263 if (!pte_same(*vmf->pte, vmf->orig_pte)) { 3264 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3265 pte_unmap_unlock(vmf->pte, vmf->ptl); 3266 return VM_FAULT_NOPAGE; 3267 } 3268 wp_page_reuse(vmf); 3269 return 0; 3270 } 3271 3272 /* 3273 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3274 * mapping 3275 */ 3276 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3277 { 3278 struct vm_area_struct *vma = vmf->vma; 3279 3280 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3281 vm_fault_t ret; 3282 3283 pte_unmap_unlock(vmf->pte, vmf->ptl); 3284 vmf->flags |= FAULT_FLAG_MKWRITE; 3285 ret = vma->vm_ops->pfn_mkwrite(vmf); 3286 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 3287 return ret; 3288 return finish_mkwrite_fault(vmf); 3289 } 3290 wp_page_reuse(vmf); 3291 return VM_FAULT_WRITE; 3292 } 3293 3294 static vm_fault_t wp_page_shared(struct vm_fault *vmf) 3295 __releases(vmf->ptl) 3296 { 3297 struct vm_area_struct *vma = vmf->vma; 3298 vm_fault_t ret = VM_FAULT_WRITE; 3299 3300 get_page(vmf->page); 3301 3302 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 3303 vm_fault_t tmp; 3304 3305 pte_unmap_unlock(vmf->pte, vmf->ptl); 3306 tmp = do_page_mkwrite(vmf); 3307 if (unlikely(!tmp || (tmp & 3308 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3309 put_page(vmf->page); 3310 return tmp; 3311 } 3312 tmp = finish_mkwrite_fault(vmf); 3313 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 3314 unlock_page(vmf->page); 3315 put_page(vmf->page); 3316 return tmp; 3317 } 3318 } else { 3319 wp_page_reuse(vmf); 3320 lock_page(vmf->page); 3321 } 3322 ret |= fault_dirty_shared_page(vmf); 3323 put_page(vmf->page); 3324 3325 return ret; 3326 } 3327 3328 /* 3329 * This routine handles present pages, when 3330 * * users try to write to a shared page (FAULT_FLAG_WRITE) 3331 * * GUP wants to take a R/O pin on a possibly shared anonymous page 3332 * (FAULT_FLAG_UNSHARE) 3333 * 3334 * It is done by copying the page to a new address and decrementing the 3335 * shared-page counter for the old page. 3336 * 3337 * Note that this routine assumes that the protection checks have been 3338 * done by the caller (the low-level page fault routine in most cases). 3339 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've 3340 * done any necessary COW. 3341 * 3342 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even 3343 * though the page will change only once the write actually happens. This 3344 * avoids a few races, and potentially makes it more efficient. 3345 * 3346 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3347 * but allow concurrent faults), with pte both mapped and locked. 3348 * We return with mmap_lock still held, but pte unmapped and unlocked. 3349 */ 3350 static vm_fault_t do_wp_page(struct vm_fault *vmf) 3351 __releases(vmf->ptl) 3352 { 3353 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3354 struct vm_area_struct *vma = vmf->vma; 3355 3356 VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE)); 3357 VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE)); 3358 3359 if (likely(!unshare)) { 3360 if (userfaultfd_pte_wp(vma, *vmf->pte)) { 3361 pte_unmap_unlock(vmf->pte, vmf->ptl); 3362 return handle_userfault(vmf, VM_UFFD_WP); 3363 } 3364 3365 /* 3366 * Userfaultfd write-protect can defer flushes. Ensure the TLB 3367 * is flushed in this case before copying. 
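		 *
		 * (Otherwise another CPU could keep writing through a stale
		 * writable TLB entry while we copy the page, and those writes
		 * would be lost from the copy.)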
3368 */ 3369 if (unlikely(userfaultfd_wp(vmf->vma) && 3370 mm_tlb_flush_pending(vmf->vma->vm_mm))) 3371 flush_tlb_page(vmf->vma, vmf->address); 3372 } 3373 3374 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 3375 if (!vmf->page) { 3376 if (unlikely(unshare)) { 3377 /* No anonymous page -> nothing to do. */ 3378 pte_unmap_unlock(vmf->pte, vmf->ptl); 3379 return 0; 3380 } 3381 3382 /* 3383 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 3384 * VM_PFNMAP VMA. 3385 * 3386 * We should not cow pages in a shared writeable mapping. 3387 * Just mark the pages writable and/or call ops->pfn_mkwrite. 3388 */ 3389 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 3390 (VM_WRITE|VM_SHARED)) 3391 return wp_pfn_shared(vmf); 3392 3393 pte_unmap_unlock(vmf->pte, vmf->ptl); 3394 return wp_page_copy(vmf); 3395 } 3396 3397 /* 3398 * Take out anonymous pages first, anonymous shared vmas are 3399 * not dirty accountable. 3400 */ 3401 if (PageAnon(vmf->page)) { 3402 struct page *page = vmf->page; 3403 3404 /* 3405 * If the page is exclusive to this process we must reuse the 3406 * page without further checks. 3407 */ 3408 if (PageAnonExclusive(page)) 3409 goto reuse; 3410 3411 /* 3412 * We have to verify under page lock: these early checks are 3413 * just an optimization to avoid locking the page and freeing 3414 * the swapcache if there is little hope that we can reuse. 3415 * 3416 * PageKsm() doesn't necessarily raise the page refcount. 3417 */ 3418 if (PageKsm(page) || page_count(page) > 3) 3419 goto copy; 3420 if (!PageLRU(page)) 3421 /* 3422 * Note: We cannot easily detect+handle references from 3423 * remote LRU pagevecs or references to PageLRU() pages. 3424 */ 3425 lru_add_drain(); 3426 if (page_count(page) > 1 + PageSwapCache(page)) 3427 goto copy; 3428 if (!trylock_page(page)) 3429 goto copy; 3430 if (PageSwapCache(page)) 3431 try_to_free_swap(page); 3432 if (PageKsm(page) || page_count(page) != 1) { 3433 unlock_page(page); 3434 goto copy; 3435 } 3436 /* 3437 * Ok, we've got the only page reference from our mapping 3438 * and the page is locked, it's dark out, and we're wearing 3439 * sunglasses. Hit it. 3440 */ 3441 page_move_anon_rmap(page, vma); 3442 unlock_page(page); 3443 reuse: 3444 if (unlikely(unshare)) { 3445 pte_unmap_unlock(vmf->pte, vmf->ptl); 3446 return 0; 3447 } 3448 wp_page_reuse(vmf); 3449 return VM_FAULT_WRITE; 3450 } else if (unshare) { 3451 /* No anonymous page -> nothing to do. */ 3452 pte_unmap_unlock(vmf->pte, vmf->ptl); 3453 return 0; 3454 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 3455 (VM_WRITE|VM_SHARED))) { 3456 return wp_page_shared(vmf); 3457 } 3458 copy: 3459 /* 3460 * Ok, we need to copy. Oh, well.. 
3461 */ 3462 get_page(vmf->page); 3463 3464 pte_unmap_unlock(vmf->pte, vmf->ptl); 3465 #ifdef CONFIG_KSM 3466 if (PageKsm(vmf->page)) 3467 count_vm_event(COW_KSM); 3468 #endif 3469 return wp_page_copy(vmf); 3470 } 3471 3472 static void unmap_mapping_range_vma(struct vm_area_struct *vma, 3473 unsigned long start_addr, unsigned long end_addr, 3474 struct zap_details *details) 3475 { 3476 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 3477 } 3478 3479 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 3480 pgoff_t first_index, 3481 pgoff_t last_index, 3482 struct zap_details *details) 3483 { 3484 struct vm_area_struct *vma; 3485 pgoff_t vba, vea, zba, zea; 3486 3487 vma_interval_tree_foreach(vma, root, first_index, last_index) { 3488 vba = vma->vm_pgoff; 3489 vea = vba + vma_pages(vma) - 1; 3490 zba = max(first_index, vba); 3491 zea = min(last_index, vea); 3492 3493 unmap_mapping_range_vma(vma, 3494 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 3495 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 3496 details); 3497 } 3498 } 3499 3500 /** 3501 * unmap_mapping_folio() - Unmap single folio from processes. 3502 * @folio: The locked folio to be unmapped. 3503 * 3504 * Unmap this folio from any userspace process which still has it mmaped. 3505 * Typically, for efficiency, the range of nearby pages has already been 3506 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 3507 * truncation or invalidation holds the lock on a folio, it may find that 3508 * the page has been remapped again: and then uses unmap_mapping_folio() 3509 * to unmap it finally. 3510 */ 3511 void unmap_mapping_folio(struct folio *folio) 3512 { 3513 struct address_space *mapping = folio->mapping; 3514 struct zap_details details = { }; 3515 pgoff_t first_index; 3516 pgoff_t last_index; 3517 3518 VM_BUG_ON(!folio_test_locked(folio)); 3519 3520 first_index = folio->index; 3521 last_index = folio->index + folio_nr_pages(folio) - 1; 3522 3523 details.even_cows = false; 3524 details.single_folio = folio; 3525 details.zap_flags = ZAP_FLAG_DROP_MARKER; 3526 3527 i_mmap_lock_read(mapping); 3528 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3529 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3530 last_index, &details); 3531 i_mmap_unlock_read(mapping); 3532 } 3533 3534 /** 3535 * unmap_mapping_pages() - Unmap pages from processes. 3536 * @mapping: The address space containing pages to be unmapped. 3537 * @start: Index of first page to be unmapped. 3538 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 3539 * @even_cows: Whether to unmap even private COWed pages. 3540 * 3541 * Unmap the pages in this address space from any userspace process which 3542 * has them mmaped. Generally, you want to remove COWed pages as well when 3543 * a file is being truncated, but not when invalidating pages from the page 3544 * cache. 
3545 */ 3546 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 3547 pgoff_t nr, bool even_cows) 3548 { 3549 struct zap_details details = { }; 3550 pgoff_t first_index = start; 3551 pgoff_t last_index = start + nr - 1; 3552 3553 details.even_cows = even_cows; 3554 if (last_index < first_index) 3555 last_index = ULONG_MAX; 3556 3557 i_mmap_lock_read(mapping); 3558 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3559 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3560 last_index, &details); 3561 i_mmap_unlock_read(mapping); 3562 } 3563 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 3564 3565 /** 3566 * unmap_mapping_range - unmap the portion of all mmaps in the specified 3567 * address_space corresponding to the specified byte range in the underlying 3568 * file. 3569 * 3570 * @mapping: the address space containing mmaps to be unmapped. 3571 * @holebegin: byte in first page to unmap, relative to the start of 3572 * the underlying file. This will be rounded down to a PAGE_SIZE 3573 * boundary. Note that this is different from truncate_pagecache(), which 3574 * must keep the partial page. In contrast, we must get rid of 3575 * partial pages. 3576 * @holelen: size of prospective hole in bytes. This will be rounded 3577 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 3578 * end of the file. 3579 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 3580 * but 0 when invalidating pagecache, don't throw away private data. 3581 */ 3582 void unmap_mapping_range(struct address_space *mapping, 3583 loff_t const holebegin, loff_t const holelen, int even_cows) 3584 { 3585 pgoff_t hba = holebegin >> PAGE_SHIFT; 3586 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3587 3588 /* Check for overflow. */ 3589 if (sizeof(holelen) > sizeof(hlen)) { 3590 long long holeend = 3591 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3592 if (holeend & ~(long long)ULONG_MAX) 3593 hlen = ULONG_MAX - hba + 1; 3594 } 3595 3596 unmap_mapping_pages(mapping, hba, hlen, even_cows); 3597 } 3598 EXPORT_SYMBOL(unmap_mapping_range); 3599 3600 /* 3601 * Restore a potential device exclusive pte to a working pte entry 3602 */ 3603 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 3604 { 3605 struct page *page = vmf->page; 3606 struct vm_area_struct *vma = vmf->vma; 3607 struct mmu_notifier_range range; 3608 3609 if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) 3610 return VM_FAULT_RETRY; 3611 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 3612 vma->vm_mm, vmf->address & PAGE_MASK, 3613 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); 3614 mmu_notifier_invalidate_range_start(&range); 3615 3616 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3617 &vmf->ptl); 3618 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 3619 restore_exclusive_pte(vma, page, vmf->address, vmf->pte); 3620 3621 pte_unmap_unlock(vmf->pte, vmf->ptl); 3622 unlock_page(page); 3623 3624 mmu_notifier_invalidate_range_end(&range); 3625 return 0; 3626 } 3627 3628 static inline bool should_try_to_free_swap(struct page *page, 3629 struct vm_area_struct *vma, 3630 unsigned int fault_flags) 3631 { 3632 if (!PageSwapCache(page)) 3633 return false; 3634 if (mem_cgroup_swap_full(page) || (vma->vm_flags & VM_LOCKED) || 3635 PageMlocked(page)) 3636 return true; 3637 /* 3638 * If we want to map a page that's in the swapcache writable, we 3639 * have to detect via the refcount if we're really the exclusive 3640 * user. 
Try freeing the swapcache to get rid of the swapcache
	 * reference only in case it's likely that we'll be the exclusive user.
	 */
	return (fault_flags & FAULT_FLAG_WRITE) && !PageKsm(page) &&
		page_count(page) == 2;
}

static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
{
	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);
	/*
	 * Be careful so that we will only recover a special uffd-wp pte into a
	 * none pte. Otherwise it means the pte could have changed, so retry.
	 */
	if (is_pte_marker(*vmf->pte))
		pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;
}

/*
 * This is actually a page-missing access, but with uffd-wp special pte
 * installed. It means this pte was wr-protected before being unmapped.
 */
static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
{
	/*
	 * Just in case there are leftover special ptes even after the region
	 * got unregistered - we can simply clear them. We could also do that
	 * proactively, e.g. when we do UFFDIO_UNREGISTER on some uffd-wp
	 * ranges, but it should be more efficient to do it lazily here.
	 */
	if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma)))
		return pte_marker_clear(vmf);

	/* do_fault() can handle pte markers just like a none pte */
	return do_fault(vmf);
}

static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
{
	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
	unsigned long marker = pte_marker_get(entry);

	/*
	 * PTE markers should only appear with file-backed memory, and the
	 * marker should never be empty. If anything weird happened, the best
	 * thing to do is to kill the process along with its mm.
	 */
	if (WARN_ON_ONCE(vma_is_anonymous(vmf->vma) || !marker))
		return VM_FAULT_SIGBUS;

	if (pte_marker_entry_uffd_wp(entry))
		return pte_marker_handle_uffd_wp(vmf);

	/* This is an unknown pte marker */
	return VM_FAULT_SIGBUS;
}

/*
 * We enter with non-exclusive mmap_lock (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with pte unmapped and unlocked.
 *
 * We return with the mmap_lock locked or unlocked in the same cases
 * as does filemap_fault().
3707 */ 3708 vm_fault_t do_swap_page(struct vm_fault *vmf) 3709 { 3710 struct vm_area_struct *vma = vmf->vma; 3711 struct page *page = NULL, *swapcache; 3712 struct swap_info_struct *si = NULL; 3713 rmap_t rmap_flags = RMAP_NONE; 3714 bool exclusive = false; 3715 swp_entry_t entry; 3716 pte_t pte; 3717 int locked; 3718 vm_fault_t ret = 0; 3719 void *shadow = NULL; 3720 3721 if (!pte_unmap_same(vmf)) 3722 goto out; 3723 3724 entry = pte_to_swp_entry(vmf->orig_pte); 3725 if (unlikely(non_swap_entry(entry))) { 3726 if (is_migration_entry(entry)) { 3727 migration_entry_wait(vma->vm_mm, vmf->pmd, 3728 vmf->address); 3729 } else if (is_device_exclusive_entry(entry)) { 3730 vmf->page = pfn_swap_entry_to_page(entry); 3731 ret = remove_device_exclusive_entry(vmf); 3732 } else if (is_device_private_entry(entry)) { 3733 vmf->page = pfn_swap_entry_to_page(entry); 3734 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); 3735 } else if (is_hwpoison_entry(entry)) { 3736 ret = VM_FAULT_HWPOISON; 3737 } else if (is_swapin_error_entry(entry)) { 3738 ret = VM_FAULT_SIGBUS; 3739 } else if (is_pte_marker_entry(entry)) { 3740 ret = handle_pte_marker(vmf); 3741 } else { 3742 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 3743 ret = VM_FAULT_SIGBUS; 3744 } 3745 goto out; 3746 } 3747 3748 /* Prevent swapoff from happening to us. */ 3749 si = get_swap_device(entry); 3750 if (unlikely(!si)) 3751 goto out; 3752 3753 page = lookup_swap_cache(entry, vma, vmf->address); 3754 swapcache = page; 3755 3756 if (!page) { 3757 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && 3758 __swap_count(entry) == 1) { 3759 /* skip swapcache */ 3760 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, 3761 vmf->address); 3762 if (page) { 3763 __SetPageLocked(page); 3764 __SetPageSwapBacked(page); 3765 3766 if (mem_cgroup_swapin_charge_page(page, 3767 vma->vm_mm, GFP_KERNEL, entry)) { 3768 ret = VM_FAULT_OOM; 3769 goto out_page; 3770 } 3771 mem_cgroup_swapin_uncharge_swap(entry); 3772 3773 shadow = get_shadow_from_swap_cache(entry); 3774 if (shadow) 3775 workingset_refault(page_folio(page), 3776 shadow); 3777 3778 lru_cache_add(page); 3779 3780 /* To provide entry to swap_readpage() */ 3781 set_page_private(page, entry.val); 3782 swap_readpage(page, true, NULL); 3783 set_page_private(page, 0); 3784 } 3785 } else { 3786 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 3787 vmf); 3788 swapcache = page; 3789 } 3790 3791 if (!page) { 3792 /* 3793 * Back out if somebody else faulted in this pte 3794 * while we released the pte lock. 3795 */ 3796 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3797 vmf->address, &vmf->ptl); 3798 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 3799 ret = VM_FAULT_OOM; 3800 goto unlock; 3801 } 3802 3803 /* Had to read the page from swap area: Major fault */ 3804 ret = VM_FAULT_MAJOR; 3805 count_vm_event(PGMAJFAULT); 3806 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 3807 } else if (PageHWPoison(page)) { 3808 /* 3809 * hwpoisoned dirty swapcache pages are kept for killing 3810 * owner processes (which may be unknown at hwpoison time) 3811 */ 3812 ret = VM_FAULT_HWPOISON; 3813 goto out_release; 3814 } 3815 3816 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags); 3817 3818 if (!locked) { 3819 ret |= VM_FAULT_RETRY; 3820 goto out_release; 3821 } 3822 3823 if (swapcache) { 3824 /* 3825 * Make sure try_to_free_swap or swapoff did not release the 3826 * swapcache from under us. The page pin, and pte_same test 3827 * below, are not enough to exclude that. 
Even if it is still 3828 * swapcache, we need to check that the page's swap has not 3829 * changed. 3830 */ 3831 if (unlikely(!PageSwapCache(page) || 3832 page_private(page) != entry.val)) 3833 goto out_page; 3834 3835 /* 3836 * KSM sometimes has to copy on read faults, for example, if 3837 * page->index of !PageKSM() pages would be nonlinear inside the 3838 * anon VMA -- PageKSM() is lost on actual swapout. 3839 */ 3840 page = ksm_might_need_to_copy(page, vma, vmf->address); 3841 if (unlikely(!page)) { 3842 ret = VM_FAULT_OOM; 3843 page = swapcache; 3844 goto out_page; 3845 } 3846 3847 /* 3848 * If we want to map a page that's in the swapcache writable, we 3849 * have to detect via the refcount if we're really the exclusive 3850 * owner. Try removing the extra reference from the local LRU 3851 * pagevecs if required. 3852 */ 3853 if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache && 3854 !PageKsm(page) && !PageLRU(page)) 3855 lru_add_drain(); 3856 } 3857 3858 cgroup_throttle_swaprate(page, GFP_KERNEL); 3859 3860 /* 3861 * Back out if somebody else already faulted in this pte. 3862 */ 3863 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3864 &vmf->ptl); 3865 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) 3866 goto out_nomap; 3867 3868 if (unlikely(!PageUptodate(page))) { 3869 ret = VM_FAULT_SIGBUS; 3870 goto out_nomap; 3871 } 3872 3873 /* 3874 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 3875 * must never point at an anonymous page in the swapcache that is 3876 * PG_anon_exclusive. Sanity check that this holds and especially, that 3877 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 3878 * check after taking the PT lock and making sure that nobody 3879 * concurrently faulted in this page and set PG_anon_exclusive. 3880 */ 3881 BUG_ON(!PageAnon(page) && PageMappedToDisk(page)); 3882 BUG_ON(PageAnon(page) && PageAnonExclusive(page)); 3883 3884 /* 3885 * Check under PT lock (to protect against concurrent fork() sharing 3886 * the swap entry concurrently) for certainly exclusive pages. 3887 */ 3888 if (!PageKsm(page)) { 3889 /* 3890 * Note that pte_swp_exclusive() == false for architectures 3891 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE. 3892 */ 3893 exclusive = pte_swp_exclusive(vmf->orig_pte); 3894 if (page != swapcache) { 3895 /* 3896 * We have a fresh page that is not exposed to the 3897 * swapcache -> certainly exclusive. 3898 */ 3899 exclusive = true; 3900 } else if (exclusive && PageWriteback(page) && 3901 data_race(si->flags & SWP_STABLE_WRITES)) { 3902 /* 3903 * This is tricky: not all swap backends support 3904 * concurrent page modifications while under writeback. 3905 * 3906 * So if we stumble over such a page in the swapcache 3907 * we must not set the page exclusive, otherwise we can 3908 * map it writable without further checks and modify it 3909 * while still under writeback. 3910 * 3911 * For these problematic swap backends, simply drop the 3912 * exclusive marker: this is perfectly fine as we start 3913 * writeback only if we fully unmapped the page and 3914 * there are no unexpected references on the page after 3915 * unmapping succeeded. After fully unmapped, no 3916 * further GUP references (FOLL_GET and FOLL_PIN) can 3917 * appear, so dropping the exclusive marker and mapping 3918 * it only R/O is fine. 3919 */ 3920 exclusive = false; 3921 } 3922 } 3923 3924 /* 3925 * Remove the swap entry and conditionally try to free up the swapcache. 
3926 * We're already holding a reference on the page but haven't mapped it 3927 * yet. 3928 */ 3929 swap_free(entry); 3930 if (should_try_to_free_swap(page, vma, vmf->flags)) 3931 try_to_free_swap(page); 3932 3933 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 3934 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); 3935 pte = mk_pte(page, vma->vm_page_prot); 3936 3937 /* 3938 * Same logic as in do_wp_page(); however, optimize for pages that are 3939 * certainly not shared either because we just allocated them without 3940 * exposing them to the swapcache or because the swap entry indicates 3941 * exclusivity. 3942 */ 3943 if (!PageKsm(page) && (exclusive || page_count(page) == 1)) { 3944 if (vmf->flags & FAULT_FLAG_WRITE) { 3945 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 3946 vmf->flags &= ~FAULT_FLAG_WRITE; 3947 ret |= VM_FAULT_WRITE; 3948 } 3949 rmap_flags |= RMAP_EXCLUSIVE; 3950 } 3951 flush_icache_page(vma, page); 3952 if (pte_swp_soft_dirty(vmf->orig_pte)) 3953 pte = pte_mksoft_dirty(pte); 3954 if (pte_swp_uffd_wp(vmf->orig_pte)) { 3955 pte = pte_mkuffd_wp(pte); 3956 pte = pte_wrprotect(pte); 3957 } 3958 vmf->orig_pte = pte; 3959 3960 /* ksm created a completely new copy */ 3961 if (unlikely(page != swapcache && swapcache)) { 3962 page_add_new_anon_rmap(page, vma, vmf->address); 3963 lru_cache_add_inactive_or_unevictable(page, vma); 3964 } else { 3965 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); 3966 } 3967 3968 VM_BUG_ON(!PageAnon(page) || (pte_write(pte) && !PageAnonExclusive(page))); 3969 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 3970 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); 3971 3972 unlock_page(page); 3973 if (page != swapcache && swapcache) { 3974 /* 3975 * Hold the lock to avoid the swap entry to be reused 3976 * until we take the PT lock for the pte_same() check 3977 * (to avoid false positives from pte_same). For 3978 * further safety release the lock after the swap_free 3979 * so that the swap count won't change under a 3980 * parallel locked swapcache. 3981 */ 3982 unlock_page(swapcache); 3983 put_page(swapcache); 3984 } 3985 3986 if (vmf->flags & FAULT_FLAG_WRITE) { 3987 ret |= do_wp_page(vmf); 3988 if (ret & VM_FAULT_ERROR) 3989 ret &= VM_FAULT_ERROR; 3990 goto out; 3991 } 3992 3993 /* No need to invalidate - it was non-present before */ 3994 update_mmu_cache(vma, vmf->address, vmf->pte); 3995 unlock: 3996 pte_unmap_unlock(vmf->pte, vmf->ptl); 3997 out: 3998 if (si) 3999 put_swap_device(si); 4000 return ret; 4001 out_nomap: 4002 pte_unmap_unlock(vmf->pte, vmf->ptl); 4003 out_page: 4004 unlock_page(page); 4005 out_release: 4006 put_page(page); 4007 if (page != swapcache && swapcache) { 4008 unlock_page(swapcache); 4009 put_page(swapcache); 4010 } 4011 if (si) 4012 put_swap_device(si); 4013 return ret; 4014 } 4015 4016 /* 4017 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4018 * but allow concurrent faults), and pte mapped but not yet locked. 4019 * We return with mmap_lock still held, but pte unmapped and unlocked. 4020 */ 4021 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 4022 { 4023 struct vm_area_struct *vma = vmf->vma; 4024 struct page *page; 4025 vm_fault_t ret = 0; 4026 pte_t entry; 4027 4028 /* File mapping without ->vm_ops ? */ 4029 if (vma->vm_flags & VM_SHARED) 4030 return VM_FAULT_SIGBUS; 4031 4032 /* 4033 * Use pte_alloc() instead of pte_alloc_map(). We can't run 4034 * pte_offset_map() on pmds where a huge pmd might be created 4035 * from a different thread. 
4036 * 4037 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 4038 * parallel threads are excluded by other means. 4039 * 4040 * Here we only have mmap_read_lock(mm). 4041 */ 4042 if (pte_alloc(vma->vm_mm, vmf->pmd)) 4043 return VM_FAULT_OOM; 4044 4045 /* See comment in handle_pte_fault() */ 4046 if (unlikely(pmd_trans_unstable(vmf->pmd))) 4047 return 0; 4048 4049 /* Use the zero-page for reads */ 4050 if (!(vmf->flags & FAULT_FLAG_WRITE) && 4051 !mm_forbids_zeropage(vma->vm_mm)) { 4052 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 4053 vma->vm_page_prot)); 4054 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4055 vmf->address, &vmf->ptl); 4056 if (!pte_none(*vmf->pte)) { 4057 update_mmu_tlb(vma, vmf->address, vmf->pte); 4058 goto unlock; 4059 } 4060 ret = check_stable_address_space(vma->vm_mm); 4061 if (ret) 4062 goto unlock; 4063 /* Deliver the page fault to userland, check inside PT lock */ 4064 if (userfaultfd_missing(vma)) { 4065 pte_unmap_unlock(vmf->pte, vmf->ptl); 4066 return handle_userfault(vmf, VM_UFFD_MISSING); 4067 } 4068 goto setpte; 4069 } 4070 4071 /* Allocate our own private page. */ 4072 if (unlikely(anon_vma_prepare(vma))) 4073 goto oom; 4074 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); 4075 if (!page) 4076 goto oom; 4077 4078 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) 4079 goto oom_free_page; 4080 cgroup_throttle_swaprate(page, GFP_KERNEL); 4081 4082 /* 4083 * The memory barrier inside __SetPageUptodate makes sure that 4084 * preceding stores to the page contents become visible before 4085 * the set_pte_at() write. 4086 */ 4087 __SetPageUptodate(page); 4088 4089 entry = mk_pte(page, vma->vm_page_prot); 4090 entry = pte_sw_mkyoung(entry); 4091 if (vma->vm_flags & VM_WRITE) 4092 entry = pte_mkwrite(pte_mkdirty(entry)); 4093 4094 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4095 &vmf->ptl); 4096 if (!pte_none(*vmf->pte)) { 4097 update_mmu_cache(vma, vmf->address, vmf->pte); 4098 goto release; 4099 } 4100 4101 ret = check_stable_address_space(vma->vm_mm); 4102 if (ret) 4103 goto release; 4104 4105 /* Deliver the page fault to userland, check inside PT lock */ 4106 if (userfaultfd_missing(vma)) { 4107 pte_unmap_unlock(vmf->pte, vmf->ptl); 4108 put_page(page); 4109 return handle_userfault(vmf, VM_UFFD_MISSING); 4110 } 4111 4112 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 4113 page_add_new_anon_rmap(page, vma, vmf->address); 4114 lru_cache_add_inactive_or_unevictable(page, vma); 4115 setpte: 4116 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 4117 4118 /* No need to invalidate - it was non-present before */ 4119 update_mmu_cache(vma, vmf->address, vmf->pte); 4120 unlock: 4121 pte_unmap_unlock(vmf->pte, vmf->ptl); 4122 return ret; 4123 release: 4124 put_page(page); 4125 goto unlock; 4126 oom_free_page: 4127 put_page(page); 4128 oom: 4129 return VM_FAULT_OOM; 4130 } 4131 4132 /* 4133 * The mmap_lock must have been held on entry, and may have been 4134 * released depending on flags and vma->vm_ops->fault() return value. 4135 * See filemap_fault() and __folio_lock_or_retry().
4136 */ 4137 static vm_fault_t __do_fault(struct vm_fault *vmf) 4138 { 4139 struct vm_area_struct *vma = vmf->vma; 4140 vm_fault_t ret; 4141 4142 /* 4143 * Preallocate pte before we take page_lock because this might lead to 4144 * deadlocks for memcg reclaim which waits for pages under writeback: 4145 * lock_page(A) 4146 * SetPageWriteback(A) 4147 * unlock_page(A) 4148 * lock_page(B) 4149 * lock_page(B) 4150 * pte_alloc_one 4151 * shrink_page_list 4152 * wait_on_page_writeback(A) 4153 * SetPageWriteback(B) 4154 * unlock_page(B) 4155 * # flush A, B to clear the writeback 4156 */ 4157 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 4158 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4159 if (!vmf->prealloc_pte) 4160 return VM_FAULT_OOM; 4161 } 4162 4163 ret = vma->vm_ops->fault(vmf); 4164 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 4165 VM_FAULT_DONE_COW))) 4166 return ret; 4167 4168 if (unlikely(PageHWPoison(vmf->page))) { 4169 struct page *page = vmf->page; 4170 vm_fault_t poisonret = VM_FAULT_HWPOISON; 4171 if (ret & VM_FAULT_LOCKED) { 4172 if (page_mapped(page)) 4173 unmap_mapping_pages(page_mapping(page), 4174 page->index, 1, false); 4175 /* Retry if a clean page was removed from the cache. */ 4176 if (invalidate_inode_page(page)) 4177 poisonret = VM_FAULT_NOPAGE; 4178 unlock_page(page); 4179 } 4180 put_page(page); 4181 vmf->page = NULL; 4182 return poisonret; 4183 } 4184 4185 if (unlikely(!(ret & VM_FAULT_LOCKED))) 4186 lock_page(vmf->page); 4187 else 4188 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); 4189 4190 return ret; 4191 } 4192 4193 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4194 static void deposit_prealloc_pte(struct vm_fault *vmf) 4195 { 4196 struct vm_area_struct *vma = vmf->vma; 4197 4198 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 4199 /* 4200 * We are going to consume the prealloc table, 4201 * count that as nr_ptes. 4202 */ 4203 mm_inc_nr_ptes(vma->vm_mm); 4204 vmf->prealloc_pte = NULL; 4205 } 4206 4207 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4208 { 4209 struct vm_area_struct *vma = vmf->vma; 4210 bool write = vmf->flags & FAULT_FLAG_WRITE; 4211 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 4212 pmd_t entry; 4213 int i; 4214 vm_fault_t ret = VM_FAULT_FALLBACK; 4215 4216 if (!transhuge_vma_suitable(vma, haddr)) 4217 return ret; 4218 4219 page = compound_head(page); 4220 if (compound_order(page) != HPAGE_PMD_ORDER) 4221 return ret; 4222 4223 /* 4224 * Just back off if any subpage of a THP is corrupted; otherwise 4225 * the corrupted page may be mapped by PMD silently to escape the 4226 * check. This kind of THP can only be PTE mapped. Access to 4227 * the corrupted subpage should trigger SIGBUS as expected. 4228 */ 4229 if (unlikely(PageHasHWPoisoned(page))) 4230 return ret; 4231 4232 /* 4233 * Archs like ppc64 need additional space to store information 4234 * related to pte entry. Use the preallocated table for that.
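* The deposited table is consumed under the pmd lock below and gets * withdrawn again if the huge mapping is ever split.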
4235 */ 4236 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 4237 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4238 if (!vmf->prealloc_pte) 4239 return VM_FAULT_OOM; 4240 } 4241 4242 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 4243 if (unlikely(!pmd_none(*vmf->pmd))) 4244 goto out; 4245 4246 for (i = 0; i < HPAGE_PMD_NR; i++) 4247 flush_icache_page(vma, page + i); 4248 4249 entry = mk_huge_pmd(page, vma->vm_page_prot); 4250 if (write) 4251 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 4252 4253 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); 4254 page_add_file_rmap(page, vma, true); 4255 4256 /* 4257 * deposit and withdraw with pmd lock held 4258 */ 4259 if (arch_needs_pgtable_deposit()) 4260 deposit_prealloc_pte(vmf); 4261 4262 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 4263 4264 update_mmu_cache_pmd(vma, haddr, vmf->pmd); 4265 4266 /* fault is handled */ 4267 ret = 0; 4268 count_vm_event(THP_FILE_MAPPED); 4269 out: 4270 spin_unlock(vmf->ptl); 4271 return ret; 4272 } 4273 #else 4274 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4275 { 4276 return VM_FAULT_FALLBACK; 4277 } 4278 #endif 4279 4280 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) 4281 { 4282 struct vm_area_struct *vma = vmf->vma; 4283 bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte); 4284 bool write = vmf->flags & FAULT_FLAG_WRITE; 4285 bool prefault = vmf->address != addr; 4286 pte_t entry; 4287 4288 flush_icache_page(vma, page); 4289 entry = mk_pte(page, vma->vm_page_prot); 4290 4291 if (prefault && arch_wants_old_prefaulted_pte()) 4292 entry = pte_mkold(entry); 4293 else 4294 entry = pte_sw_mkyoung(entry); 4295 4296 if (write) 4297 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 4298 if (unlikely(uffd_wp)) 4299 entry = pte_mkuffd_wp(pte_wrprotect(entry)); 4300 /* copy-on-write page */ 4301 if (write && !(vma->vm_flags & VM_SHARED)) { 4302 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 4303 page_add_new_anon_rmap(page, vma, addr); 4304 lru_cache_add_inactive_or_unevictable(page, vma); 4305 } else { 4306 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); 4307 page_add_file_rmap(page, vma, false); 4308 } 4309 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); 4310 } 4311 4312 static bool vmf_pte_changed(struct vm_fault *vmf) 4313 { 4314 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) 4315 return !pte_same(*vmf->pte, vmf->orig_pte); 4316 4317 return !pte_none(*vmf->pte); 4318 } 4319 4320 /** 4321 * finish_fault - finish page fault once we have prepared the page to fault 4322 * 4323 * @vmf: structure describing the fault 4324 * 4325 * This function handles all that is needed to finish a page fault once the 4326 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for 4327 * given page, adds reverse page mapping, handles memcg charges and LRU 4328 * addition. 4329 * 4330 * The function expects the page to be locked and on success it consumes a 4331 * reference of a page being mapped (for the PTE which maps it). 4332 * 4333 * Return: %0 on success, %VM_FAULT_ code in case of error. 4334 */ 4335 vm_fault_t finish_fault(struct vm_fault *vmf) 4336 { 4337 struct vm_area_struct *vma = vmf->vma; 4338 struct page *page; 4339 vm_fault_t ret; 4340 4341 /* Did we COW the page? 
*/ 4342 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 4343 page = vmf->cow_page; 4344 else 4345 page = vmf->page; 4346 4347 /* 4348 * Check even for read faults because we might have lost our CoWed 4349 * page. 4350 */ 4351 if (!(vma->vm_flags & VM_SHARED)) { 4352 ret = check_stable_address_space(vma->vm_mm); 4353 if (ret) 4354 return ret; 4355 } 4356 4357 if (pmd_none(*vmf->pmd)) { 4358 if (PageTransCompound(page)) { 4359 ret = do_set_pmd(vmf, page); 4360 if (ret != VM_FAULT_FALLBACK) 4361 return ret; 4362 } 4363 4364 if (vmf->prealloc_pte) 4365 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 4366 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 4367 return VM_FAULT_OOM; 4368 } 4369 4370 /* 4371 * See comment in handle_pte_fault() for how this scenario happens; we 4372 * need to return NOPAGE so that we drop this page. 4373 */ 4374 if (pmd_devmap_trans_unstable(vmf->pmd)) 4375 return VM_FAULT_NOPAGE; 4376 4377 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4378 vmf->address, &vmf->ptl); 4379 4380 /* Re-check under ptl */ 4381 if (likely(!vmf_pte_changed(vmf))) { 4382 do_set_pte(vmf, page, vmf->address); 4383 4384 /* no need to invalidate: a not-present page won't be cached */ 4385 update_mmu_cache(vma, vmf->address, vmf->pte); 4386 4387 ret = 0; 4388 } else { 4389 update_mmu_tlb(vma, vmf->address, vmf->pte); 4390 ret = VM_FAULT_NOPAGE; 4391 } 4392 4393 pte_unmap_unlock(vmf->pte, vmf->ptl); 4394 return ret; 4395 } 4396 4397 static unsigned long fault_around_bytes __read_mostly = 4398 rounddown_pow_of_two(65536); 4399 4400 #ifdef CONFIG_DEBUG_FS 4401 static int fault_around_bytes_get(void *data, u64 *val) 4402 { 4403 *val = fault_around_bytes; 4404 return 0; 4405 } 4406 4407 /* 4408 * fault_around_bytes must be rounded down to the nearest page order, as that 4409 * is what do_fault_around() expects to see. 4410 */ 4411 static int fault_around_bytes_set(void *data, u64 val) 4412 { 4413 if (val / PAGE_SIZE > PTRS_PER_PTE) 4414 return -EINVAL; 4415 if (val > PAGE_SIZE) 4416 fault_around_bytes = rounddown_pow_of_two(val); 4417 else 4418 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ 4419 return 0; 4420 } 4421 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 4422 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 4423 4424 static int __init fault_around_debugfs(void) 4425 { 4426 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 4427 &fault_around_bytes_fops); 4428 return 0; 4429 } 4430 late_initcall(fault_around_debugfs); 4431 #endif 4432 4433 /* 4434 * do_fault_around() tries to map a few pages around the fault address. The hope 4435 * is that the pages will be needed soon and this will lower the number of 4436 * faults to handle. 4437 * 4438 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 4439 * not ready to be mapped: not up-to-date, locked, etc. 4440 * 4441 * This function doesn't cross the VMA boundaries, in order to call map_pages() 4442 * only once. 4443 * 4444 * fault_around_bytes defines how many bytes we'll try to map. 4445 * do_fault_around() expects it to be set to a power of two less than or equal 4446 * to PTRS_PER_PTE. 4447 * 4448 * The virtual address of the area that we map is naturally aligned to 4449 * fault_around_bytes rounded down to the machine page size 4450 * (and therefore to page order). This way it's easier to guarantee 4451 * that we don't cross page table boundaries.
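* * A worked example, assuming 4K pages and the default fault_around_bytes * of 65536: a read fault at 0x12345678 is served by mapping the * sixteen-page window starting at 0x12340000, clamped to the VMA * boundaries and to the page table that holds the faulting pte.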
4452 */ 4453 static vm_fault_t do_fault_around(struct vm_fault *vmf) 4454 { 4455 unsigned long address = vmf->address, nr_pages, mask; 4456 pgoff_t start_pgoff = vmf->pgoff; 4457 pgoff_t end_pgoff; 4458 int off; 4459 4460 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; 4461 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; 4462 4463 address = max(address & mask, vmf->vma->vm_start); 4464 off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 4465 start_pgoff -= off; 4466 4467 /* 4468 * end_pgoff is either the end of the page table, the end of 4469 * the vma or nr_pages from start_pgoff, depending what is nearest. 4470 */ 4471 end_pgoff = start_pgoff - 4472 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + 4473 PTRS_PER_PTE - 1; 4474 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, 4475 start_pgoff + nr_pages - 1); 4476 4477 if (pmd_none(*vmf->pmd)) { 4478 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 4479 if (!vmf->prealloc_pte) 4480 return VM_FAULT_OOM; 4481 } 4482 4483 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); 4484 } 4485 4486 /* Return true if we should do read fault-around, false otherwise */ 4487 static inline bool should_fault_around(struct vm_fault *vmf) 4488 { 4489 /* No ->map_pages? No way to fault around... */ 4490 if (!vmf->vma->vm_ops->map_pages) 4491 return false; 4492 4493 if (uffd_disable_fault_around(vmf->vma)) 4494 return false; 4495 4496 return fault_around_bytes >> PAGE_SHIFT > 1; 4497 } 4498 4499 static vm_fault_t do_read_fault(struct vm_fault *vmf) 4500 { 4501 vm_fault_t ret = 0; 4502 4503 /* 4504 * Let's call ->map_pages() first and use ->fault() as fallback 4505 * if page by the offset is not ready to be mapped (cold cache or 4506 * something). 4507 */ 4508 if (should_fault_around(vmf)) { 4509 ret = do_fault_around(vmf); 4510 if (ret) 4511 return ret; 4512 } 4513 4514 ret = __do_fault(vmf); 4515 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4516 return ret; 4517 4518 ret |= finish_fault(vmf); 4519 unlock_page(vmf->page); 4520 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4521 put_page(vmf->page); 4522 return ret; 4523 } 4524 4525 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 4526 { 4527 struct vm_area_struct *vma = vmf->vma; 4528 vm_fault_t ret; 4529 4530 if (unlikely(anon_vma_prepare(vma))) 4531 return VM_FAULT_OOM; 4532 4533 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 4534 if (!vmf->cow_page) 4535 return VM_FAULT_OOM; 4536 4537 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, 4538 GFP_KERNEL)) { 4539 put_page(vmf->cow_page); 4540 return VM_FAULT_OOM; 4541 } 4542 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL); 4543 4544 ret = __do_fault(vmf); 4545 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4546 goto uncharge_out; 4547 if (ret & VM_FAULT_DONE_COW) 4548 return ret; 4549 4550 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 4551 __SetPageUptodate(vmf->cow_page); 4552 4553 ret |= finish_fault(vmf); 4554 unlock_page(vmf->page); 4555 put_page(vmf->page); 4556 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4557 goto uncharge_out; 4558 return ret; 4559 uncharge_out: 4560 put_page(vmf->cow_page); 4561 return ret; 4562 } 4563 4564 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 4565 { 4566 struct vm_area_struct *vma = vmf->vma; 4567 vm_fault_t ret, tmp; 4568 4569 ret = __do_fault(vmf); 4570 if (unlikely(ret & 
(VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4571 return ret; 4572 4573 /* 4574 * Check if the backing address space wants to know that the page is 4575 * about to become writable 4576 */ 4577 if (vma->vm_ops->page_mkwrite) { 4578 unlock_page(vmf->page); 4579 tmp = do_page_mkwrite(vmf); 4580 if (unlikely(!tmp || 4581 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 4582 put_page(vmf->page); 4583 return tmp; 4584 } 4585 } 4586 4587 ret |= finish_fault(vmf); 4588 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 4589 VM_FAULT_RETRY))) { 4590 unlock_page(vmf->page); 4591 put_page(vmf->page); 4592 return ret; 4593 } 4594 4595 ret |= fault_dirty_shared_page(vmf); 4596 return ret; 4597 } 4598 4599 /* 4600 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4601 * but allow concurrent faults). 4602 * The mmap_lock may have been released depending on flags and our 4603 * return value. See filemap_fault() and __folio_lock_or_retry(). 4604 * If mmap_lock is released, vma may become invalid (for example 4605 * by other thread calling munmap()). 4606 */ 4607 static vm_fault_t do_fault(struct vm_fault *vmf) 4608 { 4609 struct vm_area_struct *vma = vmf->vma; 4610 struct mm_struct *vm_mm = vma->vm_mm; 4611 vm_fault_t ret; 4612 4613 /* 4614 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND 4615 */ 4616 if (!vma->vm_ops->fault) { 4617 /* 4618 * If we find a migration pmd entry or a none pmd entry, which 4619 * should never happen, return SIGBUS 4620 */ 4621 if (unlikely(!pmd_present(*vmf->pmd))) 4622 ret = VM_FAULT_SIGBUS; 4623 else { 4624 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, 4625 vmf->pmd, 4626 vmf->address, 4627 &vmf->ptl); 4628 /* 4629 * Make sure this is not a temporary clearing of pte 4630 * by holding ptl and checking again. A R/M/W update 4631 * of pte involves: take ptl, clearing the pte so that 4632 * we don't have concurrent modification by hardware 4633 * followed by an update. 4634 */ 4635 if (unlikely(pte_none(*vmf->pte))) 4636 ret = VM_FAULT_SIGBUS; 4637 else 4638 ret = VM_FAULT_NOPAGE; 4639 4640 pte_unmap_unlock(vmf->pte, vmf->ptl); 4641 } 4642 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) 4643 ret = do_read_fault(vmf); 4644 else if (!(vma->vm_flags & VM_SHARED)) 4645 ret = do_cow_fault(vmf); 4646 else 4647 ret = do_shared_fault(vmf); 4648 4649 /* preallocated pagetable is unused: free it */ 4650 if (vmf->prealloc_pte) { 4651 pte_free(vm_mm, vmf->prealloc_pte); 4652 vmf->prealloc_pte = NULL; 4653 } 4654 return ret; 4655 } 4656 4657 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 4658 unsigned long addr, int page_nid, int *flags) 4659 { 4660 get_page(page); 4661 4662 count_vm_numa_event(NUMA_HINT_FAULTS); 4663 if (page_nid == numa_node_id()) { 4664 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 4665 *flags |= TNF_FAULT_LOCAL; 4666 } 4667 4668 return mpol_misplaced(page, vma, addr); 4669 } 4670 4671 static vm_fault_t do_numa_page(struct vm_fault *vmf) 4672 { 4673 struct vm_area_struct *vma = vmf->vma; 4674 struct page *page = NULL; 4675 int page_nid = NUMA_NO_NODE; 4676 int last_cpupid; 4677 int target_nid; 4678 pte_t pte, old_pte; 4679 bool was_writable = pte_savedwrite(vmf->orig_pte); 4680 int flags = 0; 4681 4682 /* 4683 * The "pte" at this point cannot be used safely without 4684 * validation through pte_unmap_same(). It's of NUMA type but 4685 * the pfn may be screwed if the read is non atomic. 
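* We therefore recheck pte_same() under the pte lock below before * acting on the entry.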
4686 */ 4687 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); 4688 spin_lock(vmf->ptl); 4689 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 4690 pte_unmap_unlock(vmf->pte, vmf->ptl); 4691 goto out; 4692 } 4693 4694 /* Get the normal PTE */ 4695 old_pte = ptep_get(vmf->pte); 4696 pte = pte_modify(old_pte, vma->vm_page_prot); 4697 4698 page = vm_normal_page(vma, vmf->address, pte); 4699 if (!page || is_zone_device_page(page)) 4700 goto out_map; 4701 4702 /* TODO: handle PTE-mapped THP */ 4703 if (PageCompound(page)) 4704 goto out_map; 4705 4706 /* 4707 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 4708 * much anyway since they can be in shared cache state. This misses 4709 * the case where a mapping is writable but the process never writes 4710 * to it but pte_write gets cleared during protection updates and 4711 * pte_dirty has unpredictable behaviour between PTE scan updates, 4712 * background writeback, dirty balancing and application behaviour. 4713 */ 4714 if (!was_writable) 4715 flags |= TNF_NO_GROUP; 4716 4717 /* 4718 * Flag if the page is shared between multiple address spaces. This 4719 * is later used when determining whether to group tasks together 4720 */ 4721 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) 4722 flags |= TNF_SHARED; 4723 4724 page_nid = page_to_nid(page); 4725 /* 4726 * For memory tiering mode, cpupid of slow memory page is used 4727 * to record page access time. So use default value. 4728 */ 4729 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4730 !node_is_toptier(page_nid)) 4731 last_cpupid = (-1 & LAST_CPUPID_MASK); 4732 else 4733 last_cpupid = page_cpupid_last(page); 4734 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, 4735 &flags); 4736 if (target_nid == NUMA_NO_NODE) { 4737 put_page(page); 4738 goto out_map; 4739 } 4740 pte_unmap_unlock(vmf->pte, vmf->ptl); 4741 4742 /* Migrate to the requested node */ 4743 if (migrate_misplaced_page(page, vma, target_nid)) { 4744 page_nid = target_nid; 4745 flags |= TNF_MIGRATED; 4746 } else { 4747 flags |= TNF_MIGRATE_FAIL; 4748 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 4749 spin_lock(vmf->ptl); 4750 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 4751 pte_unmap_unlock(vmf->pte, vmf->ptl); 4752 goto out; 4753 } 4754 goto out_map; 4755 } 4756 4757 out: 4758 if (page_nid != NUMA_NO_NODE) 4759 task_numa_fault(last_cpupid, page_nid, 1, flags); 4760 return 0; 4761 out_map: 4762 /* 4763 * Make it present again, depending on how arch implements 4764 * non-accessible ptes, some can allow access by kernel mode. 
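* The fault was only a NUMA placement hint; restore the original * protections and let the access proceed on the current node.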
4765 */ 4766 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); 4767 pte = pte_modify(old_pte, vma->vm_page_prot); 4768 pte = pte_mkyoung(pte); 4769 if (was_writable) 4770 pte = pte_mkwrite(pte); 4771 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); 4772 update_mmu_cache(vma, vmf->address, vmf->pte); 4773 pte_unmap_unlock(vmf->pte, vmf->ptl); 4774 goto out; 4775 } 4776 4777 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 4778 { 4779 if (vma_is_anonymous(vmf->vma)) 4780 return do_huge_pmd_anonymous_page(vmf); 4781 if (vmf->vma->vm_ops->huge_fault) 4782 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 4783 return VM_FAULT_FALLBACK; 4784 } 4785 4786 /* `inline' is required to avoid gcc 4.1.2 build error */ 4787 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 4788 { 4789 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 4790 4791 if (vma_is_anonymous(vmf->vma)) { 4792 if (likely(!unshare) && 4793 userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) 4794 return handle_userfault(vmf, VM_UFFD_WP); 4795 return do_huge_pmd_wp_page(vmf); 4796 } 4797 if (vmf->vma->vm_ops->huge_fault) { 4798 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 4799 4800 if (!(ret & VM_FAULT_FALLBACK)) 4801 return ret; 4802 } 4803 4804 /* COW or write-notify handled on pte level: split pmd. */ 4805 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); 4806 4807 return VM_FAULT_FALLBACK; 4808 } 4809 4810 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 4811 { 4812 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4813 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4814 /* No support for anonymous transparent PUD pages yet */ 4815 if (vma_is_anonymous(vmf->vma)) 4816 return VM_FAULT_FALLBACK; 4817 if (vmf->vma->vm_ops->huge_fault) 4818 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 4819 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4820 return VM_FAULT_FALLBACK; 4821 } 4822 4823 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 4824 { 4825 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4826 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4827 /* No support for anonymous transparent PUD pages yet */ 4828 if (vma_is_anonymous(vmf->vma)) 4829 goto split; 4830 if (vmf->vma->vm_ops->huge_fault) { 4831 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 4832 4833 if (!(ret & VM_FAULT_FALLBACK)) 4834 return ret; 4835 } 4836 split: 4837 /* COW or write-notify not handled on PUD level: split pud.*/ 4838 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); 4839 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 4840 return VM_FAULT_FALLBACK; 4841 } 4842 4843 /* 4844 * These routines also need to handle stuff like marking pages dirty 4845 * and/or accessed for architectures that don't do it in hardware (most 4846 * RISC architectures). The early dirtying is also good on the i386. 4847 * 4848 * There is also a hook called "update_mmu_cache()" that architectures 4849 * with external mmu caches can use to update those (ie the Sparc or 4850 * PowerPC hashed page tables that act as extended TLBs). 4851 * 4852 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow 4853 * concurrent faults). 4854 * 4855 * The mmap_lock may have been released depending on flags and our return value. 4856 * See filemap_fault() and __folio_lock_or_retry(). 
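* * Dispatch summary for handle_pte_fault(): an empty pte goes to * do_anonymous_page() or do_fault(); a non-present pte goes to * do_swap_page(); a NUMA-hinting pte goes to do_numa_page(); anything * else is an access-bit/dirty-bit or write-protect update done below * under the pte lock.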
4857 */ 4858 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) 4859 { 4860 pte_t entry; 4861 4862 if (unlikely(pmd_none(*vmf->pmd))) { 4863 /* 4864 * Leave __pte_alloc() until later: because vm_ops->fault may 4865 * want to allocate huge page, and if we expose page table 4866 * for an instant, it will be difficult to retract from 4867 * concurrent faults and from rmap lookups. 4868 */ 4869 vmf->pte = NULL; 4870 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; 4871 } else { 4872 /* 4873 * If a huge pmd materialized under us just retry later. Use 4874 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead 4875 * of pmd_trans_huge() to ensure the pmd didn't become 4876 * pmd_trans_huge under us and then back to pmd_none, as a 4877 * result of MADV_DONTNEED running immediately after a huge pmd 4878 * fault in a different thread of this mm, in turn leading to a 4879 * misleading pmd_trans_huge() retval. All we have to ensure is 4880 * that it is a regular pmd that we can walk with 4881 * pte_offset_map() and we can do that through an atomic read 4882 * in C, which is what pmd_trans_unstable() provides. 4883 */ 4884 if (pmd_devmap_trans_unstable(vmf->pmd)) 4885 return 0; 4886 /* 4887 * A regular pmd is established and it can't morph into a huge 4888 * pmd from under us anymore at this point because we hold the 4889 * mmap_lock read mode and khugepaged takes it in write mode. 4890 * So now it's safe to run pte_offset_map(). 4891 */ 4892 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 4893 vmf->orig_pte = *vmf->pte; 4894 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 4895 4896 /* 4897 * some architectures can have larger ptes than wordsize, 4898 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and 4899 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic 4900 * accesses. The code below just needs a consistent view 4901 * for the ifs and we later double check anyway with the 4902 * ptl lock held. So here a barrier will do. 4903 */ 4904 barrier(); 4905 if (pte_none(vmf->orig_pte)) { 4906 pte_unmap(vmf->pte); 4907 vmf->pte = NULL; 4908 } 4909 } 4910 4911 if (!vmf->pte) { 4912 if (vma_is_anonymous(vmf->vma)) 4913 return do_anonymous_page(vmf); 4914 else 4915 return do_fault(vmf); 4916 } 4917 4918 if (!pte_present(vmf->orig_pte)) 4919 return do_swap_page(vmf); 4920 4921 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 4922 return do_numa_page(vmf); 4923 4924 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); 4925 spin_lock(vmf->ptl); 4926 entry = vmf->orig_pte; 4927 if (unlikely(!pte_same(*vmf->pte, entry))) { 4928 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 4929 goto unlock; 4930 } 4931 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 4932 if (!pte_write(entry)) 4933 return do_wp_page(vmf); 4934 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 4935 entry = pte_mkdirty(entry); 4936 } 4937 entry = pte_mkyoung(entry); 4938 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 4939 vmf->flags & FAULT_FLAG_WRITE)) { 4940 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); 4941 } else { 4942 /* Skip spurious TLB flush for retried page fault */ 4943 if (vmf->flags & FAULT_FLAG_TRIED) 4944 goto unlock; 4945 /* 4946 * This is needed only for protection faults but the arch code 4947 * is not yet telling us if this is a protection fault or not. 4948 * This still avoids useless tlb flushes for .text page faults 4949 * with threads. 
4950 */ 4951 if (vmf->flags & FAULT_FLAG_WRITE) 4952 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); 4953 } 4954 unlock: 4955 pte_unmap_unlock(vmf->pte, vmf->ptl); 4956 return 0; 4957 } 4958 4959 /* 4960 * By the time we get here, we already hold the mm semaphore 4961 * 4962 * The mmap_lock may have been released depending on flags and our 4963 * return value. See filemap_fault() and __folio_lock_or_retry(). 4964 */ 4965 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 4966 unsigned long address, unsigned int flags) 4967 { 4968 struct vm_fault vmf = { 4969 .vma = vma, 4970 .address = address & PAGE_MASK, 4971 .real_address = address, 4972 .flags = flags, 4973 .pgoff = linear_page_index(vma, address), 4974 .gfp_mask = __get_fault_gfp_mask(vma), 4975 }; 4976 struct mm_struct *mm = vma->vm_mm; 4977 unsigned long vm_flags = vma->vm_flags; 4978 pgd_t *pgd; 4979 p4d_t *p4d; 4980 vm_fault_t ret; 4981 4982 pgd = pgd_offset(mm, address); 4983 p4d = p4d_alloc(mm, pgd, address); 4984 if (!p4d) 4985 return VM_FAULT_OOM; 4986 4987 vmf.pud = pud_alloc(mm, p4d, address); 4988 if (!vmf.pud) 4989 return VM_FAULT_OOM; 4990 retry_pud: 4991 if (pud_none(*vmf.pud) && 4992 hugepage_vma_check(vma, vm_flags, false, true, true)) { 4993 ret = create_huge_pud(&vmf); 4994 if (!(ret & VM_FAULT_FALLBACK)) 4995 return ret; 4996 } else { 4997 pud_t orig_pud = *vmf.pud; 4998 4999 barrier(); 5000 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 5001 5002 /* 5003 * TODO once we support anonymous PUDs: NUMA case and 5004 * FAULT_FLAG_UNSHARE handling. 5005 */ 5006 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 5007 ret = wp_huge_pud(&vmf, orig_pud); 5008 if (!(ret & VM_FAULT_FALLBACK)) 5009 return ret; 5010 } else { 5011 huge_pud_set_accessed(&vmf, orig_pud); 5012 return 0; 5013 } 5014 } 5015 } 5016 5017 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 5018 if (!vmf.pmd) 5019 return VM_FAULT_OOM; 5020 5021 /* Huge pud page fault raced with pmd_alloc? */ 5022 if (pud_trans_unstable(vmf.pud)) 5023 goto retry_pud; 5024 5025 if (pmd_none(*vmf.pmd) && 5026 hugepage_vma_check(vma, vm_flags, false, true, true)) { 5027 ret = create_huge_pmd(&vmf); 5028 if (!(ret & VM_FAULT_FALLBACK)) 5029 return ret; 5030 } else { 5031 vmf.orig_pmd = *vmf.pmd; 5032 5033 barrier(); 5034 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { 5035 VM_BUG_ON(thp_migration_supported() && 5036 !is_pmd_migration_entry(vmf.orig_pmd)); 5037 if (is_pmd_migration_entry(vmf.orig_pmd)) 5038 pmd_migration_entry_wait(mm, vmf.pmd); 5039 return 0; 5040 } 5041 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { 5042 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 5043 return do_huge_pmd_numa_page(&vmf); 5044 5045 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 5046 !pmd_write(vmf.orig_pmd)) { 5047 ret = wp_huge_pmd(&vmf); 5048 if (!(ret & VM_FAULT_FALLBACK)) 5049 return ret; 5050 } else { 5051 huge_pmd_set_accessed(&vmf); 5052 return 0; 5053 } 5054 } 5055 } 5056 5057 return handle_pte_fault(&vmf); 5058 } 5059 5060 /** 5061 * mm_account_fault - Do page fault accounting 5062 * 5063 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 5064 * of perf event counters, but we'll still do the per-task accounting to 5065 * the task who triggered this page fault. 5066 * @address: the faulted address. 5067 * @flags: the fault flags. 5068 * @ret: the fault retcode. 5069 * 5070 * This will take care of most of the page fault accounting. 
Meanwhile, it 5071 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 5072 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 5073 * still be in per-arch page fault handlers at the entry of page fault. 5074 */ 5075 static inline void mm_account_fault(struct pt_regs *regs, 5076 unsigned long address, unsigned int flags, 5077 vm_fault_t ret) 5078 { 5079 bool major; 5080 5081 /* 5082 * We don't do accounting for some specific faults: 5083 * 5084 * - Unsuccessful faults (e.g. when the address wasn't valid). That 5085 * includes arch_vma_access_permitted() failing before reaching here. 5086 * So this is not a "this many hardware page faults" counter. We 5087 * should use the hw profiling for that. 5088 * 5089 * - Incomplete faults (VM_FAULT_RETRY). They will only be counted 5090 * once they're completed. 5091 */ 5092 if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) 5093 return; 5094 5095 /* 5096 * We define the fault as a major fault when the final successful fault 5097 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 5098 * handle it immediately previously). 5099 */ 5100 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); 5101 5102 if (major) 5103 current->maj_flt++; 5104 else 5105 current->min_flt++; 5106 5107 /* 5108 * If the fault is done for GUP, regs will be NULL. We only do the 5109 * accounting for the per thread fault counters who triggered the 5110 * fault, and we skip the perf event updates. 5111 */ 5112 if (!regs) 5113 return; 5114 5115 if (major) 5116 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 5117 else 5118 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 5119 } 5120 5121 #ifdef CONFIG_LRU_GEN 5122 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5123 { 5124 /* the LRU algorithm doesn't apply to sequential or random reads */ 5125 current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)); 5126 } 5127 5128 static void lru_gen_exit_fault(void) 5129 { 5130 current->in_lru_fault = false; 5131 } 5132 #else 5133 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5134 { 5135 } 5136 5137 static void lru_gen_exit_fault(void) 5138 { 5139 } 5140 #endif /* CONFIG_LRU_GEN */ 5141 5142 /* 5143 * By the time we get here, we already hold the mm semaphore 5144 * 5145 * The mmap_lock may have been released depending on flags and our 5146 * return value. See filemap_fault() and __folio_lock_or_retry(). 5147 */ 5148 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 5149 unsigned int flags, struct pt_regs *regs) 5150 { 5151 vm_fault_t ret; 5152 5153 __set_current_state(TASK_RUNNING); 5154 5155 count_vm_event(PGFAULT); 5156 count_memcg_event_mm(vma->vm_mm, PGFAULT); 5157 5158 /* do counter updates before entering really critical section. */ 5159 check_sync_rss_stat(current); 5160 5161 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 5162 flags & FAULT_FLAG_INSTRUCTION, 5163 flags & FAULT_FLAG_REMOTE)) 5164 return VM_FAULT_SIGSEGV; 5165 5166 /* 5167 * Enable the memcg OOM handling for faults triggered in user 5168 * space. Kernel faults are handled more gracefully. 
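* A kernel fault can usually just fail the operation and return an * error (think of copy_from_user() returning -EFAULT) rather than * invoking the OOM machinery from here.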
5169 */ 5170 if (flags & FAULT_FLAG_USER) 5171 mem_cgroup_enter_user_fault(); 5172 5173 lru_gen_enter_fault(vma); 5174 5175 if (unlikely(is_vm_hugetlb_page(vma))) 5176 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 5177 else 5178 ret = __handle_mm_fault(vma, address, flags); 5179 5180 lru_gen_exit_fault(); 5181 5182 if (flags & FAULT_FLAG_USER) { 5183 mem_cgroup_exit_user_fault(); 5184 /* 5185 * The task may have entered a memcg OOM situation but 5186 * if the allocation error was handled gracefully (no 5187 * VM_FAULT_OOM), there is no need to kill anything. 5188 * Just clean up the OOM state peacefully. 5189 */ 5190 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 5191 mem_cgroup_oom_synchronize(false); 5192 } 5193 5194 mm_account_fault(regs, address, flags, ret); 5195 5196 return ret; 5197 } 5198 EXPORT_SYMBOL_GPL(handle_mm_fault); 5199 5200 #ifndef __PAGETABLE_P4D_FOLDED 5201 /* 5202 * Allocate p4d page table. 5203 * We've already handled the fast-path in-line. 5204 */ 5205 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 5206 { 5207 p4d_t *new = p4d_alloc_one(mm, address); 5208 if (!new) 5209 return -ENOMEM; 5210 5211 spin_lock(&mm->page_table_lock); 5212 if (pgd_present(*pgd)) { /* Another has populated it */ 5213 p4d_free(mm, new); 5214 } else { 5215 smp_wmb(); /* See comment in pmd_install() */ 5216 pgd_populate(mm, pgd, new); 5217 } 5218 spin_unlock(&mm->page_table_lock); 5219 return 0; 5220 } 5221 #endif /* __PAGETABLE_P4D_FOLDED */ 5222 5223 #ifndef __PAGETABLE_PUD_FOLDED 5224 /* 5225 * Allocate page upper directory. 5226 * We've already handled the fast-path in-line. 5227 */ 5228 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 5229 { 5230 pud_t *new = pud_alloc_one(mm, address); 5231 if (!new) 5232 return -ENOMEM; 5233 5234 spin_lock(&mm->page_table_lock); 5235 if (!p4d_present(*p4d)) { 5236 mm_inc_nr_puds(mm); 5237 smp_wmb(); /* See comment in pmd_install() */ 5238 p4d_populate(mm, p4d, new); 5239 } else /* Another has populated it */ 5240 pud_free(mm, new); 5241 spin_unlock(&mm->page_table_lock); 5242 return 0; 5243 } 5244 #endif /* __PAGETABLE_PUD_FOLDED */ 5245 5246 #ifndef __PAGETABLE_PMD_FOLDED 5247 /* 5248 * Allocate page middle directory. 5249 * We've already handled the fast-path in-line. 5250 */ 5251 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 5252 { 5253 spinlock_t *ptl; 5254 pmd_t *new = pmd_alloc_one(mm, address); 5255 if (!new) 5256 return -ENOMEM; 5257 5258 ptl = pud_lock(mm, pud); 5259 if (!pud_present(*pud)) { 5260 mm_inc_nr_pmds(mm); 5261 smp_wmb(); /* See comment in pmd_install() */ 5262 pud_populate(mm, pud, new); 5263 } else { /* Another has populated it */ 5264 pmd_free(mm, new); 5265 } 5266 spin_unlock(ptl); 5267 return 0; 5268 } 5269 #endif /* __PAGETABLE_PMD_FOLDED */ 5270 5271 /** 5272 * follow_pte - look up PTE at a user virtual address 5273 * @mm: the mm_struct of the target address space 5274 * @address: user virtual address 5275 * @ptepp: location to store found PTE 5276 * @ptlp: location to store the lock for the PTE 5277 * 5278 * On a successful return, the pointer to the PTE is stored in @ptepp; 5279 * the corresponding lock is taken and its location is stored in @ptlp. 5280 * The contents of the PTE are only stable until @ptlp is released; 5281 * any further use, if any, must be protected against invalidation 5282 * with MMU notifiers. 5283 * 5284 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore 5285 * should be taken for read. 
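* * A minimal caller sketch (error handling elided; this mirrors * follow_pfn() below): * * pte_t *ptep; * spinlock_t *ptl; * unsigned long pfn; * * mmap_read_lock(mm); * if (!follow_pte(mm, address, &ptep, &ptl)) { * pfn = pte_pfn(*ptep); * pte_unmap_unlock(ptep, ptl); * } * mmap_read_unlock(mm);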
5286 * 5287 * KVM uses this function. While it is arguably less bad than ``follow_pfn``, 5288 * it is not a good general-purpose API. 5289 * 5290 * Return: zero on success, -ve otherwise. 5291 */ 5292 int follow_pte(struct mm_struct *mm, unsigned long address, 5293 pte_t **ptepp, spinlock_t **ptlp) 5294 { 5295 pgd_t *pgd; 5296 p4d_t *p4d; 5297 pud_t *pud; 5298 pmd_t *pmd; 5299 pte_t *ptep; 5300 5301 pgd = pgd_offset(mm, address); 5302 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 5303 goto out; 5304 5305 p4d = p4d_offset(pgd, address); 5306 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 5307 goto out; 5308 5309 pud = pud_offset(p4d, address); 5310 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 5311 goto out; 5312 5313 pmd = pmd_offset(pud, address); 5314 VM_BUG_ON(pmd_trans_huge(*pmd)); 5315 5316 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 5317 goto out; 5318 5319 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 5320 if (!pte_present(*ptep)) 5321 goto unlock; 5322 *ptepp = ptep; 5323 return 0; 5324 unlock: 5325 pte_unmap_unlock(ptep, *ptlp); 5326 out: 5327 return -EINVAL; 5328 } 5329 EXPORT_SYMBOL_GPL(follow_pte); 5330 5331 /** 5332 * follow_pfn - look up PFN at a user virtual address 5333 * @vma: memory mapping 5334 * @address: user virtual address 5335 * @pfn: location to store found PFN 5336 * 5337 * Only IO mappings and raw PFN mappings are allowed. 5338 * 5339 * This function does not allow the caller to read the permissions 5340 * of the PTE. Do not use it. 5341 * 5342 * Return: zero and the pfn at @pfn on success, -ve otherwise. 5343 */ 5344 int follow_pfn(struct vm_area_struct *vma, unsigned long address, 5345 unsigned long *pfn) 5346 { 5347 int ret = -EINVAL; 5348 spinlock_t *ptl; 5349 pte_t *ptep; 5350 5351 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5352 return ret; 5353 5354 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 5355 if (ret) 5356 return ret; 5357 *pfn = pte_pfn(*ptep); 5358 pte_unmap_unlock(ptep, ptl); 5359 return 0; 5360 } 5361 EXPORT_SYMBOL(follow_pfn); 5362 5363 #ifdef CONFIG_HAVE_IOREMAP_PROT 5364 int follow_phys(struct vm_area_struct *vma, 5365 unsigned long address, unsigned int flags, 5366 unsigned long *prot, resource_size_t *phys) 5367 { 5368 int ret = -EINVAL; 5369 pte_t *ptep, pte; 5370 spinlock_t *ptl; 5371 5372 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5373 goto out; 5374 5375 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 5376 goto out; 5377 pte = *ptep; 5378 5379 if ((flags & FOLL_WRITE) && !pte_write(pte)) 5380 goto unlock; 5381 5382 *prot = pgprot_val(pte_pgprot(pte)); 5383 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5384 5385 ret = 0; 5386 unlock: 5387 pte_unmap_unlock(ptep, ptl); 5388 out: 5389 return ret; 5390 } 5391 5392 /** 5393 * generic_access_phys - generic implementation for iomem mmap access 5394 * @vma: the vma to access 5395 * @addr: userspace address, not relative offset within @vma 5396 * @buf: buffer to read/write 5397 * @len: length of transfer 5398 * @write: set to FOLL_WRITE when writing, otherwise reading 5399 * 5400 * This is a generic implementation for &vm_operations_struct.access for an 5401 * iomem mapping. This callback is used by access_process_vm() when the @vma is 5402 * not page based. 
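* * A sketch of how a driver would typically wire this up ("foo" is a * hypothetical name): * * static const struct vm_operations_struct foo_phys_vm_ops = { * .access = generic_access_phys, * };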
5403 */ 5404 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 5405 void *buf, int len, int write) 5406 { 5407 resource_size_t phys_addr; 5408 unsigned long prot = 0; 5409 void __iomem *maddr; 5410 pte_t *ptep, pte; 5411 spinlock_t *ptl; 5412 int offset = offset_in_page(addr); 5413 int ret = -EINVAL; 5414 5415 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5416 return -EINVAL; 5417 5418 retry: 5419 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5420 return -EINVAL; 5421 pte = *ptep; 5422 pte_unmap_unlock(ptep, ptl); 5423 5424 prot = pgprot_val(pte_pgprot(pte)); 5425 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5426 5427 if ((write & FOLL_WRITE) && !pte_write(pte)) 5428 return -EINVAL; 5429 5430 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 5431 if (!maddr) 5432 return -ENOMEM; 5433 5434 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5435 goto out_unmap; 5436 5437 if (!pte_same(pte, *ptep)) { 5438 pte_unmap_unlock(ptep, ptl); 5439 iounmap(maddr); 5440 5441 goto retry; 5442 } 5443 5444 if (write) 5445 memcpy_toio(maddr + offset, buf, len); 5446 else 5447 memcpy_fromio(buf, maddr + offset, len); 5448 ret = len; 5449 pte_unmap_unlock(ptep, ptl); 5450 out_unmap: 5451 iounmap(maddr); 5452 5453 return ret; 5454 } 5455 EXPORT_SYMBOL_GPL(generic_access_phys); 5456 #endif 5457 5458 /* 5459 * Access another process' address space as given in mm. 5460 */ 5461 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, 5462 int len, unsigned int gup_flags) 5463 { 5464 struct vm_area_struct *vma; 5465 void *old_buf = buf; 5466 int write = gup_flags & FOLL_WRITE; 5467 5468 if (mmap_read_lock_killable(mm)) 5469 return 0; 5470 5471 /* ignore errors, just check how much was successfully transferred */ 5472 while (len) { 5473 int bytes, ret, offset; 5474 void *maddr; 5475 struct page *page = NULL; 5476 5477 ret = get_user_pages_remote(mm, addr, 1, 5478 gup_flags, &page, &vma, NULL); 5479 if (ret <= 0) { 5480 #ifndef CONFIG_HAVE_IOREMAP_PROT 5481 break; 5482 #else 5483 /* 5484 * Check if this is a VM_IO | VM_PFNMAP VMA, which 5485 * we can access using slightly different code. 5486 */ 5487 vma = vma_lookup(mm, addr); 5488 if (!vma) 5489 break; 5490 if (vma->vm_ops && vma->vm_ops->access) 5491 ret = vma->vm_ops->access(vma, addr, buf, 5492 len, write); 5493 if (ret <= 0) 5494 break; 5495 bytes = ret; 5496 #endif 5497 } else { 5498 bytes = len; 5499 offset = addr & (PAGE_SIZE-1); 5500 if (bytes > PAGE_SIZE-offset) 5501 bytes = PAGE_SIZE-offset; 5502 5503 maddr = kmap(page); 5504 if (write) { 5505 copy_to_user_page(vma, page, addr, 5506 maddr + offset, buf, bytes); 5507 set_page_dirty_lock(page); 5508 } else { 5509 copy_from_user_page(vma, page, addr, 5510 buf, maddr + offset, bytes); 5511 } 5512 kunmap(page); 5513 put_page(page); 5514 } 5515 len -= bytes; 5516 buf += bytes; 5517 addr += bytes; 5518 } 5519 mmap_read_unlock(mm); 5520 5521 return buf - old_buf; 5522 } 5523 5524 /** 5525 * access_remote_vm - access another process' address space 5526 * @mm: the mm_struct of the target address space 5527 * @addr: start address to access 5528 * @buf: source or destination buffer 5529 * @len: number of bytes to transfer 5530 * @gup_flags: flags modifying lookup behaviour 5531 * 5532 * The caller must hold a reference on @mm. 5533 * 5534 * Return: number of bytes copied from source to destination. 
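* This may be fewer bytes than @len if part of the range could not be * reached; errors themselves are not reported.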
5535 */ 5536 int access_remote_vm(struct mm_struct *mm, unsigned long addr, 5537 void *buf, int len, unsigned int gup_flags) 5538 { 5539 return __access_remote_vm(mm, addr, buf, len, gup_flags); 5540 } 5541 5542 /* 5543 * Access another process' address space. 5544 * Source/target buffer must be in kernel space. 5545 * Do not walk the page table directly; use get_user_pages(). 5546 */ 5547 int access_process_vm(struct task_struct *tsk, unsigned long addr, 5548 void *buf, int len, unsigned int gup_flags) 5549 { 5550 struct mm_struct *mm; 5551 int ret; 5552 5553 mm = get_task_mm(tsk); 5554 if (!mm) 5555 return 0; 5556 5557 ret = __access_remote_vm(mm, addr, buf, len, gup_flags); 5558 5559 mmput(mm); 5560 5561 return ret; 5562 } 5563 EXPORT_SYMBOL_GPL(access_process_vm); 5564 5565 /* 5566 * Print the name of a VMA. 5567 */ 5568 void print_vma_addr(char *prefix, unsigned long ip) 5569 { 5570 struct mm_struct *mm = current->mm; 5571 struct vm_area_struct *vma; 5572 5573 /* 5574 * We might be running from an atomic context, so we cannot sleep. 5575 */ 5576 if (!mmap_read_trylock(mm)) 5577 return; 5578 5579 vma = find_vma(mm, ip); 5580 if (vma && vma->vm_file) { 5581 struct file *f = vma->vm_file; 5582 char *buf = (char *)__get_free_page(GFP_NOWAIT); 5583 if (buf) { 5584 char *p; 5585 5586 p = file_path(f, buf, PAGE_SIZE); 5587 if (IS_ERR(p)) 5588 p = "?"; 5589 printk("%s%s[%lx+%lx]", prefix, kbasename(p), 5590 vma->vm_start, 5591 vma->vm_end - vma->vm_start); 5592 free_page((unsigned long)buf); 5593 } 5594 } 5595 mmap_read_unlock(mm); 5596 } 5597 5598 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) 5599 void __might_fault(const char *file, int line) 5600 { 5601 if (pagefault_disabled()) 5602 return; 5603 __might_sleep(file, line); 5604 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) 5605 if (current->mm) 5606 might_lock_read(&current->mm->mmap_lock); 5607 #endif 5608 } 5609 EXPORT_SYMBOL(__might_fault); 5610 #endif 5611 5612 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) 5613 /* 5614 * Process all subpages of the specified huge page with the specified 5615 * operation. The target subpage will be processed last to keep its 5616 * cache lines hot.
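* * A worked example with eight subpages and target index 2: the tail * subpages 7, 6, 5, 4 are processed first, then the rest alternates * left/right as 0, 3, 1, 2, leaving the target subpage for last.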
/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * We might be running from an atomic context, so we cannot sleep.
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_NOWAIT);
		if (buf) {
			char *p;

			p = file_path(f, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation.  The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline void process_huge_page(
	unsigned long addr_hint, unsigned int pages_per_huge_page,
	void (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l;
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= pages_per_huge_page) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	} else {
		/* If target subpage in second half of huge page */
		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
		l = pages_per_huge_page - n;
		/* Process subpages at the beginning of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		cond_resched();
		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
	}
}

static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p = page;

	might_sleep();
	for (i = 0; i < pages_per_huge_page;
	     i++, p = mem_map_next(p, page, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct page *page = arg;

	clear_user_highpage(page + idx, addr);
}

void clear_huge_page(struct page *page,
		     unsigned long addr_hint, unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}

static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; ) {
		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

struct copy_subpage_arg {
	struct page *dst;
	struct page *src;
	struct vm_area_struct *vma;
};

static void copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;

	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
			   addr, copy_arg->vma);
}
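/*
 * Illustrative worked example (not part of the original file) of the
 * process_huge_page() visit order used by clear_huge_page() above and
 * copy_user_huge_page() below.  With pages_per_huge_page == 8 and
 * addr_hint falling in subpage 2 (first half, so n = 2, base = 0,
 * l = 2), the subpages are processed in the order
 *
 *	7, 6, 5, 4	// tail of the huge page, moving inward
 *	0, 3, 1, 2	// left-right pairs converging on the target
 *
 * so the target subpage 2 is touched last and its cache lines stay
 * hot for the access that triggered the fault.
 */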
void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr_hint, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
}

long copy_huge_page_from_user(struct page *dst_page,
			      const void __user *usr_src,
			      unsigned int pages_per_huge_page,
			      bool allow_pagefault)
{
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
	struct page *subpage = dst_page;

	for (i = 0; i < pages_per_huge_page;
	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
		if (allow_pagefault)
			page_kaddr = kmap(subpage);
		else
			page_kaddr = kmap_atomic(subpage);
		rc = copy_from_user(page_kaddr,
				usr_src + i * PAGE_SIZE, PAGE_SIZE);
		if (allow_pagefault)
			kunmap(subpage);
		else
			kunmap_atomic(page_kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		flush_dcache_page(subpage);

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
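/*
 * Illustrative sketch (not part of the original file): with lockdep or
 * other debug options enabled, spinlock_t becomes too large to embed
 * directly in struct page, so ALLOC_SPLIT_PTLOCKS backs page->ptl with
 * the slab allocations above.  The consumer is ptlock_init() in
 * include/linux/mm.h, which a page-table page constructor uses roughly
 * like this:
 *
 *	if (!ptlock_alloc(page))	// may fail under memory pressure
 *		return false;
 *	spin_lock_init(ptlock_ptr(page));
 *
 * with ptlock_free() called when the page-table page is torn down.
 */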