// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91 - Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94 - Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>

#include <trace/events/kmem.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#include "pgalloc-track.h"
#include "internal.h"
#include "swap.h"

#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NUMA
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);

struct page *mem_map;
EXPORT_SYMBOL(mem_map);
#endif

static vm_fault_t do_fault(struct vm_fault *vmf);

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
114 */ 115 void *high_memory; 116 EXPORT_SYMBOL(high_memory); 117 118 /* 119 * Randomize the address space (stacks, mmaps, brk, etc.). 120 * 121 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization, 122 * as ancient (libc5 based) binaries can segfault. ) 123 */ 124 int randomize_va_space __read_mostly = 125 #ifdef CONFIG_COMPAT_BRK 126 1; 127 #else 128 2; 129 #endif 130 131 #ifndef arch_wants_old_prefaulted_pte 132 static inline bool arch_wants_old_prefaulted_pte(void) 133 { 134 /* 135 * Transitioning a PTE from 'old' to 'young' can be expensive on 136 * some architectures, even if it's performed in hardware. By 137 * default, "false" means prefaulted entries will be 'young'. 138 */ 139 return false; 140 } 141 #endif 142 143 static int __init disable_randmaps(char *s) 144 { 145 randomize_va_space = 0; 146 return 1; 147 } 148 __setup("norandmaps", disable_randmaps); 149 150 unsigned long zero_pfn __read_mostly; 151 EXPORT_SYMBOL(zero_pfn); 152 153 unsigned long highest_memmap_pfn __read_mostly; 154 155 /* 156 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() 157 */ 158 static int __init init_zero_pfn(void) 159 { 160 zero_pfn = page_to_pfn(ZERO_PAGE(0)); 161 return 0; 162 } 163 early_initcall(init_zero_pfn); 164 165 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count) 166 { 167 trace_rss_stat(mm, member, count); 168 } 169 170 #if defined(SPLIT_RSS_COUNTING) 171 172 void sync_mm_rss(struct mm_struct *mm) 173 { 174 int i; 175 176 for (i = 0; i < NR_MM_COUNTERS; i++) { 177 if (current->rss_stat.count[i]) { 178 add_mm_counter(mm, i, current->rss_stat.count[i]); 179 current->rss_stat.count[i] = 0; 180 } 181 } 182 current->rss_stat.events = 0; 183 } 184 185 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) 186 { 187 struct task_struct *task = current; 188 189 if (likely(task->mm == mm)) 190 task->rss_stat.count[member] += val; 191 else 192 add_mm_counter(mm, member, val); 193 } 194 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) 195 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) 196 197 /* sync counter once per 64 page faults */ 198 #define TASK_RSS_EVENTS_THRESH (64) 199 static void check_sync_rss_stat(struct task_struct *task) 200 { 201 if (unlikely(task != current)) 202 return; 203 if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH)) 204 sync_mm_rss(task->mm); 205 } 206 #else /* SPLIT_RSS_COUNTING */ 207 208 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) 209 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) 210 211 static void check_sync_rss_stat(struct task_struct *task) 212 { 213 } 214 215 #endif /* SPLIT_RSS_COUNTING */ 216 217 /* 218 * Note: this doesn't free the actual pages themselves. That 219 * has been handled earlier when unmapping all the memory regions. 
220 */ 221 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, 222 unsigned long addr) 223 { 224 pgtable_t token = pmd_pgtable(*pmd); 225 pmd_clear(pmd); 226 pte_free_tlb(tlb, token, addr); 227 mm_dec_nr_ptes(tlb->mm); 228 } 229 230 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, 231 unsigned long addr, unsigned long end, 232 unsigned long floor, unsigned long ceiling) 233 { 234 pmd_t *pmd; 235 unsigned long next; 236 unsigned long start; 237 238 start = addr; 239 pmd = pmd_offset(pud, addr); 240 do { 241 next = pmd_addr_end(addr, end); 242 if (pmd_none_or_clear_bad(pmd)) 243 continue; 244 free_pte_range(tlb, pmd, addr); 245 } while (pmd++, addr = next, addr != end); 246 247 start &= PUD_MASK; 248 if (start < floor) 249 return; 250 if (ceiling) { 251 ceiling &= PUD_MASK; 252 if (!ceiling) 253 return; 254 } 255 if (end - 1 > ceiling - 1) 256 return; 257 258 pmd = pmd_offset(pud, start); 259 pud_clear(pud); 260 pmd_free_tlb(tlb, pmd, start); 261 mm_dec_nr_pmds(tlb->mm); 262 } 263 264 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, 265 unsigned long addr, unsigned long end, 266 unsigned long floor, unsigned long ceiling) 267 { 268 pud_t *pud; 269 unsigned long next; 270 unsigned long start; 271 272 start = addr; 273 pud = pud_offset(p4d, addr); 274 do { 275 next = pud_addr_end(addr, end); 276 if (pud_none_or_clear_bad(pud)) 277 continue; 278 free_pmd_range(tlb, pud, addr, next, floor, ceiling); 279 } while (pud++, addr = next, addr != end); 280 281 start &= P4D_MASK; 282 if (start < floor) 283 return; 284 if (ceiling) { 285 ceiling &= P4D_MASK; 286 if (!ceiling) 287 return; 288 } 289 if (end - 1 > ceiling - 1) 290 return; 291 292 pud = pud_offset(p4d, start); 293 p4d_clear(p4d); 294 pud_free_tlb(tlb, pud, start); 295 mm_dec_nr_puds(tlb->mm); 296 } 297 298 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd, 299 unsigned long addr, unsigned long end, 300 unsigned long floor, unsigned long ceiling) 301 { 302 p4d_t *p4d; 303 unsigned long next; 304 unsigned long start; 305 306 start = addr; 307 p4d = p4d_offset(pgd, addr); 308 do { 309 next = p4d_addr_end(addr, end); 310 if (p4d_none_or_clear_bad(p4d)) 311 continue; 312 free_pud_range(tlb, p4d, addr, next, floor, ceiling); 313 } while (p4d++, addr = next, addr != end); 314 315 start &= PGDIR_MASK; 316 if (start < floor) 317 return; 318 if (ceiling) { 319 ceiling &= PGDIR_MASK; 320 if (!ceiling) 321 return; 322 } 323 if (end - 1 > ceiling - 1) 324 return; 325 326 p4d = p4d_offset(pgd, start); 327 pgd_clear(pgd); 328 p4d_free_tlb(tlb, p4d, start); 329 } 330 331 /* 332 * This function frees user-level page tables of a process. 333 */ 334 void free_pgd_range(struct mmu_gather *tlb, 335 unsigned long addr, unsigned long end, 336 unsigned long floor, unsigned long ceiling) 337 { 338 pgd_t *pgd; 339 unsigned long next; 340 341 /* 342 * The next few lines have given us lots of grief... 343 * 344 * Why are we testing PMD* at this top level? Because often 345 * there will be no work to do at all, and we'd prefer not to 346 * go all the way down to the bottom just to discover that. 347 * 348 * Why all these "- 1"s? Because 0 represents both the bottom 349 * of the address space and the top of it (using -1 for the 350 * top wouldn't help much: the masks would do the wrong thing). 
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? No, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;
	/*
	 * We add page table cache pages with PAGE_SIZE,
	 * (see pte_free_tlb()), flush the tlb if we need to.
	 */
	tlb_change_page_size(tlb, PAGE_SIZE);
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
		   struct vm_area_struct *vma, unsigned long floor,
		   unsigned long ceiling)
{
	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);

	do {
		unsigned long addr = vma->vm_start;
		struct vm_area_struct *next;

		/*
		 * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
		 * be 0. This will underflow and is okay.
		 */
		next = mas_find(&mas, ceiling - 1);

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = mas_find(&mas, ceiling - 1);
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next ? next->vm_start : ceiling);
		}
		vma = next;
	} while (vma);
}

void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);

	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		mm_inc_nr_ptes(mm);
		/*
		 * Ensure all pte setup (eg. pte page lock and page clearing) are
		 * visible before the pte is made visible to other CPUs by being
		 * put into page tables.
		 *
		 * The other side of the story is the pointer chasing in the page
		 * table walking code (when walking the page table without locking;
		 * ie. most of the time). Fortunately, these data accesses consist
		 * of a chain of data-dependent loads, meaning most CPUs (alpha
		 * being the notable exception) will already guarantee loads are
		 * seen in-order. See the alpha page table accessors for the
		 * smp_rmb() barriers in page table walking code.
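		 *
		 * Roughly, the ordering being guarded against is:
		 *
		 *	pmd_install()			lockless walker
		 *	  initialize pte page
		 *	  smp_wmb()
		 *	  pmd_populate()		read *pmd
		 *					load from the pte page
		 *
		 * Without the write barrier, a weakly ordered CPU could
		 * observe the freshly populated pmd but still see stale
		 * contents of the new pte page.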
458 */ 459 smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */ 460 pmd_populate(mm, pmd, *pte); 461 *pte = NULL; 462 } 463 spin_unlock(ptl); 464 } 465 466 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) 467 { 468 pgtable_t new = pte_alloc_one(mm); 469 if (!new) 470 return -ENOMEM; 471 472 pmd_install(mm, pmd, &new); 473 if (new) 474 pte_free(mm, new); 475 return 0; 476 } 477 478 int __pte_alloc_kernel(pmd_t *pmd) 479 { 480 pte_t *new = pte_alloc_one_kernel(&init_mm); 481 if (!new) 482 return -ENOMEM; 483 484 spin_lock(&init_mm.page_table_lock); 485 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */ 486 smp_wmb(); /* See comment in pmd_install() */ 487 pmd_populate_kernel(&init_mm, pmd, new); 488 new = NULL; 489 } 490 spin_unlock(&init_mm.page_table_lock); 491 if (new) 492 pte_free_kernel(&init_mm, new); 493 return 0; 494 } 495 496 static inline void init_rss_vec(int *rss) 497 { 498 memset(rss, 0, sizeof(int) * NR_MM_COUNTERS); 499 } 500 501 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) 502 { 503 int i; 504 505 if (current->mm == mm) 506 sync_mm_rss(mm); 507 for (i = 0; i < NR_MM_COUNTERS; i++) 508 if (rss[i]) 509 add_mm_counter(mm, i, rss[i]); 510 } 511 512 /* 513 * This function is called to print an error when a bad pte 514 * is found. For example, we might have a PFN-mapped pte in 515 * a region that doesn't allow it. 516 * 517 * The calling function must still handle the error. 518 */ 519 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, 520 pte_t pte, struct page *page) 521 { 522 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); 523 p4d_t *p4d = p4d_offset(pgd, addr); 524 pud_t *pud = pud_offset(p4d, addr); 525 pmd_t *pmd = pmd_offset(pud, addr); 526 struct address_space *mapping; 527 pgoff_t index; 528 static unsigned long resume; 529 static unsigned long nr_shown; 530 static unsigned long nr_unshown; 531 532 /* 533 * Allow a burst of 60 reports, then keep quiet for that minute; 534 * or allow a steady drip of one report per second. 535 */ 536 if (nr_shown == 60) { 537 if (time_before(jiffies, resume)) { 538 nr_unshown++; 539 return; 540 } 541 if (nr_unshown) { 542 pr_alert("BUG: Bad page map: %lu messages suppressed\n", 543 nr_unshown); 544 nr_unshown = 0; 545 } 546 nr_shown = 0; 547 } 548 if (nr_shown++ == 0) 549 resume = jiffies + 60 * HZ; 550 551 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; 552 index = linear_page_index(vma, addr); 553 554 pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n", 555 current->comm, 556 (long long)pte_val(pte), (long long)pmd_val(*pmd)); 557 if (page) 558 dump_page(page, "bad pte"); 559 pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n", 560 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); 561 pr_alert("file:%pD fault:%ps mmap:%ps read_folio:%ps\n", 562 vma->vm_file, 563 vma->vm_ops ? vma->vm_ops->fault : NULL, 564 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, 565 mapping ? mapping->a_ops->read_folio : NULL); 566 dump_stack(); 567 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 568 } 569 570 /* 571 * vm_normal_page -- This function gets the "struct page" associated with a pte. 572 * 573 * "Special" mappings do not wish to be associated with a "struct page" (either 574 * it doesn't exist, or it exists but they don't want to touch it). In this 575 * case, NULL is returned here. "Normal" mappings do have a struct page. 576 * 577 * There are 2 broad cases. 
Firstly, an architecture may define a pte_special() 578 * pte bit, in which case this function is trivial. Secondly, an architecture 579 * may not have a spare pte bit, which requires a more complicated scheme, 580 * described below. 581 * 582 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a 583 * special mapping (even if there are underlying and valid "struct pages"). 584 * COWed pages of a VM_PFNMAP are always normal. 585 * 586 * The way we recognize COWed pages within VM_PFNMAP mappings is through the 587 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit 588 * set, and the vm_pgoff will point to the first PFN mapped: thus every special 589 * mapping will always honor the rule 590 * 591 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) 592 * 593 * And for normal mappings this is false. 594 * 595 * This restricts such mappings to be a linear translation from virtual address 596 * to pfn. To get around this restriction, we allow arbitrary mappings so long 597 * as the vma is not a COW mapping; in that case, we know that all ptes are 598 * special (because none can have been COWed). 599 * 600 * 601 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP. 602 * 603 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct 604 * page" backing, however the difference is that _all_ pages with a struct 605 * page (that is, those where pfn_valid is true) are refcounted and considered 606 * normal pages by the VM. The disadvantage is that pages are refcounted 607 * (which can be slower and simply not an option for some PFNMAP users). The 608 * advantage is that we don't have to follow the strict linearity rule of 609 * PFNMAP mappings in order to support COWable mappings. 610 * 611 */ 612 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, 613 pte_t pte) 614 { 615 unsigned long pfn = pte_pfn(pte); 616 617 if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) { 618 if (likely(!pte_special(pte))) 619 goto check_pfn; 620 if (vma->vm_ops && vma->vm_ops->find_special_page) 621 return vma->vm_ops->find_special_page(vma, addr); 622 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) 623 return NULL; 624 if (is_zero_pfn(pfn)) 625 return NULL; 626 if (pte_devmap(pte)) 627 /* 628 * NOTE: New users of ZONE_DEVICE will not set pte_devmap() 629 * and will have refcounts incremented on their struct pages 630 * when they are inserted into PTEs, thus they are safe to 631 * return here. Legacy ZONE_DEVICE pages that set pte_devmap() 632 * do not have refcounts. Example of legacy ZONE_DEVICE is 633 * MEMORY_DEVICE_FS_DAX type in pmem or virtio_fs drivers. 634 */ 635 return NULL; 636 637 print_bad_pte(vma, addr, pte, NULL); 638 return NULL; 639 } 640 641 /* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */ 642 643 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { 644 if (vma->vm_flags & VM_MIXEDMAP) { 645 if (!pfn_valid(pfn)) 646 return NULL; 647 goto out; 648 } else { 649 unsigned long off; 650 off = (addr - vma->vm_start) >> PAGE_SHIFT; 651 if (pfn == vma->vm_pgoff + off) 652 return NULL; 653 if (!is_cow_mapping(vma->vm_flags)) 654 return NULL; 655 } 656 } 657 658 if (is_zero_pfn(pfn)) 659 return NULL; 660 661 check_pfn: 662 if (unlikely(pfn > highest_memmap_pfn)) { 663 print_bad_pte(vma, addr, pte, NULL); 664 return NULL; 665 } 666 667 /* 668 * NOTE! We still have PageReserved() pages in the page tables. 669 * eg. VDSO mappings can cause them to exist. 
670 */ 671 out: 672 return pfn_to_page(pfn); 673 } 674 675 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 676 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, 677 pmd_t pmd) 678 { 679 unsigned long pfn = pmd_pfn(pmd); 680 681 /* 682 * There is no pmd_special() but there may be special pmds, e.g. 683 * in a direct-access (dax) mapping, so let's just replicate the 684 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here. 685 */ 686 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { 687 if (vma->vm_flags & VM_MIXEDMAP) { 688 if (!pfn_valid(pfn)) 689 return NULL; 690 goto out; 691 } else { 692 unsigned long off; 693 off = (addr - vma->vm_start) >> PAGE_SHIFT; 694 if (pfn == vma->vm_pgoff + off) 695 return NULL; 696 if (!is_cow_mapping(vma->vm_flags)) 697 return NULL; 698 } 699 } 700 701 if (pmd_devmap(pmd)) 702 return NULL; 703 if (is_huge_zero_pmd(pmd)) 704 return NULL; 705 if (unlikely(pfn > highest_memmap_pfn)) 706 return NULL; 707 708 /* 709 * NOTE! We still have PageReserved() pages in the page tables. 710 * eg. VDSO mappings can cause them to exist. 711 */ 712 out: 713 return pfn_to_page(pfn); 714 } 715 #endif 716 717 static void restore_exclusive_pte(struct vm_area_struct *vma, 718 struct page *page, unsigned long address, 719 pte_t *ptep) 720 { 721 pte_t pte; 722 swp_entry_t entry; 723 724 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); 725 if (pte_swp_soft_dirty(*ptep)) 726 pte = pte_mksoft_dirty(pte); 727 728 entry = pte_to_swp_entry(*ptep); 729 if (pte_swp_uffd_wp(*ptep)) 730 pte = pte_mkuffd_wp(pte); 731 else if (is_writable_device_exclusive_entry(entry)) 732 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 733 734 VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page))); 735 736 /* 737 * No need to take a page reference as one was already 738 * created when the swap entry was made. 739 */ 740 if (PageAnon(page)) 741 page_add_anon_rmap(page, vma, address, RMAP_NONE); 742 else 743 /* 744 * Currently device exclusive access only supports anonymous 745 * memory so the entry shouldn't point to a filebacked page. 746 */ 747 WARN_ON_ONCE(1); 748 749 set_pte_at(vma->vm_mm, address, ptep, pte); 750 751 /* 752 * No need to invalidate - it was non-present before. However 753 * secondary CPUs may have mappings that need invalidating. 754 */ 755 update_mmu_cache(vma, address, ptep); 756 } 757 758 /* 759 * Tries to restore an exclusive pte if the page lock can be acquired without 760 * sleeping. 761 */ 762 static int 763 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, 764 unsigned long addr) 765 { 766 swp_entry_t entry = pte_to_swp_entry(*src_pte); 767 struct page *page = pfn_swap_entry_to_page(entry); 768 769 if (trylock_page(page)) { 770 restore_exclusive_pte(vma, page, addr, src_pte); 771 unlock_page(page); 772 return 0; 773 } 774 775 return -EBUSY; 776 } 777 778 /* 779 * copy one vm_area from one task to the other. Assumes the page tables 780 * already present in the new task to be cleared in the whole range 781 * covered by this vma. 
 */

static unsigned long
copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
{
	unsigned long vm_flags = dst_vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (likely(!non_swap_entry(entry))) {
		if (swap_duplicate(entry) < 0)
			return -EIO;

		/* make sure dst_mm is on swapoff's mmlist. */
		if (unlikely(list_empty(&dst_mm->mmlist))) {
			spin_lock(&mmlist_lock);
			if (list_empty(&dst_mm->mmlist))
				list_add(&dst_mm->mmlist,
						&src_mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		/* Mark the swap entry as shared. */
		if (pte_swp_exclusive(*src_pte)) {
			pte = pte_swp_clear_exclusive(*src_pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
		rss[MM_SWAPENTS]++;
	} else if (is_migration_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		rss[mm_counter(page)]++;

		if (!is_readable_migration_entry(entry) &&
				is_cow_mapping(vm_flags)) {
			/*
			 * COW mappings require pages in both parent and child
			 * to be set to read. A previously exclusive entry is
			 * now shared.
			 */
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_soft_dirty(*src_pte))
				pte = pte_swp_mksoft_dirty(pte);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_private_entry(entry)) {
		page = pfn_swap_entry_to_page(entry);

		/*
		 * Update rss count even for unaddressable pages, as
		 * they should be treated just like normal pages in this
		 * respect.
		 *
		 * We will likely want to have some new rss counters
		 * for unaddressable pages, at some point. But for now
		 * keep things as they are.
		 */
		get_page(page);
		rss[mm_counter(page)]++;
		/* Cannot fail as these pages cannot get pinned. */
		BUG_ON(page_try_dup_anon_rmap(page, false, src_vma));

		/*
		 * We do not preserve soft-dirty information, because so
		 * far, checkpoint/restore is the only feature that
		 * requires that. And checkpoint/restore does not work
		 * when a device driver is involved (you cannot easily
		 * save and restore device driver state).
		 */
		if (is_writable_device_private_entry(entry) &&
		    is_cow_mapping(vm_flags)) {
			entry = make_readable_device_private_entry(
							swp_offset(entry));
			pte = swp_entry_to_pte(entry);
			if (pte_swp_uffd_wp(*src_pte))
				pte = pte_swp_mkuffd_wp(pte);
			set_pte_at(src_mm, addr, src_pte, pte);
		}
	} else if (is_device_exclusive_entry(entry)) {
		/*
		 * Make device exclusive entries present by restoring the
		 * original entry then copying as for a present pte. Device
		 * exclusive entries currently only support private writable
		 * (ie. COW) mappings.
		 */
		VM_BUG_ON(!is_cow_mapping(src_vma->vm_flags));
		if (try_restore_exclusive_pte(src_pte, src_vma, addr))
			return -EBUSY;
		return -ENOENT;
	} else if (is_pte_marker_entry(entry)) {
		/*
		 * We should only be copying the pgtable at all because dst_vma
		 * has uffd-wp enabled; do a sanity check.
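		 *
		 * (A pte marker is a non-present entry used to keep per-pte
		 *  state - here the uffd-wp bit - when there is no present
		 *  page or swap entry to carry it, e.g. in file-backed vmas.)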
881 */ 882 WARN_ON_ONCE(!userfaultfd_wp(dst_vma)); 883 set_pte_at(dst_mm, addr, dst_pte, pte); 884 return 0; 885 } 886 if (!userfaultfd_wp(dst_vma)) 887 pte = pte_swp_clear_uffd_wp(pte); 888 set_pte_at(dst_mm, addr, dst_pte, pte); 889 return 0; 890 } 891 892 /* 893 * Copy a present and normal page. 894 * 895 * NOTE! The usual case is that this isn't required; 896 * instead, the caller can just increase the page refcount 897 * and re-use the pte the traditional way. 898 * 899 * And if we need a pre-allocated page but don't yet have 900 * one, return a negative error to let the preallocation 901 * code know so that it can do so outside the page table 902 * lock. 903 */ 904 static inline int 905 copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 906 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, 907 struct page **prealloc, struct page *page) 908 { 909 struct page *new_page; 910 pte_t pte; 911 912 new_page = *prealloc; 913 if (!new_page) 914 return -EAGAIN; 915 916 /* 917 * We have a prealloc page, all good! Take it 918 * over and copy the page & arm it. 919 */ 920 *prealloc = NULL; 921 copy_user_highpage(new_page, page, addr, src_vma); 922 __SetPageUptodate(new_page); 923 page_add_new_anon_rmap(new_page, dst_vma, addr); 924 lru_cache_add_inactive_or_unevictable(new_page, dst_vma); 925 rss[mm_counter(new_page)]++; 926 927 /* All done, just insert the new page copy in the child */ 928 pte = mk_pte(new_page, dst_vma->vm_page_prot); 929 pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma); 930 if (userfaultfd_pte_wp(dst_vma, *src_pte)) 931 /* Uffd-wp needs to be delivered to dest pte as well */ 932 pte = pte_wrprotect(pte_mkuffd_wp(pte)); 933 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); 934 return 0; 935 } 936 937 /* 938 * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page 939 * is required to copy this pte. 940 */ 941 static inline int 942 copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 943 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, 944 struct page **prealloc) 945 { 946 struct mm_struct *src_mm = src_vma->vm_mm; 947 unsigned long vm_flags = src_vma->vm_flags; 948 pte_t pte = *src_pte; 949 struct page *page; 950 951 page = vm_normal_page(src_vma, addr, pte); 952 if (page && PageAnon(page)) { 953 /* 954 * If this page may have been pinned by the parent process, 955 * copy the page immediately for the child so that we'll always 956 * guarantee the pinned page won't be randomly replaced in the 957 * future. 958 */ 959 get_page(page); 960 if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) { 961 /* Page maybe pinned, we have to copy. 
*/ 962 put_page(page); 963 return copy_present_page(dst_vma, src_vma, dst_pte, src_pte, 964 addr, rss, prealloc, page); 965 } 966 rss[mm_counter(page)]++; 967 } else if (page) { 968 get_page(page); 969 page_dup_file_rmap(page, false); 970 rss[mm_counter(page)]++; 971 } 972 973 /* 974 * If it's a COW mapping, write protect it both 975 * in the parent and the child 976 */ 977 if (is_cow_mapping(vm_flags) && pte_write(pte)) { 978 ptep_set_wrprotect(src_mm, addr, src_pte); 979 pte = pte_wrprotect(pte); 980 } 981 VM_BUG_ON(page && PageAnon(page) && PageAnonExclusive(page)); 982 983 /* 984 * If it's a shared mapping, mark it clean in 985 * the child 986 */ 987 if (vm_flags & VM_SHARED) 988 pte = pte_mkclean(pte); 989 pte = pte_mkold(pte); 990 991 if (!userfaultfd_wp(dst_vma)) 992 pte = pte_clear_uffd_wp(pte); 993 994 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); 995 return 0; 996 } 997 998 static inline struct page * 999 page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, 1000 unsigned long addr) 1001 { 1002 struct page *new_page; 1003 1004 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr); 1005 if (!new_page) 1006 return NULL; 1007 1008 if (mem_cgroup_charge(page_folio(new_page), src_mm, GFP_KERNEL)) { 1009 put_page(new_page); 1010 return NULL; 1011 } 1012 cgroup_throttle_swaprate(new_page, GFP_KERNEL); 1013 1014 return new_page; 1015 } 1016 1017 static int 1018 copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 1019 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, 1020 unsigned long end) 1021 { 1022 struct mm_struct *dst_mm = dst_vma->vm_mm; 1023 struct mm_struct *src_mm = src_vma->vm_mm; 1024 pte_t *orig_src_pte, *orig_dst_pte; 1025 pte_t *src_pte, *dst_pte; 1026 spinlock_t *src_ptl, *dst_ptl; 1027 int progress, ret = 0; 1028 int rss[NR_MM_COUNTERS]; 1029 swp_entry_t entry = (swp_entry_t){0}; 1030 struct page *prealloc = NULL; 1031 1032 again: 1033 progress = 0; 1034 init_rss_vec(rss); 1035 1036 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); 1037 if (!dst_pte) { 1038 ret = -ENOMEM; 1039 goto out; 1040 } 1041 src_pte = pte_offset_map(src_pmd, addr); 1042 src_ptl = pte_lockptr(src_mm, src_pmd); 1043 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 1044 orig_src_pte = src_pte; 1045 orig_dst_pte = dst_pte; 1046 arch_enter_lazy_mmu_mode(); 1047 1048 do { 1049 /* 1050 * We are holding two locks at this point - either of them 1051 * could generate latencies in another task on another CPU. 1052 */ 1053 if (progress >= 32) { 1054 progress = 0; 1055 if (need_resched() || 1056 spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) 1057 break; 1058 } 1059 if (pte_none(*src_pte)) { 1060 progress++; 1061 continue; 1062 } 1063 if (unlikely(!pte_present(*src_pte))) { 1064 ret = copy_nonpresent_pte(dst_mm, src_mm, 1065 dst_pte, src_pte, 1066 dst_vma, src_vma, 1067 addr, rss); 1068 if (ret == -EIO) { 1069 entry = pte_to_swp_entry(*src_pte); 1070 break; 1071 } else if (ret == -EBUSY) { 1072 break; 1073 } else if (!ret) { 1074 progress += 8; 1075 continue; 1076 } 1077 1078 /* 1079 * Device exclusive entry restored, continue by copying 1080 * the now present pte. 1081 */ 1082 WARN_ON_ONCE(ret != -ENOENT); 1083 } 1084 /* copy_present_pte() will clear `*prealloc' if consumed */ 1085 ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, 1086 addr, rss, &prealloc); 1087 /* 1088 * If we need a pre-allocated page for this pte, drop the 1089 * locks, allocate, and try again. 
1090 */ 1091 if (unlikely(ret == -EAGAIN)) 1092 break; 1093 if (unlikely(prealloc)) { 1094 /* 1095 * pre-alloc page cannot be reused by next time so as 1096 * to strictly follow mempolicy (e.g., alloc_page_vma() 1097 * will allocate page according to address). This 1098 * could only happen if one pinned pte changed. 1099 */ 1100 put_page(prealloc); 1101 prealloc = NULL; 1102 } 1103 progress += 8; 1104 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); 1105 1106 arch_leave_lazy_mmu_mode(); 1107 spin_unlock(src_ptl); 1108 pte_unmap(orig_src_pte); 1109 add_mm_rss_vec(dst_mm, rss); 1110 pte_unmap_unlock(orig_dst_pte, dst_ptl); 1111 cond_resched(); 1112 1113 if (ret == -EIO) { 1114 VM_WARN_ON_ONCE(!entry.val); 1115 if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) { 1116 ret = -ENOMEM; 1117 goto out; 1118 } 1119 entry.val = 0; 1120 } else if (ret == -EBUSY) { 1121 goto out; 1122 } else if (ret == -EAGAIN) { 1123 prealloc = page_copy_prealloc(src_mm, src_vma, addr); 1124 if (!prealloc) 1125 return -ENOMEM; 1126 } else if (ret) { 1127 VM_WARN_ON_ONCE(1); 1128 } 1129 1130 /* We've captured and resolved the error. Reset, try again. */ 1131 ret = 0; 1132 1133 if (addr != end) 1134 goto again; 1135 out: 1136 if (unlikely(prealloc)) 1137 put_page(prealloc); 1138 return ret; 1139 } 1140 1141 static inline int 1142 copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 1143 pud_t *dst_pud, pud_t *src_pud, unsigned long addr, 1144 unsigned long end) 1145 { 1146 struct mm_struct *dst_mm = dst_vma->vm_mm; 1147 struct mm_struct *src_mm = src_vma->vm_mm; 1148 pmd_t *src_pmd, *dst_pmd; 1149 unsigned long next; 1150 1151 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); 1152 if (!dst_pmd) 1153 return -ENOMEM; 1154 src_pmd = pmd_offset(src_pud, addr); 1155 do { 1156 next = pmd_addr_end(addr, end); 1157 if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd) 1158 || pmd_devmap(*src_pmd)) { 1159 int err; 1160 VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma); 1161 err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd, 1162 addr, dst_vma, src_vma); 1163 if (err == -ENOMEM) 1164 return -ENOMEM; 1165 if (!err) 1166 continue; 1167 /* fall through */ 1168 } 1169 if (pmd_none_or_clear_bad(src_pmd)) 1170 continue; 1171 if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd, 1172 addr, next)) 1173 return -ENOMEM; 1174 } while (dst_pmd++, src_pmd++, addr = next, addr != end); 1175 return 0; 1176 } 1177 1178 static inline int 1179 copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, 1180 p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr, 1181 unsigned long end) 1182 { 1183 struct mm_struct *dst_mm = dst_vma->vm_mm; 1184 struct mm_struct *src_mm = src_vma->vm_mm; 1185 pud_t *src_pud, *dst_pud; 1186 unsigned long next; 1187 1188 dst_pud = pud_alloc(dst_mm, dst_p4d, addr); 1189 if (!dst_pud) 1190 return -ENOMEM; 1191 src_pud = pud_offset(src_p4d, addr); 1192 do { 1193 next = pud_addr_end(addr, end); 1194 if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) { 1195 int err; 1196 1197 VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma); 1198 err = copy_huge_pud(dst_mm, src_mm, 1199 dst_pud, src_pud, addr, src_vma); 1200 if (err == -ENOMEM) 1201 return -ENOMEM; 1202 if (!err) 1203 continue; 1204 /* fall through */ 1205 } 1206 if (pud_none_or_clear_bad(src_pud)) 1207 continue; 1208 if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud, 1209 addr, next)) 1210 return -ENOMEM; 1211 } while (dst_pud++, src_pud++, addr = next, addr != end); 1212 return 0; 1213 } 1214 1215 
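/*
 * The fork-time copy walks both page tables in lock step:
 * copy_page_range() -> copy_p4d_range() -> copy_pud_range() ->
 * copy_pmd_range() -> copy_pte_range(), allocating each destination level on
 * the way down; huge pmd/pud entries are handled a level up and skip the
 * descent when they can be copied in place.
 */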
static inline int
copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
	       unsigned long end)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	p4d_t *src_p4d, *dst_p4d;
	unsigned long next;

	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
	if (!dst_p4d)
		return -ENOMEM;
	src_p4d = p4d_offset(src_pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(src_p4d))
			continue;
		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
				   addr, next))
			return -ENOMEM;
	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
	return 0;
}

/*
 * Return true if the vma needs to copy the pgtable during this fork(). Return
 * false when we can speed up fork() by skipping the copy and letting page
 * faults lazily fill the page tables when the child accesses the memory range.
 */
static bool
vma_needs_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	/*
	 * Always copy pgtables when dst_vma has uffd-wp enabled even if it's
	 * file-backed (e.g. shmem): when uffd-wp is enabled, the pgtable
	 * contains uffd-wp protection information that we can't retrieve
	 * from the page cache, and skipping the copy would lose it.
	 */
	if (userfaultfd_wp(dst_vma))
		return true;

	if (src_vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
		return true;

	if (src_vma->anon_vma)
		return true;

	/*
	 * Don't copy ptes where a page fault will fill them correctly. Fork
	 * becomes much lighter when there are big shared or private readonly
	 * mappings. The tradeoff is that copy_page_range is more efficient
	 * than faulting.
	 */
	return false;
}

int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = src_vma->vm_start;
	unsigned long end = src_vma->vm_end;
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	struct mm_struct *src_mm = src_vma->vm_mm;
	struct mmu_notifier_range range;
	bool is_cow;
	int ret;

	if (!vma_needs_copy(dst_vma, src_vma))
		return 0;

	if (is_vm_hugetlb_page(src_vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, dst_vma, src_vma);

	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
		/*
		 * We do not free on error cases below as remove_vma
		 * gets called on error from a higher level routine.
		 */
		ret = track_pfn_copy(src_vma);
		if (ret)
			return ret;
	}

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	is_cow = is_cow_mapping(src_vma->vm_flags);

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * Disabling preemption is not needed for the write side, as
		 * the read side doesn't spin, but goes to the mmap_lock.
		 *
		 * Use the raw variant of the seqcount_t write API to avoid
		 * lockdep complaining about preemptibility.
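		 *
		 * (The read side is, e.g., the lockless GUP-fast walk, which
		 *  samples this seqcount before walking and falls back to the
		 *  slow path if it changed in the meantime.)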
1318 */ 1319 mmap_assert_write_locked(src_mm); 1320 raw_write_seqcount_begin(&src_mm->write_protect_seq); 1321 } 1322 1323 ret = 0; 1324 dst_pgd = pgd_offset(dst_mm, addr); 1325 src_pgd = pgd_offset(src_mm, addr); 1326 do { 1327 next = pgd_addr_end(addr, end); 1328 if (pgd_none_or_clear_bad(src_pgd)) 1329 continue; 1330 if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd, 1331 addr, next))) { 1332 ret = -ENOMEM; 1333 break; 1334 } 1335 } while (dst_pgd++, src_pgd++, addr = next, addr != end); 1336 1337 if (is_cow) { 1338 raw_write_seqcount_end(&src_mm->write_protect_seq); 1339 mmu_notifier_invalidate_range_end(&range); 1340 } 1341 return ret; 1342 } 1343 1344 /* 1345 * Parameter block passed down to zap_pte_range in exceptional cases. 1346 */ 1347 struct zap_details { 1348 struct folio *single_folio; /* Locked folio to be unmapped */ 1349 bool even_cows; /* Zap COWed private pages too? */ 1350 zap_flags_t zap_flags; /* Extra flags for zapping */ 1351 }; 1352 1353 /* Whether we should zap all COWed (private) pages too */ 1354 static inline bool should_zap_cows(struct zap_details *details) 1355 { 1356 /* By default, zap all pages */ 1357 if (!details) 1358 return true; 1359 1360 /* Or, we zap COWed pages only if the caller wants to */ 1361 return details->even_cows; 1362 } 1363 1364 /* Decides whether we should zap this page with the page pointer specified */ 1365 static inline bool should_zap_page(struct zap_details *details, struct page *page) 1366 { 1367 /* If we can make a decision without *page.. */ 1368 if (should_zap_cows(details)) 1369 return true; 1370 1371 /* E.g. the caller passes NULL for the case of a zero page */ 1372 if (!page) 1373 return true; 1374 1375 /* Otherwise we should only zap non-anon pages */ 1376 return !PageAnon(page); 1377 } 1378 1379 static inline bool zap_drop_file_uffd_wp(struct zap_details *details) 1380 { 1381 if (!details) 1382 return false; 1383 1384 return details->zap_flags & ZAP_FLAG_DROP_MARKER; 1385 } 1386 1387 /* 1388 * This function makes sure that we'll replace the none pte with an uffd-wp 1389 * swap special pte marker when necessary. Must be with the pgtable lock held. 
1390 */ 1391 static inline void 1392 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, 1393 unsigned long addr, pte_t *pte, 1394 struct zap_details *details, pte_t pteval) 1395 { 1396 #ifdef CONFIG_PTE_MARKER_UFFD_WP 1397 if (zap_drop_file_uffd_wp(details)) 1398 return; 1399 1400 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); 1401 #endif 1402 } 1403 1404 static unsigned long zap_pte_range(struct mmu_gather *tlb, 1405 struct vm_area_struct *vma, pmd_t *pmd, 1406 unsigned long addr, unsigned long end, 1407 struct zap_details *details) 1408 { 1409 struct mm_struct *mm = tlb->mm; 1410 int force_flush = 0; 1411 int rss[NR_MM_COUNTERS]; 1412 spinlock_t *ptl; 1413 pte_t *start_pte; 1414 pte_t *pte; 1415 swp_entry_t entry; 1416 1417 tlb_change_page_size(tlb, PAGE_SIZE); 1418 again: 1419 init_rss_vec(rss); 1420 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1421 pte = start_pte; 1422 flush_tlb_batched_pending(mm); 1423 arch_enter_lazy_mmu_mode(); 1424 do { 1425 pte_t ptent = *pte; 1426 struct page *page; 1427 1428 if (pte_none(ptent)) 1429 continue; 1430 1431 if (need_resched()) 1432 break; 1433 1434 if (pte_present(ptent)) { 1435 page = vm_normal_page(vma, addr, ptent); 1436 if (unlikely(!should_zap_page(details, page))) 1437 continue; 1438 ptent = ptep_get_and_clear_full(mm, addr, pte, 1439 tlb->fullmm); 1440 tlb_remove_tlb_entry(tlb, pte, addr); 1441 zap_install_uffd_wp_if_needed(vma, addr, pte, details, 1442 ptent); 1443 if (unlikely(!page)) 1444 continue; 1445 1446 if (!PageAnon(page)) { 1447 if (pte_dirty(ptent)) { 1448 force_flush = 1; 1449 set_page_dirty(page); 1450 } 1451 if (pte_young(ptent) && 1452 likely(!(vma->vm_flags & VM_SEQ_READ))) 1453 mark_page_accessed(page); 1454 } 1455 rss[mm_counter(page)]--; 1456 page_remove_rmap(page, vma, false); 1457 if (unlikely(page_mapcount(page) < 0)) 1458 print_bad_pte(vma, addr, ptent, page); 1459 if (unlikely(__tlb_remove_page(tlb, page))) { 1460 force_flush = 1; 1461 addr += PAGE_SIZE; 1462 break; 1463 } 1464 continue; 1465 } 1466 1467 entry = pte_to_swp_entry(ptent); 1468 if (is_device_private_entry(entry) || 1469 is_device_exclusive_entry(entry)) { 1470 page = pfn_swap_entry_to_page(entry); 1471 if (unlikely(!should_zap_page(details, page))) 1472 continue; 1473 /* 1474 * Both device private/exclusive mappings should only 1475 * work with anonymous page so far, so we don't need to 1476 * consider uffd-wp bit when zap. For more information, 1477 * see zap_install_uffd_wp_if_needed(). 
1478 */ 1479 WARN_ON_ONCE(!vma_is_anonymous(vma)); 1480 rss[mm_counter(page)]--; 1481 if (is_device_private_entry(entry)) 1482 page_remove_rmap(page, vma, false); 1483 put_page(page); 1484 } else if (!non_swap_entry(entry)) { 1485 /* Genuine swap entry, hence a private anon page */ 1486 if (!should_zap_cows(details)) 1487 continue; 1488 rss[MM_SWAPENTS]--; 1489 if (unlikely(!free_swap_and_cache(entry))) 1490 print_bad_pte(vma, addr, ptent, NULL); 1491 } else if (is_migration_entry(entry)) { 1492 page = pfn_swap_entry_to_page(entry); 1493 if (!should_zap_page(details, page)) 1494 continue; 1495 rss[mm_counter(page)]--; 1496 } else if (pte_marker_entry_uffd_wp(entry)) { 1497 /* Only drop the uffd-wp marker if explicitly requested */ 1498 if (!zap_drop_file_uffd_wp(details)) 1499 continue; 1500 } else if (is_hwpoison_entry(entry) || 1501 is_swapin_error_entry(entry)) { 1502 if (!should_zap_cows(details)) 1503 continue; 1504 } else { 1505 /* We should have covered all the swap entry types */ 1506 WARN_ON_ONCE(1); 1507 } 1508 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 1509 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); 1510 } while (pte++, addr += PAGE_SIZE, addr != end); 1511 1512 add_mm_rss_vec(mm, rss); 1513 arch_leave_lazy_mmu_mode(); 1514 1515 /* Do the actual TLB flush before dropping ptl */ 1516 if (force_flush) 1517 tlb_flush_mmu_tlbonly(tlb); 1518 pte_unmap_unlock(start_pte, ptl); 1519 1520 /* 1521 * If we forced a TLB flush (either due to running out of 1522 * batch buffers or because we needed to flush dirty TLB 1523 * entries before releasing the ptl), free the batched 1524 * memory too. Restart if we didn't do everything. 1525 */ 1526 if (force_flush) { 1527 force_flush = 0; 1528 tlb_flush_mmu(tlb); 1529 } 1530 1531 if (addr != end) { 1532 cond_resched(); 1533 goto again; 1534 } 1535 1536 return addr; 1537 } 1538 1539 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 1540 struct vm_area_struct *vma, pud_t *pud, 1541 unsigned long addr, unsigned long end, 1542 struct zap_details *details) 1543 { 1544 pmd_t *pmd; 1545 unsigned long next; 1546 1547 pmd = pmd_offset(pud, addr); 1548 do { 1549 next = pmd_addr_end(addr, end); 1550 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { 1551 if (next - addr != HPAGE_PMD_SIZE) 1552 __split_huge_pmd(vma, pmd, addr, false, NULL); 1553 else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1554 goto next; 1555 /* fall through */ 1556 } else if (details && details->single_folio && 1557 folio_test_pmd_mappable(details->single_folio) && 1558 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) { 1559 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); 1560 /* 1561 * Take and drop THP pmd lock so that we cannot return 1562 * prematurely, while zap_huge_pmd() has cleared *pmd, 1563 * but not yet decremented compound_mapcount(). 1564 */ 1565 spin_unlock(ptl); 1566 } 1567 1568 /* 1569 * Here there can be other concurrent MADV_DONTNEED or 1570 * trans huge page faults running, and if the pmd is 1571 * none or trans huge it can change under us. This is 1572 * because MADV_DONTNEED holds the mmap_lock in read 1573 * mode. 
1574 */ 1575 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 1576 goto next; 1577 next = zap_pte_range(tlb, vma, pmd, addr, next, details); 1578 next: 1579 cond_resched(); 1580 } while (pmd++, addr = next, addr != end); 1581 1582 return addr; 1583 } 1584 1585 static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 1586 struct vm_area_struct *vma, p4d_t *p4d, 1587 unsigned long addr, unsigned long end, 1588 struct zap_details *details) 1589 { 1590 pud_t *pud; 1591 unsigned long next; 1592 1593 pud = pud_offset(p4d, addr); 1594 do { 1595 next = pud_addr_end(addr, end); 1596 if (pud_trans_huge(*pud) || pud_devmap(*pud)) { 1597 if (next - addr != HPAGE_PUD_SIZE) { 1598 mmap_assert_locked(tlb->mm); 1599 split_huge_pud(vma, pud, addr); 1600 } else if (zap_huge_pud(tlb, vma, pud, addr)) 1601 goto next; 1602 /* fall through */ 1603 } 1604 if (pud_none_or_clear_bad(pud)) 1605 continue; 1606 next = zap_pmd_range(tlb, vma, pud, addr, next, details); 1607 next: 1608 cond_resched(); 1609 } while (pud++, addr = next, addr != end); 1610 1611 return addr; 1612 } 1613 1614 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb, 1615 struct vm_area_struct *vma, pgd_t *pgd, 1616 unsigned long addr, unsigned long end, 1617 struct zap_details *details) 1618 { 1619 p4d_t *p4d; 1620 unsigned long next; 1621 1622 p4d = p4d_offset(pgd, addr); 1623 do { 1624 next = p4d_addr_end(addr, end); 1625 if (p4d_none_or_clear_bad(p4d)) 1626 continue; 1627 next = zap_pud_range(tlb, vma, p4d, addr, next, details); 1628 } while (p4d++, addr = next, addr != end); 1629 1630 return addr; 1631 } 1632 1633 void unmap_page_range(struct mmu_gather *tlb, 1634 struct vm_area_struct *vma, 1635 unsigned long addr, unsigned long end, 1636 struct zap_details *details) 1637 { 1638 pgd_t *pgd; 1639 unsigned long next; 1640 1641 BUG_ON(addr >= end); 1642 tlb_start_vma(tlb, vma); 1643 pgd = pgd_offset(vma->vm_mm, addr); 1644 do { 1645 next = pgd_addr_end(addr, end); 1646 if (pgd_none_or_clear_bad(pgd)) 1647 continue; 1648 next = zap_p4d_range(tlb, vma, pgd, addr, next, details); 1649 } while (pgd++, addr = next, addr != end); 1650 tlb_end_vma(tlb, vma); 1651 } 1652 1653 1654 static void unmap_single_vma(struct mmu_gather *tlb, 1655 struct vm_area_struct *vma, unsigned long start_addr, 1656 unsigned long end_addr, 1657 struct zap_details *details) 1658 { 1659 unsigned long start = max(vma->vm_start, start_addr); 1660 unsigned long end; 1661 1662 if (start >= vma->vm_end) 1663 return; 1664 end = min(vma->vm_end, end_addr); 1665 if (end <= vma->vm_start) 1666 return; 1667 1668 if (vma->vm_file) 1669 uprobe_munmap(vma, start, end); 1670 1671 if (unlikely(vma->vm_flags & VM_PFNMAP)) 1672 untrack_pfn(vma, 0, 0); 1673 1674 if (start != end) { 1675 if (unlikely(is_vm_hugetlb_page(vma))) { 1676 /* 1677 * It is undesirable to test vma->vm_file as it 1678 * should be non-null for valid hugetlb area. 1679 * However, vm_file will be NULL in the error 1680 * cleanup path of mmap_region. When 1681 * hugetlbfs ->mmap method fails, 1682 * mmap_region() nullifies vma->vm_file 1683 * before calling this function to clean up. 1684 * Since no pte has actually been setup, it is 1685 * safe to do nothing in this case. 1686 */ 1687 if (vma->vm_file) { 1688 zap_flags_t zap_flags = details ? 
1689 details->zap_flags : 0; 1690 __unmap_hugepage_range_final(tlb, vma, start, end, 1691 NULL, zap_flags); 1692 } 1693 } else 1694 unmap_page_range(tlb, vma, start, end, details); 1695 } 1696 } 1697 1698 /** 1699 * unmap_vmas - unmap a range of memory covered by a list of vma's 1700 * @tlb: address of the caller's struct mmu_gather 1701 * @mt: the maple tree 1702 * @vma: the starting vma 1703 * @start_addr: virtual address at which to start unmapping 1704 * @end_addr: virtual address at which to end unmapping 1705 * 1706 * Unmap all pages in the vma list. 1707 * 1708 * Only addresses between `start' and `end' will be unmapped. 1709 * 1710 * The VMA list must be sorted in ascending virtual address order. 1711 * 1712 * unmap_vmas() assumes that the caller will flush the whole unmapped address 1713 * range after unmap_vmas() returns. So the only responsibility here is to 1714 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 1715 * drops the lock and schedules. 1716 */ 1717 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt, 1718 struct vm_area_struct *vma, unsigned long start_addr, 1719 unsigned long end_addr) 1720 { 1721 struct mmu_notifier_range range; 1722 struct zap_details details = { 1723 .zap_flags = ZAP_FLAG_DROP_MARKER, 1724 /* Careful - we need to zap private pages too! */ 1725 .even_cows = true, 1726 }; 1727 MA_STATE(mas, mt, vma->vm_end, vma->vm_end); 1728 1729 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm, 1730 start_addr, end_addr); 1731 mmu_notifier_invalidate_range_start(&range); 1732 do { 1733 unmap_single_vma(tlb, vma, start_addr, end_addr, &details); 1734 } while ((vma = mas_find(&mas, end_addr - 1)) != NULL); 1735 mmu_notifier_invalidate_range_end(&range); 1736 } 1737 1738 /** 1739 * zap_page_range - remove user pages in a given range 1740 * @vma: vm_area_struct holding the applicable pages 1741 * @start: starting address of pages to zap 1742 * @size: number of bytes to zap 1743 * 1744 * Caller must protect the VMA list 1745 */ 1746 void zap_page_range(struct vm_area_struct *vma, unsigned long start, 1747 unsigned long size) 1748 { 1749 struct maple_tree *mt = &vma->vm_mm->mm_mt; 1750 unsigned long end = start + size; 1751 struct mmu_notifier_range range; 1752 struct mmu_gather tlb; 1753 MA_STATE(mas, mt, vma->vm_end, vma->vm_end); 1754 1755 lru_add_drain(); 1756 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1757 start, start + size); 1758 tlb_gather_mmu(&tlb, vma->vm_mm); 1759 update_hiwater_rss(vma->vm_mm); 1760 mmu_notifier_invalidate_range_start(&range); 1761 do { 1762 unmap_single_vma(&tlb, vma, start, range.end, NULL); 1763 } while ((vma = mas_find(&mas, end - 1)) != NULL); 1764 mmu_notifier_invalidate_range_end(&range); 1765 tlb_finish_mmu(&tlb); 1766 } 1767 1768 /** 1769 * zap_page_range_single - remove user pages in a given range 1770 * @vma: vm_area_struct holding the applicable pages 1771 * @address: starting address of pages to zap 1772 * @size: number of bytes to zap 1773 * @details: details of shared cache invalidation 1774 * 1775 * The range must fit into one VMA. 
1776 */ 1777 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, 1778 unsigned long size, struct zap_details *details) 1779 { 1780 struct mmu_notifier_range range; 1781 struct mmu_gather tlb; 1782 1783 lru_add_drain(); 1784 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, 1785 address, address + size); 1786 tlb_gather_mmu(&tlb, vma->vm_mm); 1787 update_hiwater_rss(vma->vm_mm); 1788 mmu_notifier_invalidate_range_start(&range); 1789 unmap_single_vma(&tlb, vma, address, range.end, details); 1790 mmu_notifier_invalidate_range_end(&range); 1791 tlb_finish_mmu(&tlb); 1792 } 1793 1794 /** 1795 * zap_vma_ptes - remove ptes mapping the vma 1796 * @vma: vm_area_struct holding ptes to be zapped 1797 * @address: starting address of pages to zap 1798 * @size: number of bytes to zap 1799 * 1800 * This function only unmaps ptes assigned to VM_PFNMAP vmas. 1801 * 1802 * The entire address range must be fully contained within the vma. 1803 * 1804 */ 1805 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, 1806 unsigned long size) 1807 { 1808 if (!range_in_vma(vma, address, address + size) || 1809 !(vma->vm_flags & VM_PFNMAP)) 1810 return; 1811 1812 zap_page_range_single(vma, address, size, NULL); 1813 } 1814 EXPORT_SYMBOL_GPL(zap_vma_ptes); 1815 1816 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) 1817 { 1818 pgd_t *pgd; 1819 p4d_t *p4d; 1820 pud_t *pud; 1821 pmd_t *pmd; 1822 1823 pgd = pgd_offset(mm, addr); 1824 p4d = p4d_alloc(mm, pgd, addr); 1825 if (!p4d) 1826 return NULL; 1827 pud = pud_alloc(mm, p4d, addr); 1828 if (!pud) 1829 return NULL; 1830 pmd = pmd_alloc(mm, pud, addr); 1831 if (!pmd) 1832 return NULL; 1833 1834 VM_BUG_ON(pmd_trans_huge(*pmd)); 1835 return pmd; 1836 } 1837 1838 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, 1839 spinlock_t **ptl) 1840 { 1841 pmd_t *pmd = walk_to_pmd(mm, addr); 1842 1843 if (!pmd) 1844 return NULL; 1845 return pte_alloc_map_lock(mm, pmd, addr, ptl); 1846 } 1847 1848 static int validate_page_before_insert(struct page *page) 1849 { 1850 if (PageAnon(page) || PageSlab(page) || page_has_type(page)) 1851 return -EINVAL; 1852 flush_dcache_page(page); 1853 return 0; 1854 } 1855 1856 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, 1857 unsigned long addr, struct page *page, pgprot_t prot) 1858 { 1859 if (!pte_none(*pte)) 1860 return -EBUSY; 1861 /* Ok, finally just insert the thing.. */ 1862 get_page(page); 1863 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); 1864 page_add_file_rmap(page, vma, false); 1865 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); 1866 return 0; 1867 } 1868 1869 /* 1870 * This is the old fallback for page remapping. 1871 * 1872 * For historical reasons, it only allows reserved pages. Only 1873 * old drivers should use this, and they needed to mark their 1874 * pages reserved for the old functions anyway. 
1875 */ 1876 static int insert_page(struct vm_area_struct *vma, unsigned long addr, 1877 struct page *page, pgprot_t prot) 1878 { 1879 int retval; 1880 pte_t *pte; 1881 spinlock_t *ptl; 1882 1883 retval = validate_page_before_insert(page); 1884 if (retval) 1885 goto out; 1886 retval = -ENOMEM; 1887 pte = get_locked_pte(vma->vm_mm, addr, &ptl); 1888 if (!pte) 1889 goto out; 1890 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot); 1891 pte_unmap_unlock(pte, ptl); 1892 out: 1893 return retval; 1894 } 1895 1896 #ifdef pte_index 1897 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte, 1898 unsigned long addr, struct page *page, pgprot_t prot) 1899 { 1900 int err; 1901 1902 if (!page_count(page)) 1903 return -EINVAL; 1904 err = validate_page_before_insert(page); 1905 if (err) 1906 return err; 1907 return insert_page_into_pte_locked(vma, pte, addr, page, prot); 1908 } 1909 1910 /* insert_pages() amortizes the cost of spinlock operations 1911 * when inserting pages in a loop. Arch *must* define pte_index. 1912 */ 1913 static int insert_pages(struct vm_area_struct *vma, unsigned long addr, 1914 struct page **pages, unsigned long *num, pgprot_t prot) 1915 { 1916 pmd_t *pmd = NULL; 1917 pte_t *start_pte, *pte; 1918 spinlock_t *pte_lock; 1919 struct mm_struct *const mm = vma->vm_mm; 1920 unsigned long curr_page_idx = 0; 1921 unsigned long remaining_pages_total = *num; 1922 unsigned long pages_to_write_in_pmd; 1923 int ret; 1924 more: 1925 ret = -EFAULT; 1926 pmd = walk_to_pmd(mm, addr); 1927 if (!pmd) 1928 goto out; 1929 1930 pages_to_write_in_pmd = min_t(unsigned long, 1931 remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); 1932 1933 /* Allocate the PTE if necessary; takes PMD lock once only. */ 1934 ret = -ENOMEM; 1935 if (pte_alloc(mm, pmd)) 1936 goto out; 1937 1938 while (pages_to_write_in_pmd) { 1939 int pte_idx = 0; 1940 const int batch_size = min_t(int, pages_to_write_in_pmd, 8); 1941 1942 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); 1943 for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { 1944 int err = insert_page_in_batch_locked(vma, pte, 1945 addr, pages[curr_page_idx], prot); 1946 if (unlikely(err)) { 1947 pte_unmap_unlock(start_pte, pte_lock); 1948 ret = err; 1949 remaining_pages_total -= pte_idx; 1950 goto out; 1951 } 1952 addr += PAGE_SIZE; 1953 ++curr_page_idx; 1954 } 1955 pte_unmap_unlock(start_pte, pte_lock); 1956 pages_to_write_in_pmd -= batch_size; 1957 remaining_pages_total -= batch_size; 1958 } 1959 if (remaining_pages_total) 1960 goto more; 1961 ret = 0; 1962 out: 1963 *num = remaining_pages_total; 1964 return ret; 1965 } 1966 #endif /* ifdef pte_index */ 1967 1968 /** 1969 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock. 1970 * @vma: user vma to map to 1971 * @addr: target start user address of these pages 1972 * @pages: source kernel pages 1973 * @num: in: number of pages to map. out: number of pages that were *not* 1974 * mapped. (0 means all pages were successfully mapped). 1975 * 1976 * Preferred over vm_insert_page() when inserting multiple pages. 1977 * 1978 * In case of error, we may have mapped a subset of the provided 1979 * pages. It is the caller's responsibility to account for this case. 1980 * 1981 * The same restrictions apply as in vm_insert_page(). 
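 *
 * A minimal sketch, for illustration only, of an ->mmap() handler that
 * maps a driver-owned page array; struct my_dev and its pages/npages
 * fields are hypothetical names, not defined in this file:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *		unsigned long num = vma_pages(vma);
 *
 *		if (num > dev->npages)
 *			return -EINVAL;
 *		return vm_insert_pages(vma, vma->vm_start, dev->pages, &num);
 *	}
 *
 * As noted above, a failure may leave a subset of the pages mapped; an
 * ->mmap() handler can simply return the error, since the vma is then
 * destroyed along with any partially inserted pages (see also the
 * vm_map_pages() comment below).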
1982 */ 1983 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, 1984 struct page **pages, unsigned long *num) 1985 { 1986 #ifdef pte_index 1987 const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; 1988 1989 if (addr < vma->vm_start || end_addr >= vma->vm_end) 1990 return -EFAULT; 1991 if (!(vma->vm_flags & VM_MIXEDMAP)) { 1992 BUG_ON(mmap_read_trylock(vma->vm_mm)); 1993 BUG_ON(vma->vm_flags & VM_PFNMAP); 1994 vma->vm_flags |= VM_MIXEDMAP; 1995 } 1996 /* Defer page refcount checking till we're about to map that page. */ 1997 return insert_pages(vma, addr, pages, num, vma->vm_page_prot); 1998 #else 1999 unsigned long idx = 0, pgcount = *num; 2000 int err = -EINVAL; 2001 2002 for (; idx < pgcount; ++idx) { 2003 err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]); 2004 if (err) 2005 break; 2006 } 2007 *num = pgcount - idx; 2008 return err; 2009 #endif /* ifdef pte_index */ 2010 } 2011 EXPORT_SYMBOL(vm_insert_pages); 2012 2013 /** 2014 * vm_insert_page - insert single page into user vma 2015 * @vma: user vma to map to 2016 * @addr: target user address of this page 2017 * @page: source kernel page 2018 * 2019 * This allows drivers to insert individual pages they've allocated 2020 * into a user vma. 2021 * 2022 * The page has to be a nice clean _individual_ kernel allocation. 2023 * If you allocate a compound page, you need to have marked it as 2024 * such (__GFP_COMP), or manually just split the page up yourself 2025 * (see split_page()). 2026 * 2027 * NOTE! Traditionally this was done with "remap_pfn_range()" which 2028 * took an arbitrary page protection parameter. This doesn't allow 2029 * that. Your vma protection will have to be set up correctly, which 2030 * means that if you want a shared writable mapping, you'd better 2031 * ask for a shared writable mapping! 2032 * 2033 * The page does not need to be reserved. 2034 * 2035 * Usually this function is called from f_op->mmap() handler 2036 * under mm->mmap_lock write-lock, so it can change vma->vm_flags. 2037 * Caller must set VM_MIXEDMAP on vma if it wants to call this 2038 * function from other places, for example from page-fault handler. 2039 * 2040 * Return: %0 on success, negative error code otherwise. 2041 */ 2042 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, 2043 struct page *page) 2044 { 2045 if (addr < vma->vm_start || addr >= vma->vm_end) 2046 return -EFAULT; 2047 if (!page_count(page)) 2048 return -EINVAL; 2049 if (!(vma->vm_flags & VM_MIXEDMAP)) { 2050 BUG_ON(mmap_read_trylock(vma->vm_mm)); 2051 BUG_ON(vma->vm_flags & VM_PFNMAP); 2052 vma->vm_flags |= VM_MIXEDMAP; 2053 } 2054 return insert_page(vma, addr, page, vma->vm_page_prot); 2055 } 2056 EXPORT_SYMBOL(vm_insert_page); 2057 2058 /* 2059 * __vm_map_pages - maps range of kernel pages into user vma 2060 * @vma: user vma to map to 2061 * @pages: pointer to array of source kernel pages 2062 * @num: number of pages in page array 2063 * @offset: user's requested vm_pgoff 2064 * 2065 * This allows drivers to map range of kernel pages into a user vma. 2066 * 2067 * Return: 0 on success and error code otherwise. 
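 *
 * Worked example of the two -ENXIO checks below: with an object of
 * @num = 8 pages and a user-requested @offset (vm_pgoff) of 6, a vma
 * spanning 4 pages is rejected because 4 > 8 - 6, while a vma spanning
 * 2 pages maps pages[6] and pages[7]; any @offset >= 8 is rejected
 * outright.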
2068 */ 2069 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2070 unsigned long num, unsigned long offset) 2071 { 2072 unsigned long count = vma_pages(vma); 2073 unsigned long uaddr = vma->vm_start; 2074 int ret, i; 2075 2076 /* Fail if the user requested offset is beyond the end of the object */ 2077 if (offset >= num) 2078 return -ENXIO; 2079 2080 /* Fail if the user requested size exceeds available object size */ 2081 if (count > num - offset) 2082 return -ENXIO; 2083 2084 for (i = 0; i < count; i++) { 2085 ret = vm_insert_page(vma, uaddr, pages[offset + i]); 2086 if (ret < 0) 2087 return ret; 2088 uaddr += PAGE_SIZE; 2089 } 2090 2091 return 0; 2092 } 2093 2094 /** 2095 * vm_map_pages - maps range of kernel pages starts with non zero offset 2096 * @vma: user vma to map to 2097 * @pages: pointer to array of source kernel pages 2098 * @num: number of pages in page array 2099 * 2100 * Maps an object consisting of @num pages, catering for the user's 2101 * requested vm_pgoff 2102 * 2103 * If we fail to insert any page into the vma, the function will return 2104 * immediately leaving any previously inserted pages present. Callers 2105 * from the mmap handler may immediately return the error as their caller 2106 * will destroy the vma, removing any successfully inserted pages. Other 2107 * callers should make their own arrangements for calling unmap_region(). 2108 * 2109 * Context: Process context. Called by mmap handlers. 2110 * Return: 0 on success and error code otherwise. 2111 */ 2112 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, 2113 unsigned long num) 2114 { 2115 return __vm_map_pages(vma, pages, num, vma->vm_pgoff); 2116 } 2117 EXPORT_SYMBOL(vm_map_pages); 2118 2119 /** 2120 * vm_map_pages_zero - map range of kernel pages starts with zero offset 2121 * @vma: user vma to map to 2122 * @pages: pointer to array of source kernel pages 2123 * @num: number of pages in page array 2124 * 2125 * Similar to vm_map_pages(), except that it explicitly sets the offset 2126 * to 0. This function is intended for the drivers that did not consider 2127 * vm_pgoff. 2128 * 2129 * Context: Process context. Called by mmap handlers. 2130 * Return: 0 on success and error code otherwise. 2131 */ 2132 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, 2133 unsigned long num) 2134 { 2135 return __vm_map_pages(vma, pages, num, 0); 2136 } 2137 EXPORT_SYMBOL(vm_map_pages_zero); 2138 2139 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2140 pfn_t pfn, pgprot_t prot, bool mkwrite) 2141 { 2142 struct mm_struct *mm = vma->vm_mm; 2143 pte_t *pte, entry; 2144 spinlock_t *ptl; 2145 2146 pte = get_locked_pte(mm, addr, &ptl); 2147 if (!pte) 2148 return VM_FAULT_OOM; 2149 if (!pte_none(*pte)) { 2150 if (mkwrite) { 2151 /* 2152 * For read faults on private mappings the PFN passed 2153 * in may not match the PFN we have mapped if the 2154 * mapped PFN is a writeable COW page. In the mkwrite 2155 * case we are creating a writable PTE for a shared 2156 * mapping and we expect the PFNs to match. If they 2157 * don't match, we are likely racing with block 2158 * allocation and mapping invalidation so just skip the 2159 * update. 
2160 */ 2161 if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) { 2162 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); 2163 goto out_unlock; 2164 } 2165 entry = pte_mkyoung(*pte); 2166 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2167 if (ptep_set_access_flags(vma, addr, pte, entry, 1)) 2168 update_mmu_cache(vma, addr, pte); 2169 } 2170 goto out_unlock; 2171 } 2172 2173 /* Ok, finally just insert the thing.. */ 2174 if (pfn_t_devmap(pfn)) 2175 entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); 2176 else 2177 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 2178 2179 if (mkwrite) { 2180 entry = pte_mkyoung(entry); 2181 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2182 } 2183 2184 set_pte_at(mm, addr, pte, entry); 2185 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ 2186 2187 out_unlock: 2188 pte_unmap_unlock(pte, ptl); 2189 return VM_FAULT_NOPAGE; 2190 } 2191 2192 /** 2193 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot 2194 * @vma: user vma to map to 2195 * @addr: target user address of this page 2196 * @pfn: source kernel pfn 2197 * @pgprot: pgprot flags for the inserted page 2198 * 2199 * This is exactly like vmf_insert_pfn(), except that it allows drivers 2200 * to override pgprot on a per-page basis. 2201 * 2202 * This only makes sense for IO mappings, and it makes no sense for 2203 * COW mappings. In general, using multiple vmas is preferable; 2204 * vmf_insert_pfn_prot should only be used if using multiple VMAs is 2205 * impractical. 2206 * 2207 * See vmf_insert_mixed_prot() for a discussion of the implication of using 2208 * a value of @pgprot different from that of @vma->vm_page_prot. 2209 * 2210 * Context: Process context. May allocate using %GFP_KERNEL. 2211 * Return: vm_fault_t value. 2212 */ 2213 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, 2214 unsigned long pfn, pgprot_t pgprot) 2215 { 2216 /* 2217 * Technically, architectures with pte_special can avoid all these 2218 * restrictions (same for remap_pfn_range). However we would like 2219 * consistency in testing and feature parity among all, so we should 2220 * try to keep these invariants in place for everybody. 2221 */ 2222 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); 2223 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == 2224 (VM_PFNMAP|VM_MIXEDMAP)); 2225 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); 2226 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); 2227 2228 if (addr < vma->vm_start || addr >= vma->vm_end) 2229 return VM_FAULT_SIGBUS; 2230 2231 if (!pfn_modify_allowed(pfn, pgprot)) 2232 return VM_FAULT_SIGBUS; 2233 2234 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)); 2235 2236 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot, 2237 false); 2238 } 2239 EXPORT_SYMBOL(vmf_insert_pfn_prot); 2240 2241 /** 2242 * vmf_insert_pfn - insert single pfn into user vma 2243 * @vma: user vma to map to 2244 * @addr: target user address of this page 2245 * @pfn: source kernel pfn 2246 * 2247 * Similar to vm_insert_page, this allows drivers to insert individual pages 2248 * they've allocated into a user vma. Same comments apply. 2249 * 2250 * This function should only be called from a vm_ops->fault handler, and 2251 * in that case the handler should return the result of this function. 2252 * 2253 * vma cannot be a COW mapping. 2254 * 2255 * As this is called only for pages that do not currently exist, we 2256 * do not need to flush old virtual caches or the TLB. 2257 * 2258 * Context: Process context. 
May allocate using %GFP_KERNEL. 2259 * Return: vm_fault_t value. 2260 */ 2261 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 2262 unsigned long pfn) 2263 { 2264 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot); 2265 } 2266 EXPORT_SYMBOL(vmf_insert_pfn); 2267 2268 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) 2269 { 2270 /* these checks mirror the abort conditions in vm_normal_page */ 2271 if (vma->vm_flags & VM_MIXEDMAP) 2272 return true; 2273 if (pfn_t_devmap(pfn)) 2274 return true; 2275 if (pfn_t_special(pfn)) 2276 return true; 2277 if (is_zero_pfn(pfn_t_to_pfn(pfn))) 2278 return true; 2279 return false; 2280 } 2281 2282 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, 2283 unsigned long addr, pfn_t pfn, pgprot_t pgprot, 2284 bool mkwrite) 2285 { 2286 int err; 2287 2288 BUG_ON(!vm_mixed_ok(vma, pfn)); 2289 2290 if (addr < vma->vm_start || addr >= vma->vm_end) 2291 return VM_FAULT_SIGBUS; 2292 2293 track_pfn_insert(vma, &pgprot, pfn); 2294 2295 if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot)) 2296 return VM_FAULT_SIGBUS; 2297 2298 /* 2299 * If we don't have pte special, then we have to use the pfn_valid() 2300 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* 2301 * refcount the page if pfn_valid is true (hence insert_page rather 2302 * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP 2303 * without pte special, it would there be refcounted as a normal page. 2304 */ 2305 if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && 2306 !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { 2307 struct page *page; 2308 2309 /* 2310 * At this point we are committed to insert_page() 2311 * regardless of whether the caller specified flags that 2312 * result in pfn_t_has_page() == false. 2313 */ 2314 page = pfn_to_page(pfn_t_to_pfn(pfn)); 2315 err = insert_page(vma, addr, page, pgprot); 2316 } else { 2317 return insert_pfn(vma, addr, pfn, pgprot, mkwrite); 2318 } 2319 2320 if (err == -ENOMEM) 2321 return VM_FAULT_OOM; 2322 if (err < 0 && err != -EBUSY) 2323 return VM_FAULT_SIGBUS; 2324 2325 return VM_FAULT_NOPAGE; 2326 } 2327 2328 /** 2329 * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot 2330 * @vma: user vma to map to 2331 * @addr: target user address of this page 2332 * @pfn: source kernel pfn 2333 * @pgprot: pgprot flags for the inserted page 2334 * 2335 * This is exactly like vmf_insert_mixed(), except that it allows drivers 2336 * to override pgprot on a per-page basis. 2337 * 2338 * Typically this function should be used by drivers to set caching- and 2339 * encryption bits different than those of @vma->vm_page_prot, because 2340 * the caching- or encryption mode may not be known at mmap() time. 2341 * This is ok as long as @vma->vm_page_prot is not used by the core vm 2342 * to set caching and encryption bits for those vmas (except for COW pages). 2343 * This is ensured by core vm only modifying these page table entries using 2344 * functions that don't touch caching- or encryption bits, using pte_modify() 2345 * if needed. (See for example mprotect()). 2346 * Also when new page-table entries are created, this is only done using the 2347 * fault() callback, and never using the value of vma->vm_page_prot, 2348 * except for page-table entries that point to anonymous pages as the result 2349 * of COW. 2350 * 2351 * Context: Process context. May allocate using %GFP_KERNEL. 2352 * Return: vm_fault_t value. 
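 *
 * A minimal sketch, for illustration only, of a ->fault handler that maps
 * device pfns write-combined; struct my_dev and its base_pfn/nr_pfns
 * fields are hypothetical, and the vma is assumed to have been set up
 * with VM_MIXEDMAP by the driver's ->mmap():
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vmf->vma->vm_private_data;
 *
 *		if (vmf->pgoff >= dev->nr_pfns)
 *			return VM_FAULT_SIGBUS;
 *		return vmf_insert_mixed_prot(vmf->vma, vmf->address,
 *				__pfn_to_pfn_t(dev->base_pfn + vmf->pgoff, PFN_DEV),
 *				pgprot_writecombine(vmf->vma->vm_page_prot));
 *	}
 *
 * The handler returns the result directly, as with vmf_insert_pfn() above.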
2353 */ 2354 vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, 2355 pfn_t pfn, pgprot_t pgprot) 2356 { 2357 return __vm_insert_mixed(vma, addr, pfn, pgprot, false); 2358 } 2359 EXPORT_SYMBOL(vmf_insert_mixed_prot); 2360 2361 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, 2362 pfn_t pfn) 2363 { 2364 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); 2365 } 2366 EXPORT_SYMBOL(vmf_insert_mixed); 2367 2368 /* 2369 * If the insertion of PTE failed because someone else already added a 2370 * different entry in the mean time, we treat that as success as we assume 2371 * the same entry was actually inserted. 2372 */ 2373 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, 2374 unsigned long addr, pfn_t pfn) 2375 { 2376 return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); 2377 } 2378 EXPORT_SYMBOL(vmf_insert_mixed_mkwrite); 2379 2380 /* 2381 * maps a range of physical memory into the requested pages. the old 2382 * mappings are removed. any references to nonexistent pages results 2383 * in null mappings (currently treated as "copy-on-access") 2384 */ 2385 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 2386 unsigned long addr, unsigned long end, 2387 unsigned long pfn, pgprot_t prot) 2388 { 2389 pte_t *pte, *mapped_pte; 2390 spinlock_t *ptl; 2391 int err = 0; 2392 2393 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 2394 if (!pte) 2395 return -ENOMEM; 2396 arch_enter_lazy_mmu_mode(); 2397 do { 2398 BUG_ON(!pte_none(*pte)); 2399 if (!pfn_modify_allowed(pfn, prot)) { 2400 err = -EACCES; 2401 break; 2402 } 2403 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); 2404 pfn++; 2405 } while (pte++, addr += PAGE_SIZE, addr != end); 2406 arch_leave_lazy_mmu_mode(); 2407 pte_unmap_unlock(mapped_pte, ptl); 2408 return err; 2409 } 2410 2411 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 2412 unsigned long addr, unsigned long end, 2413 unsigned long pfn, pgprot_t prot) 2414 { 2415 pmd_t *pmd; 2416 unsigned long next; 2417 int err; 2418 2419 pfn -= addr >> PAGE_SHIFT; 2420 pmd = pmd_alloc(mm, pud, addr); 2421 if (!pmd) 2422 return -ENOMEM; 2423 VM_BUG_ON(pmd_trans_huge(*pmd)); 2424 do { 2425 next = pmd_addr_end(addr, end); 2426 err = remap_pte_range(mm, pmd, addr, next, 2427 pfn + (addr >> PAGE_SHIFT), prot); 2428 if (err) 2429 return err; 2430 } while (pmd++, addr = next, addr != end); 2431 return 0; 2432 } 2433 2434 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, 2435 unsigned long addr, unsigned long end, 2436 unsigned long pfn, pgprot_t prot) 2437 { 2438 pud_t *pud; 2439 unsigned long next; 2440 int err; 2441 2442 pfn -= addr >> PAGE_SHIFT; 2443 pud = pud_alloc(mm, p4d, addr); 2444 if (!pud) 2445 return -ENOMEM; 2446 do { 2447 next = pud_addr_end(addr, end); 2448 err = remap_pmd_range(mm, pud, addr, next, 2449 pfn + (addr >> PAGE_SHIFT), prot); 2450 if (err) 2451 return err; 2452 } while (pud++, addr = next, addr != end); 2453 return 0; 2454 } 2455 2456 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2457 unsigned long addr, unsigned long end, 2458 unsigned long pfn, pgprot_t prot) 2459 { 2460 p4d_t *p4d; 2461 unsigned long next; 2462 int err; 2463 2464 pfn -= addr >> PAGE_SHIFT; 2465 p4d = p4d_alloc(mm, pgd, addr); 2466 if (!p4d) 2467 return -ENOMEM; 2468 do { 2469 next = p4d_addr_end(addr, end); 2470 err = remap_pud_range(mm, p4d, addr, next, 2471 pfn + (addr >> PAGE_SHIFT), prot); 2472 if (err) 2473 return 
err; 2474 } while (p4d++, addr = next, addr != end); 2475 return 0; 2476 } 2477 2478 /* 2479 * Variant of remap_pfn_range that does not call track_pfn_remap. The caller 2480 * must have pre-validated the caching bits of the pgprot_t. 2481 */ 2482 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, 2483 unsigned long pfn, unsigned long size, pgprot_t prot) 2484 { 2485 pgd_t *pgd; 2486 unsigned long next; 2487 unsigned long end = addr + PAGE_ALIGN(size); 2488 struct mm_struct *mm = vma->vm_mm; 2489 int err; 2490 2491 if (WARN_ON_ONCE(!PAGE_ALIGNED(addr))) 2492 return -EINVAL; 2493 2494 /* 2495 * Physically remapped pages are special. Tell the 2496 * rest of the world about it: 2497 * VM_IO tells people not to look at these pages 2498 * (accesses can have side effects). 2499 * VM_PFNMAP tells the core MM that the base pages are just 2500 * raw PFN mappings, and do not have a "struct page" associated 2501 * with them. 2502 * VM_DONTEXPAND 2503 * Disable vma merging and expanding with mremap(). 2504 * VM_DONTDUMP 2505 * Omit vma from core dump, even when VM_IO turned off. 2506 * 2507 * There's a horrible special case to handle copy-on-write 2508 * behaviour that some programs depend on. We mark the "original" 2509 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 2510 * See vm_normal_page() for details. 2511 */ 2512 if (is_cow_mapping(vma->vm_flags)) { 2513 if (addr != vma->vm_start || end != vma->vm_end) 2514 return -EINVAL; 2515 vma->vm_pgoff = pfn; 2516 } 2517 2518 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; 2519 2520 BUG_ON(addr >= end); 2521 pfn -= addr >> PAGE_SHIFT; 2522 pgd = pgd_offset(mm, addr); 2523 flush_cache_range(vma, addr, end); 2524 do { 2525 next = pgd_addr_end(addr, end); 2526 err = remap_p4d_range(mm, pgd, addr, next, 2527 pfn + (addr >> PAGE_SHIFT), prot); 2528 if (err) 2529 return err; 2530 } while (pgd++, addr = next, addr != end); 2531 2532 return 0; 2533 } 2534 2535 /** 2536 * remap_pfn_range - remap kernel memory to userspace 2537 * @vma: user vma to map to 2538 * @addr: target page aligned user address to start at 2539 * @pfn: page frame number of kernel physical memory address 2540 * @size: size of mapping area 2541 * @prot: page protection flags for this mapping 2542 * 2543 * Note: this is only safe if the mm semaphore is held when called. 2544 * 2545 * Return: %0 on success, negative error code otherwise. 2546 */ 2547 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 2548 unsigned long pfn, unsigned long size, pgprot_t prot) 2549 { 2550 int err; 2551 2552 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); 2553 if (err) 2554 return -EINVAL; 2555 2556 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot); 2557 if (err) 2558 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); 2559 return err; 2560 } 2561 EXPORT_SYMBOL(remap_pfn_range); 2562 2563 /** 2564 * vm_iomap_memory - remap memory to userspace 2565 * @vma: user vma to map to 2566 * @start: start of the physical memory to be mapped 2567 * @len: size of area 2568 * 2569 * This is a simplified io_remap_pfn_range() for common driver use. The 2570 * driver just needs to give us the physical memory range to be mapped, 2571 * we'll figure out the rest from the vma information. 2572 * 2573 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get 2574 * whatever write-combining details or similar. 2575 * 2576 * Return: %0 on success, negative error code otherwise. 
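 *
 * A minimal sketch, for illustration only, of a driver ->mmap() handler
 * built on this helper; struct my_dev and its bar_start/bar_len fields
 * (the physical range of a device BAR) are hypothetical:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return vm_iomap_memory(vma, dev->bar_start, dev->bar_len);
 *	}
 *
 * The vm_page_prot tweak is the kind of adjustment the NOTE above refers
 * to; the offset into the range comes from vma->vm_pgoff, as handled
 * below.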
2577 */ 2578 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) 2579 { 2580 unsigned long vm_len, pfn, pages; 2581 2582 /* Check that the physical memory area passed in looks valid */ 2583 if (start + len < start) 2584 return -EINVAL; 2585 /* 2586 * You *really* shouldn't map things that aren't page-aligned, 2587 * but we've historically allowed it because IO memory might 2588 * just have smaller alignment. 2589 */ 2590 len += start & ~PAGE_MASK; 2591 pfn = start >> PAGE_SHIFT; 2592 pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; 2593 if (pfn + pages < pfn) 2594 return -EINVAL; 2595 2596 /* We start the mapping 'vm_pgoff' pages into the area */ 2597 if (vma->vm_pgoff > pages) 2598 return -EINVAL; 2599 pfn += vma->vm_pgoff; 2600 pages -= vma->vm_pgoff; 2601 2602 /* Can we fit all of the mapping? */ 2603 vm_len = vma->vm_end - vma->vm_start; 2604 if (vm_len >> PAGE_SHIFT > pages) 2605 return -EINVAL; 2606 2607 /* Ok, let it rip */ 2608 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); 2609 } 2610 EXPORT_SYMBOL(vm_iomap_memory); 2611 2612 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, 2613 unsigned long addr, unsigned long end, 2614 pte_fn_t fn, void *data, bool create, 2615 pgtbl_mod_mask *mask) 2616 { 2617 pte_t *pte, *mapped_pte; 2618 int err = 0; 2619 spinlock_t *ptl; 2620 2621 if (create) { 2622 mapped_pte = pte = (mm == &init_mm) ? 2623 pte_alloc_kernel_track(pmd, addr, mask) : 2624 pte_alloc_map_lock(mm, pmd, addr, &ptl); 2625 if (!pte) 2626 return -ENOMEM; 2627 } else { 2628 mapped_pte = pte = (mm == &init_mm) ? 2629 pte_offset_kernel(pmd, addr) : 2630 pte_offset_map_lock(mm, pmd, addr, &ptl); 2631 } 2632 2633 BUG_ON(pmd_huge(*pmd)); 2634 2635 arch_enter_lazy_mmu_mode(); 2636 2637 if (fn) { 2638 do { 2639 if (create || !pte_none(*pte)) { 2640 err = fn(pte++, addr, data); 2641 if (err) 2642 break; 2643 } 2644 } while (addr += PAGE_SIZE, addr != end); 2645 } 2646 *mask |= PGTBL_PTE_MODIFIED; 2647 2648 arch_leave_lazy_mmu_mode(); 2649 2650 if (mm != &init_mm) 2651 pte_unmap_unlock(mapped_pte, ptl); 2652 return err; 2653 } 2654 2655 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, 2656 unsigned long addr, unsigned long end, 2657 pte_fn_t fn, void *data, bool create, 2658 pgtbl_mod_mask *mask) 2659 { 2660 pmd_t *pmd; 2661 unsigned long next; 2662 int err = 0; 2663 2664 BUG_ON(pud_huge(*pud)); 2665 2666 if (create) { 2667 pmd = pmd_alloc_track(mm, pud, addr, mask); 2668 if (!pmd) 2669 return -ENOMEM; 2670 } else { 2671 pmd = pmd_offset(pud, addr); 2672 } 2673 do { 2674 next = pmd_addr_end(addr, end); 2675 if (pmd_none(*pmd) && !create) 2676 continue; 2677 if (WARN_ON_ONCE(pmd_leaf(*pmd))) 2678 return -EINVAL; 2679 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) { 2680 if (!create) 2681 continue; 2682 pmd_clear_bad(pmd); 2683 } 2684 err = apply_to_pte_range(mm, pmd, addr, next, 2685 fn, data, create, mask); 2686 if (err) 2687 break; 2688 } while (pmd++, addr = next, addr != end); 2689 2690 return err; 2691 } 2692 2693 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, 2694 unsigned long addr, unsigned long end, 2695 pte_fn_t fn, void *data, bool create, 2696 pgtbl_mod_mask *mask) 2697 { 2698 pud_t *pud; 2699 unsigned long next; 2700 int err = 0; 2701 2702 if (create) { 2703 pud = pud_alloc_track(mm, p4d, addr, mask); 2704 if (!pud) 2705 return -ENOMEM; 2706 } else { 2707 pud = pud_offset(p4d, addr); 2708 } 2709 do { 2710 next = pud_addr_end(addr, end); 2711 if (pud_none(*pud) && 
!create) 2712 continue; 2713 if (WARN_ON_ONCE(pud_leaf(*pud))) 2714 return -EINVAL; 2715 if (!pud_none(*pud) && WARN_ON_ONCE(pud_bad(*pud))) { 2716 if (!create) 2717 continue; 2718 pud_clear_bad(pud); 2719 } 2720 err = apply_to_pmd_range(mm, pud, addr, next, 2721 fn, data, create, mask); 2722 if (err) 2723 break; 2724 } while (pud++, addr = next, addr != end); 2725 2726 return err; 2727 } 2728 2729 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, 2730 unsigned long addr, unsigned long end, 2731 pte_fn_t fn, void *data, bool create, 2732 pgtbl_mod_mask *mask) 2733 { 2734 p4d_t *p4d; 2735 unsigned long next; 2736 int err = 0; 2737 2738 if (create) { 2739 p4d = p4d_alloc_track(mm, pgd, addr, mask); 2740 if (!p4d) 2741 return -ENOMEM; 2742 } else { 2743 p4d = p4d_offset(pgd, addr); 2744 } 2745 do { 2746 next = p4d_addr_end(addr, end); 2747 if (p4d_none(*p4d) && !create) 2748 continue; 2749 if (WARN_ON_ONCE(p4d_leaf(*p4d))) 2750 return -EINVAL; 2751 if (!p4d_none(*p4d) && WARN_ON_ONCE(p4d_bad(*p4d))) { 2752 if (!create) 2753 continue; 2754 p4d_clear_bad(p4d); 2755 } 2756 err = apply_to_pud_range(mm, p4d, addr, next, 2757 fn, data, create, mask); 2758 if (err) 2759 break; 2760 } while (p4d++, addr = next, addr != end); 2761 2762 return err; 2763 } 2764 2765 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2766 unsigned long size, pte_fn_t fn, 2767 void *data, bool create) 2768 { 2769 pgd_t *pgd; 2770 unsigned long start = addr, next; 2771 unsigned long end = addr + size; 2772 pgtbl_mod_mask mask = 0; 2773 int err = 0; 2774 2775 if (WARN_ON(addr >= end)) 2776 return -EINVAL; 2777 2778 pgd = pgd_offset(mm, addr); 2779 do { 2780 next = pgd_addr_end(addr, end); 2781 if (pgd_none(*pgd) && !create) 2782 continue; 2783 if (WARN_ON_ONCE(pgd_leaf(*pgd))) 2784 return -EINVAL; 2785 if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { 2786 if (!create) 2787 continue; 2788 pgd_clear_bad(pgd); 2789 } 2790 err = apply_to_p4d_range(mm, pgd, addr, next, 2791 fn, data, create, &mask); 2792 if (err) 2793 break; 2794 } while (pgd++, addr = next, addr != end); 2795 2796 if (mask & ARCH_PAGE_TABLE_SYNC_MASK) 2797 arch_sync_kernel_mappings(start, start + size); 2798 2799 return err; 2800 } 2801 2802 /* 2803 * Scan a region of virtual memory, filling in page tables as necessary 2804 * and calling a provided function on each leaf page table. 2805 */ 2806 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, 2807 unsigned long size, pte_fn_t fn, void *data) 2808 { 2809 return __apply_to_page_range(mm, addr, size, fn, data, true); 2810 } 2811 EXPORT_SYMBOL_GPL(apply_to_page_range); 2812 2813 /* 2814 * Scan a region of virtual memory, calling a provided function on 2815 * each leaf page table where it exists. 2816 * 2817 * Unlike apply_to_page_range, this does _not_ fill in page tables 2818 * where they are absent. 2819 */ 2820 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, 2821 unsigned long size, pte_fn_t fn, void *data) 2822 { 2823 return __apply_to_page_range(mm, addr, size, fn, data, false); 2824 } 2825 EXPORT_SYMBOL_GPL(apply_to_existing_page_range); 2826 2827 /* 2828 * handle_pte_fault chooses page fault handler according to an entry which was 2829 * read non-atomically. Before making any commitment, on those architectures 2830 * or configurations (e.g. 
i386 with PAE) which might give a mix of unmatched 2831 * parts, do_swap_page must check under lock before unmapping the pte and 2832 * proceeding (but do_wp_page is only called after already making such a check; 2833 * and do_anonymous_page can safely check later on). 2834 */ 2835 static inline int pte_unmap_same(struct vm_fault *vmf) 2836 { 2837 int same = 1; 2838 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) 2839 if (sizeof(pte_t) > sizeof(unsigned long)) { 2840 spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); 2841 spin_lock(ptl); 2842 same = pte_same(*vmf->pte, vmf->orig_pte); 2843 spin_unlock(ptl); 2844 } 2845 #endif 2846 pte_unmap(vmf->pte); 2847 vmf->pte = NULL; 2848 return same; 2849 } 2850 2851 static inline bool __wp_page_copy_user(struct page *dst, struct page *src, 2852 struct vm_fault *vmf) 2853 { 2854 bool ret; 2855 void *kaddr; 2856 void __user *uaddr; 2857 bool locked = false; 2858 struct vm_area_struct *vma = vmf->vma; 2859 struct mm_struct *mm = vma->vm_mm; 2860 unsigned long addr = vmf->address; 2861 2862 if (likely(src)) { 2863 copy_user_highpage(dst, src, addr, vma); 2864 return true; 2865 } 2866 2867 /* 2868 * If the source page was a PFN mapping, we don't have 2869 * a "struct page" for it. We do a best-effort copy by 2870 * just copying from the original user address. If that 2871 * fails, we just zero-fill it. Live with it. 2872 */ 2873 kaddr = kmap_atomic(dst); 2874 uaddr = (void __user *)(addr & PAGE_MASK); 2875 2876 /* 2877 * On architectures with software "accessed" bits, we would 2878 * take a double page fault, so mark it accessed here. 2879 */ 2880 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { 2881 pte_t entry; 2882 2883 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 2884 locked = true; 2885 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { 2886 /* 2887 * Other thread has already handled the fault 2888 * and update local tlb only 2889 */ 2890 update_mmu_tlb(vma, addr, vmf->pte); 2891 ret = false; 2892 goto pte_unlock; 2893 } 2894 2895 entry = pte_mkyoung(vmf->orig_pte); 2896 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0)) 2897 update_mmu_cache(vma, addr, vmf->pte); 2898 } 2899 2900 /* 2901 * This really shouldn't fail, because the page is there 2902 * in the page tables. But it might just be unreadable, 2903 * in which case we just give up and fill the result with 2904 * zeroes. 2905 */ 2906 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 2907 if (locked) 2908 goto warn; 2909 2910 /* Re-validate under PTL if the page is still mapped */ 2911 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); 2912 locked = true; 2913 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { 2914 /* The PTE changed under us, update local tlb */ 2915 update_mmu_tlb(vma, addr, vmf->pte); 2916 ret = false; 2917 goto pte_unlock; 2918 } 2919 2920 /* 2921 * The same page can be mapped back since last copy attempt. 2922 * Try to copy again under PTL. 
2923 */ 2924 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { 2925 /* 2926 * Give a warn in case there can be some obscure 2927 * use-case 2928 */ 2929 warn: 2930 WARN_ON_ONCE(1); 2931 clear_page(kaddr); 2932 } 2933 } 2934 2935 ret = true; 2936 2937 pte_unlock: 2938 if (locked) 2939 pte_unmap_unlock(vmf->pte, vmf->ptl); 2940 kunmap_atomic(kaddr); 2941 flush_dcache_page(dst); 2942 2943 return ret; 2944 } 2945 2946 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) 2947 { 2948 struct file *vm_file = vma->vm_file; 2949 2950 if (vm_file) 2951 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; 2952 2953 /* 2954 * Special mappings (e.g. VDSO) do not have any file so fake 2955 * a default GFP_KERNEL for them. 2956 */ 2957 return GFP_KERNEL; 2958 } 2959 2960 /* 2961 * Notify the address space that the page is about to become writable so that 2962 * it can prohibit this or wait for the page to get into an appropriate state. 2963 * 2964 * We do this without the lock held, so that it can sleep if it needs to. 2965 */ 2966 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) 2967 { 2968 vm_fault_t ret; 2969 struct page *page = vmf->page; 2970 unsigned int old_flags = vmf->flags; 2971 2972 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; 2973 2974 if (vmf->vma->vm_file && 2975 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host)) 2976 return VM_FAULT_SIGBUS; 2977 2978 ret = vmf->vma->vm_ops->page_mkwrite(vmf); 2979 /* Restore original flags so that caller is not surprised */ 2980 vmf->flags = old_flags; 2981 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2982 return ret; 2983 if (unlikely(!(ret & VM_FAULT_LOCKED))) { 2984 lock_page(page); 2985 if (!page->mapping) { 2986 unlock_page(page); 2987 return 0; /* retry */ 2988 } 2989 ret |= VM_FAULT_LOCKED; 2990 } else 2991 VM_BUG_ON_PAGE(!PageLocked(page), page); 2992 return ret; 2993 } 2994 2995 /* 2996 * Handle dirtying of a page in shared file mapping on a write fault. 2997 * 2998 * The function expects the page to be locked and unlocks it. 2999 */ 3000 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) 3001 { 3002 struct vm_area_struct *vma = vmf->vma; 3003 struct address_space *mapping; 3004 struct page *page = vmf->page; 3005 bool dirtied; 3006 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; 3007 3008 dirtied = set_page_dirty(page); 3009 VM_BUG_ON_PAGE(PageAnon(page), page); 3010 /* 3011 * Take a local copy of the address_space - page.mapping may be zeroed 3012 * by truncate after unlock_page(). The address_space itself remains 3013 * pinned by vma->vm_file's reference. We rely on unlock_page()'s 3014 * release semantics to prevent the compiler from undoing this copying. 3015 */ 3016 mapping = page_rmapping(page); 3017 unlock_page(page); 3018 3019 if (!page_mkwrite) 3020 file_update_time(vma->vm_file); 3021 3022 /* 3023 * Throttle page dirtying rate down to writeback speed. 3024 * 3025 * mapping may be NULL here because some device drivers do not 3026 * set page.mapping but still dirty their pages 3027 * 3028 * Drop the mmap_lock before waiting on IO, if we can. The file 3029 * is pinning the mapping, as per above. 
3030 */ 3031 if ((dirtied || page_mkwrite) && mapping) { 3032 struct file *fpin; 3033 3034 fpin = maybe_unlock_mmap_for_io(vmf, NULL); 3035 balance_dirty_pages_ratelimited(mapping); 3036 if (fpin) { 3037 fput(fpin); 3038 return VM_FAULT_COMPLETED; 3039 } 3040 } 3041 3042 return 0; 3043 } 3044 3045 /* 3046 * Handle write page faults for pages that can be reused in the current vma 3047 * 3048 * This can happen either due to the mapping being with the VM_SHARED flag, 3049 * or due to us being the last reference standing to the page. In either 3050 * case, all we need to do here is to mark the page as writable and update 3051 * any related book-keeping. 3052 */ 3053 static inline void wp_page_reuse(struct vm_fault *vmf) 3054 __releases(vmf->ptl) 3055 { 3056 struct vm_area_struct *vma = vmf->vma; 3057 struct page *page = vmf->page; 3058 pte_t entry; 3059 3060 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); 3061 VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page)); 3062 3063 /* 3064 * Clear the pages cpupid information as the existing 3065 * information potentially belongs to a now completely 3066 * unrelated process. 3067 */ 3068 if (page) 3069 page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); 3070 3071 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3072 entry = pte_mkyoung(vmf->orig_pte); 3073 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3074 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1)) 3075 update_mmu_cache(vma, vmf->address, vmf->pte); 3076 pte_unmap_unlock(vmf->pte, vmf->ptl); 3077 count_vm_event(PGREUSE); 3078 } 3079 3080 /* 3081 * Handle the case of a page which we actually need to copy to a new page, 3082 * either due to COW or unsharing. 3083 * 3084 * Called with mmap_lock locked and the old page referenced, but 3085 * without the ptl held. 3086 * 3087 * High level logic flow: 3088 * 3089 * - Allocate a page, copy the content of the old page to the new one. 3090 * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. 3091 * - Take the PTL. If the pte changed, bail out and release the allocated page 3092 * - If the pte is still the way we remember it, update the page table and all 3093 * relevant references. This includes dropping the reference the page-table 3094 * held to the old page, as well as updating the rmap. 3095 * - In any case, unlock the PTL and drop the reference we took to the old page. 3096 */ 3097 static vm_fault_t wp_page_copy(struct vm_fault *vmf) 3098 { 3099 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3100 struct vm_area_struct *vma = vmf->vma; 3101 struct mm_struct *mm = vma->vm_mm; 3102 struct page *old_page = vmf->page; 3103 struct page *new_page = NULL; 3104 pte_t entry; 3105 int page_copied = 0; 3106 struct mmu_notifier_range range; 3107 3108 delayacct_wpcopy_start(); 3109 3110 if (unlikely(anon_vma_prepare(vma))) 3111 goto oom; 3112 3113 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { 3114 new_page = alloc_zeroed_user_highpage_movable(vma, 3115 vmf->address); 3116 if (!new_page) 3117 goto oom; 3118 } else { 3119 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, 3120 vmf->address); 3121 if (!new_page) 3122 goto oom; 3123 3124 if (!__wp_page_copy_user(new_page, old_page, vmf)) { 3125 /* 3126 * COW failed, if the fault was solved by other, 3127 * it's fine. If not, userspace would re-fault on 3128 * the same address and we will handle the fault 3129 * from the second attempt. 
3130 */ 3131 put_page(new_page); 3132 if (old_page) 3133 put_page(old_page); 3134 3135 delayacct_wpcopy_end(); 3136 return 0; 3137 } 3138 kmsan_copy_page_meta(new_page, old_page); 3139 } 3140 3141 if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL)) 3142 goto oom_free_new; 3143 cgroup_throttle_swaprate(new_page, GFP_KERNEL); 3144 3145 __SetPageUptodate(new_page); 3146 3147 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, 3148 vmf->address & PAGE_MASK, 3149 (vmf->address & PAGE_MASK) + PAGE_SIZE); 3150 mmu_notifier_invalidate_range_start(&range); 3151 3152 /* 3153 * Re-check the pte - we dropped the lock 3154 */ 3155 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); 3156 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { 3157 if (old_page) { 3158 if (!PageAnon(old_page)) { 3159 dec_mm_counter_fast(mm, 3160 mm_counter_file(old_page)); 3161 inc_mm_counter_fast(mm, MM_ANONPAGES); 3162 } 3163 } else { 3164 inc_mm_counter_fast(mm, MM_ANONPAGES); 3165 } 3166 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); 3167 entry = mk_pte(new_page, vma->vm_page_prot); 3168 entry = pte_sw_mkyoung(entry); 3169 if (unlikely(unshare)) { 3170 if (pte_soft_dirty(vmf->orig_pte)) 3171 entry = pte_mksoft_dirty(entry); 3172 if (pte_uffd_wp(vmf->orig_pte)) 3173 entry = pte_mkuffd_wp(entry); 3174 } else { 3175 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 3176 } 3177 3178 /* 3179 * Clear the pte entry and flush it first, before updating the 3180 * pte with the new entry, to keep TLBs on different CPUs in 3181 * sync. This code used to set the new PTE then flush TLBs, but 3182 * that left a window where the new PTE could be loaded into 3183 * some TLBs while the old PTE remains in others. 3184 */ 3185 ptep_clear_flush_notify(vma, vmf->address, vmf->pte); 3186 page_add_new_anon_rmap(new_page, vma, vmf->address); 3187 lru_cache_add_inactive_or_unevictable(new_page, vma); 3188 /* 3189 * We call the notify macro here because, when using secondary 3190 * mmu page tables (such as kvm shadow page tables), we want the 3191 * new page to be mapped directly into the secondary page table. 3192 */ 3193 BUG_ON(unshare && pte_write(entry)); 3194 set_pte_at_notify(mm, vmf->address, vmf->pte, entry); 3195 update_mmu_cache(vma, vmf->address, vmf->pte); 3196 if (old_page) { 3197 /* 3198 * Only after switching the pte to the new page may 3199 * we remove the mapcount here. Otherwise another 3200 * process may come and find the rmap count decremented 3201 * before the pte is switched to the new page, and 3202 * "reuse" the old page writing into it while our pte 3203 * here still points into it and can be read by other 3204 * threads. 3205 * 3206 * The critical issue is to order this 3207 * page_remove_rmap with the ptp_clear_flush above. 3208 * Those stores are ordered by (if nothing else,) 3209 * the barrier present in the atomic_add_negative 3210 * in page_remove_rmap. 3211 * 3212 * Then the TLB flush in ptep_clear_flush ensures that 3213 * no process can access the old page before the 3214 * decremented mapcount is visible. And the old page 3215 * cannot be reused until after the decremented 3216 * mapcount is visible. So transitively, TLBs to 3217 * old page will be flushed before it can be reused. 3218 */ 3219 page_remove_rmap(old_page, vma, false); 3220 } 3221 3222 /* Free the old page.. 
*/ 3223 new_page = old_page; 3224 page_copied = 1; 3225 } else { 3226 update_mmu_tlb(vma, vmf->address, vmf->pte); 3227 } 3228 3229 if (new_page) 3230 put_page(new_page); 3231 3232 pte_unmap_unlock(vmf->pte, vmf->ptl); 3233 /* 3234 * No need to double call mmu_notifier->invalidate_range() callback as 3235 * the above ptep_clear_flush_notify() did already call it. 3236 */ 3237 mmu_notifier_invalidate_range_only_end(&range); 3238 if (old_page) { 3239 if (page_copied) 3240 free_swap_cache(old_page); 3241 put_page(old_page); 3242 } 3243 3244 delayacct_wpcopy_end(); 3245 return (page_copied && !unshare) ? VM_FAULT_WRITE : 0; 3246 oom_free_new: 3247 put_page(new_page); 3248 oom: 3249 if (old_page) 3250 put_page(old_page); 3251 3252 delayacct_wpcopy_end(); 3253 return VM_FAULT_OOM; 3254 } 3255 3256 /** 3257 * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE 3258 * writeable once the page is prepared 3259 * 3260 * @vmf: structure describing the fault 3261 * 3262 * This function handles all that is needed to finish a write page fault in a 3263 * shared mapping due to PTE being read-only once the mapped page is prepared. 3264 * It handles locking of PTE and modifying it. 3265 * 3266 * The function expects the page to be locked or other protection against 3267 * concurrent faults / writeback (such as DAX radix tree locks). 3268 * 3269 * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before 3270 * we acquired PTE lock. 3271 */ 3272 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) 3273 { 3274 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); 3275 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, 3276 &vmf->ptl); 3277 /* 3278 * We might have raced with another page fault while we released the 3279 * pte_offset_map_lock. 
3280 */ 3281 if (!pte_same(*vmf->pte, vmf->orig_pte)) { 3282 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 3283 pte_unmap_unlock(vmf->pte, vmf->ptl); 3284 return VM_FAULT_NOPAGE; 3285 } 3286 wp_page_reuse(vmf); 3287 return 0; 3288 } 3289 3290 /* 3291 * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED 3292 * mapping 3293 */ 3294 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) 3295 { 3296 struct vm_area_struct *vma = vmf->vma; 3297 3298 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { 3299 vm_fault_t ret; 3300 3301 pte_unmap_unlock(vmf->pte, vmf->ptl); 3302 vmf->flags |= FAULT_FLAG_MKWRITE; 3303 ret = vma->vm_ops->pfn_mkwrite(vmf); 3304 if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) 3305 return ret; 3306 return finish_mkwrite_fault(vmf); 3307 } 3308 wp_page_reuse(vmf); 3309 return VM_FAULT_WRITE; 3310 } 3311 3312 static vm_fault_t wp_page_shared(struct vm_fault *vmf) 3313 __releases(vmf->ptl) 3314 { 3315 struct vm_area_struct *vma = vmf->vma; 3316 vm_fault_t ret = VM_FAULT_WRITE; 3317 3318 get_page(vmf->page); 3319 3320 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { 3321 vm_fault_t tmp; 3322 3323 pte_unmap_unlock(vmf->pte, vmf->ptl); 3324 tmp = do_page_mkwrite(vmf); 3325 if (unlikely(!tmp || (tmp & 3326 (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 3327 put_page(vmf->page); 3328 return tmp; 3329 } 3330 tmp = finish_mkwrite_fault(vmf); 3331 if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { 3332 unlock_page(vmf->page); 3333 put_page(vmf->page); 3334 return tmp; 3335 } 3336 } else { 3337 wp_page_reuse(vmf); 3338 lock_page(vmf->page); 3339 } 3340 ret |= fault_dirty_shared_page(vmf); 3341 put_page(vmf->page); 3342 3343 return ret; 3344 } 3345 3346 /* 3347 * This routine handles present pages, when 3348 * * users try to write to a shared page (FAULT_FLAG_WRITE) 3349 * * GUP wants to take a R/O pin on a possibly shared anonymous page 3350 * (FAULT_FLAG_UNSHARE) 3351 * 3352 * It is done by copying the page to a new address and decrementing the 3353 * shared-page counter for the old page. 3354 * 3355 * Note that this routine assumes that the protection checks have been 3356 * done by the caller (the low-level page fault routine in most cases). 3357 * Thus, with FAULT_FLAG_WRITE, we can safely just mark it writable once we've 3358 * done any necessary COW. 3359 * 3360 * In case of FAULT_FLAG_WRITE, we also mark the page dirty at this point even 3361 * though the page will change only once the write actually happens. This 3362 * avoids a few races, and potentially makes it more efficient. 3363 * 3364 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3365 * but allow concurrent faults), with pte both mapped and locked. 3366 * We return with mmap_lock still held, but pte unmapped and unlocked. 3367 */ 3368 static vm_fault_t do_wp_page(struct vm_fault *vmf) 3369 __releases(vmf->ptl) 3370 { 3371 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 3372 struct vm_area_struct *vma = vmf->vma; 3373 struct folio *folio; 3374 3375 VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE)); 3376 VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE)); 3377 3378 if (likely(!unshare)) { 3379 if (userfaultfd_pte_wp(vma, *vmf->pte)) { 3380 pte_unmap_unlock(vmf->pte, vmf->ptl); 3381 return handle_userfault(vmf, VM_UFFD_WP); 3382 } 3383 3384 /* 3385 * Userfaultfd write-protect can defer flushes. Ensure the TLB 3386 * is flushed in this case before copying. 
3387 */ 3388 if (unlikely(userfaultfd_wp(vmf->vma) && 3389 mm_tlb_flush_pending(vmf->vma->vm_mm))) 3390 flush_tlb_page(vmf->vma, vmf->address); 3391 } 3392 3393 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte); 3394 if (!vmf->page) { 3395 if (unlikely(unshare)) { 3396 /* No anonymous page -> nothing to do. */ 3397 pte_unmap_unlock(vmf->pte, vmf->ptl); 3398 return 0; 3399 } 3400 3401 /* 3402 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a 3403 * VM_PFNMAP VMA. 3404 * 3405 * We should not cow pages in a shared writeable mapping. 3406 * Just mark the pages writable and/or call ops->pfn_mkwrite. 3407 */ 3408 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 3409 (VM_WRITE|VM_SHARED)) 3410 return wp_pfn_shared(vmf); 3411 3412 pte_unmap_unlock(vmf->pte, vmf->ptl); 3413 return wp_page_copy(vmf); 3414 } 3415 3416 /* 3417 * Take out anonymous pages first, anonymous shared vmas are 3418 * not dirty accountable. 3419 */ 3420 folio = page_folio(vmf->page); 3421 if (folio_test_anon(folio)) { 3422 /* 3423 * If the page is exclusive to this process we must reuse the 3424 * page without further checks. 3425 */ 3426 if (PageAnonExclusive(vmf->page)) 3427 goto reuse; 3428 3429 /* 3430 * We have to verify under folio lock: these early checks are 3431 * just an optimization to avoid locking the folio and freeing 3432 * the swapcache if there is little hope that we can reuse. 3433 * 3434 * KSM doesn't necessarily raise the folio refcount. 3435 */ 3436 if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) 3437 goto copy; 3438 if (!folio_test_lru(folio)) 3439 /* 3440 * Note: We cannot easily detect+handle references from 3441 * remote LRU pagevecs or references to LRU folios. 3442 */ 3443 lru_add_drain(); 3444 if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) 3445 goto copy; 3446 if (!folio_trylock(folio)) 3447 goto copy; 3448 if (folio_test_swapcache(folio)) 3449 folio_free_swap(folio); 3450 if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { 3451 folio_unlock(folio); 3452 goto copy; 3453 } 3454 /* 3455 * Ok, we've got the only folio reference from our mapping 3456 * and the folio is locked, it's dark out, and we're wearing 3457 * sunglasses. Hit it. 3458 */ 3459 page_move_anon_rmap(vmf->page, vma); 3460 folio_unlock(folio); 3461 reuse: 3462 if (unlikely(unshare)) { 3463 pte_unmap_unlock(vmf->pte, vmf->ptl); 3464 return 0; 3465 } 3466 wp_page_reuse(vmf); 3467 return VM_FAULT_WRITE; 3468 } else if (unshare) { 3469 /* No anonymous page -> nothing to do. */ 3470 pte_unmap_unlock(vmf->pte, vmf->ptl); 3471 return 0; 3472 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == 3473 (VM_WRITE|VM_SHARED))) { 3474 return wp_page_shared(vmf); 3475 } 3476 copy: 3477 /* 3478 * Ok, we need to copy. Oh, well.. 
3479 */ 3480 get_page(vmf->page); 3481 3482 pte_unmap_unlock(vmf->pte, vmf->ptl); 3483 #ifdef CONFIG_KSM 3484 if (PageKsm(vmf->page)) 3485 count_vm_event(COW_KSM); 3486 #endif 3487 return wp_page_copy(vmf); 3488 } 3489 3490 static void unmap_mapping_range_vma(struct vm_area_struct *vma, 3491 unsigned long start_addr, unsigned long end_addr, 3492 struct zap_details *details) 3493 { 3494 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); 3495 } 3496 3497 static inline void unmap_mapping_range_tree(struct rb_root_cached *root, 3498 pgoff_t first_index, 3499 pgoff_t last_index, 3500 struct zap_details *details) 3501 { 3502 struct vm_area_struct *vma; 3503 pgoff_t vba, vea, zba, zea; 3504 3505 vma_interval_tree_foreach(vma, root, first_index, last_index) { 3506 vba = vma->vm_pgoff; 3507 vea = vba + vma_pages(vma) - 1; 3508 zba = max(first_index, vba); 3509 zea = min(last_index, vea); 3510 3511 unmap_mapping_range_vma(vma, 3512 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 3513 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 3514 details); 3515 } 3516 } 3517 3518 /** 3519 * unmap_mapping_folio() - Unmap single folio from processes. 3520 * @folio: The locked folio to be unmapped. 3521 * 3522 * Unmap this folio from any userspace process which still has it mmaped. 3523 * Typically, for efficiency, the range of nearby pages has already been 3524 * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once 3525 * truncation or invalidation holds the lock on a folio, it may find that 3526 * the page has been remapped again: and then uses unmap_mapping_folio() 3527 * to unmap it finally. 3528 */ 3529 void unmap_mapping_folio(struct folio *folio) 3530 { 3531 struct address_space *mapping = folio->mapping; 3532 struct zap_details details = { }; 3533 pgoff_t first_index; 3534 pgoff_t last_index; 3535 3536 VM_BUG_ON(!folio_test_locked(folio)); 3537 3538 first_index = folio->index; 3539 last_index = folio->index + folio_nr_pages(folio) - 1; 3540 3541 details.even_cows = false; 3542 details.single_folio = folio; 3543 details.zap_flags = ZAP_FLAG_DROP_MARKER; 3544 3545 i_mmap_lock_read(mapping); 3546 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3547 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3548 last_index, &details); 3549 i_mmap_unlock_read(mapping); 3550 } 3551 3552 /** 3553 * unmap_mapping_pages() - Unmap pages from processes. 3554 * @mapping: The address space containing pages to be unmapped. 3555 * @start: Index of first page to be unmapped. 3556 * @nr: Number of pages to be unmapped. 0 to unmap to end of file. 3557 * @even_cows: Whether to unmap even private COWed pages. 3558 * 3559 * Unmap the pages in this address space from any userspace process which 3560 * has them mmaped. Generally, you want to remove COWed pages as well when 3561 * a file is being truncated, but not when invalidating pages from the page 3562 * cache. 
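 *
 * A minimal illustration of the calling convention (start_index is a
 * hypothetical variable): to unmap every page from start_index to the end
 * of the file, including private COWed copies as on truncation, a caller
 * would do
 *
 *	unmap_mapping_pages(inode->i_mapping, start_index, 0, true);
 *
 * with @nr == 0 meaning "to end of file"; invalidation paths pass
 * @even_cows == false instead so that private copies survive.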
3563 */ 3564 void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, 3565 pgoff_t nr, bool even_cows) 3566 { 3567 struct zap_details details = { }; 3568 pgoff_t first_index = start; 3569 pgoff_t last_index = start + nr - 1; 3570 3571 details.even_cows = even_cows; 3572 if (last_index < first_index) 3573 last_index = ULONG_MAX; 3574 3575 i_mmap_lock_read(mapping); 3576 if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) 3577 unmap_mapping_range_tree(&mapping->i_mmap, first_index, 3578 last_index, &details); 3579 i_mmap_unlock_read(mapping); 3580 } 3581 EXPORT_SYMBOL_GPL(unmap_mapping_pages); 3582 3583 /** 3584 * unmap_mapping_range - unmap the portion of all mmaps in the specified 3585 * address_space corresponding to the specified byte range in the underlying 3586 * file. 3587 * 3588 * @mapping: the address space containing mmaps to be unmapped. 3589 * @holebegin: byte in first page to unmap, relative to the start of 3590 * the underlying file. This will be rounded down to a PAGE_SIZE 3591 * boundary. Note that this is different from truncate_pagecache(), which 3592 * must keep the partial page. In contrast, we must get rid of 3593 * partial pages. 3594 * @holelen: size of prospective hole in bytes. This will be rounded 3595 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 3596 * end of the file. 3597 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 3598 * but 0 when invalidating pagecache, don't throw away private data. 3599 */ 3600 void unmap_mapping_range(struct address_space *mapping, 3601 loff_t const holebegin, loff_t const holelen, int even_cows) 3602 { 3603 pgoff_t hba = holebegin >> PAGE_SHIFT; 3604 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3605 3606 /* Check for overflow. 
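 * When loff_t and pgoff_t have the same width (64-bit kernels) the test
 * below is constant-false. As a worked example for a 32-bit kernel with
 * 4KiB pages: a holelen of 1ULL << 46 bytes is 1ULL << 34 pages, which
 * does not fit in the 32-bit hlen computed above, so the 64-bit
 * recomputation of the hole end trips the check and hlen is clamped so
 * that the unmap effectively extends to the end of the file.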
*/ 3607 if (sizeof(holelen) > sizeof(hlen)) { 3608 long long holeend = 3609 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 3610 if (holeend & ~(long long)ULONG_MAX) 3611 hlen = ULONG_MAX - hba + 1; 3612 } 3613 3614 unmap_mapping_pages(mapping, hba, hlen, even_cows); 3615 } 3616 EXPORT_SYMBOL(unmap_mapping_range); 3617 3618 /* 3619 * Restore a potential device exclusive pte to a working pte entry 3620 */ 3621 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) 3622 { 3623 struct folio *folio = page_folio(vmf->page); 3624 struct vm_area_struct *vma = vmf->vma; 3625 struct mmu_notifier_range range; 3626 3627 if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) 3628 return VM_FAULT_RETRY; 3629 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 3630 vma->vm_mm, vmf->address & PAGE_MASK, 3631 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL); 3632 mmu_notifier_invalidate_range_start(&range); 3633 3634 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3635 &vmf->ptl); 3636 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 3637 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); 3638 3639 pte_unmap_unlock(vmf->pte, vmf->ptl); 3640 folio_unlock(folio); 3641 3642 mmu_notifier_invalidate_range_end(&range); 3643 return 0; 3644 } 3645 3646 static inline bool should_try_to_free_swap(struct folio *folio, 3647 struct vm_area_struct *vma, 3648 unsigned int fault_flags) 3649 { 3650 if (!folio_test_swapcache(folio)) 3651 return false; 3652 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) || 3653 folio_test_mlocked(folio)) 3654 return true; 3655 /* 3656 * If we want to map a page that's in the swapcache writable, we 3657 * have to detect via the refcount if we're really the exclusive 3658 * user. Try freeing the swapcache to get rid of the swapcache 3659 * reference only in case it's likely that we'll be the exlusive user. 3660 */ 3661 return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && 3662 folio_ref_count(folio) == 2; 3663 } 3664 3665 static vm_fault_t pte_marker_clear(struct vm_fault *vmf) 3666 { 3667 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, 3668 vmf->address, &vmf->ptl); 3669 /* 3670 * Be careful so that we will only recover a special uffd-wp pte into a 3671 * none pte. Otherwise it means the pte could have changed, so retry. 3672 */ 3673 if (is_pte_marker(*vmf->pte)) 3674 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); 3675 pte_unmap_unlock(vmf->pte, vmf->ptl); 3676 return 0; 3677 } 3678 3679 /* 3680 * This is actually a page-missing access, but with uffd-wp special pte 3681 * installed. It means this pte was wr-protected before being unmapped. 3682 */ 3683 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf) 3684 { 3685 /* 3686 * Just in case there're leftover special ptes even after the region 3687 * got unregistered - we can simply clear them. We can also do that 3688 * proactively when e.g. when we do UFFDIO_UNREGISTER upon some uffd-wp 3689 * ranges, but it should be more efficient to be done lazily here. 
3690 */ 3691 if (unlikely(!userfaultfd_wp(vmf->vma) || vma_is_anonymous(vmf->vma))) 3692 return pte_marker_clear(vmf); 3693 3694 /* do_fault() can handle pte markers too like none pte */ 3695 return do_fault(vmf); 3696 } 3697 3698 static vm_fault_t handle_pte_marker(struct vm_fault *vmf) 3699 { 3700 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte); 3701 unsigned long marker = pte_marker_get(entry); 3702 3703 /* 3704 * PTE markers should always be with file-backed memories, and the 3705 * marker should never be empty. If anything weird happened, the best 3706 * thing to do is to kill the process along with its mm. 3707 */ 3708 if (WARN_ON_ONCE(vma_is_anonymous(vmf->vma) || !marker)) 3709 return VM_FAULT_SIGBUS; 3710 3711 if (pte_marker_entry_uffd_wp(entry)) 3712 return pte_marker_handle_uffd_wp(vmf); 3713 3714 /* This is an unknown pte marker */ 3715 return VM_FAULT_SIGBUS; 3716 } 3717 3718 /* 3719 * We enter with non-exclusive mmap_lock (to exclude vma changes, 3720 * but allow concurrent faults), and pte mapped but not yet locked. 3721 * We return with pte unmapped and unlocked. 3722 * 3723 * We return with the mmap_lock locked or unlocked in the same cases 3724 * as does filemap_fault(). 3725 */ 3726 vm_fault_t do_swap_page(struct vm_fault *vmf) 3727 { 3728 struct vm_area_struct *vma = vmf->vma; 3729 struct folio *swapcache, *folio = NULL; 3730 struct page *page; 3731 struct swap_info_struct *si = NULL; 3732 rmap_t rmap_flags = RMAP_NONE; 3733 bool exclusive = false; 3734 swp_entry_t entry; 3735 pte_t pte; 3736 int locked; 3737 vm_fault_t ret = 0; 3738 void *shadow = NULL; 3739 3740 if (!pte_unmap_same(vmf)) 3741 goto out; 3742 3743 entry = pte_to_swp_entry(vmf->orig_pte); 3744 if (unlikely(non_swap_entry(entry))) { 3745 if (is_migration_entry(entry)) { 3746 migration_entry_wait(vma->vm_mm, vmf->pmd, 3747 vmf->address); 3748 } else if (is_device_exclusive_entry(entry)) { 3749 vmf->page = pfn_swap_entry_to_page(entry); 3750 ret = remove_device_exclusive_entry(vmf); 3751 } else if (is_device_private_entry(entry)) { 3752 vmf->page = pfn_swap_entry_to_page(entry); 3753 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3754 vmf->address, &vmf->ptl); 3755 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 3756 spin_unlock(vmf->ptl); 3757 goto out; 3758 } 3759 3760 /* 3761 * Get a page reference while we know the page can't be 3762 * freed. 3763 */ 3764 get_page(vmf->page); 3765 pte_unmap_unlock(vmf->pte, vmf->ptl); 3766 vmf->page->pgmap->ops->migrate_to_ram(vmf); 3767 put_page(vmf->page); 3768 } else if (is_hwpoison_entry(entry)) { 3769 ret = VM_FAULT_HWPOISON; 3770 } else if (is_swapin_error_entry(entry)) { 3771 ret = VM_FAULT_SIGBUS; 3772 } else if (is_pte_marker_entry(entry)) { 3773 ret = handle_pte_marker(vmf); 3774 } else { 3775 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL); 3776 ret = VM_FAULT_SIGBUS; 3777 } 3778 goto out; 3779 } 3780 3781 /* Prevent swapoff from happening to us. 
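 * get_swap_device() takes a reference on the swap_info_struct backing
 * this entry (or returns NULL if the device is already gone), so the
 * swap area and its metadata cannot be freed by a concurrent swapoff
 * while the page is brought back in; every exit path below drops the
 * reference again with put_swap_device().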
*/ 3782 si = get_swap_device(entry); 3783 if (unlikely(!si)) 3784 goto out; 3785 3786 folio = swap_cache_get_folio(entry, vma, vmf->address); 3787 if (folio) 3788 page = folio_file_page(folio, swp_offset(entry)); 3789 swapcache = folio; 3790 3791 if (!folio) { 3792 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && 3793 __swap_count(entry) == 1) { 3794 /* skip swapcache */ 3795 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, 3796 vma, vmf->address, false); 3797 page = &folio->page; 3798 if (folio) { 3799 __folio_set_locked(folio); 3800 __folio_set_swapbacked(folio); 3801 3802 if (mem_cgroup_swapin_charge_folio(folio, 3803 vma->vm_mm, GFP_KERNEL, 3804 entry)) { 3805 ret = VM_FAULT_OOM; 3806 goto out_page; 3807 } 3808 mem_cgroup_swapin_uncharge_swap(entry); 3809 3810 shadow = get_shadow_from_swap_cache(entry); 3811 if (shadow) 3812 workingset_refault(folio, shadow); 3813 3814 folio_add_lru(folio); 3815 3816 /* To provide entry to swap_readpage() */ 3817 folio_set_swap_entry(folio, entry); 3818 swap_readpage(page, true, NULL); 3819 folio->private = NULL; 3820 } 3821 } else { 3822 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 3823 vmf); 3824 if (page) 3825 folio = page_folio(page); 3826 swapcache = folio; 3827 } 3828 3829 if (!folio) { 3830 /* 3831 * Back out if somebody else faulted in this pte 3832 * while we released the pte lock. 3833 */ 3834 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3835 vmf->address, &vmf->ptl); 3836 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) 3837 ret = VM_FAULT_OOM; 3838 goto unlock; 3839 } 3840 3841 /* Had to read the page from swap area: Major fault */ 3842 ret = VM_FAULT_MAJOR; 3843 count_vm_event(PGMAJFAULT); 3844 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT); 3845 } else if (PageHWPoison(page)) { 3846 /* 3847 * hwpoisoned dirty swapcache pages are kept for killing 3848 * owner processes (which may be unknown at hwpoison time) 3849 */ 3850 ret = VM_FAULT_HWPOISON; 3851 goto out_release; 3852 } 3853 3854 locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags); 3855 3856 if (!locked) { 3857 ret |= VM_FAULT_RETRY; 3858 goto out_release; 3859 } 3860 3861 if (swapcache) { 3862 /* 3863 * Make sure folio_free_swap() or swapoff did not release the 3864 * swapcache from under us. The page pin, and pte_same test 3865 * below, are not enough to exclude that. Even if it is still 3866 * swapcache, we need to check that the page's swap has not 3867 * changed. 3868 */ 3869 if (unlikely(!folio_test_swapcache(folio) || 3870 page_private(page) != entry.val)) 3871 goto out_page; 3872 3873 /* 3874 * KSM sometimes has to copy on read faults, for example, if 3875 * page->index of !PageKSM() pages would be nonlinear inside the 3876 * anon VMA -- PageKSM() is lost on actual swapout. 3877 */ 3878 page = ksm_might_need_to_copy(page, vma, vmf->address); 3879 if (unlikely(!page)) { 3880 ret = VM_FAULT_OOM; 3881 goto out_page; 3882 } 3883 folio = page_folio(page); 3884 3885 /* 3886 * If we want to map a page that's in the swapcache writable, we 3887 * have to detect via the refcount if we're really the exclusive 3888 * owner. Try removing the extra reference from the local LRU 3889 * pagevecs if required. 3890 */ 3891 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && 3892 !folio_test_ksm(folio) && !folio_test_lru(folio)) 3893 lru_add_drain(); 3894 } 3895 3896 cgroup_throttle_swaprate(page, GFP_KERNEL); 3897 3898 /* 3899 * Back out if somebody else already faulted in this pte. 
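 * The pte lock was dropped while the folio was allocated, read from swap
 * and locked, so another thread may have serviced this fault (or zapped
 * the pte) in the meantime. Comparing against orig_pte detects that, in
 * which case the work done so far is unwound via out_nomap.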
3900 */ 3901 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3902 &vmf->ptl); 3903 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) 3904 goto out_nomap; 3905 3906 if (unlikely(!folio_test_uptodate(folio))) { 3907 ret = VM_FAULT_SIGBUS; 3908 goto out_nomap; 3909 } 3910 3911 /* 3912 * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte 3913 * must never point at an anonymous page in the swapcache that is 3914 * PG_anon_exclusive. Sanity check that this holds and especially, that 3915 * no filesystem set PG_mappedtodisk on a page in the swapcache. Sanity 3916 * check after taking the PT lock and making sure that nobody 3917 * concurrently faulted in this page and set PG_anon_exclusive. 3918 */ 3919 BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio)); 3920 BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page)); 3921 3922 /* 3923 * Check under PT lock (to protect against concurrent fork() sharing 3924 * the swap entry concurrently) for certainly exclusive pages. 3925 */ 3926 if (!folio_test_ksm(folio)) { 3927 /* 3928 * Note that pte_swp_exclusive() == false for architectures 3929 * without __HAVE_ARCH_PTE_SWP_EXCLUSIVE. 3930 */ 3931 exclusive = pte_swp_exclusive(vmf->orig_pte); 3932 if (folio != swapcache) { 3933 /* 3934 * We have a fresh page that is not exposed to the 3935 * swapcache -> certainly exclusive. 3936 */ 3937 exclusive = true; 3938 } else if (exclusive && folio_test_writeback(folio) && 3939 data_race(si->flags & SWP_STABLE_WRITES)) { 3940 /* 3941 * This is tricky: not all swap backends support 3942 * concurrent page modifications while under writeback. 3943 * 3944 * So if we stumble over such a page in the swapcache 3945 * we must not set the page exclusive, otherwise we can 3946 * map it writable without further checks and modify it 3947 * while still under writeback. 3948 * 3949 * For these problematic swap backends, simply drop the 3950 * exclusive marker: this is perfectly fine as we start 3951 * writeback only if we fully unmapped the page and 3952 * there are no unexpected references on the page after 3953 * unmapping succeeded. After fully unmapped, no 3954 * further GUP references (FOLL_GET and FOLL_PIN) can 3955 * appear, so dropping the exclusive marker and mapping 3956 * it only R/O is fine. 3957 */ 3958 exclusive = false; 3959 } 3960 } 3961 3962 /* 3963 * Remove the swap entry and conditionally try to free up the swapcache. 3964 * We're already holding a reference on the page but haven't mapped it 3965 * yet. 3966 */ 3967 swap_free(entry); 3968 if (should_try_to_free_swap(folio, vma, vmf->flags)) 3969 folio_free_swap(folio); 3970 3971 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 3972 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); 3973 pte = mk_pte(page, vma->vm_page_prot); 3974 3975 /* 3976 * Same logic as in do_wp_page(); however, optimize for pages that are 3977 * certainly not shared either because we just allocated them without 3978 * exposing them to the swapcache or because the swap entry indicates 3979 * exclusivity. 
3980 */ 3981 if (!folio_test_ksm(folio) && 3982 (exclusive || folio_ref_count(folio) == 1)) { 3983 if (vmf->flags & FAULT_FLAG_WRITE) { 3984 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 3985 vmf->flags &= ~FAULT_FLAG_WRITE; 3986 ret |= VM_FAULT_WRITE; 3987 } 3988 rmap_flags |= RMAP_EXCLUSIVE; 3989 } 3990 flush_icache_page(vma, page); 3991 if (pte_swp_soft_dirty(vmf->orig_pte)) 3992 pte = pte_mksoft_dirty(pte); 3993 if (pte_swp_uffd_wp(vmf->orig_pte)) { 3994 pte = pte_mkuffd_wp(pte); 3995 pte = pte_wrprotect(pte); 3996 } 3997 vmf->orig_pte = pte; 3998 3999 /* ksm created a completely new copy */ 4000 if (unlikely(folio != swapcache && swapcache)) { 4001 page_add_new_anon_rmap(page, vma, vmf->address); 4002 folio_add_lru_vma(folio, vma); 4003 } else { 4004 page_add_anon_rmap(page, vma, vmf->address, rmap_flags); 4005 } 4006 4007 VM_BUG_ON(!folio_test_anon(folio) || 4008 (pte_write(pte) && !PageAnonExclusive(page))); 4009 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); 4010 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); 4011 4012 folio_unlock(folio); 4013 if (folio != swapcache && swapcache) { 4014 /* 4015 * Hold the lock to avoid the swap entry to be reused 4016 * until we take the PT lock for the pte_same() check 4017 * (to avoid false positives from pte_same). For 4018 * further safety release the lock after the swap_free 4019 * so that the swap count won't change under a 4020 * parallel locked swapcache. 4021 */ 4022 folio_unlock(swapcache); 4023 folio_put(swapcache); 4024 } 4025 4026 if (vmf->flags & FAULT_FLAG_WRITE) { 4027 ret |= do_wp_page(vmf); 4028 if (ret & VM_FAULT_ERROR) 4029 ret &= VM_FAULT_ERROR; 4030 goto out; 4031 } 4032 4033 /* No need to invalidate - it was non-present before */ 4034 update_mmu_cache(vma, vmf->address, vmf->pte); 4035 unlock: 4036 pte_unmap_unlock(vmf->pte, vmf->ptl); 4037 out: 4038 if (si) 4039 put_swap_device(si); 4040 return ret; 4041 out_nomap: 4042 pte_unmap_unlock(vmf->pte, vmf->ptl); 4043 out_page: 4044 folio_unlock(folio); 4045 out_release: 4046 folio_put(folio); 4047 if (folio != swapcache && swapcache) { 4048 folio_unlock(swapcache); 4049 folio_put(swapcache); 4050 } 4051 if (si) 4052 put_swap_device(si); 4053 return ret; 4054 } 4055 4056 /* 4057 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4058 * but allow concurrent faults), and pte mapped but not yet locked. 4059 * We return with mmap_lock still held, but pte unmapped and unlocked. 4060 */ 4061 static vm_fault_t do_anonymous_page(struct vm_fault *vmf) 4062 { 4063 struct vm_area_struct *vma = vmf->vma; 4064 struct page *page; 4065 vm_fault_t ret = 0; 4066 pte_t entry; 4067 4068 /* File mapping without ->vm_ops ? */ 4069 if (vma->vm_flags & VM_SHARED) 4070 return VM_FAULT_SIGBUS; 4071 4072 /* 4073 * Use pte_alloc() instead of pte_alloc_map(). We can't run 4074 * pte_offset_map() on pmds where a huge pmd might be created 4075 * from a different thread. 4076 * 4077 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when 4078 * parallel threads are excluded by other means. 4079 * 4080 * Here we only have mmap_read_lock(mm). 
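 * pte_alloc() still handles that safely: __pte_alloc() re-checks
 * pmd_none() under the pmd lock and frees the freshly allocated page
 * table instead of installing it if another thread won the race.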
4081 */ 4082 if (pte_alloc(vma->vm_mm, vmf->pmd)) 4083 return VM_FAULT_OOM; 4084 4085 /* See comment in handle_pte_fault() */ 4086 if (unlikely(pmd_trans_unstable(vmf->pmd))) 4087 return 0; 4088 4089 /* Use the zero-page for reads */ 4090 if (!(vmf->flags & FAULT_FLAG_WRITE) && 4091 !mm_forbids_zeropage(vma->vm_mm)) { 4092 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address), 4093 vma->vm_page_prot)); 4094 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4095 vmf->address, &vmf->ptl); 4096 if (!pte_none(*vmf->pte)) { 4097 update_mmu_tlb(vma, vmf->address, vmf->pte); 4098 goto unlock; 4099 } 4100 ret = check_stable_address_space(vma->vm_mm); 4101 if (ret) 4102 goto unlock; 4103 /* Deliver the page fault to userland, check inside PT lock */ 4104 if (userfaultfd_missing(vma)) { 4105 pte_unmap_unlock(vmf->pte, vmf->ptl); 4106 return handle_userfault(vmf, VM_UFFD_MISSING); 4107 } 4108 goto setpte; 4109 } 4110 4111 /* Allocate our own private page. */ 4112 if (unlikely(anon_vma_prepare(vma))) 4113 goto oom; 4114 page = alloc_zeroed_user_highpage_movable(vma, vmf->address); 4115 if (!page) 4116 goto oom; 4117 4118 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) 4119 goto oom_free_page; 4120 cgroup_throttle_swaprate(page, GFP_KERNEL); 4121 4122 /* 4123 * The memory barrier inside __SetPageUptodate makes sure that 4124 * preceding stores to the page contents become visible before 4125 * the set_pte_at() write. 4126 */ 4127 __SetPageUptodate(page); 4128 4129 entry = mk_pte(page, vma->vm_page_prot); 4130 entry = pte_sw_mkyoung(entry); 4131 if (vma->vm_flags & VM_WRITE) 4132 entry = pte_mkwrite(pte_mkdirty(entry)); 4133 4134 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4135 &vmf->ptl); 4136 if (!pte_none(*vmf->pte)) { 4137 update_mmu_tlb(vma, vmf->address, vmf->pte); 4138 goto release; 4139 } 4140 4141 ret = check_stable_address_space(vma->vm_mm); 4142 if (ret) 4143 goto release; 4144 4145 /* Deliver the page fault to userland, check inside PT lock */ 4146 if (userfaultfd_missing(vma)) { 4147 pte_unmap_unlock(vmf->pte, vmf->ptl); 4148 put_page(page); 4149 return handle_userfault(vmf, VM_UFFD_MISSING); 4150 } 4151 4152 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 4153 page_add_new_anon_rmap(page, vma, vmf->address); 4154 lru_cache_add_inactive_or_unevictable(page, vma); 4155 setpte: 4156 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); 4157 4158 /* No need to invalidate - it was non-present before */ 4159 update_mmu_cache(vma, vmf->address, vmf->pte); 4160 unlock: 4161 pte_unmap_unlock(vmf->pte, vmf->ptl); 4162 return ret; 4163 release: 4164 put_page(page); 4165 goto unlock; 4166 oom_free_page: 4167 put_page(page); 4168 oom: 4169 return VM_FAULT_OOM; 4170 } 4171 4172 /* 4173 * The mmap_lock must have been held on entry, and may have been 4174 * released depending on flags and vma->vm_ops->fault() return value. 4175 * See filemap_fault() and __lock_page_retry(). 
4176 */ 4177 static vm_fault_t __do_fault(struct vm_fault *vmf) 4178 { 4179 struct vm_area_struct *vma = vmf->vma; 4180 vm_fault_t ret; 4181 4182 /* 4183 * Preallocate pte before we take page_lock because this might lead to 4184 * deadlocks for memcg reclaim which waits for pages under writeback: 4185 * lock_page(A) 4186 * SetPageWriteback(A) 4187 * unlock_page(A) 4188 * lock_page(B) 4189 * lock_page(B) 4190 * pte_alloc_one 4191 * shrink_page_list 4192 * wait_on_page_writeback(A) 4193 * SetPageWriteback(B) 4194 * unlock_page(B) 4195 * # flush A, B to clear the writeback 4196 */ 4197 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) { 4198 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4199 if (!vmf->prealloc_pte) 4200 return VM_FAULT_OOM; 4201 } 4202 4203 ret = vma->vm_ops->fault(vmf); 4204 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 4205 VM_FAULT_DONE_COW))) 4206 return ret; 4207 4208 if (unlikely(PageHWPoison(vmf->page))) { 4209 struct page *page = vmf->page; 4210 vm_fault_t poisonret = VM_FAULT_HWPOISON; 4211 if (ret & VM_FAULT_LOCKED) { 4212 if (page_mapped(page)) 4213 unmap_mapping_pages(page_mapping(page), 4214 page->index, 1, false); 4215 /* Retry if a clean page was removed from the cache. */ 4216 if (invalidate_inode_page(page)) 4217 poisonret = VM_FAULT_NOPAGE; 4218 unlock_page(page); 4219 } 4220 put_page(page); 4221 vmf->page = NULL; 4222 return poisonret; 4223 } 4224 4225 if (unlikely(!(ret & VM_FAULT_LOCKED))) 4226 lock_page(vmf->page); 4227 else 4228 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page); 4229 4230 return ret; 4231 } 4232 4233 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4234 static void deposit_prealloc_pte(struct vm_fault *vmf) 4235 { 4236 struct vm_area_struct *vma = vmf->vma; 4237 4238 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte); 4239 /* 4240 * We are going to consume the prealloc table, 4241 * count that as nr_ptes. 4242 */ 4243 mm_inc_nr_ptes(vma->vm_mm); 4244 vmf->prealloc_pte = NULL; 4245 } 4246 4247 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4248 { 4249 struct vm_area_struct *vma = vmf->vma; 4250 bool write = vmf->flags & FAULT_FLAG_WRITE; 4251 unsigned long haddr = vmf->address & HPAGE_PMD_MASK; 4252 pmd_t entry; 4253 int i; 4254 vm_fault_t ret = VM_FAULT_FALLBACK; 4255 4256 if (!transhuge_vma_suitable(vma, haddr)) 4257 return ret; 4258 4259 page = compound_head(page); 4260 if (compound_order(page) != HPAGE_PMD_ORDER) 4261 return ret; 4262 4263 /* 4264 * Just backoff if any subpage of a THP is corrupted otherwise 4265 * the corrupted page may mapped by PMD silently to escape the 4266 * check. This kind of THP just can be PTE mapped. Access to 4267 * the corrupted subpage should trigger SIGBUS as expected. 4268 */ 4269 if (unlikely(PageHasHWPoisoned(page))) 4270 return ret; 4271 4272 /* 4273 * Archs like ppc64 need additional space to store information 4274 * related to pte entry. Use the preallocated table for that. 
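 * The deposited table is consumed by deposit_prealloc_pte() below and is
 * withdrawn again when the huge pmd is later split or zapped.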
4275 */ 4276 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) { 4277 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm); 4278 if (!vmf->prealloc_pte) 4279 return VM_FAULT_OOM; 4280 } 4281 4282 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); 4283 if (unlikely(!pmd_none(*vmf->pmd))) 4284 goto out; 4285 4286 for (i = 0; i < HPAGE_PMD_NR; i++) 4287 flush_icache_page(vma, page + i); 4288 4289 entry = mk_huge_pmd(page, vma->vm_page_prot); 4290 if (write) 4291 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 4292 4293 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); 4294 page_add_file_rmap(page, vma, true); 4295 4296 /* 4297 * deposit and withdraw with pmd lock held 4298 */ 4299 if (arch_needs_pgtable_deposit()) 4300 deposit_prealloc_pte(vmf); 4301 4302 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); 4303 4304 update_mmu_cache_pmd(vma, haddr, vmf->pmd); 4305 4306 /* fault is handled */ 4307 ret = 0; 4308 count_vm_event(THP_FILE_MAPPED); 4309 out: 4310 spin_unlock(vmf->ptl); 4311 return ret; 4312 } 4313 #else 4314 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) 4315 { 4316 return VM_FAULT_FALLBACK; 4317 } 4318 #endif 4319 4320 void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) 4321 { 4322 struct vm_area_struct *vma = vmf->vma; 4323 bool uffd_wp = pte_marker_uffd_wp(vmf->orig_pte); 4324 bool write = vmf->flags & FAULT_FLAG_WRITE; 4325 bool prefault = vmf->address != addr; 4326 pte_t entry; 4327 4328 flush_icache_page(vma, page); 4329 entry = mk_pte(page, vma->vm_page_prot); 4330 4331 if (prefault && arch_wants_old_prefaulted_pte()) 4332 entry = pte_mkold(entry); 4333 else 4334 entry = pte_sw_mkyoung(entry); 4335 4336 if (write) 4337 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 4338 if (unlikely(uffd_wp)) 4339 entry = pte_mkuffd_wp(pte_wrprotect(entry)); 4340 /* copy-on-write page */ 4341 if (write && !(vma->vm_flags & VM_SHARED)) { 4342 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); 4343 page_add_new_anon_rmap(page, vma, addr); 4344 lru_cache_add_inactive_or_unevictable(page, vma); 4345 } else { 4346 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); 4347 page_add_file_rmap(page, vma, false); 4348 } 4349 set_pte_at(vma->vm_mm, addr, vmf->pte, entry); 4350 } 4351 4352 static bool vmf_pte_changed(struct vm_fault *vmf) 4353 { 4354 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) 4355 return !pte_same(*vmf->pte, vmf->orig_pte); 4356 4357 return !pte_none(*vmf->pte); 4358 } 4359 4360 /** 4361 * finish_fault - finish page fault once we have prepared the page to fault 4362 * 4363 * @vmf: structure describing the fault 4364 * 4365 * This function handles all that is needed to finish a page fault once the 4366 * page to fault in is prepared. It handles locking of PTEs, inserts PTE for 4367 * given page, adds reverse page mapping, handles memcg charges and LRU 4368 * addition. 4369 * 4370 * The function expects the page to be locked and on success it consumes a 4371 * reference of a page being mapped (for the PTE which maps it). 4372 * 4373 * Return: %0 on success, %VM_FAULT_ code in case of error. 4374 */ 4375 vm_fault_t finish_fault(struct vm_fault *vmf) 4376 { 4377 struct vm_area_struct *vma = vmf->vma; 4378 struct page *page; 4379 vm_fault_t ret; 4380 4381 /* Did we COW the page? 
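 * For a write fault on a private mapping, the fault handler prepared a
 * copy of the backing page in vmf->cow_page (see do_cow_fault()) and
 * that copy is what gets mapped; otherwise we map vmf->page itself.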
*/ 4382 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) 4383 page = vmf->cow_page; 4384 else 4385 page = vmf->page; 4386 4387 /* 4388 * check even for read faults because we might have lost our CoWed 4389 * page 4390 */ 4391 if (!(vma->vm_flags & VM_SHARED)) { 4392 ret = check_stable_address_space(vma->vm_mm); 4393 if (ret) 4394 return ret; 4395 } 4396 4397 if (pmd_none(*vmf->pmd)) { 4398 if (PageTransCompound(page)) { 4399 ret = do_set_pmd(vmf, page); 4400 if (ret != VM_FAULT_FALLBACK) 4401 return ret; 4402 } 4403 4404 if (vmf->prealloc_pte) 4405 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte); 4406 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) 4407 return VM_FAULT_OOM; 4408 } 4409 4410 /* 4411 * See comment in handle_pte_fault() for how this scenario happens, we 4412 * need to return NOPAGE so that we drop this page. 4413 */ 4414 if (pmd_devmap_trans_unstable(vmf->pmd)) 4415 return VM_FAULT_NOPAGE; 4416 4417 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 4418 vmf->address, &vmf->ptl); 4419 4420 /* Re-check under ptl */ 4421 if (likely(!vmf_pte_changed(vmf))) { 4422 do_set_pte(vmf, page, vmf->address); 4423 4424 /* no need to invalidate: a not-present page won't be cached */ 4425 update_mmu_cache(vma, vmf->address, vmf->pte); 4426 4427 ret = 0; 4428 } else { 4429 update_mmu_tlb(vma, vmf->address, vmf->pte); 4430 ret = VM_FAULT_NOPAGE; 4431 } 4432 4433 pte_unmap_unlock(vmf->pte, vmf->ptl); 4434 return ret; 4435 } 4436 4437 static unsigned long fault_around_bytes __read_mostly = 4438 rounddown_pow_of_two(65536); 4439 4440 #ifdef CONFIG_DEBUG_FS 4441 static int fault_around_bytes_get(void *data, u64 *val) 4442 { 4443 *val = fault_around_bytes; 4444 return 0; 4445 } 4446 4447 /* 4448 * fault_around_bytes must be rounded down to the nearest page order as it's 4449 * what do_fault_around() expects to see. 4450 */ 4451 static int fault_around_bytes_set(void *data, u64 val) 4452 { 4453 if (val / PAGE_SIZE > PTRS_PER_PTE) 4454 return -EINVAL; 4455 if (val > PAGE_SIZE) 4456 fault_around_bytes = rounddown_pow_of_two(val); 4457 else 4458 fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ 4459 return 0; 4460 } 4461 DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops, 4462 fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); 4463 4464 static int __init fault_around_debugfs(void) 4465 { 4466 debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL, 4467 &fault_around_bytes_fops); 4468 return 0; 4469 } 4470 late_initcall(fault_around_debugfs); 4471 #endif 4472 4473 /* 4474 * do_fault_around() tries to map few pages around the fault address. The hope 4475 * is that the pages will be needed soon and this will lower the number of 4476 * faults to handle. 4477 * 4478 * It uses vm_ops->map_pages() to map the pages, which skips the page if it's 4479 * not ready to be mapped: not up-to-date, locked, etc. 4480 * 4481 * This function doesn't cross the VMA boundaries, in order to call map_pages() 4482 * only once. 4483 * 4484 * fault_around_bytes defines how many bytes we'll try to map. 4485 * do_fault_around() expects it to be set to a power of two less than or equal 4486 * to PTRS_PER_PTE. 4487 * 4488 * The virtual address of the area that we map is naturally aligned to 4489 * fault_around_bytes rounded down to the machine page size 4490 * (and therefore to page order). This way it's easier to guarantee 4491 * that we don't cross page table boundaries. 
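 * As a concrete example, with the default fault_around_bytes of 65536
 * and 4KiB pages, nr_pages is 16: a read fault maps up to 16 ready
 * pagecache pages from a 64KiB-aligned window around the fault address,
 * clipped to the VMA and to the page table containing the faulting pte.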
4492 */ 4493 static vm_fault_t do_fault_around(struct vm_fault *vmf) 4494 { 4495 unsigned long address = vmf->address, nr_pages, mask; 4496 pgoff_t start_pgoff = vmf->pgoff; 4497 pgoff_t end_pgoff; 4498 int off; 4499 4500 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; 4501 mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; 4502 4503 address = max(address & mask, vmf->vma->vm_start); 4504 off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 4505 start_pgoff -= off; 4506 4507 /* 4508 * end_pgoff is either the end of the page table, the end of 4509 * the vma or nr_pages from start_pgoff, depending what is nearest. 4510 */ 4511 end_pgoff = start_pgoff - 4512 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + 4513 PTRS_PER_PTE - 1; 4514 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1, 4515 start_pgoff + nr_pages - 1); 4516 4517 if (pmd_none(*vmf->pmd)) { 4518 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm); 4519 if (!vmf->prealloc_pte) 4520 return VM_FAULT_OOM; 4521 } 4522 4523 return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff); 4524 } 4525 4526 /* Return true if we should do read fault-around, false otherwise */ 4527 static inline bool should_fault_around(struct vm_fault *vmf) 4528 { 4529 /* No ->map_pages? No way to fault around... */ 4530 if (!vmf->vma->vm_ops->map_pages) 4531 return false; 4532 4533 if (uffd_disable_fault_around(vmf->vma)) 4534 return false; 4535 4536 return fault_around_bytes >> PAGE_SHIFT > 1; 4537 } 4538 4539 static vm_fault_t do_read_fault(struct vm_fault *vmf) 4540 { 4541 vm_fault_t ret = 0; 4542 4543 /* 4544 * Let's call ->map_pages() first and use ->fault() as fallback 4545 * if page by the offset is not ready to be mapped (cold cache or 4546 * something). 4547 */ 4548 if (should_fault_around(vmf)) { 4549 ret = do_fault_around(vmf); 4550 if (ret) 4551 return ret; 4552 } 4553 4554 ret = __do_fault(vmf); 4555 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4556 return ret; 4557 4558 ret |= finish_fault(vmf); 4559 unlock_page(vmf->page); 4560 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4561 put_page(vmf->page); 4562 return ret; 4563 } 4564 4565 static vm_fault_t do_cow_fault(struct vm_fault *vmf) 4566 { 4567 struct vm_area_struct *vma = vmf->vma; 4568 vm_fault_t ret; 4569 4570 if (unlikely(anon_vma_prepare(vma))) 4571 return VM_FAULT_OOM; 4572 4573 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); 4574 if (!vmf->cow_page) 4575 return VM_FAULT_OOM; 4576 4577 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, 4578 GFP_KERNEL)) { 4579 put_page(vmf->cow_page); 4580 return VM_FAULT_OOM; 4581 } 4582 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL); 4583 4584 ret = __do_fault(vmf); 4585 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4586 goto uncharge_out; 4587 if (ret & VM_FAULT_DONE_COW) 4588 return ret; 4589 4590 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); 4591 __SetPageUptodate(vmf->cow_page); 4592 4593 ret |= finish_fault(vmf); 4594 unlock_page(vmf->page); 4595 put_page(vmf->page); 4596 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4597 goto uncharge_out; 4598 return ret; 4599 uncharge_out: 4600 put_page(vmf->cow_page); 4601 return ret; 4602 } 4603 4604 static vm_fault_t do_shared_fault(struct vm_fault *vmf) 4605 { 4606 struct vm_area_struct *vma = vmf->vma; 4607 vm_fault_t ret, tmp; 4608 4609 ret = __do_fault(vmf); 4610 if (unlikely(ret & 
(VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) 4611 return ret; 4612 4613 /* 4614 * Check if the backing address space wants to know that the page is 4615 * about to become writable 4616 */ 4617 if (vma->vm_ops->page_mkwrite) { 4618 unlock_page(vmf->page); 4619 tmp = do_page_mkwrite(vmf); 4620 if (unlikely(!tmp || 4621 (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { 4622 put_page(vmf->page); 4623 return tmp; 4624 } 4625 } 4626 4627 ret |= finish_fault(vmf); 4628 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | 4629 VM_FAULT_RETRY))) { 4630 unlock_page(vmf->page); 4631 put_page(vmf->page); 4632 return ret; 4633 } 4634 4635 ret |= fault_dirty_shared_page(vmf); 4636 return ret; 4637 } 4638 4639 /* 4640 * We enter with non-exclusive mmap_lock (to exclude vma changes, 4641 * but allow concurrent faults). 4642 * The mmap_lock may have been released depending on flags and our 4643 * return value. See filemap_fault() and __folio_lock_or_retry(). 4644 * If mmap_lock is released, vma may become invalid (for example 4645 * by other thread calling munmap()). 4646 */ 4647 static vm_fault_t do_fault(struct vm_fault *vmf) 4648 { 4649 struct vm_area_struct *vma = vmf->vma; 4650 struct mm_struct *vm_mm = vma->vm_mm; 4651 vm_fault_t ret; 4652 4653 /* 4654 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND 4655 */ 4656 if (!vma->vm_ops->fault) { 4657 /* 4658 * If we find a migration pmd entry or a none pmd entry, which 4659 * should never happen, return SIGBUS 4660 */ 4661 if (unlikely(!pmd_present(*vmf->pmd))) 4662 ret = VM_FAULT_SIGBUS; 4663 else { 4664 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, 4665 vmf->pmd, 4666 vmf->address, 4667 &vmf->ptl); 4668 /* 4669 * Make sure this is not a temporary clearing of pte 4670 * by holding ptl and checking again. A R/M/W update 4671 * of pte involves: take ptl, clearing the pte so that 4672 * we don't have concurrent modification by hardware 4673 * followed by an update. 4674 */ 4675 if (unlikely(pte_none(*vmf->pte))) 4676 ret = VM_FAULT_SIGBUS; 4677 else 4678 ret = VM_FAULT_NOPAGE; 4679 4680 pte_unmap_unlock(vmf->pte, vmf->ptl); 4681 } 4682 } else if (!(vmf->flags & FAULT_FLAG_WRITE)) 4683 ret = do_read_fault(vmf); 4684 else if (!(vma->vm_flags & VM_SHARED)) 4685 ret = do_cow_fault(vmf); 4686 else 4687 ret = do_shared_fault(vmf); 4688 4689 /* preallocated pagetable is unused: free it */ 4690 if (vmf->prealloc_pte) { 4691 pte_free(vm_mm, vmf->prealloc_pte); 4692 vmf->prealloc_pte = NULL; 4693 } 4694 return ret; 4695 } 4696 4697 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, 4698 unsigned long addr, int page_nid, int *flags) 4699 { 4700 get_page(page); 4701 4702 count_vm_numa_event(NUMA_HINT_FAULTS); 4703 if (page_nid == numa_node_id()) { 4704 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); 4705 *flags |= TNF_FAULT_LOCAL; 4706 } 4707 4708 return mpol_misplaced(page, vma, addr); 4709 } 4710 4711 static vm_fault_t do_numa_page(struct vm_fault *vmf) 4712 { 4713 struct vm_area_struct *vma = vmf->vma; 4714 struct page *page = NULL; 4715 int page_nid = NUMA_NO_NODE; 4716 int last_cpupid; 4717 int target_nid; 4718 pte_t pte, old_pte; 4719 bool was_writable = pte_savedwrite(vmf->orig_pte); 4720 int flags = 0; 4721 4722 /* 4723 * The "pte" at this point cannot be used safely without 4724 * validation through pte_unmap_same(). It's of NUMA type but 4725 * the pfn may be screwed if the read is non atomic. 
4726 */ 4727 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); 4728 spin_lock(vmf->ptl); 4729 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 4730 pte_unmap_unlock(vmf->pte, vmf->ptl); 4731 goto out; 4732 } 4733 4734 /* Get the normal PTE */ 4735 old_pte = ptep_get(vmf->pte); 4736 pte = pte_modify(old_pte, vma->vm_page_prot); 4737 4738 page = vm_normal_page(vma, vmf->address, pte); 4739 if (!page || is_zone_device_page(page)) 4740 goto out_map; 4741 4742 /* TODO: handle PTE-mapped THP */ 4743 if (PageCompound(page)) 4744 goto out_map; 4745 4746 /* 4747 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as 4748 * much anyway since they can be in shared cache state. This misses 4749 * the case where a mapping is writable but the process never writes 4750 * to it but pte_write gets cleared during protection updates and 4751 * pte_dirty has unpredictable behaviour between PTE scan updates, 4752 * background writeback, dirty balancing and application behaviour. 4753 */ 4754 if (!was_writable) 4755 flags |= TNF_NO_GROUP; 4756 4757 /* 4758 * Flag if the page is shared between multiple address spaces. This 4759 * is later used when determining whether to group tasks together 4760 */ 4761 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) 4762 flags |= TNF_SHARED; 4763 4764 page_nid = page_to_nid(page); 4765 /* 4766 * For memory tiering mode, cpupid of slow memory page is used 4767 * to record page access time. So use default value. 4768 */ 4769 if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && 4770 !node_is_toptier(page_nid)) 4771 last_cpupid = (-1 & LAST_CPUPID_MASK); 4772 else 4773 last_cpupid = page_cpupid_last(page); 4774 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, 4775 &flags); 4776 if (target_nid == NUMA_NO_NODE) { 4777 put_page(page); 4778 goto out_map; 4779 } 4780 pte_unmap_unlock(vmf->pte, vmf->ptl); 4781 4782 /* Migrate to the requested node */ 4783 if (migrate_misplaced_page(page, vma, target_nid)) { 4784 page_nid = target_nid; 4785 flags |= TNF_MIGRATED; 4786 } else { 4787 flags |= TNF_MIGRATE_FAIL; 4788 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 4789 spin_lock(vmf->ptl); 4790 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 4791 pte_unmap_unlock(vmf->pte, vmf->ptl); 4792 goto out; 4793 } 4794 goto out_map; 4795 } 4796 4797 out: 4798 if (page_nid != NUMA_NO_NODE) 4799 task_numa_fault(last_cpupid, page_nid, 1, flags); 4800 return 0; 4801 out_map: 4802 /* 4803 * Make it present again, depending on how arch implements 4804 * non-accessible ptes, some can allow access by kernel mode. 
4805 */ 4806 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); 4807 pte = pte_modify(old_pte, vma->vm_page_prot); 4808 pte = pte_mkyoung(pte); 4809 if (was_writable) 4810 pte = pte_mkwrite(pte); 4811 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); 4812 update_mmu_cache(vma, vmf->address, vmf->pte); 4813 pte_unmap_unlock(vmf->pte, vmf->ptl); 4814 goto out; 4815 } 4816 4817 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) 4818 { 4819 if (vma_is_anonymous(vmf->vma)) 4820 return do_huge_pmd_anonymous_page(vmf); 4821 if (vmf->vma->vm_ops->huge_fault) 4822 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 4823 return VM_FAULT_FALLBACK; 4824 } 4825 4826 /* `inline' is required to avoid gcc 4.1.2 build error */ 4827 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) 4828 { 4829 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; 4830 4831 if (vma_is_anonymous(vmf->vma)) { 4832 if (likely(!unshare) && 4833 userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) 4834 return handle_userfault(vmf, VM_UFFD_WP); 4835 return do_huge_pmd_wp_page(vmf); 4836 } 4837 if (vmf->vma->vm_ops->huge_fault) { 4838 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); 4839 4840 if (!(ret & VM_FAULT_FALLBACK)) 4841 return ret; 4842 } 4843 4844 /* COW or write-notify handled on pte level: split pmd. */ 4845 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); 4846 4847 return VM_FAULT_FALLBACK; 4848 } 4849 4850 static vm_fault_t create_huge_pud(struct vm_fault *vmf) 4851 { 4852 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4853 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4854 /* No support for anonymous transparent PUD pages yet */ 4855 if (vma_is_anonymous(vmf->vma)) 4856 return VM_FAULT_FALLBACK; 4857 if (vmf->vma->vm_ops->huge_fault) 4858 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 4859 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 4860 return VM_FAULT_FALLBACK; 4861 } 4862 4863 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) 4864 { 4865 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ 4866 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) 4867 /* No support for anonymous transparent PUD pages yet */ 4868 if (vma_is_anonymous(vmf->vma)) 4869 goto split; 4870 if (vmf->vma->vm_ops->huge_fault) { 4871 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); 4872 4873 if (!(ret & VM_FAULT_FALLBACK)) 4874 return ret; 4875 } 4876 split: 4877 /* COW or write-notify not handled on PUD level: split pud.*/ 4878 __split_huge_pud(vmf->vma, vmf->pud, vmf->address); 4879 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ 4880 return VM_FAULT_FALLBACK; 4881 } 4882 4883 /* 4884 * These routines also need to handle stuff like marking pages dirty 4885 * and/or accessed for architectures that don't do it in hardware (most 4886 * RISC architectures). The early dirtying is also good on the i386. 4887 * 4888 * There is also a hook called "update_mmu_cache()" that architectures 4889 * with external mmu caches can use to update those (ie the Sparc or 4890 * PowerPC hashed page tables that act as extended TLBs). 4891 * 4892 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow 4893 * concurrent faults). 4894 * 4895 * The mmap_lock may have been released depending on flags and our return value. 4896 * See filemap_fault() and __folio_lock_or_retry(). 
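 * In short: a still-empty pte is dispatched to do_anonymous_page() or
 * do_fault(), a non-present pte to do_swap_page(), a NUMA-hinting
 * (protnone) pte to do_numa_page(), and a write or unshare of a
 * read-only pte to do_wp_page(); anything else only needs the
 * access/dirty bits updated below.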
4897 */ 4898 static vm_fault_t handle_pte_fault(struct vm_fault *vmf) 4899 { 4900 pte_t entry; 4901 4902 if (unlikely(pmd_none(*vmf->pmd))) { 4903 /* 4904 * Leave __pte_alloc() until later: because vm_ops->fault may 4905 * want to allocate huge page, and if we expose page table 4906 * for an instant, it will be difficult to retract from 4907 * concurrent faults and from rmap lookups. 4908 */ 4909 vmf->pte = NULL; 4910 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; 4911 } else { 4912 /* 4913 * If a huge pmd materialized under us just retry later. Use 4914 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead 4915 * of pmd_trans_huge() to ensure the pmd didn't become 4916 * pmd_trans_huge under us and then back to pmd_none, as a 4917 * result of MADV_DONTNEED running immediately after a huge pmd 4918 * fault in a different thread of this mm, in turn leading to a 4919 * misleading pmd_trans_huge() retval. All we have to ensure is 4920 * that it is a regular pmd that we can walk with 4921 * pte_offset_map() and we can do that through an atomic read 4922 * in C, which is what pmd_trans_unstable() provides. 4923 */ 4924 if (pmd_devmap_trans_unstable(vmf->pmd)) 4925 return 0; 4926 /* 4927 * A regular pmd is established and it can't morph into a huge 4928 * pmd from under us anymore at this point because we hold the 4929 * mmap_lock read mode and khugepaged takes it in write mode. 4930 * So now it's safe to run pte_offset_map(). 4931 */ 4932 vmf->pte = pte_offset_map(vmf->pmd, vmf->address); 4933 vmf->orig_pte = *vmf->pte; 4934 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; 4935 4936 /* 4937 * some architectures can have larger ptes than wordsize, 4938 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and 4939 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic 4940 * accesses. The code below just needs a consistent view 4941 * for the ifs and we later double check anyway with the 4942 * ptl lock held. So here a barrier will do. 4943 */ 4944 barrier(); 4945 if (pte_none(vmf->orig_pte)) { 4946 pte_unmap(vmf->pte); 4947 vmf->pte = NULL; 4948 } 4949 } 4950 4951 if (!vmf->pte) { 4952 if (vma_is_anonymous(vmf->vma)) 4953 return do_anonymous_page(vmf); 4954 else 4955 return do_fault(vmf); 4956 } 4957 4958 if (!pte_present(vmf->orig_pte)) 4959 return do_swap_page(vmf); 4960 4961 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) 4962 return do_numa_page(vmf); 4963 4964 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); 4965 spin_lock(vmf->ptl); 4966 entry = vmf->orig_pte; 4967 if (unlikely(!pte_same(*vmf->pte, entry))) { 4968 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); 4969 goto unlock; 4970 } 4971 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 4972 if (!pte_write(entry)) 4973 return do_wp_page(vmf); 4974 else if (likely(vmf->flags & FAULT_FLAG_WRITE)) 4975 entry = pte_mkdirty(entry); 4976 } 4977 entry = pte_mkyoung(entry); 4978 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry, 4979 vmf->flags & FAULT_FLAG_WRITE)) { 4980 update_mmu_cache(vmf->vma, vmf->address, vmf->pte); 4981 } else { 4982 /* Skip spurious TLB flush for retried page fault */ 4983 if (vmf->flags & FAULT_FLAG_TRIED) 4984 goto unlock; 4985 /* 4986 * This is needed only for protection faults but the arch code 4987 * is not yet telling us if this is a protection fault or not. 4988 * This still avoids useless tlb flushes for .text page faults 4989 * with threads. 
4990 */ 4991 if (vmf->flags & FAULT_FLAG_WRITE) 4992 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address); 4993 } 4994 unlock: 4995 pte_unmap_unlock(vmf->pte, vmf->ptl); 4996 return 0; 4997 } 4998 4999 /* 5000 * By the time we get here, we already hold the mm semaphore 5001 * 5002 * The mmap_lock may have been released depending on flags and our 5003 * return value. See filemap_fault() and __folio_lock_or_retry(). 5004 */ 5005 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, 5006 unsigned long address, unsigned int flags) 5007 { 5008 struct vm_fault vmf = { 5009 .vma = vma, 5010 .address = address & PAGE_MASK, 5011 .real_address = address, 5012 .flags = flags, 5013 .pgoff = linear_page_index(vma, address), 5014 .gfp_mask = __get_fault_gfp_mask(vma), 5015 }; 5016 struct mm_struct *mm = vma->vm_mm; 5017 unsigned long vm_flags = vma->vm_flags; 5018 pgd_t *pgd; 5019 p4d_t *p4d; 5020 vm_fault_t ret; 5021 5022 pgd = pgd_offset(mm, address); 5023 p4d = p4d_alloc(mm, pgd, address); 5024 if (!p4d) 5025 return VM_FAULT_OOM; 5026 5027 vmf.pud = pud_alloc(mm, p4d, address); 5028 if (!vmf.pud) 5029 return VM_FAULT_OOM; 5030 retry_pud: 5031 if (pud_none(*vmf.pud) && 5032 hugepage_vma_check(vma, vm_flags, false, true, true)) { 5033 ret = create_huge_pud(&vmf); 5034 if (!(ret & VM_FAULT_FALLBACK)) 5035 return ret; 5036 } else { 5037 pud_t orig_pud = *vmf.pud; 5038 5039 barrier(); 5040 if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) { 5041 5042 /* 5043 * TODO once we support anonymous PUDs: NUMA case and 5044 * FAULT_FLAG_UNSHARE handling. 5045 */ 5046 if ((flags & FAULT_FLAG_WRITE) && !pud_write(orig_pud)) { 5047 ret = wp_huge_pud(&vmf, orig_pud); 5048 if (!(ret & VM_FAULT_FALLBACK)) 5049 return ret; 5050 } else { 5051 huge_pud_set_accessed(&vmf, orig_pud); 5052 return 0; 5053 } 5054 } 5055 } 5056 5057 vmf.pmd = pmd_alloc(mm, vmf.pud, address); 5058 if (!vmf.pmd) 5059 return VM_FAULT_OOM; 5060 5061 /* Huge pud page fault raced with pmd_alloc? */ 5062 if (pud_trans_unstable(vmf.pud)) 5063 goto retry_pud; 5064 5065 if (pmd_none(*vmf.pmd) && 5066 hugepage_vma_check(vma, vm_flags, false, true, true)) { 5067 ret = create_huge_pmd(&vmf); 5068 if (!(ret & VM_FAULT_FALLBACK)) 5069 return ret; 5070 } else { 5071 vmf.orig_pmd = *vmf.pmd; 5072 5073 barrier(); 5074 if (unlikely(is_swap_pmd(vmf.orig_pmd))) { 5075 VM_BUG_ON(thp_migration_supported() && 5076 !is_pmd_migration_entry(vmf.orig_pmd)); 5077 if (is_pmd_migration_entry(vmf.orig_pmd)) 5078 pmd_migration_entry_wait(mm, vmf.pmd); 5079 return 0; 5080 } 5081 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) { 5082 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma)) 5083 return do_huge_pmd_numa_page(&vmf); 5084 5085 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 5086 !pmd_write(vmf.orig_pmd)) { 5087 ret = wp_huge_pmd(&vmf); 5088 if (!(ret & VM_FAULT_FALLBACK)) 5089 return ret; 5090 } else { 5091 huge_pmd_set_accessed(&vmf); 5092 return 0; 5093 } 5094 } 5095 } 5096 5097 return handle_pte_fault(&vmf); 5098 } 5099 5100 /** 5101 * mm_account_fault - Do page fault accounting 5102 * 5103 * @regs: the pt_regs struct pointer. When set to NULL, will skip accounting 5104 * of perf event counters, but we'll still do the per-task accounting to 5105 * the task who triggered this page fault. 5106 * @address: the faulted address. 5107 * @flags: the fault flags. 5108 * @ret: the fault retcode. 5109 * 5110 * This will take care of most of the page fault accounting. 
Meanwhile, it 5111 * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter 5112 * updates. However, note that the handling of PERF_COUNT_SW_PAGE_FAULTS should 5113 * still be in per-arch page fault handlers at the entry of page fault. 5114 */ 5115 static inline void mm_account_fault(struct pt_regs *regs, 5116 unsigned long address, unsigned int flags, 5117 vm_fault_t ret) 5118 { 5119 bool major; 5120 5121 /* 5122 * We don't do accounting for some specific faults: 5123 * 5124 * - Unsuccessful faults (e.g. when the address wasn't valid). That 5125 * includes arch_vma_access_permitted() failing before reaching here. 5126 * So this is not a "this many hardware page faults" counter. We 5127 * should use the hw profiling for that. 5128 * 5129 * - Incomplete faults (VM_FAULT_RETRY). They will only be counted 5130 * once they're completed. 5131 */ 5132 if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY)) 5133 return; 5134 5135 /* 5136 * We define the fault as a major fault when the final successful fault 5137 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't 5138 * handle it immediately previously). 5139 */ 5140 major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED); 5141 5142 if (major) 5143 current->maj_flt++; 5144 else 5145 current->min_flt++; 5146 5147 /* 5148 * If the fault is done for GUP, regs will be NULL. We only do the 5149 * accounting for the per thread fault counters who triggered the 5150 * fault, and we skip the perf event updates. 5151 */ 5152 if (!regs) 5153 return; 5154 5155 if (major) 5156 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); 5157 else 5158 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 5159 } 5160 5161 #ifdef CONFIG_LRU_GEN 5162 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5163 { 5164 /* the LRU algorithm doesn't apply to sequential or random reads */ 5165 current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)); 5166 } 5167 5168 static void lru_gen_exit_fault(void) 5169 { 5170 current->in_lru_fault = false; 5171 } 5172 #else 5173 static void lru_gen_enter_fault(struct vm_area_struct *vma) 5174 { 5175 } 5176 5177 static void lru_gen_exit_fault(void) 5178 { 5179 } 5180 #endif /* CONFIG_LRU_GEN */ 5181 5182 /* 5183 * By the time we get here, we already hold the mm semaphore 5184 * 5185 * The mmap_lock may have been released depending on flags and our 5186 * return value. See filemap_fault() and __folio_lock_or_retry(). 5187 */ 5188 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, 5189 unsigned int flags, struct pt_regs *regs) 5190 { 5191 vm_fault_t ret; 5192 5193 __set_current_state(TASK_RUNNING); 5194 5195 count_vm_event(PGFAULT); 5196 count_memcg_event_mm(vma->vm_mm, PGFAULT); 5197 5198 /* do counter updates before entering really critical section. */ 5199 check_sync_rss_stat(current); 5200 5201 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, 5202 flags & FAULT_FLAG_INSTRUCTION, 5203 flags & FAULT_FLAG_REMOTE)) 5204 return VM_FAULT_SIGSEGV; 5205 5206 /* 5207 * Enable the memcg OOM handling for faults triggered in user 5208 * space. Kernel faults are handled more gracefully. 
5209 */ 5210 if (flags & FAULT_FLAG_USER) 5211 mem_cgroup_enter_user_fault(); 5212 5213 lru_gen_enter_fault(vma); 5214 5215 if (unlikely(is_vm_hugetlb_page(vma))) 5216 ret = hugetlb_fault(vma->vm_mm, vma, address, flags); 5217 else 5218 ret = __handle_mm_fault(vma, address, flags); 5219 5220 lru_gen_exit_fault(); 5221 5222 if (flags & FAULT_FLAG_USER) { 5223 mem_cgroup_exit_user_fault(); 5224 /* 5225 * The task may have entered a memcg OOM situation but 5226 * if the allocation error was handled gracefully (no 5227 * VM_FAULT_OOM), there is no need to kill anything. 5228 * Just clean up the OOM state peacefully. 5229 */ 5230 if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) 5231 mem_cgroup_oom_synchronize(false); 5232 } 5233 5234 mm_account_fault(regs, address, flags, ret); 5235 5236 return ret; 5237 } 5238 EXPORT_SYMBOL_GPL(handle_mm_fault); 5239 5240 #ifndef __PAGETABLE_P4D_FOLDED 5241 /* 5242 * Allocate p4d page table. 5243 * We've already handled the fast-path in-line. 5244 */ 5245 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 5246 { 5247 p4d_t *new = p4d_alloc_one(mm, address); 5248 if (!new) 5249 return -ENOMEM; 5250 5251 spin_lock(&mm->page_table_lock); 5252 if (pgd_present(*pgd)) { /* Another has populated it */ 5253 p4d_free(mm, new); 5254 } else { 5255 smp_wmb(); /* See comment in pmd_install() */ 5256 pgd_populate(mm, pgd, new); 5257 } 5258 spin_unlock(&mm->page_table_lock); 5259 return 0; 5260 } 5261 #endif /* __PAGETABLE_P4D_FOLDED */ 5262 5263 #ifndef __PAGETABLE_PUD_FOLDED 5264 /* 5265 * Allocate page upper directory. 5266 * We've already handled the fast-path in-line. 5267 */ 5268 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) 5269 { 5270 pud_t *new = pud_alloc_one(mm, address); 5271 if (!new) 5272 return -ENOMEM; 5273 5274 spin_lock(&mm->page_table_lock); 5275 if (!p4d_present(*p4d)) { 5276 mm_inc_nr_puds(mm); 5277 smp_wmb(); /* See comment in pmd_install() */ 5278 p4d_populate(mm, p4d, new); 5279 } else /* Another has populated it */ 5280 pud_free(mm, new); 5281 spin_unlock(&mm->page_table_lock); 5282 return 0; 5283 } 5284 #endif /* __PAGETABLE_PUD_FOLDED */ 5285 5286 #ifndef __PAGETABLE_PMD_FOLDED 5287 /* 5288 * Allocate page middle directory. 5289 * We've already handled the fast-path in-line. 5290 */ 5291 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 5292 { 5293 spinlock_t *ptl; 5294 pmd_t *new = pmd_alloc_one(mm, address); 5295 if (!new) 5296 return -ENOMEM; 5297 5298 ptl = pud_lock(mm, pud); 5299 if (!pud_present(*pud)) { 5300 mm_inc_nr_pmds(mm); 5301 smp_wmb(); /* See comment in pmd_install() */ 5302 pud_populate(mm, pud, new); 5303 } else { /* Another has populated it */ 5304 pmd_free(mm, new); 5305 } 5306 spin_unlock(ptl); 5307 return 0; 5308 } 5309 #endif /* __PAGETABLE_PMD_FOLDED */ 5310 5311 /** 5312 * follow_pte - look up PTE at a user virtual address 5313 * @mm: the mm_struct of the target address space 5314 * @address: user virtual address 5315 * @ptepp: location to store found PTE 5316 * @ptlp: location to store the lock for the PTE 5317 * 5318 * On a successful return, the pointer to the PTE is stored in @ptepp; 5319 * the corresponding lock is taken and its location is stored in @ptlp. 5320 * The contents of the PTE are only stable until @ptlp is released; 5321 * any further use, if any, must be protected against invalidation 5322 * with MMU notifiers. 5323 * 5324 * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore 5325 * should be taken for read. 
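 *
 * A typical caller pattern (sketch only, mirroring follow_pfn() below):
 *
 *	if (follow_pte(vma->vm_mm, addr, &ptep, &ptl) == 0) {
 *		pfn = pte_pfn(*ptep);
 *		pte_unmap_unlock(ptep, ptl);
 *	}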
5326 * 5327 * KVM uses this function. While it is arguably less bad than ``follow_pfn``, 5328 * it is not a good general-purpose API. 5329 * 5330 * Return: zero on success, -ve otherwise. 5331 */ 5332 int follow_pte(struct mm_struct *mm, unsigned long address, 5333 pte_t **ptepp, spinlock_t **ptlp) 5334 { 5335 pgd_t *pgd; 5336 p4d_t *p4d; 5337 pud_t *pud; 5338 pmd_t *pmd; 5339 pte_t *ptep; 5340 5341 pgd = pgd_offset(mm, address); 5342 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 5343 goto out; 5344 5345 p4d = p4d_offset(pgd, address); 5346 if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) 5347 goto out; 5348 5349 pud = pud_offset(p4d, address); 5350 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 5351 goto out; 5352 5353 pmd = pmd_offset(pud, address); 5354 VM_BUG_ON(pmd_trans_huge(*pmd)); 5355 5356 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 5357 goto out; 5358 5359 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 5360 if (!pte_present(*ptep)) 5361 goto unlock; 5362 *ptepp = ptep; 5363 return 0; 5364 unlock: 5365 pte_unmap_unlock(ptep, *ptlp); 5366 out: 5367 return -EINVAL; 5368 } 5369 EXPORT_SYMBOL_GPL(follow_pte); 5370 5371 /** 5372 * follow_pfn - look up PFN at a user virtual address 5373 * @vma: memory mapping 5374 * @address: user virtual address 5375 * @pfn: location to store found PFN 5376 * 5377 * Only IO mappings and raw PFN mappings are allowed. 5378 * 5379 * This function does not allow the caller to read the permissions 5380 * of the PTE. Do not use it. 5381 * 5382 * Return: zero and the pfn at @pfn on success, -ve otherwise. 5383 */ 5384 int follow_pfn(struct vm_area_struct *vma, unsigned long address, 5385 unsigned long *pfn) 5386 { 5387 int ret = -EINVAL; 5388 spinlock_t *ptl; 5389 pte_t *ptep; 5390 5391 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5392 return ret; 5393 5394 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); 5395 if (ret) 5396 return ret; 5397 *pfn = pte_pfn(*ptep); 5398 pte_unmap_unlock(ptep, ptl); 5399 return 0; 5400 } 5401 EXPORT_SYMBOL(follow_pfn); 5402 5403 #ifdef CONFIG_HAVE_IOREMAP_PROT 5404 int follow_phys(struct vm_area_struct *vma, 5405 unsigned long address, unsigned int flags, 5406 unsigned long *prot, resource_size_t *phys) 5407 { 5408 int ret = -EINVAL; 5409 pte_t *ptep, pte; 5410 spinlock_t *ptl; 5411 5412 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5413 goto out; 5414 5415 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) 5416 goto out; 5417 pte = *ptep; 5418 5419 if ((flags & FOLL_WRITE) && !pte_write(pte)) 5420 goto unlock; 5421 5422 *prot = pgprot_val(pte_pgprot(pte)); 5423 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5424 5425 ret = 0; 5426 unlock: 5427 pte_unmap_unlock(ptep, ptl); 5428 out: 5429 return ret; 5430 } 5431 5432 /** 5433 * generic_access_phys - generic implementation for iomem mmap access 5434 * @vma: the vma to access 5435 * @addr: userspace address, not relative offset within @vma 5436 * @buf: buffer to read/write 5437 * @len: length of transfer 5438 * @write: set to FOLL_WRITE when writing, otherwise reading 5439 * 5440 * This is a generic implementation for &vm_operations_struct.access for an 5441 * iomem mapping. This callback is used by access_process_vm() when the @vma is 5442 * not page based. 
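 *
 * Drivers that mmap() MMIO regions typically point
 * &vm_operations_struct.access at this helper so that ptrace() and
 * /proc/<pid>/mem can still access the mapping.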
5443 */ 5444 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 5445 void *buf, int len, int write) 5446 { 5447 resource_size_t phys_addr; 5448 unsigned long prot = 0; 5449 void __iomem *maddr; 5450 pte_t *ptep, pte; 5451 spinlock_t *ptl; 5452 int offset = offset_in_page(addr); 5453 int ret = -EINVAL; 5454 5455 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 5456 return -EINVAL; 5457 5458 retry: 5459 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5460 return -EINVAL; 5461 pte = *ptep; 5462 pte_unmap_unlock(ptep, ptl); 5463 5464 prot = pgprot_val(pte_pgprot(pte)); 5465 phys_addr = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; 5466 5467 if ((write & FOLL_WRITE) && !pte_write(pte)) 5468 return -EINVAL; 5469 5470 maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); 5471 if (!maddr) 5472 return -ENOMEM; 5473 5474 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) 5475 goto out_unmap; 5476 5477 if (!pte_same(pte, *ptep)) { 5478 pte_unmap_unlock(ptep, ptl); 5479 iounmap(maddr); 5480 5481 goto retry; 5482 } 5483 5484 if (write) 5485 memcpy_toio(maddr + offset, buf, len); 5486 else 5487 memcpy_fromio(buf, maddr + offset, len); 5488 ret = len; 5489 pte_unmap_unlock(ptep, ptl); 5490 out_unmap: 5491 iounmap(maddr); 5492 5493 return ret; 5494 } 5495 EXPORT_SYMBOL_GPL(generic_access_phys); 5496 #endif 5497 5498 /* 5499 * Access another process' address space as given in mm. 5500 */ 5501 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, 5502 int len, unsigned int gup_flags) 5503 { 5504 struct vm_area_struct *vma; 5505 void *old_buf = buf; 5506 int write = gup_flags & FOLL_WRITE; 5507 5508 if (mmap_read_lock_killable(mm)) 5509 return 0; 5510 5511 /* ignore errors, just check how much was successfully transferred */ 5512 while (len) { 5513 int bytes, ret, offset; 5514 void *maddr; 5515 struct page *page = NULL; 5516 5517 ret = get_user_pages_remote(mm, addr, 1, 5518 gup_flags, &page, &vma, NULL); 5519 if (ret <= 0) { 5520 #ifndef CONFIG_HAVE_IOREMAP_PROT 5521 break; 5522 #else 5523 /* 5524 * Check if this is a VM_IO | VM_PFNMAP VMA, which 5525 * we can access using slightly different code. 5526 */ 5527 vma = vma_lookup(mm, addr); 5528 if (!vma) 5529 break; 5530 if (vma->vm_ops && vma->vm_ops->access) 5531 ret = vma->vm_ops->access(vma, addr, buf, 5532 len, write); 5533 if (ret <= 0) 5534 break; 5535 bytes = ret; 5536 #endif 5537 } else { 5538 bytes = len; 5539 offset = addr & (PAGE_SIZE-1); 5540 if (bytes > PAGE_SIZE-offset) 5541 bytes = PAGE_SIZE-offset; 5542 5543 maddr = kmap(page); 5544 if (write) { 5545 copy_to_user_page(vma, page, addr, 5546 maddr + offset, buf, bytes); 5547 set_page_dirty_lock(page); 5548 } else { 5549 copy_from_user_page(vma, page, addr, 5550 buf, maddr + offset, bytes); 5551 } 5552 kunmap(page); 5553 put_page(page); 5554 } 5555 len -= bytes; 5556 buf += bytes; 5557 addr += bytes; 5558 } 5559 mmap_read_unlock(mm); 5560 5561 return buf - old_buf; 5562 } 5563 5564 /** 5565 * access_remote_vm - access another process' address space 5566 * @mm: the mm_struct of the target address space 5567 * @addr: start address to access 5568 * @buf: source or destination buffer 5569 * @len: number of bytes to transfer 5570 * @gup_flags: flags modifying lookup behaviour 5571 * 5572 * The caller must hold a reference on @mm. 5573 * 5574 * Return: number of bytes copied from source to destination. 
/**
 * access_remote_vm - access another process' address space
 * @mm: the mm_struct of the target address space
 * @addr: start address to access
 * @buf: source or destination buffer
 * @len: number of bytes to transfer
 * @gup_flags: flags modifying lookup behaviour
 *
 * The caller must hold a reference on @mm.
 *
 * Return: number of bytes copied from source to destination.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	return __access_remote_vm(mm, addr, buf, len, gup_flags);
}

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);

	mmput(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(access_process_vm);

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/*
	 * we might be running from an atomic context so we cannot sleep
	 */
	if (!mmap_read_trylock(mm))
		return;

	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_NOWAIT);
		if (buf) {
			char *p;

			p = file_path(f, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	mmap_read_unlock(mm);
}

#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void __might_fault(const char *file, int line)
{
	if (pagefault_disabled())
		return;
	__might_sleep(file, line);
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
	if (current->mm)
		might_lock_read(&current->mm->mmap_lock);
#endif
}
EXPORT_SYMBOL(__might_fault);
#endif
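
/*
 * Illustrative sketch, not part of the kernel sources: access_process_vm()
 * above is the kind of helper debuggers and introspection code use for
 * remote peeks at another task's memory.  A hypothetical caller that reads
 * one word from a target task could look like this; FOLL_FORCE mirrors the
 * flag ptrace-style accesses typically pass so that read-only mappings of
 * the target remain readable.
 */
static int __maybe_unused peek_remote_word_sketch(struct task_struct *tsk,
						  unsigned long addr,
						  unsigned long *val)
{
	int copied;

	copied = access_process_vm(tsk, addr, val, sizeof(*val), FOLL_FORCE);

	/* A short copy means the address was not fully mapped in the target. */
	return copied == sizeof(*val) ? 0 : -EIO;
}
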
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
/*
 * Process all subpages of the specified huge page with the specified
 * operation.  The target subpage will be processed last to keep its
 * cache lines hot.
 */
static inline void process_huge_page(
	unsigned long addr_hint, unsigned int pages_per_huge_page,
	void (*process_subpage)(unsigned long addr, int idx, void *arg),
	void *arg)
{
	int i, n, base, l;
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	/* Process target subpage last to keep its cache lines hot */
	might_sleep();
	n = (addr_hint - addr) / PAGE_SIZE;
	if (2 * n <= pages_per_huge_page) {
		/* If target subpage in first half of huge page */
		base = 0;
		l = n;
		/* Process subpages at the end of huge page */
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	} else {
		/* If target subpage in second half of huge page */
		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
		l = pages_per_huge_page - n;
		/* Process subpages at the begin of huge page */
		for (i = 0; i < base; i++) {
			cond_resched();
			process_subpage(addr + i * PAGE_SIZE, i, arg);
		}
	}
	/*
	 * Process remaining subpages in left-right-left-right pattern
	 * towards the target subpage
	 */
	for (i = 0; i < l; i++) {
		int left_idx = base + i;
		int right_idx = base + 2 * l - 1 - i;

		cond_resched();
		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
		cond_resched();
		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
	}
}

static void clear_gigantic_page(struct page *page,
				unsigned long addr,
				unsigned int pages_per_huge_page)
{
	int i;
	struct page *p;

	might_sleep();
	for (i = 0; i < pages_per_huge_page; i++) {
		p = nth_page(page, i);
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}

static void clear_subpage(unsigned long addr, int idx, void *arg)
{
	struct page *page = arg;

	clear_user_highpage(page + idx, addr);
}

void clear_huge_page(struct page *page,
		     unsigned long addr_hint, unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		clear_gigantic_page(page, addr, pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}

static void copy_user_gigantic_page(struct page *dst, struct page *src,
				    unsigned long addr,
				    struct vm_area_struct *vma,
				    unsigned int pages_per_huge_page)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page; i++) {
		dst = nth_page(dst_base, i);
		src = nth_page(src_base, i);

		cond_resched();
		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
	}
}

struct copy_subpage_arg {
	struct page *dst;
	struct page *src;
	struct vm_area_struct *vma;
};

static void copy_subpage(unsigned long addr, int idx, void *arg)
{
	struct copy_subpage_arg *copy_arg = arg;

	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
			   addr, copy_arg->vma);
}
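
/*
 * Illustrative sketch, not part of the kernel sources: the visit order
 * chosen by process_huge_page() above is easiest to see by replaying its
 * index arithmetic.  The hypothetical debug helper below only prints the
 * order; for pages_per_huge_page == 8 and target index n == 2 it prints
 * 7 6 5 4 0 3 1 2, i.e. subpages far from the target are handled first and
 * the target subpage comes last, keeping its cache lines hot for the fault
 * that triggered the clear or copy.
 */
static void __maybe_unused show_process_huge_page_order(int pages_per_huge_page,
							int n)
{
	int i, base, l;

	if (2 * n <= pages_per_huge_page) {
		/* Target in the first half: sweep the tail downwards first. */
		base = 0;
		l = n;
		for (i = pages_per_huge_page - 1; i >= 2 * n; i--)
			pr_info("subpage %d\n", i);
	} else {
		/* Target in the second half: sweep the head upwards first. */
		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
		l = pages_per_huge_page - n;
		for (i = 0; i < base; i++)
			pr_info("subpage %d\n", i);
	}
	/* Converge on the target from both ends; it is printed last. */
	for (i = 0; i < l; i++) {
		pr_info("subpage %d\n", base + i);
		pr_info("subpage %d\n", base + 2 * l - 1 - i);
	}
}
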
void copy_user_huge_page(struct page *dst, struct page *src,
			 unsigned long addr_hint, struct vm_area_struct *vma,
			 unsigned int pages_per_huge_page)
{
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
	struct copy_subpage_arg arg = {
		.dst = dst,
		.src = src,
		.vma = vma,
	};

	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
		copy_user_gigantic_page(dst, src, addr, vma,
					pages_per_huge_page);
		return;
	}

	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
}

long copy_huge_page_from_user(struct page *dst_page,
				const void __user *usr_src,
				unsigned int pages_per_huge_page,
				bool allow_pagefault)
{
	void *page_kaddr;
	unsigned long i, rc = 0;
	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
	struct page *subpage;

	for (i = 0; i < pages_per_huge_page; i++) {
		subpage = nth_page(dst_page, i);
		if (allow_pagefault)
			page_kaddr = kmap(subpage);
		else
			page_kaddr = kmap_atomic(subpage);
		rc = copy_from_user(page_kaddr,
				usr_src + i * PAGE_SIZE, PAGE_SIZE);
		if (allow_pagefault)
			kunmap(subpage);
		else
			kunmap_atomic(page_kaddr);

		ret_val -= (PAGE_SIZE - rc);
		if (rc)
			break;

		flush_dcache_page(subpage);

		cond_resched();
	}
	return ret_val;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS

static struct kmem_cache *page_ptl_cachep;

void __init ptlock_cache_init(void)
{
	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
			SLAB_PANIC, NULL);
}

bool ptlock_alloc(struct page *page)
{
	spinlock_t *ptl;

	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
	if (!ptl)
		return false;
	page->ptl = ptl;
	return true;
}

void ptlock_free(struct page *page)
{
	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
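
/*
 * Illustrative sketch, not part of the kernel sources: ptlock_alloc() and
 * ptlock_free() above are not called directly by page-table allocators;
 * they sit behind ptlock_init() and pgtable_pte_page_ctor().  Assuming the
 * usual pattern of the generic pgalloc helpers, allocating a PTE page ends
 * up looking roughly like this (the function name is made up for the
 * example):
 */
static __maybe_unused struct page *pte_page_alloc_sketch(void)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pte)
		return NULL;

	/*
	 * pgtable_pte_page_ctor() calls ptlock_init(); with
	 * ALLOC_SPLIT_PTLOCKS that reaches ptlock_alloc() above, so a
	 * failed spinlock allocation is reported back to the caller here.
	 */
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}

	return pte;
}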