/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           mapping->i_mmap_rwsem
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                               in arch-dependent flush_dcache_mmap_lock,
 *                               within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
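		 *
		 * (Illustrative invariant, not a behaviour change: a freshly
		 * allocated anon_vma therefore has anon_vma->parent == anon_vma,
		 * anon_vma->root == anon_vma and a refcount of 1; anon_vma_fork()
		 * later re-points the root via
		 * anon_vma->root = pvma->anon_vma->root.)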
102 */ 103 anon_vma->root = anon_vma; 104 } 105 106 return anon_vma; 107 } 108 109 static inline void anon_vma_free(struct anon_vma *anon_vma) 110 { 111 VM_BUG_ON(atomic_read(&anon_vma->refcount)); 112 113 /* 114 * Synchronize against folio_lock_anon_vma_read() such that 115 * we can safely hold the lock without the anon_vma getting 116 * freed. 117 * 118 * Relies on the full mb implied by the atomic_dec_and_test() from 119 * put_anon_vma() against the acquire barrier implied by 120 * down_read_trylock() from folio_lock_anon_vma_read(). This orders: 121 * 122 * folio_lock_anon_vma_read() VS put_anon_vma() 123 * down_read_trylock() atomic_dec_and_test() 124 * LOCK MB 125 * atomic_read() rwsem_is_locked() 126 * 127 * LOCK should suffice since the actual taking of the lock must 128 * happen _before_ what follows. 129 */ 130 might_sleep(); 131 if (rwsem_is_locked(&anon_vma->root->rwsem)) { 132 anon_vma_lock_write(anon_vma); 133 anon_vma_unlock_write(anon_vma); 134 } 135 136 kmem_cache_free(anon_vma_cachep, anon_vma); 137 } 138 139 static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) 140 { 141 return kmem_cache_alloc(anon_vma_chain_cachep, gfp); 142 } 143 144 static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 145 { 146 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); 147 } 148 149 static void anon_vma_chain_link(struct vm_area_struct *vma, 150 struct anon_vma_chain *avc, 151 struct anon_vma *anon_vma) 152 { 153 avc->vma = vma; 154 avc->anon_vma = anon_vma; 155 list_add(&avc->same_vma, &vma->anon_vma_chain); 156 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); 157 } 158 159 /** 160 * __anon_vma_prepare - attach an anon_vma to a memory region 161 * @vma: the memory region in question 162 * 163 * This makes sure the memory mapping described by 'vma' has 164 * an 'anon_vma' attached to it, so that we can associate the 165 * anonymous pages mapped into it with that anon_vma. 166 * 167 * The common case will be that we already have one, which 168 * is handled inline by anon_vma_prepare(). But if 169 * not we either need to find an adjacent mapping that we 170 * can re-use the anon_vma from (very common when the only 171 * reason for splitting a vma has been mprotect()), or we 172 * allocate a new one. 173 * 174 * Anon-vma allocations are very subtle, because we may have 175 * optimistically looked up an anon_vma in folio_lock_anon_vma_read() 176 * and that may actually touch the rwsem even in the newly 177 * allocated vma (it depends on RCU to make sure that the 178 * anon_vma isn't actually destroyed). 179 * 180 * As a result, we need to do proper anon_vma locking even 181 * for the new allocation. At the same time, we do not want 182 * to do any locking for the common case of already having 183 * an anon_vma. 184 * 185 * This must be called with the mmap_lock held for reading. 
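 *
 * Illustrative caller sketch (an assumption, not a call site in this file):
 * fault handlers normally go through the anon_vma_prepare() inline wrapper,
 * roughly
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... install the new anonymous page under the pte lock ...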
186 */ 187 int __anon_vma_prepare(struct vm_area_struct *vma) 188 { 189 struct mm_struct *mm = vma->vm_mm; 190 struct anon_vma *anon_vma, *allocated; 191 struct anon_vma_chain *avc; 192 193 might_sleep(); 194 195 avc = anon_vma_chain_alloc(GFP_KERNEL); 196 if (!avc) 197 goto out_enomem; 198 199 anon_vma = find_mergeable_anon_vma(vma); 200 allocated = NULL; 201 if (!anon_vma) { 202 anon_vma = anon_vma_alloc(); 203 if (unlikely(!anon_vma)) 204 goto out_enomem_free_avc; 205 anon_vma->num_children++; /* self-parent link for new root */ 206 allocated = anon_vma; 207 } 208 209 anon_vma_lock_write(anon_vma); 210 /* page_table_lock to protect against threads */ 211 spin_lock(&mm->page_table_lock); 212 if (likely(!vma->anon_vma)) { 213 vma->anon_vma = anon_vma; 214 anon_vma_chain_link(vma, avc, anon_vma); 215 anon_vma->num_active_vmas++; 216 allocated = NULL; 217 avc = NULL; 218 } 219 spin_unlock(&mm->page_table_lock); 220 anon_vma_unlock_write(anon_vma); 221 222 if (unlikely(allocated)) 223 put_anon_vma(allocated); 224 if (unlikely(avc)) 225 anon_vma_chain_free(avc); 226 227 return 0; 228 229 out_enomem_free_avc: 230 anon_vma_chain_free(avc); 231 out_enomem: 232 return -ENOMEM; 233 } 234 235 /* 236 * This is a useful helper function for locking the anon_vma root as 237 * we traverse the vma->anon_vma_chain, looping over anon_vma's that 238 * have the same vma. 239 * 240 * Such anon_vma's should have the same root, so you'd expect to see 241 * just a single mutex_lock for the whole traversal. 242 */ 243 static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) 244 { 245 struct anon_vma *new_root = anon_vma->root; 246 if (new_root != root) { 247 if (WARN_ON_ONCE(root)) 248 up_write(&root->rwsem); 249 root = new_root; 250 down_write(&root->rwsem); 251 } 252 return root; 253 } 254 255 static inline void unlock_anon_vma_root(struct anon_vma *root) 256 { 257 if (root) 258 up_write(&root->rwsem); 259 } 260 261 /* 262 * Attach the anon_vmas from src to dst. 263 * Returns 0 on success, -ENOMEM on failure. 264 * 265 * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), 266 * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, 267 * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to 268 * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before 269 * call, we can identify this case by checking (!dst->anon_vma && 270 * src->anon_vma). 271 * 272 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find 273 * and reuse existing anon_vma which has no vmas and only one child anon_vma. 274 * This prevents degradation of anon_vma hierarchy to endless linear chain in 275 * case of constantly forking task. On the other hand, an anon_vma with more 276 * than one child isn't reused even if there was no alive vma, thus rmap 277 * walker has a good chance of avoiding scanning the whole hierarchy when it 278 * searches where page is mapped. 
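 *
 * A rough sketch of the degenerate case the reuse rule avoids (assuming a
 * task that forks repeatedly while the children keep only empty anon_vmas):
 *
 *	root <- child1 anon_vma <- child2 anon_vma <- child3 anon_vma <- ...
 *
 * Reusing an empty anon_vma that has a single child keeps this chain, and
 * therefore every rmap walk through it, short.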
279 */ 280 int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 281 { 282 struct anon_vma_chain *avc, *pavc; 283 struct anon_vma *root = NULL; 284 285 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 286 struct anon_vma *anon_vma; 287 288 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); 289 if (unlikely(!avc)) { 290 unlock_anon_vma_root(root); 291 root = NULL; 292 avc = anon_vma_chain_alloc(GFP_KERNEL); 293 if (!avc) 294 goto enomem_failure; 295 } 296 anon_vma = pavc->anon_vma; 297 root = lock_anon_vma_root(root, anon_vma); 298 anon_vma_chain_link(dst, avc, anon_vma); 299 300 /* 301 * Reuse existing anon_vma if it has no vma and only one 302 * anon_vma child. 303 * 304 * Root anon_vma is never reused: 305 * it has self-parent reference and at least one child. 306 */ 307 if (!dst->anon_vma && src->anon_vma && 308 anon_vma->num_children < 2 && 309 anon_vma->num_active_vmas == 0) 310 dst->anon_vma = anon_vma; 311 } 312 if (dst->anon_vma) 313 dst->anon_vma->num_active_vmas++; 314 unlock_anon_vma_root(root); 315 return 0; 316 317 enomem_failure: 318 /* 319 * dst->anon_vma is dropped here otherwise its num_active_vmas can 320 * be incorrectly decremented in unlink_anon_vmas(). 321 * We can safely do this because callers of anon_vma_clone() don't care 322 * about dst->anon_vma if anon_vma_clone() failed. 323 */ 324 dst->anon_vma = NULL; 325 unlink_anon_vmas(dst); 326 return -ENOMEM; 327 } 328 329 /* 330 * Attach vma to its own anon_vma, as well as to the anon_vmas that 331 * the corresponding VMA in the parent process is attached to. 332 * Returns 0 on success, non-zero on failure. 333 */ 334 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) 335 { 336 struct anon_vma_chain *avc; 337 struct anon_vma *anon_vma; 338 int error; 339 340 /* Don't bother if the parent process has no anon_vma here. */ 341 if (!pvma->anon_vma) 342 return 0; 343 344 /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ 345 vma->anon_vma = NULL; 346 347 /* 348 * First, attach the new VMA to the parent VMA's anon_vmas, 349 * so rmap can find non-COWed pages in child processes. 350 */ 351 error = anon_vma_clone(vma, pvma); 352 if (error) 353 return error; 354 355 /* An existing anon_vma has been reused, all done then. */ 356 if (vma->anon_vma) 357 return 0; 358 359 /* Then add our own anon_vma. */ 360 anon_vma = anon_vma_alloc(); 361 if (!anon_vma) 362 goto out_error; 363 anon_vma->num_active_vmas++; 364 avc = anon_vma_chain_alloc(GFP_KERNEL); 365 if (!avc) 366 goto out_error_free_anon_vma; 367 368 /* 369 * The root anon_vma's rwsem is the lock actually used when we 370 * lock any of the anon_vmas in this anon_vma tree. 371 */ 372 anon_vma->root = pvma->anon_vma->root; 373 anon_vma->parent = pvma->anon_vma; 374 /* 375 * With refcounts, an anon_vma can stay around longer than the 376 * process it belongs to. The root anon_vma needs to be pinned until 377 * this anon_vma is freed, because the lock lives in the root. 378 */ 379 get_anon_vma(anon_vma->root); 380 /* Mark this anon_vma as the one where our new (COWed) pages go. 
 */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * vma would still be needed after unlink, and anon_vma will be
		 * prepared again when a fault is handled.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a refcount increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
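 *
 * An illustrative caller pattern (a sketch, assuming a migration-style user
 * that must pin the anon_vma across sleeping operations):
 *
 *	anon_vma = folio_get_anon_vma(folio);	// may return NULL
 *	... sleeping operations, folio may even be unmapped meanwhile ...
 *	if (anon_vma)
 *		put_anon_vma(anon_vma);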
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we've got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs.
 * It is important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1))
			goto retry;
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush to the batch */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
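 *
 * An illustrative ordering sketch for the batching side (an assumption,
 * loosely following the reclaim path): the deferred flush must be issued
 * before IO is started and before the pages can be freed,
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH);	// may only queue the flush
 *	try_to_unmap_flush_dirty();		// before starting writeback
 *	...
 *	try_to_unmap_flush();			// before the pages are reused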
709 * 710 * This must be called under the PTL so that an access to tlb_flush_batched 711 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise 712 * via the PTL. 713 */ 714 void flush_tlb_batched_pending(struct mm_struct *mm) 715 { 716 int batch = atomic_read(&mm->tlb_flush_batched); 717 int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; 718 int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; 719 720 if (pending != flushed) { 721 flush_tlb_mm(mm); 722 /* 723 * If the new TLB flushing is pending during flushing, leave 724 * mm->tlb_flush_batched as is, to avoid losing flushing. 725 */ 726 atomic_cmpxchg(&mm->tlb_flush_batched, batch, 727 pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); 728 } 729 } 730 #else 731 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable) 732 { 733 } 734 735 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) 736 { 737 return false; 738 } 739 #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 740 741 /* 742 * At what user virtual address is page expected in vma? 743 * Caller should check the page is actually part of the vma. 744 */ 745 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) 746 { 747 struct folio *folio = page_folio(page); 748 if (folio_test_anon(folio)) { 749 struct anon_vma *page__anon_vma = folio_anon_vma(folio); 750 /* 751 * Note: swapoff's unuse_vma() is more efficient with this 752 * check, and needs it to match anon_vma when KSM is active. 753 */ 754 if (!vma->anon_vma || !page__anon_vma || 755 vma->anon_vma->root != page__anon_vma->root) 756 return -EFAULT; 757 } else if (!vma->vm_file) { 758 return -EFAULT; 759 } else if (vma->vm_file->f_mapping != folio->mapping) { 760 return -EFAULT; 761 } 762 763 return vma_address(page, vma); 764 } 765 766 /* 767 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or 768 * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* 769 * represents. 
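 *
 * An illustrative caller pattern (a sketch, assuming the caller re-validates
 * the entry under the pte lock):
 *
 *	pmd = mm_find_pmd(mm, address);
 *	if (!pmd)
 *		return;
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	...
 *	pte_unmap_unlock(pte, ptl);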
770 */ 771 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) 772 { 773 pgd_t *pgd; 774 p4d_t *p4d; 775 pud_t *pud; 776 pmd_t *pmd = NULL; 777 778 pgd = pgd_offset(mm, address); 779 if (!pgd_present(*pgd)) 780 goto out; 781 782 p4d = p4d_offset(pgd, address); 783 if (!p4d_present(*p4d)) 784 goto out; 785 786 pud = pud_offset(p4d, address); 787 if (!pud_present(*pud)) 788 goto out; 789 790 pmd = pmd_offset(pud, address); 791 out: 792 return pmd; 793 } 794 795 struct folio_referenced_arg { 796 int mapcount; 797 int referenced; 798 unsigned long vm_flags; 799 struct mem_cgroup *memcg; 800 }; 801 /* 802 * arg: folio_referenced_arg will be passed 803 */ 804 static bool folio_referenced_one(struct folio *folio, 805 struct vm_area_struct *vma, unsigned long address, void *arg) 806 { 807 struct folio_referenced_arg *pra = arg; 808 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 809 int referenced = 0; 810 811 while (page_vma_mapped_walk(&pvmw)) { 812 address = pvmw.address; 813 814 if ((vma->vm_flags & VM_LOCKED) && 815 (!folio_test_large(folio) || !pvmw.pte)) { 816 /* Restore the mlock which got missed */ 817 mlock_vma_folio(folio, vma, !pvmw.pte); 818 page_vma_mapped_walk_done(&pvmw); 819 pra->vm_flags |= VM_LOCKED; 820 return false; /* To break the loop */ 821 } 822 823 if (pvmw.pte) { 824 if (lru_gen_enabled() && pte_young(*pvmw.pte)) { 825 lru_gen_look_around(&pvmw); 826 referenced++; 827 } 828 829 if (ptep_clear_flush_young_notify(vma, address, 830 pvmw.pte)) 831 referenced++; 832 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 833 if (pmdp_clear_flush_young_notify(vma, address, 834 pvmw.pmd)) 835 referenced++; 836 } else { 837 /* unexpected pmd-mapped folio? */ 838 WARN_ON_ONCE(1); 839 } 840 841 pra->mapcount--; 842 } 843 844 if (referenced) 845 folio_clear_idle(folio); 846 if (folio_test_clear_young(folio)) 847 referenced++; 848 849 if (referenced) { 850 pra->referenced++; 851 pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; 852 } 853 854 if (!pra->mapcount) 855 return false; /* To break the loop */ 856 857 return true; 858 } 859 860 static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) 861 { 862 struct folio_referenced_arg *pra = arg; 863 struct mem_cgroup *memcg = pra->memcg; 864 865 /* 866 * Ignore references from this mapping if it has no recency. If the 867 * folio has been used in another mapping, we will catch it; if this 868 * other mapping is already gone, the unmap path will have set the 869 * referenced flag or activated the folio in zap_pte_range(). 870 */ 871 if (!vma_has_recency(vma)) 872 return true; 873 874 /* 875 * If we are reclaiming on behalf of a cgroup, skip counting on behalf 876 * of references from different cgroups. 877 */ 878 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 879 return true; 880 881 return false; 882 } 883 884 /** 885 * folio_referenced() - Test if the folio was referenced. 886 * @folio: The folio to test. 887 * @is_locked: Caller holds lock on the folio. 888 * @memcg: target memory cgroup 889 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. 890 * 891 * Quick test_and_clear_referenced for all mappings of a folio, 892 * 893 * Return: The number of mappings which referenced the folio. Return -1 if 894 * the function bailed out due to rmap lock contention. 
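 *
 * Illustrative sketch (an assumption, simplified from the vmscan caller):
 *
 *	referenced = folio_referenced(folio, 1, memcg, &vm_flags);
 *	if (referenced == -1)
 *		... rmap lock was contended: keep the folio and retry later ...
 *	else if (vm_flags & VM_LOCKED)
 *		... mlocked somewhere: activate rather than reclaim ...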
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
		.invalid_vma = invalid_folio_referenced_vma,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}

static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				vma->vm_mm, address, vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t entry;
			pte_t *pte = pvmw->pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);

/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that @pfn is mapped with.
 * @vma: vma that @pfn is mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn		= pfn,
		.nr_pages	= nr_pages,
		.pgoff		= pgoff,
		.vma		= vma,
		.flags		= PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

int folio_total_mapcount(struct folio *folio)
{
	int mapcount = folio_entire_mapcount(folio);
	int nr_pages;
	int i;

	/* In the common case, avoid the loop when no pages mapped by PTE */
	if (folio_nr_pages_mapped(folio) == 0)
		return mapcount;
	/*
	 * Add all the PTE mappings of those pages mapped by PTE.
	 * Limit the loop to folio_nr_pages_mapped()?
	 * Perhaps: given all the raciness, that may be a good or a bad idea.
	 */
	nr_pages = folio_nr_pages(folio);
	for (i = 0; i < nr_pages; i++)
		mapcount += atomic_read(&folio_page(folio, i)->_mapcount);

	/* But each of those _mapcounts was based on -1 */
	mapcount += nr_pages;
	return mapcount;
}

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
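 *
 * Illustrative sketch (an assumption, heavily simplified from the COW reuse
 * path in do_wp_page()):
 *
 *	if (folio_ref_count(folio) == 1) {	// plus swapcache/mapcount checks
 *		page_move_anon_rmap(vmf->page, vma);
 *		... reuse the page for the write fault ...
 *	}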
1111 */ 1112 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) 1113 { 1114 void *anon_vma = vma->anon_vma; 1115 struct folio *folio = page_folio(page); 1116 1117 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1118 VM_BUG_ON_VMA(!anon_vma, vma); 1119 1120 anon_vma += PAGE_MAPPING_ANON; 1121 /* 1122 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written 1123 * simultaneously, so a concurrent reader (eg folio_referenced()'s 1124 * folio_test_anon()) will not see one without the other. 1125 */ 1126 WRITE_ONCE(folio->mapping, anon_vma); 1127 SetPageAnonExclusive(page); 1128 } 1129 1130 /** 1131 * __page_set_anon_rmap - set up new anonymous rmap 1132 * @folio: Folio which contains page. 1133 * @page: Page to add to rmap. 1134 * @vma: VM area to add page to. 1135 * @address: User virtual address of the mapping 1136 * @exclusive: the page is exclusively owned by the current process 1137 */ 1138 static void __page_set_anon_rmap(struct folio *folio, struct page *page, 1139 struct vm_area_struct *vma, unsigned long address, int exclusive) 1140 { 1141 struct anon_vma *anon_vma = vma->anon_vma; 1142 1143 BUG_ON(!anon_vma); 1144 1145 if (folio_test_anon(folio)) 1146 goto out; 1147 1148 /* 1149 * If the page isn't exclusively mapped into this vma, 1150 * we must use the _oldest_ possible anon_vma for the 1151 * page mapping! 1152 */ 1153 if (!exclusive) 1154 anon_vma = anon_vma->root; 1155 1156 /* 1157 * page_idle does a lockless/optimistic rmap scan on folio->mapping. 1158 * Make sure the compiler doesn't split the stores of anon_vma and 1159 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code 1160 * could mistake the mapping for a struct address_space and crash. 1161 */ 1162 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; 1163 WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); 1164 folio->index = linear_page_index(vma, address); 1165 out: 1166 if (exclusive) 1167 SetPageAnonExclusive(page); 1168 } 1169 1170 /** 1171 * __page_check_anon_rmap - sanity check anonymous rmap addition 1172 * @page: the page to add the mapping to 1173 * @vma: the vm area in which the mapping is added 1174 * @address: the user virtual address mapped 1175 */ 1176 static void __page_check_anon_rmap(struct page *page, 1177 struct vm_area_struct *vma, unsigned long address) 1178 { 1179 struct folio *folio = page_folio(page); 1180 /* 1181 * The page's anon-rmap details (mapping and index) are guaranteed to 1182 * be set up correctly at this point. 1183 * 1184 * We have exclusion against page_add_anon_rmap because the caller 1185 * always holds the page locked. 1186 * 1187 * We have exclusion against page_add_new_anon_rmap because those pages 1188 * are initially only visible via the pagetables, and the pte is locked 1189 * over the call to page_add_new_anon_rmap. 
1190 */ 1191 VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, 1192 folio); 1193 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), 1194 page); 1195 } 1196 1197 /** 1198 * page_add_anon_rmap - add pte mapping to an anonymous page 1199 * @page: the page to add the mapping to 1200 * @vma: the vm area in which the mapping is added 1201 * @address: the user virtual address mapped 1202 * @flags: the rmap flags 1203 * 1204 * The caller needs to hold the pte lock, and the page must be locked in 1205 * the anon_vma case: to serialize mapping,index checking after setting, 1206 * and to ensure that PageAnon is not being upgraded racily to PageKsm 1207 * (but PageKsm is never downgraded to PageAnon). 1208 */ 1209 void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 1210 unsigned long address, rmap_t flags) 1211 { 1212 struct folio *folio = page_folio(page); 1213 atomic_t *mapped = &folio->_nr_pages_mapped; 1214 int nr = 0, nr_pmdmapped = 0; 1215 bool compound = flags & RMAP_COMPOUND; 1216 bool first = true; 1217 1218 /* Is page being mapped by PTE? Is this its first map to be added? */ 1219 if (likely(!compound)) { 1220 first = atomic_inc_and_test(&page->_mapcount); 1221 nr = first; 1222 if (first && folio_test_large(folio)) { 1223 nr = atomic_inc_return_relaxed(mapped); 1224 nr = (nr < COMPOUND_MAPPED); 1225 } 1226 } else if (folio_test_pmd_mappable(folio)) { 1227 /* That test is redundant: it's for safety or to optimize out */ 1228 1229 first = atomic_inc_and_test(&folio->_entire_mapcount); 1230 if (first) { 1231 nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 1232 if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1233 nr_pmdmapped = folio_nr_pages(folio); 1234 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 1235 /* Raced ahead of a remove and another add? */ 1236 if (unlikely(nr < 0)) 1237 nr = 0; 1238 } else { 1239 /* Raced ahead of a remove of COMPOUND_MAPPED */ 1240 nr = 0; 1241 } 1242 } 1243 } 1244 1245 VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 1246 VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 1247 1248 if (nr_pmdmapped) 1249 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); 1250 if (nr) 1251 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 1252 1253 if (likely(!folio_test_ksm(folio))) { 1254 /* address might be in next vma when migration races vma_merge */ 1255 if (first) 1256 __page_set_anon_rmap(folio, page, vma, address, 1257 !!(flags & RMAP_EXCLUSIVE)); 1258 else 1259 __page_check_anon_rmap(page, vma, address); 1260 } 1261 1262 mlock_vma_folio(folio, vma, compound); 1263 } 1264 1265 /** 1266 * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. 1267 * @folio: The folio to add the mapping to. 1268 * @vma: the vm area in which the mapping is added 1269 * @address: the user virtual address mapped 1270 * 1271 * Like page_add_anon_rmap() but must only be called on *new* folios. 1272 * This means the inc-and-test can be bypassed. 1273 * The folio does not have to be locked. 1274 * 1275 * If the folio is large, it is accounted as a THP. As the folio 1276 * is new, it's assumed to be mapped exclusively by a single process. 
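 *
 * Illustrative caller sketch (an assumption, simplified from
 * do_anonymous_page()):
 *
 *	folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 *	...
 *	folio_add_new_anon_rmap(folio, vma, vmf->address);
 *	folio_add_lru_vma(folio, vma);
 *	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);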
1277 */ 1278 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, 1279 unsigned long address) 1280 { 1281 int nr; 1282 1283 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); 1284 __folio_set_swapbacked(folio); 1285 1286 if (likely(!folio_test_pmd_mappable(folio))) { 1287 /* increment count (starts at -1) */ 1288 atomic_set(&folio->_mapcount, 0); 1289 nr = 1; 1290 } else { 1291 /* increment count (starts at -1) */ 1292 atomic_set(&folio->_entire_mapcount, 0); 1293 atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); 1294 nr = folio_nr_pages(folio); 1295 __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); 1296 } 1297 1298 __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); 1299 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); 1300 } 1301 1302 /** 1303 * page_add_file_rmap - add pte mapping to a file page 1304 * @page: the page to add the mapping to 1305 * @vma: the vm area in which the mapping is added 1306 * @compound: charge the page as compound or small page 1307 * 1308 * The caller needs to hold the pte lock. 1309 */ 1310 void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, 1311 bool compound) 1312 { 1313 struct folio *folio = page_folio(page); 1314 atomic_t *mapped = &folio->_nr_pages_mapped; 1315 int nr = 0, nr_pmdmapped = 0; 1316 bool first; 1317 1318 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); 1319 1320 /* Is page being mapped by PTE? Is this its first map to be added? */ 1321 if (likely(!compound)) { 1322 first = atomic_inc_and_test(&page->_mapcount); 1323 nr = first; 1324 if (first && folio_test_large(folio)) { 1325 nr = atomic_inc_return_relaxed(mapped); 1326 nr = (nr < COMPOUND_MAPPED); 1327 } 1328 } else if (folio_test_pmd_mappable(folio)) { 1329 /* That test is redundant: it's for safety or to optimize out */ 1330 1331 first = atomic_inc_and_test(&folio->_entire_mapcount); 1332 if (first) { 1333 nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); 1334 if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { 1335 nr_pmdmapped = folio_nr_pages(folio); 1336 nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); 1337 /* Raced ahead of a remove and another add? */ 1338 if (unlikely(nr < 0)) 1339 nr = 0; 1340 } else { 1341 /* Raced ahead of a remove of COMPOUND_MAPPED */ 1342 nr = 0; 1343 } 1344 } 1345 } 1346 1347 if (nr_pmdmapped) 1348 __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? 1349 NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); 1350 if (nr) 1351 __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); 1352 1353 mlock_vma_folio(folio, vma, compound); 1354 } 1355 1356 /** 1357 * page_remove_rmap - take down pte mapping from a page 1358 * @page: page to remove mapping from 1359 * @vma: the vm area from which the mapping is removed 1360 * @compound: uncharge the page as compound or small page 1361 * 1362 * The caller needs to hold the pte lock. 1363 */ 1364 void page_remove_rmap(struct page *page, struct vm_area_struct *vma, 1365 bool compound) 1366 { 1367 struct folio *folio = page_folio(page); 1368 atomic_t *mapped = &folio->_nr_pages_mapped; 1369 int nr = 0, nr_pmdmapped = 0; 1370 bool last; 1371 enum node_stat_item idx; 1372 1373 VM_BUG_ON_PAGE(compound && !PageHead(page), page); 1374 1375 /* Hugetlb pages are not counted in NR_*MAPPED */ 1376 if (unlikely(folio_test_hugetlb(folio))) { 1377 /* hugetlb pages are always mapped with pmds */ 1378 atomic_dec(&folio->_entire_mapcount); 1379 return; 1380 } 1381 1382 /* Is page being unmapped by PTE? Is this its last map to be removed? 
 */
	if (likely(!compound)) {
		last = atomic_add_negative(-1, &page->_mapcount);
		nr = last;
		if (last && folio_test_large(folio)) {
			nr = atomic_dec_return_relaxed(mapped);
			nr = (nr < COMPOUND_MAPPED);
		}
	} else if (folio_test_pmd_mappable(folio)) {
		/* That test is redundant: it's for safety or to optimize out */

		last = atomic_add_negative(-1, &folio->_entire_mapcount);
		if (last) {
			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
			if (likely(nr < COMPOUND_MAPPED)) {
				nr_pmdmapped = folio_nr_pages(folio);
				nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
				/* Raced ahead of another remove and an add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* An add of COMPOUND_MAPPED raced ahead */
				nr = 0;
			}
		}
	}

	if (nr_pmdmapped) {
		if (folio_test_anon(folio))
			idx = NR_ANON_THPS;
		else if (folio_test_swapbacked(folio))
			idx = NR_SHMEM_PMDMAPPED;
		else
			idx = NR_FILE_PMDMAPPED;
		__lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped);
	}
	if (nr) {
		idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
		__lruvec_stat_mod_folio(folio, idx, -nr);

		/*
		 * Queue anon THP for deferred split if at least one
		 * page of the folio is unmapped and at least one page
		 * is still mapped.
		 */
		if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
			if (!compound || nr < nr_pmdmapped)
				deferred_split_folio(folio);
	}

	/*
	 * It would be tidy to reset folio_test_anon mapping when fully
	 * unmapped, but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping before us:
	 * so leave the reset to free_pages_prepare, and remember that
	 * it's only reliable while mapped.
	 */

	munlock_vma_folio(folio, vma, compound);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool anon_exclusive, ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, false, folio);

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the folio cannot be freed in this function, as the caller
	 * of try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
1484 */ 1485 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1486 &range.end); 1487 } 1488 mmu_notifier_invalidate_range_start(&range); 1489 1490 while (page_vma_mapped_walk(&pvmw)) { 1491 /* Unexpected PMD-mapped THP? */ 1492 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 1493 1494 /* 1495 * If the folio is in an mlock()d vma, we must not swap it out. 1496 */ 1497 if (!(flags & TTU_IGNORE_MLOCK) && 1498 (vma->vm_flags & VM_LOCKED)) { 1499 /* Restore the mlock which got missed */ 1500 mlock_vma_folio(folio, vma, false); 1501 page_vma_mapped_walk_done(&pvmw); 1502 ret = false; 1503 break; 1504 } 1505 1506 subpage = folio_page(folio, 1507 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 1508 address = pvmw.address; 1509 anon_exclusive = folio_test_anon(folio) && 1510 PageAnonExclusive(subpage); 1511 1512 if (folio_test_hugetlb(folio)) { 1513 bool anon = folio_test_anon(folio); 1514 1515 /* 1516 * The try_to_unmap() is only passed a hugetlb page 1517 * in the case where the hugetlb page is poisoned. 1518 */ 1519 VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); 1520 /* 1521 * huge_pmd_unshare may unmap an entire PMD page. 1522 * There is no way of knowing exactly which PMDs may 1523 * be cached for this mm, so we must flush them all. 1524 * start/end were already adjusted above to cover this 1525 * range. 1526 */ 1527 flush_cache_range(vma, range.start, range.end); 1528 1529 /* 1530 * To call huge_pmd_unshare, i_mmap_rwsem must be 1531 * held in write mode. Caller needs to explicitly 1532 * do this outside rmap routines. 1533 * 1534 * We also must hold hugetlb vma_lock in write mode. 1535 * Lock order dictates acquiring vma_lock BEFORE 1536 * i_mmap_rwsem. We can only try lock here and fail 1537 * if unsuccessful. 1538 */ 1539 if (!anon) { 1540 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1541 if (!hugetlb_vma_trylock_write(vma)) { 1542 page_vma_mapped_walk_done(&pvmw); 1543 ret = false; 1544 break; 1545 } 1546 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 1547 hugetlb_vma_unlock_write(vma); 1548 flush_tlb_range(vma, 1549 range.start, range.end); 1550 mmu_notifier_invalidate_range(mm, 1551 range.start, range.end); 1552 /* 1553 * The ref count of the PMD page was 1554 * dropped which is part of the way map 1555 * counting is done for shared PMDs. 1556 * Return 'true' here. When there is 1557 * no other sharing, huge_pmd_unshare 1558 * returns false and we will unmap the 1559 * actual page and drop map count 1560 * to zero. 1561 */ 1562 page_vma_mapped_walk_done(&pvmw); 1563 break; 1564 } 1565 hugetlb_vma_unlock_write(vma); 1566 } 1567 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 1568 } else { 1569 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1570 /* Nuke the page table entry. */ 1571 if (should_defer_flush(mm, flags)) { 1572 /* 1573 * We clear the PTE but do not flush so potentially 1574 * a remote CPU could still be writing to the folio. 1575 * If the entry was previously clean then the 1576 * architecture must guarantee that a clear->dirty 1577 * transition on a cached TLB entry is written through 1578 * and traps if the PTE is unmapped. 1579 */ 1580 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1581 1582 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1583 } else { 1584 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1585 } 1586 } 1587 1588 /* 1589 * Now the pte is cleared. If this pte was uffd-wp armed, 1590 * we may want to replace a none pte with a marker pte if 1591 * it's file-backed, so we don't lose the tracking info. 
1592 */ 1593 pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); 1594 1595 /* Set the dirty flag on the folio now the pte is gone. */ 1596 if (pte_dirty(pteval)) 1597 folio_mark_dirty(folio); 1598 1599 /* Update high watermark before we lower rss */ 1600 update_hiwater_rss(mm); 1601 1602 if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { 1603 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1604 if (folio_test_hugetlb(folio)) { 1605 hugetlb_count_sub(folio_nr_pages(folio), mm); 1606 set_huge_pte_at(mm, address, pvmw.pte, pteval); 1607 } else { 1608 dec_mm_counter(mm, mm_counter(&folio->page)); 1609 set_pte_at(mm, address, pvmw.pte, pteval); 1610 } 1611 1612 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 1613 /* 1614 * The guest indicated that the page content is of no 1615 * interest anymore. Simply discard the pte, vmscan 1616 * will take care of the rest. 1617 * A future reference will then fault in a new zero 1618 * page. When userfaultfd is active, we must not drop 1619 * this page though, as its main user (postcopy 1620 * migration) will not expect userfaults on already 1621 * copied pages. 1622 */ 1623 dec_mm_counter(mm, mm_counter(&folio->page)); 1624 /* We have to invalidate as we cleared the pte */ 1625 mmu_notifier_invalidate_range(mm, address, 1626 address + PAGE_SIZE); 1627 } else if (folio_test_anon(folio)) { 1628 swp_entry_t entry = { .val = page_private(subpage) }; 1629 pte_t swp_pte; 1630 /* 1631 * Store the swap location in the pte. 1632 * See handle_pte_fault() ... 1633 */ 1634 if (unlikely(folio_test_swapbacked(folio) != 1635 folio_test_swapcache(folio))) { 1636 WARN_ON_ONCE(1); 1637 ret = false; 1638 /* We have to invalidate as we cleared the pte */ 1639 mmu_notifier_invalidate_range(mm, address, 1640 address + PAGE_SIZE); 1641 page_vma_mapped_walk_done(&pvmw); 1642 break; 1643 } 1644 1645 /* MADV_FREE page check */ 1646 if (!folio_test_swapbacked(folio)) { 1647 int ref_count, map_count; 1648 1649 /* 1650 * Synchronize with gup_pte_range(): 1651 * - clear PTE; barrier; read refcount 1652 * - inc refcount; barrier; read PTE 1653 */ 1654 smp_mb(); 1655 1656 ref_count = folio_ref_count(folio); 1657 map_count = folio_mapcount(folio); 1658 1659 /* 1660 * Order reads for page refcount and dirty flag 1661 * (see comments in __remove_mapping()). 1662 */ 1663 smp_rmb(); 1664 1665 /* 1666 * The only page refs must be one from isolation 1667 * plus the rmap(s) (dropped by discard:). 1668 */ 1669 if (ref_count == 1 + map_count && 1670 !folio_test_dirty(folio)) { 1671 /* Invalidate as we cleared the pte */ 1672 mmu_notifier_invalidate_range(mm, 1673 address, address + PAGE_SIZE); 1674 dec_mm_counter(mm, MM_ANONPAGES); 1675 goto discard; 1676 } 1677 1678 /* 1679 * If the folio was redirtied, it cannot be 1680 * discarded. Remap the page to page table. 1681 */ 1682 set_pte_at(mm, address, pvmw.pte, pteval); 1683 folio_set_swapbacked(folio); 1684 ret = false; 1685 page_vma_mapped_walk_done(&pvmw); 1686 break; 1687 } 1688 1689 if (swap_duplicate(entry) < 0) { 1690 set_pte_at(mm, address, pvmw.pte, pteval); 1691 ret = false; 1692 page_vma_mapped_walk_done(&pvmw); 1693 break; 1694 } 1695 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 1696 swap_free(entry); 1697 set_pte_at(mm, address, pvmw.pte, pteval); 1698 ret = false; 1699 page_vma_mapped_walk_done(&pvmw); 1700 break; 1701 } 1702 1703 /* See page_try_share_anon_rmap(): clear PTE first. 
			if (anon_exclusive &&
			    page_try_share_anon_rmap(subpage)) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (anon_exclusive)
				swp_pte = pte_swp_mkexclusive(swp_pte);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/mm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(&folio->page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range(); it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock before mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}

/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio. It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
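 *
 * Illustrative sketch (an assumption, simplified from the migration core):
 * the caller installs migration entries, copies the data, then removes them:
 *
 *	try_to_migrate(src, 0);
 *	... copy src to dst ...
 *	remove_migration_ptes(src, dst, false);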
1805 */ 1806 static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
1807 unsigned long address, void *arg)
1808 {
1809 struct mm_struct *mm = vma->vm_mm;
1810 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
1811 pte_t pteval;
1812 struct page *subpage;
1813 bool anon_exclusive, ret = true;
1814 struct mmu_notifier_range range;
1815 enum ttu_flags flags = (enum ttu_flags)(long)arg;
1816
1817 /*
1818 * When racing against e.g. zap_pte_range() on another cpu,
1819 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1820 * try_to_migrate() may return before page_mapped() has become false,
1821 * if page table locking is skipped: use TTU_SYNC to wait for that.
1822 */
1823 if (flags & TTU_SYNC)
1824 pvmw.flags = PVMW_SYNC;
1825
1826 /*
1827 * unmap_page() in mm/huge_memory.c is the only user of migration with
1828 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1829 */
1830 if (flags & TTU_SPLIT_HUGE_PMD)
1831 split_huge_pmd_address(vma, address, true, folio);
1832
1833 /*
1834 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
1835 * For hugetlb, it could be much worse if we need to do pud
1836 * invalidation in the case of pmd sharing.
1837 *
1838 * Note that the page cannot be freed in this function, as the caller of
1839 * try_to_migrate() must hold a reference on the page.
1840 */
1841 range.end = vma_address_end(&pvmw);
1842 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1843 address, range.end);
1844 if (folio_test_hugetlb(folio)) {
1845 /*
1846 * If sharing is possible, start and end will be adjusted
1847 * accordingly.
1848 */
1849 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1850 &range.end);
1851 }
1852 mmu_notifier_invalidate_range_start(&range);
1853
1854 while (page_vma_mapped_walk(&pvmw)) {
1855 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1856 /* PMD-mapped THP migration entry */
1857 if (!pvmw.pte) {
1858 subpage = folio_page(folio,
1859 pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1860 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1861 !folio_test_pmd_mappable(folio), folio);
1862
1863 if (set_pmd_migration_entry(&pvmw, subpage)) {
1864 ret = false;
1865 page_vma_mapped_walk_done(&pvmw);
1866 break;
1867 }
1868 continue;
1869 }
1870 #endif
1871
1872 /* Unexpected PMD-mapped THP? */
1873 VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1874
1875 if (folio_is_zone_device(folio)) {
1876 /*
1877 * Our PTE is a non-present device exclusive entry and
1878 * calculating the subpage as for the common case would
1879 * result in an invalid pointer.
1880 *
1881 * Since only PAGE_SIZE pages can currently be
1882 * migrated, just set it to page. This will need to be
1883 * changed when hugepage migrations to device private
1884 * memory are supported.
1885 */
1886 VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1887 subpage = &folio->page;
1888 } else {
1889 subpage = folio_page(folio,
1890 pte_pfn(*pvmw.pte) - folio_pfn(folio));
1891 }
1892 address = pvmw.address;
1893 anon_exclusive = folio_test_anon(folio) &&
1894 PageAnonExclusive(subpage);
1895
1896 if (folio_test_hugetlb(folio)) {
1897 bool anon = folio_test_anon(folio);
1898
1899 /*
1900 * huge_pmd_unshare may unmap an entire PMD page.
1901 * There is no way of knowing exactly which PMDs may
1902 * be cached for this mm, so we must flush them all.
1903 * start/end were already adjusted above to cover this
1904 * range.
1905 */
1906 flush_cache_range(vma, range.start, range.end);
1907
1908 /*
1909 * To call huge_pmd_unshare, i_mmap_rwsem must be
1910 * held in write mode.
Caller needs to explicitly 1911 * do this outside rmap routines. 1912 * 1913 * We also must hold hugetlb vma_lock in write mode. 1914 * Lock order dictates acquiring vma_lock BEFORE 1915 * i_mmap_rwsem. We can only try lock here and 1916 * fail if unsuccessful. 1917 */ 1918 if (!anon) { 1919 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); 1920 if (!hugetlb_vma_trylock_write(vma)) { 1921 page_vma_mapped_walk_done(&pvmw); 1922 ret = false; 1923 break; 1924 } 1925 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { 1926 hugetlb_vma_unlock_write(vma); 1927 flush_tlb_range(vma, 1928 range.start, range.end); 1929 mmu_notifier_invalidate_range(mm, 1930 range.start, range.end); 1931 1932 /* 1933 * The ref count of the PMD page was 1934 * dropped which is part of the way map 1935 * counting is done for shared PMDs. 1936 * Return 'true' here. When there is 1937 * no other sharing, huge_pmd_unshare 1938 * returns false and we will unmap the 1939 * actual page and drop map count 1940 * to zero. 1941 */ 1942 page_vma_mapped_walk_done(&pvmw); 1943 break; 1944 } 1945 hugetlb_vma_unlock_write(vma); 1946 } 1947 /* Nuke the hugetlb page table entry */ 1948 pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); 1949 } else { 1950 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 1951 /* Nuke the page table entry. */ 1952 if (should_defer_flush(mm, flags)) { 1953 /* 1954 * We clear the PTE but do not flush so potentially 1955 * a remote CPU could still be writing to the folio. 1956 * If the entry was previously clean then the 1957 * architecture must guarantee that a clear->dirty 1958 * transition on a cached TLB entry is written through 1959 * and traps if the PTE is unmapped. 1960 */ 1961 pteval = ptep_get_and_clear(mm, address, pvmw.pte); 1962 1963 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval)); 1964 } else { 1965 pteval = ptep_clear_flush(vma, address, pvmw.pte); 1966 } 1967 } 1968 1969 /* Set the dirty flag on the folio now the pte is gone. */ 1970 if (pte_dirty(pteval)) 1971 folio_mark_dirty(folio); 1972 1973 /* Update high watermark before we lower rss */ 1974 update_hiwater_rss(mm); 1975 1976 if (folio_is_device_private(folio)) { 1977 unsigned long pfn = folio_pfn(folio); 1978 swp_entry_t entry; 1979 pte_t swp_pte; 1980 1981 if (anon_exclusive) 1982 BUG_ON(page_try_share_anon_rmap(subpage)); 1983 1984 /* 1985 * Store the pfn of the page in a special migration 1986 * pte. do_swap_page() will wait until the migration 1987 * pte is removed and then restart fault handling. 1988 */ 1989 entry = pte_to_swp_entry(pteval); 1990 if (is_writable_device_private_entry(entry)) 1991 entry = make_writable_migration_entry(pfn); 1992 else if (anon_exclusive) 1993 entry = make_readable_exclusive_migration_entry(pfn); 1994 else 1995 entry = make_readable_migration_entry(pfn); 1996 swp_pte = swp_entry_to_pte(entry); 1997 1998 /* 1999 * pteval maps a zone device page and is therefore 2000 * a swap pte. 2001 */ 2002 if (pte_swp_soft_dirty(pteval)) 2003 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2004 if (pte_swp_uffd_wp(pteval)) 2005 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2006 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte); 2007 trace_set_migration_pte(pvmw.address, pte_val(swp_pte), 2008 compound_order(&folio->page)); 2009 /* 2010 * No need to invalidate here it will synchronize on 2011 * against the special swap migration pte. 
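 * Any later CPU access faults on the migration entry and waits in
 * do_swap_page() until the entry is removed, as noted above.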
2012 */ 2013 } else if (PageHWPoison(subpage)) { 2014 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2015 if (folio_test_hugetlb(folio)) { 2016 hugetlb_count_sub(folio_nr_pages(folio), mm); 2017 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2018 } else { 2019 dec_mm_counter(mm, mm_counter(&folio->page)); 2020 set_pte_at(mm, address, pvmw.pte, pteval); 2021 } 2022 2023 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) { 2024 /* 2025 * The guest indicated that the page content is of no 2026 * interest anymore. Simply discard the pte, vmscan 2027 * will take care of the rest. 2028 * A future reference will then fault in a new zero 2029 * page. When userfaultfd is active, we must not drop 2030 * this page though, as its main user (postcopy 2031 * migration) will not expect userfaults on already 2032 * copied pages. 2033 */ 2034 dec_mm_counter(mm, mm_counter(&folio->page)); 2035 /* We have to invalidate as we cleared the pte */ 2036 mmu_notifier_invalidate_range(mm, address, 2037 address + PAGE_SIZE); 2038 } else { 2039 swp_entry_t entry; 2040 pte_t swp_pte; 2041 2042 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2043 if (folio_test_hugetlb(folio)) 2044 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2045 else 2046 set_pte_at(mm, address, pvmw.pte, pteval); 2047 ret = false; 2048 page_vma_mapped_walk_done(&pvmw); 2049 break; 2050 } 2051 VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && 2052 !anon_exclusive, subpage); 2053 2054 /* See page_try_share_anon_rmap(): clear PTE first. */ 2055 if (anon_exclusive && 2056 page_try_share_anon_rmap(subpage)) { 2057 if (folio_test_hugetlb(folio)) 2058 set_huge_pte_at(mm, address, pvmw.pte, pteval); 2059 else 2060 set_pte_at(mm, address, pvmw.pte, pteval); 2061 ret = false; 2062 page_vma_mapped_walk_done(&pvmw); 2063 break; 2064 } 2065 2066 /* 2067 * Store the pfn of the page in a special migration 2068 * pte. do_swap_page() will wait until the migration 2069 * pte is removed and then restart fault handling. 2070 */ 2071 if (pte_write(pteval)) 2072 entry = make_writable_migration_entry( 2073 page_to_pfn(subpage)); 2074 else if (anon_exclusive) 2075 entry = make_readable_exclusive_migration_entry( 2076 page_to_pfn(subpage)); 2077 else 2078 entry = make_readable_migration_entry( 2079 page_to_pfn(subpage)); 2080 if (pte_young(pteval)) 2081 entry = make_migration_entry_young(entry); 2082 if (pte_dirty(pteval)) 2083 entry = make_migration_entry_dirty(entry); 2084 swp_pte = swp_entry_to_pte(entry); 2085 if (pte_soft_dirty(pteval)) 2086 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2087 if (pte_uffd_wp(pteval)) 2088 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2089 if (folio_test_hugetlb(folio)) 2090 set_huge_pte_at(mm, address, pvmw.pte, swp_pte); 2091 else 2092 set_pte_at(mm, address, pvmw.pte, swp_pte); 2093 trace_set_migration_pte(address, pte_val(swp_pte), 2094 compound_order(&folio->page)); 2095 /* 2096 * No need to invalidate here it will synchronize on 2097 * against the special swap migration pte. 
2098 */ 2099 } 2100 2101 /* 2102 * No need to call mmu_notifier_invalidate_range() it has be 2103 * done above for all cases requiring it to happen under page 2104 * table lock before mmu_notifier_invalidate_range_end() 2105 * 2106 * See Documentation/mm/mmu_notifier.rst 2107 */ 2108 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); 2109 if (vma->vm_flags & VM_LOCKED) 2110 mlock_drain_local(); 2111 folio_put(folio); 2112 } 2113 2114 mmu_notifier_invalidate_range_end(&range); 2115 2116 return ret; 2117 } 2118 2119 /** 2120 * try_to_migrate - try to replace all page table mappings with swap entries 2121 * @folio: the folio to replace page table entries for 2122 * @flags: action and flags 2123 * 2124 * Tries to remove all the page table entries which are mapping this folio and 2125 * replace them with special swap entries. Caller must hold the folio lock. 2126 */ 2127 void try_to_migrate(struct folio *folio, enum ttu_flags flags) 2128 { 2129 struct rmap_walk_control rwc = { 2130 .rmap_one = try_to_migrate_one, 2131 .arg = (void *)flags, 2132 .done = folio_not_mapped, 2133 .anon_lock = folio_lock_anon_vma_read, 2134 }; 2135 2136 /* 2137 * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and 2138 * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. 2139 */ 2140 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2141 TTU_SYNC | TTU_BATCH_FLUSH))) 2142 return; 2143 2144 if (folio_is_zone_device(folio) && 2145 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 2146 return; 2147 2148 /* 2149 * During exec, a temporary VMA is setup and later moved. 2150 * The VMA is moved under the anon_vma lock but not the 2151 * page tables leading to a race where migration cannot 2152 * find the migration ptes. Rather than increasing the 2153 * locking requirements of exec(), migration skips 2154 * temporary VMAs until after exec() completes. 2155 */ 2156 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2157 rwc.invalid_vma = invalid_migration_vma; 2158 2159 if (flags & TTU_RMAP_LOCKED) 2160 rmap_walk_locked(folio, &rwc); 2161 else 2162 rmap_walk(folio, &rwc); 2163 } 2164 2165 #ifdef CONFIG_DEVICE_PRIVATE 2166 struct make_exclusive_args { 2167 struct mm_struct *mm; 2168 unsigned long address; 2169 void *owner; 2170 bool valid; 2171 }; 2172 2173 static bool page_make_device_exclusive_one(struct folio *folio, 2174 struct vm_area_struct *vma, unsigned long address, void *priv) 2175 { 2176 struct mm_struct *mm = vma->vm_mm; 2177 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2178 struct make_exclusive_args *args = priv; 2179 pte_t pteval; 2180 struct page *subpage; 2181 bool ret = true; 2182 struct mmu_notifier_range range; 2183 swp_entry_t entry; 2184 pte_t swp_pte; 2185 2186 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, 2187 vma->vm_mm, address, min(vma->vm_end, 2188 address + folio_size(folio)), 2189 args->owner); 2190 mmu_notifier_invalidate_range_start(&range); 2191 2192 while (page_vma_mapped_walk(&pvmw)) { 2193 /* Unexpected PMD-mapped THP? */ 2194 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2195 2196 if (!pte_present(*pvmw.pte)) { 2197 ret = false; 2198 page_vma_mapped_walk_done(&pvmw); 2199 break; 2200 } 2201 2202 subpage = folio_page(folio, 2203 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 2204 address = pvmw.address; 2205 2206 /* Nuke the page table entry. 
*/ 2207 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2208 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2209 2210 /* Set the dirty flag on the folio now the pte is gone. */ 2211 if (pte_dirty(pteval)) 2212 folio_mark_dirty(folio); 2213 2214 /* 2215 * Check that our target page is still mapped at the expected 2216 * address. 2217 */ 2218 if (args->mm == mm && args->address == address && 2219 pte_write(pteval)) 2220 args->valid = true; 2221 2222 /* 2223 * Store the pfn of the page in a special migration 2224 * pte. do_swap_page() will wait until the migration 2225 * pte is removed and then restart fault handling. 2226 */ 2227 if (pte_write(pteval)) 2228 entry = make_writable_device_exclusive_entry( 2229 page_to_pfn(subpage)); 2230 else 2231 entry = make_readable_device_exclusive_entry( 2232 page_to_pfn(subpage)); 2233 swp_pte = swp_entry_to_pte(entry); 2234 if (pte_soft_dirty(pteval)) 2235 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2236 if (pte_uffd_wp(pteval)) 2237 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2238 2239 set_pte_at(mm, address, pvmw.pte, swp_pte); 2240 2241 /* 2242 * There is a reference on the page for the swap entry which has 2243 * been removed, so shouldn't take another. 2244 */ 2245 page_remove_rmap(subpage, vma, false); 2246 } 2247 2248 mmu_notifier_invalidate_range_end(&range); 2249 2250 return ret; 2251 } 2252 2253 /** 2254 * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 2255 * @folio: The folio to replace page table entries for. 2256 * @mm: The mm_struct where the folio is expected to be mapped. 2257 * @address: Address where the folio is expected to be mapped. 2258 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2259 * 2260 * Tries to remove all the page table entries which are mapping this 2261 * folio and replace them with special device exclusive swap entries to 2262 * grant a device exclusive access to the folio. 2263 * 2264 * Context: Caller must hold the folio lock. 2265 * Return: false if the page is still mapped, or if it could not be unmapped 2266 * from the expected address. Otherwise returns true (success). 2267 */ 2268 static bool folio_make_device_exclusive(struct folio *folio, 2269 struct mm_struct *mm, unsigned long address, void *owner) 2270 { 2271 struct make_exclusive_args args = { 2272 .mm = mm, 2273 .address = address, 2274 .owner = owner, 2275 .valid = false, 2276 }; 2277 struct rmap_walk_control rwc = { 2278 .rmap_one = page_make_device_exclusive_one, 2279 .done = folio_not_mapped, 2280 .anon_lock = folio_lock_anon_vma_read, 2281 .arg = &args, 2282 }; 2283 2284 /* 2285 * Restrict to anonymous folios for now to avoid potential writeback 2286 * issues. 2287 */ 2288 if (!folio_test_anon(folio)) 2289 return false; 2290 2291 rmap_walk(folio, &rwc); 2292 2293 return args.valid && !folio_mapcount(folio); 2294 } 2295 2296 /** 2297 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2298 * @mm: mm_struct of associated target process 2299 * @start: start of the region to mark for exclusive device access 2300 * @end: end address of region 2301 * @pages: returns the pages which were successfully marked for exclusive access 2302 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2303 * 2304 * Returns: number of pages found in the range by GUP. A page is marked for 2305 * exclusive access only if the page pointer is non-NULL. 
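 *
 * A minimal calling sketch for a single page (illustrative only; "addr"
 * and "my_owner" are hypothetical, and my_owner is the same pointer the
 * driver registered so its MMU_NOTIFY_EXCLUSIVE callback can filter out
 * its own invalidations):
 *
 *	struct page *page = NULL;
 *	int ret;
 *
 *	mmap_read_lock(mm);
 *	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
 *					  &page, my_owner);
 *	mmap_read_unlock(mm);
 *	if (ret > 0 && page) {
 *		... program the device mapping ...
 *		unlock_page(page);
 *		put_page(page);
 *	}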
2306 * 2307 * This function finds ptes mapping page(s) to the given address range, locks 2308 * them and replaces mappings with special swap entries preventing userspace CPU 2309 * access. On fault these entries are replaced with the original mapping after 2310 * calling MMU notifiers. 2311 * 2312 * A driver using this to program access from a device must use a mmu notifier 2313 * critical section to hold a device specific lock during programming. Once 2314 * programming is complete it should drop the page lock and reference after 2315 * which point CPU access to the page will revoke the exclusive access. 2316 */ 2317 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2318 unsigned long end, struct page **pages, 2319 void *owner) 2320 { 2321 long npages = (end - start) >> PAGE_SHIFT; 2322 long i; 2323 2324 npages = get_user_pages_remote(mm, start, npages, 2325 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2326 pages, NULL, NULL); 2327 if (npages < 0) 2328 return npages; 2329 2330 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2331 struct folio *folio = page_folio(pages[i]); 2332 if (PageTail(pages[i]) || !folio_trylock(folio)) { 2333 folio_put(folio); 2334 pages[i] = NULL; 2335 continue; 2336 } 2337 2338 if (!folio_make_device_exclusive(folio, mm, start, owner)) { 2339 folio_unlock(folio); 2340 folio_put(folio); 2341 pages[i] = NULL; 2342 } 2343 } 2344 2345 return npages; 2346 } 2347 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2348 #endif 2349 2350 void __put_anon_vma(struct anon_vma *anon_vma) 2351 { 2352 struct anon_vma *root = anon_vma->root; 2353 2354 anon_vma_free(anon_vma); 2355 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2356 anon_vma_free(root); 2357 } 2358 2359 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 2360 struct rmap_walk_control *rwc) 2361 { 2362 struct anon_vma *anon_vma; 2363 2364 if (rwc->anon_lock) 2365 return rwc->anon_lock(folio, rwc); 2366 2367 /* 2368 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2369 * because that depends on page_mapped(); but not all its usages 2370 * are holding mmap_lock. Users without mmap_lock are required to 2371 * take a reference count to prevent the anon_vma disappearing 2372 */ 2373 anon_vma = folio_anon_vma(folio); 2374 if (!anon_vma) 2375 return NULL; 2376 2377 if (anon_vma_trylock_read(anon_vma)) 2378 goto out; 2379 2380 if (rwc->try_lock) { 2381 anon_vma = NULL; 2382 rwc->contended = true; 2383 goto out; 2384 } 2385 2386 anon_vma_lock_read(anon_vma); 2387 out: 2388 return anon_vma; 2389 } 2390 2391 /* 2392 * rmap_walk_anon - do something to anonymous page using the object-based 2393 * rmap method 2394 * @page: the page to be handled 2395 * @rwc: control variable according to each walk type 2396 * 2397 * Find all the mappings of a page using the mapping pointer and the vma chains 2398 * contained in the anon_vma struct it points to. 2399 */ 2400 static void rmap_walk_anon(struct folio *folio, 2401 struct rmap_walk_control *rwc, bool locked) 2402 { 2403 struct anon_vma *anon_vma; 2404 pgoff_t pgoff_start, pgoff_end; 2405 struct anon_vma_chain *avc; 2406 2407 if (locked) { 2408 anon_vma = folio_anon_vma(folio); 2409 /* anon_vma disappear under us? 
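 * A caller of rmap_walk_locked() already holds the anon_vma lock, so
 * the anon_vma must still exist here; hence the VM_BUG_ON below rather
 * than a graceful bail-out.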
*/ 2410 VM_BUG_ON_FOLIO(!anon_vma, folio); 2411 } else { 2412 anon_vma = rmap_walk_anon_lock(folio, rwc); 2413 } 2414 if (!anon_vma) 2415 return; 2416 2417 pgoff_start = folio_pgoff(folio); 2418 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2419 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2420 pgoff_start, pgoff_end) { 2421 struct vm_area_struct *vma = avc->vma; 2422 unsigned long address = vma_address(&folio->page, vma); 2423 2424 VM_BUG_ON_VMA(address == -EFAULT, vma); 2425 cond_resched(); 2426 2427 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2428 continue; 2429 2430 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2431 break; 2432 if (rwc->done && rwc->done(folio)) 2433 break; 2434 } 2435 2436 if (!locked) 2437 anon_vma_unlock_read(anon_vma); 2438 } 2439 2440 /* 2441 * rmap_walk_file - do something to file page using the object-based rmap method 2442 * @page: the page to be handled 2443 * @rwc: control variable according to each walk type 2444 * 2445 * Find all the mappings of a page using the mapping pointer and the vma chains 2446 * contained in the address_space struct it points to. 2447 */ 2448 static void rmap_walk_file(struct folio *folio, 2449 struct rmap_walk_control *rwc, bool locked) 2450 { 2451 struct address_space *mapping = folio_mapping(folio); 2452 pgoff_t pgoff_start, pgoff_end; 2453 struct vm_area_struct *vma; 2454 2455 /* 2456 * The page lock not only makes sure that page->mapping cannot 2457 * suddenly be NULLified by truncation, it makes sure that the 2458 * structure at mapping cannot be freed and reused yet, 2459 * so we can safely take mapping->i_mmap_rwsem. 2460 */ 2461 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2462 2463 if (!mapping) 2464 return; 2465 2466 pgoff_start = folio_pgoff(folio); 2467 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2468 if (!locked) { 2469 if (i_mmap_trylock_read(mapping)) 2470 goto lookup; 2471 2472 if (rwc->try_lock) { 2473 rwc->contended = true; 2474 return; 2475 } 2476 2477 i_mmap_lock_read(mapping); 2478 } 2479 lookup: 2480 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2481 pgoff_start, pgoff_end) { 2482 unsigned long address = vma_address(&folio->page, vma); 2483 2484 VM_BUG_ON_VMA(address == -EFAULT, vma); 2485 cond_resched(); 2486 2487 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2488 continue; 2489 2490 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2491 goto done; 2492 if (rwc->done && rwc->done(folio)) 2493 goto done; 2494 } 2495 2496 done: 2497 if (!locked) 2498 i_mmap_unlock_read(mapping); 2499 } 2500 2501 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2502 { 2503 if (unlikely(folio_test_ksm(folio))) 2504 rmap_walk_ksm(folio, rwc); 2505 else if (folio_test_anon(folio)) 2506 rmap_walk_anon(folio, rwc, false); 2507 else 2508 rmap_walk_file(folio, rwc, false); 2509 } 2510 2511 /* Like rmap_walk, but caller holds relevant rmap lock */ 2512 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2513 { 2514 /* no ksm support for now */ 2515 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 2516 if (folio_test_anon(folio)) 2517 rmap_walk_anon(folio, rwc, true); 2518 else 2519 rmap_walk_file(folio, rwc, true); 2520 } 2521 2522 #ifdef CONFIG_HUGETLB_PAGE 2523 /* 2524 * The following two functions are for anonymous (private mapped) hugepages. 2525 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2526 * and no lru code, because we handle hugepages differently from common pages. 
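 * Their mapping state lives in the folio-wide _entire_mapcount rather
 * than in per-page mapcounts, as the two helpers below show.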
2527 * 2528 * RMAP_COMPOUND is ignored. 2529 */ 2530 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 2531 unsigned long address, rmap_t flags) 2532 { 2533 struct folio *folio = page_folio(page); 2534 struct anon_vma *anon_vma = vma->anon_vma; 2535 int first; 2536 2537 BUG_ON(!folio_test_locked(folio)); 2538 BUG_ON(!anon_vma); 2539 /* address might be in next vma when migration races vma_merge */ 2540 first = atomic_inc_and_test(&folio->_entire_mapcount); 2541 VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 2542 VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 2543 if (first) 2544 __page_set_anon_rmap(folio, page, vma, address, 2545 !!(flags & RMAP_EXCLUSIVE)); 2546 } 2547 2548 void hugepage_add_new_anon_rmap(struct folio *folio, 2549 struct vm_area_struct *vma, unsigned long address) 2550 { 2551 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2552 /* increment count (starts at -1) */ 2553 atomic_set(&folio->_entire_mapcount, 0); 2554 folio_clear_hugetlb_restore_reserve(folio); 2555 __page_set_anon_rmap(folio, &folio->page, vma, address, 1); 2556 } 2557 #endif /* CONFIG_HUGETLB_PAGE */ 2558
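/*
 * All of the walkers above follow the same pattern: fill in a
 * rmap_walk_control with a per-VMA callback and hand it to rmap_walk().
 * A minimal sketch of a custom walk (count_one() and count_mappings()
 * are hypothetical names, not part of this file):
 *
 *	static bool count_one(struct folio *folio, struct vm_area_struct *vma,
 *			      unsigned long address, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	static int count_mappings(struct folio *folio)
 *	{
 *		int count = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = count_one,
 *			.arg = &count,
 *			.anon_lock = folio_lock_anon_vma_read,
 *		};
 *
 *		rmap_walk(folio, &rwc);
 *		return count;
 *	}
 *
 * Returning true from the callback keeps the walk going; returning false
 * stops it, and the optional .done callback (e.g. folio_not_mapped above)
 * can end the walk early. The folio is expected to be locked, as for
 * try_to_unmap() and try_to_migrate(); rmap_walk() itself takes the
 * anon_vma or i_mmap lock for the duration of the walk unless the
 * _locked variant is used.
 */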