/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_rwsem	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     mapping->invalidate_lock (in filemap_fault)
 *       page->flags PG_locked (lock_page)
 *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
 *           mapping->i_mmap_rwsem
 *             anon_vma->rwsem
 *               mm->page_table_lock or pte_lock
 *                 swap_lock (in swap_duplicate, swap_info_get)
 *                   mmlist_lock (in mmput, drain_mmlist and others)
 *                   mapping->private_lock (in block_dirty_folio)
 *                     folio_lock_memcg move_lock (in block_dirty_folio)
 *                       i_pages lock (widely used)
 *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
 *                   inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                   bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                     sb_lock (within inode_lock in fs/fs-writeback.c)
 *                     i_pages lock (widely used, in set_page_dirty,
 *                               in arch-dependent flush_dcache_mmap_lock,
 *                               within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mmap_rwsem (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * hugetlbfs PageHuge() pages take locks in this order:
 *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *     vma_lock (hugetlb specific lock for pmd_sharing)
 *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
 *         page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/mm_inline.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
#include <trace/events/migrate.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->num_children = 0;
		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against folio_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from folio_lock_anon_vma_read(). This orders:
	 *
	 * folio_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in folio_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
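 *
 * For illustration, an anonymous-fault path typically goes through the
 * anon_vma_prepare() wrapper before installing a new PTE, roughly like
 * this (a sketch, not the exact fault-handler code):
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	... allocate the page, page_add_new_anon_rmap(), set_pte_at() ...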
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the
 * call, we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an endless
 * linear chain in case of a constantly forking task. On the other hand, an
 * anon_vma with more than one child isn't reused even if there was no alive
 * vma, thus the rmap walker has a good chance of avoiding scanning the whole
 * hierarchy when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if it has no vma and only one
		 * anon_vma child.
		 *
		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma->num_children < 2 &&
		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its num_active_vmas can
	 * be incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->num_children--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->num_active_vmas--;

		/*
		 * The vma would still be needed after unlink, and the
		 * anon_vma will be reprepared when a fault is next handled.
		 */
		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->num_children);
		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap(),
 * the best this function can do is return a refcount-increased anon_vma
 * that might have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
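 *
 * A careful caller therefore looks roughly like this (a sketch, not taken
 * from any particular caller):
 *
 *	anon_vma = folio_get_anon_vma(folio);
 *	if (anon_vma) {
 *		anon_vma_lock_read(anon_vma);
 *		... walk the rmap, re-checking that the folio is mapped ...
 *		anon_vma_unlock_read(anon_vma);
 *		put_anon_vma(anon_vma);
 *	}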
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this folio is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to folio_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with folio_get_anon_vma() and then block on the mutex
 * in the !rwc->try_lock case.
 */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
					  struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!folio_mapped(folio))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the folio is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!folio_mapped(folio)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (rwc && rwc->try_lock) {
		anon_vma = NULL;
		rwc->contended = true;
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!folio_mapped(folio)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs.
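 *
 * Reclaim is the batching user; its flow is roughly (a sketch, not the
 * exact shrink_folio_list() code):
 *
 *	try_to_unmap(folio, TTU_BATCH_FLUSH | ...);	(may only queue the flush)
 *	...
 *	try_to_unmap_flush_dirty();	(before starting writeback on the folio)
 *	...
 *	try_to_unmap_flush();		(before the unmapped folios are freed)
 *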
 * It is important that, if a PTE was dirty when it was unmapped, its TLB
 * entry is flushed before any IO is initiated on the page, to prevent lost
 * writes. Similarly, it must be flushed before freeing to prevent data
 * leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

/*
 * Bits 0-14 of mm->tlb_flush_batched record pending generations.
 * Bits 16-30 of mm->tlb_flush_batched record flushed generations.
 */
#define TLB_FLUSH_BATCH_FLUSHED_SHIFT	16
#define TLB_FLUSH_BATCH_PENDING_MASK	\
	((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1)
#define TLB_FLUSH_BATCH_PENDING_LARGE	\
	(TLB_FLUSH_BATCH_PENDING_MASK / 2)

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int batch, nbatch;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	batch = atomic_read(&mm->tlb_flush_batched);
retry:
	if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) {
		/*
		 * Prevent `pending' from catching up with `flushed' because of
		 * overflow.  Reset `pending' and `flushed' to be 1 and 0 if
		 * `pending' becomes large.
		 */
		nbatch = atomic_cmpxchg(&mm->tlb_flush_batched, batch, 1);
		if (nbatch != batch) {
			batch = nbatch;
			goto retry;
		}
	} else {
		atomic_inc(&mm->tlb_flush_batched);
	}

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush by batching */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
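 *
 * For example, a zap/mprotect-style path does roughly (a sketch):
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	flush_tlb_batched_pending(mm);
 *	... clear or write-protect PTEs ...
 *	pte_unmap_unlock(pte, ptl);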
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	int batch = atomic_read(&mm->tlb_flush_batched);
	int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK;
	int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT;

	if (pending != flushed) {
		flush_tlb_mm(mm);
		/*
		 * If a new TLB flush became pending while we were flushing,
		 * leave mm->tlb_flush_batched as is, to avoid losing that
		 * flush.
		 */
		atomic_cmpxchg(&mm->tlb_flush_batched, batch,
			       pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT));
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);
	if (folio_test_anon(folio)) {
		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (!vma->vm_file) {
		return -EFAULT;
	} else if (vma->vm_file->f_mapping != folio->mapping) {
		return -EFAULT;
	}

	return vma_address(page, vma);
}

/*
 * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
 * NULL if it doesn't exist.  No guarantees / checks on what the pmd_t*
 * represents.
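 *
 * Callers typically follow it with their own pte-level checks (a sketch):
 *
 *	pmd = mm_find_pmd(mm, address);
 *	if (!pmd)
 *		return;
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);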
 */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
out:
	return pmd;
}

struct folio_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: folio_referenced_arg will be passed
 */
static bool folio_referenced_one(struct folio *folio,
		struct vm_area_struct *vma, unsigned long address, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if ((vma->vm_flags & VM_LOCKED) &&
		    (!folio_test_large(folio) || !pvmw.pte)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, !pvmw.pte);
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
			    !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
				lru_gen_look_around(&pvmw);
				referenced++;
			}

			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the folio has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * the referenced flag or activated the folio.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		folio_clear_idle(folio);
	if (folio_test_clear_young(folio))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags & ~VM_LOCKED;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct folio_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * folio_referenced() - Test if the folio was referenced.
 * @folio: The folio to test.
 * @is_locked: Caller holds lock on the folio.
 * @memcg: target memory cgroup
 * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
 *
 * Quick test_and_clear_referenced for all mappings of a folio.
 *
 * Return: The number of mappings which referenced the folio. Return -1 if
 * the function bailed out due to rmap lock contention.
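 *
 * A vmscan-style caller might use it as follows (a sketch of a
 * folio_check_references()-like function, not its exact code):
 *
 *	referenced = folio_referenced(folio, 1, sc->target_mem_cgroup, &vm_flags);
 *	if (referenced == -1)
 *		return FOLIOREF_KEEP;		(contended: keep and retry later)
 *	if (vm_flags & VM_LOCKED)
 *		return FOLIOREF_ACTIVATE;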
 */
int folio_referenced(struct folio *folio, int is_locked,
		     struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	int we_locked = 0;
	struct folio_referenced_arg pra = {
		.mapcount = folio_mapcount(folio),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = folio_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = folio_lock_anon_vma_read,
		.try_lock = true,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!folio_raw_mapping(folio))
		return 0;

	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
		we_locked = folio_trylock(folio);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_folio_referenced_vma;
	}

	rmap_walk(folio, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		folio_unlock(folio);

	return rwc.contended ? -1 : pra.referenced;
}

static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
{
	int cleaned = 0;
	struct vm_area_struct *vma = pvmw->vma;
	struct mmu_notifier_range range;
	unsigned long address = pvmw->address;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation.
	 * Note that the folio cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				vma_address_end(pvmw));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(pvmw)) {
		int ret = 0;

		address = pvmw->address;
		if (pvmw->pte) {
			pte_t entry;
			pte_t *pte = pvmw->pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw->pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_range(vma, address,
					  address + HPAGE_PMD_SIZE);
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped folio? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		if (ret)
			cleaned++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return cleaned;
}

static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
	int *cleaned = arg;

	*cleaned += page_vma_mkclean_one(&pvmw);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int folio_mkclean(struct folio *folio)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!folio_test_locked(folio));

	if (!folio_mapped(folio))
		return 0;

	mapping = folio_mapping(folio);
	if (!mapping)
		return 0;

	rmap_walk(folio, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(folio_mkclean);

/**
 * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of
 *                     [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff)
 *                     within the @vma of shared mappings. And since clean PTEs
 *                     should also be readonly, write protects them too.
 * @pfn: start pfn.
 * @nr_pages: number of physically contiguous pages starting with @pfn.
 * @pgoff: page offset that @pfn is mapped with.
 * @vma: vma that @pfn is mapped within.
 *
 * Returns the number of cleaned PTEs (including PMDs).
 */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
		      struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.pfn		= pfn,
		.nr_pages	= nr_pages,
		.pgoff		= pgoff,
		.vma		= vma,
		.flags		= PVMW_SYNC,
	};

	if (invalid_mkclean_vma(vma, NULL))
		return 0;

	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);

	return page_vma_mkclean_one(&pvmw);
}

int total_compound_mapcount(struct page *head)
{
	int mapcount = head_compound_mapcount(head);
	int nr_subpages;
	int i;

	/* In the common case, avoid the loop when no subpages mapped by PTE */
	if (head_subpages_mapcount(head) == 0)
		return mapcount;
	/*
	 * Add all the PTE mappings of those subpages mapped by PTE.
	 * Limit the loop, knowing that only subpages_mapcount are mapped?
	 * Perhaps: given all the raciness, that may be a good or a bad idea.
	 */
	nr_subpages = thp_nr_pages(head);
	for (i = 0; i < nr_subpages; i++)
		mapcount += atomic_read(&head[i]._mapcount);

	/* But each of those _mapcounts was based on -1 */
	mapcount += nr_subpages;
	return mapcount;
}

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
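 *
 * The typical caller is the COW write-fault reuse path, roughly (a sketch):
 *
 *	if (folio is exclusively owned by this process) {
 *		page_move_anon_rmap(page, vma);
 *		... map the page writable for this fault ...
 *	}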
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	void *anon_vma = vma->anon_vma;
	struct folio *folio = page_folio(page);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma += PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (e.g. folio_referenced()'s
	 * folio_test_anon()) will not see one without the other.
	 */
	WRITE_ONCE(folio->mapping, anon_vma);
	SetPageAnonExclusive(page);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page or Hugepage to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		goto out;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
	page->index = linear_page_index(vma, address);
out:
	if (exclusive)
		SetPageAnonExclusive(page);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct folio *folio = page_folio(page);
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root,
			folio);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @flags: the rmap flags
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping/index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
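 *
 * Typical use when mapping an already-existing anonymous page, e.g. at
 * swapin (a sketch):
 *
 *	page_add_anon_rmap(page, vma, address, rmap_flags);
 *	set_pte_at(mm, address, ptep, pte);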
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, rmap_t flags)
{
	atomic_t *mapped;
	int nr = 0, nr_pmdmapped = 0;
	bool compound = flags & RMAP_COMPOUND;
	bool first = true;

	/* Is page being mapped by PTE? Is this its first map to be added? */
	if (likely(!compound)) {
		first = atomic_inc_and_test(&page->_mapcount);
		nr = first;
		if (first && PageCompound(page)) {
			mapped = subpages_mapcount_ptr(compound_head(page));
			nr = atomic_inc_return_relaxed(mapped);
			nr = (nr < COMPOUND_MAPPED);
		}
	} else if (PageTransHuge(page)) {
		/* That test is redundant: it's for safety or to optimize out */

		first = atomic_inc_and_test(compound_mapcount_ptr(page));
		if (first) {
			mapped = subpages_mapcount_ptr(page);
			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
				nr_pmdmapped = thp_nr_pages(page);
				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
				/* Raced ahead of a remove and another add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* Raced ahead of a remove of COMPOUND_MAPPED */
				nr = 0;
			}
		}
	}

	VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page);
	VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page);

	if (nr_pmdmapped)
		__mod_lruvec_page_state(page, NR_ANON_THPS, nr_pmdmapped);
	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);

	if (likely(!PageKsm(page))) {
		/* address might be in next vma when migration races vma_adjust */
		if (first)
			__page_set_anon_rmap(page, vma, address,
					     !!(flags & RMAP_EXCLUSIVE));
		else
			__page_check_anon_rmap(page, vma, address);
	}

	mlock_vma_page(page, vma, compound);
}

/**
 * page_add_new_anon_rmap - add mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * If it's a compound page, it is accounted as a compound page. As the page
 * is new, it's assumed to be mapped exclusively by a single process.
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * The page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	int nr;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);

	if (likely(!PageCompound(page))) {
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
		nr = 1;
	} else {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
		nr = thp_nr_pages(page);
		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
	}

	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
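 *
 * Typical use from the file-fault path (a sketch):
 *
 *	page_add_file_rmap(page, vma, false);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);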
 */
void page_add_file_rmap(struct page *page,
	struct vm_area_struct *vma, bool compound)
{
	atomic_t *mapped;
	int nr = 0, nr_pmdmapped = 0;
	bool first;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);

	/* Is page being mapped by PTE? Is this its first map to be added? */
	if (likely(!compound)) {
		first = atomic_inc_and_test(&page->_mapcount);
		nr = first;
		if (first && PageCompound(page)) {
			mapped = subpages_mapcount_ptr(compound_head(page));
			nr = atomic_inc_return_relaxed(mapped);
			nr = (nr < COMPOUND_MAPPED);
		}
	} else if (PageTransHuge(page)) {
		/* That test is redundant: it's for safety or to optimize out */

		first = atomic_inc_and_test(compound_mapcount_ptr(page));
		if (first) {
			mapped = subpages_mapcount_ptr(page);
			nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
			if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
				nr_pmdmapped = thp_nr_pages(page);
				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
				/* Raced ahead of a remove and another add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* Raced ahead of a remove of COMPOUND_MAPPED */
				nr = 0;
			}
		}
	}

	if (nr_pmdmapped)
		__mod_lruvec_page_state(page, PageSwapBacked(page) ?
			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
	if (nr)
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);

	mlock_vma_page(page, vma, compound);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area from which the mapping is removed
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page,
	struct vm_area_struct *vma, bool compound)
{
	atomic_t *mapped;
	int nr = 0, nr_pmdmapped = 0;
	bool last;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugetlb pages are not counted in NR_*MAPPED */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	/* Is page being unmapped by PTE? Is this its last map to be removed? */
	if (likely(!compound)) {
		last = atomic_add_negative(-1, &page->_mapcount);
		nr = last;
		if (last && PageCompound(page)) {
			mapped = subpages_mapcount_ptr(compound_head(page));
			nr = atomic_dec_return_relaxed(mapped);
			nr = (nr < COMPOUND_MAPPED);
		}
	} else if (PageTransHuge(page)) {
		/* That test is redundant: it's for safety or to optimize out */

		last = atomic_add_negative(-1, compound_mapcount_ptr(page));
		if (last) {
			mapped = subpages_mapcount_ptr(page);
			nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
			if (likely(nr < COMPOUND_MAPPED)) {
				nr_pmdmapped = thp_nr_pages(page);
				nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
				/* Raced ahead of another remove and an add? */
				if (unlikely(nr < 0))
					nr = 0;
			} else {
				/* An add of COMPOUND_MAPPED raced ahead */
				nr = 0;
			}
		}
	}

	if (nr_pmdmapped) {
		__mod_lruvec_page_state(page, PageAnon(page) ? NR_ANON_THPS :
				(PageSwapBacked(page) ? NR_SHMEM_PMDMAPPED :
				 NR_FILE_PMDMAPPED), -nr_pmdmapped);
	}
	if (nr) {
		__mod_lruvec_page_state(page, PageAnon(page) ?
				NR_ANON_MAPPED : NR_FILE_MAPPED, -nr);
		/*
		 * Queue anon THP for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
		if (PageTransCompound(page) && PageAnon(page))
			if (!compound || nr < nr_pmdmapped)
				deferred_split_huge_page(compound_head(page));
	}

	/*
	 * It would be tidy to reset PageAnon mapping when fully unmapped,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_pages_prepare,
	 * and remember that it's only reliable while mapped.
	 */

	munlock_vma_page(page, vma, compound);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool anon_exclusive, ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
	 * When racing against e.g. zap_pte_range() on another cpu,
	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
	 * try_to_unmap() may return before page_mapped() has become false,
	 * if page table locking is skipped: use TTU_SYNC to wait for that.
	 */
	if (flags & TTU_SYNC)
		pvmw.flags = PVMW_SYNC;

	if (flags & TTU_SPLIT_HUGE_PMD)
		split_huge_pmd_address(vma, address, false, folio);

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the folio cannot be freed in this function as call of
	 * try_to_unmap() must hold a reference on the folio.
	 */
	range.end = vma_address_end(&pvmw);
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address, range.end);
	if (folio_test_hugetlb(folio)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_FOLIO(!pvmw.pte, folio);

		/*
		 * If the folio is in an mlock()d vma, we must not swap it out.
		 */
		if (!(flags & TTU_IGNORE_MLOCK) &&
		    (vma->vm_flags & VM_LOCKED)) {
			/* Restore the mlock which got missed */
			mlock_vma_folio(folio, vma, false);
			page_vma_mapped_walk_done(&pvmw);
			ret = false;
			break;
		}

		subpage = folio_page(folio,
					pte_pfn(*pvmw.pte) - folio_pfn(folio));
		address = pvmw.address;
		anon_exclusive = folio_test_anon(folio) &&
				 PageAnonExclusive(subpage);

		if (folio_test_hugetlb(folio)) {
			bool anon = folio_test_anon(folio);

			/*
			 * The try_to_unmap() is only passed a hugetlb page
			 * in the case where the hugetlb page is poisoned.
			 */
			VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage);
			/*
			 * huge_pmd_unshare may unmap an entire PMD page.
			 * There is no way of knowing exactly which PMDs may
			 * be cached for this mm, so we must flush them all.
			 * start/end were already adjusted above to cover this
			 * range.
			 */
			flush_cache_range(vma, range.start, range.end);

			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 *
			 * We also must hold hugetlb vma_lock in write mode.
			 * Lock order dictates acquiring vma_lock BEFORE
			 * i_mmap_rwsem.  We can only try lock here and fail
			 * if unsuccessful.
			 */
			if (!anon) {
				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
				if (!hugetlb_vma_trylock_write(vma)) {
					page_vma_mapped_walk_done(&pvmw);
					ret = false;
					break;
				}
				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
					hugetlb_vma_unlock_write(vma);
					flush_tlb_range(vma,
						range.start, range.end);
					mmu_notifier_invalidate_range(mm,
						range.start, range.end);
					/*
					 * The ref count of the PMD page was
					 * dropped which is part of the way map
					 * counting is done for shared PMDs.
					 * Return 'true' here.  When there is
					 * no other sharing, huge_pmd_unshare
					 * returns false and we will unmap the
					 * actual page and drop map count
					 * to zero.
					 */
					page_vma_mapped_walk_done(&pvmw);
					break;
				}
				hugetlb_vma_unlock_write(vma);
			}
			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
		} else {
			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
			/* Nuke the page table entry. */
			if (should_defer_flush(mm, flags)) {
				/*
				 * We clear the PTE but do not flush so potentially
				 * a remote CPU could still be writing to the folio.
				 * If the entry was previously clean then the
				 * architecture must guarantee that a clear->dirty
				 * transition on a cached TLB entry is written through
				 * and traps if the PTE is unmapped.
				 */
				pteval = ptep_get_and_clear(mm, address, pvmw.pte);

				set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
			} else {
				pteval = ptep_clear_flush(vma, address, pvmw.pte);
			}
		}

		/*
		 * Now the pte is cleared. If this pte was uffd-wp armed,
		 * we may want to replace a none pte with a marker pte if
		 * it's file-backed, so we don't lose the tracking info.
		 */
		pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);

		/* Set the dirty flag on the folio now the pte is gone. */
		if (pte_dirty(pteval))
			folio_mark_dirty(folio);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (folio_test_hugetlb(folio)) {
				hugetlb_count_sub(folio_nr_pages(folio), mm);
				set_huge_pte_at(mm, address, pvmw.pte, pteval);
			} else {
				dec_mm_counter(mm, mm_counter(&folio->page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(&folio->page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (folio_test_anon(folio)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(folio_test_swapbacked(folio) !=
					folio_test_swapcache(folio))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!folio_test_swapbacked(folio)) {
				int ref_count, map_count;

				/*
				 * Synchronize with gup_pte_range():
				 * - clear PTE; barrier; read refcount
				 * - inc refcount; barrier; read PTE
				 */
				smp_mb();

				ref_count = folio_ref_count(folio);
				map_count = folio_mapcount(folio);

				/*
				 * Order reads for page refcount and dirty flag
				 * (see comments in __remove_mapping()).
				 */
				smp_rmb();

				/*
				 * The only page refs must be one from isolation
				 * plus the rmap(s) (dropped by discard:).
				 */
				if (ref_count == 1 + map_count &&
				    !folio_test_dirty(folio)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the folio was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				folio_set_swapbacked(folio);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* See page_try_share_anon_rmap(): clear PTE first. */
			if (anon_exclusive &&
			    page_try_share_anon_rmap(subpage)) {
				swap_free(entry);
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			/*
			 * Note: We *don't* remember if the page was mapped
			 * exclusively in the swap pte if the architecture
			 * doesn't support __HAVE_ARCH_PTE_SWP_EXCLUSIVE. In
			 * that case, swapin code has to re-determine that
			 * manually and might detect the page as possibly
			 * shared, for example, if there are other references on
			 * the page or if the page is under writeback. We made
			 * sure that there are no GUP pins on the page that
			 * would rely on it, so for GUP pins this is fine.
			 */
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (anon_exclusive)
				swp_pte = pte_swp_mkexclusive(swp_pte);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed folio,
			 * so it cannot be removed from the page
			 * cache and replaced by a new folio before
			 * mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table
			 * to point at a new folio while a device is
			 * still using this folio.
			 *
			 * See Documentation/mm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(&folio->page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() here: it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock, before mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/mm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();
		folio_put(folio);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int folio_not_mapped(struct folio *folio)
{
	return !folio_mapped(folio);
}

/**
 * try_to_unmap - Try to remove all page table mappings to a folio.
 * @folio: The folio to unmap.
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * folio. It is the caller's responsibility to check if the folio is
 * still mapped if needed (use TTU_SYNC to prevent accounting races).
 *
 * Context: Caller must hold the folio lock.
 */
void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = folio_not_mapped,
		.anon_lock = folio_lock_anon_vma_read,
	};

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(folio, &rwc);
	else
		rmap_walk(folio, &rwc);
}

/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
 * containing migration entries.
 */
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
	pte_t pteval;
	struct page *subpage;
	bool anon_exclusive, ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/*
zap_pte_range() on another cpu,
1836	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
1837	 * try_to_migrate() may return before page_mapped() has become false,
1838	 * if page table locking is skipped: use TTU_SYNC to wait for that.
1839	 */
1840	if (flags & TTU_SYNC)
1841		pvmw.flags = PVMW_SYNC;
1842
1843	/*
1844	 * unmap_page() in mm/huge_memory.c is the only user of migration with
1845	 * TTU_SPLIT_HUGE_PMD and it wants to freeze.
1846	 */
1847	if (flags & TTU_SPLIT_HUGE_PMD)
1848		split_huge_pmd_address(vma, address, true, folio);
1849
1850	/*
1851	 * For THP, we have to assume the worst case, i.e. pmd-level
1852	 * invalidation. For hugetlb, it could be much worse if we need to do
1853	 * pud invalidation in the case of pmd sharing.
1854	 *
1855	 * Note that the page cannot be freed in this function, as the caller
1856	 * of try_to_migrate() must hold a reference on the page.
1857	 */
1858	range.end = vma_address_end(&pvmw);
1859	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1860				address, range.end);
1861	if (folio_test_hugetlb(folio)) {
1862		/*
1863		 * If sharing is possible, start and end will be adjusted
1864		 * accordingly.
1865		 */
1866		adjust_range_if_pmd_sharing_possible(vma, &range.start,
1867						     &range.end);
1868	}
1869	mmu_notifier_invalidate_range_start(&range);
1870
1871	while (page_vma_mapped_walk(&pvmw)) {
1872 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1873		/* PMD-mapped THP migration entry */
1874		if (!pvmw.pte) {
1875			subpage = folio_page(folio,
1876				pmd_pfn(*pvmw.pmd) - folio_pfn(folio));
1877			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
1878					!folio_test_pmd_mappable(folio), folio);
1879
1880			if (set_pmd_migration_entry(&pvmw, subpage)) {
1881				ret = false;
1882				page_vma_mapped_walk_done(&pvmw);
1883				break;
1884			}
1885			continue;
1886		}
1887 #endif
1888
1889		/* Unexpected PMD-mapped THP? */
1890		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
1891
1892		if (folio_is_zone_device(folio)) {
1893			/*
1894			 * Our PTE is a non-present device exclusive entry and
1895			 * calculating the subpage as for the common case would
1896			 * result in an invalid pointer.
1897			 *
1898			 * Since only PAGE_SIZE pages can currently be
1899			 * migrated, just set it to page. This will need to be
1900			 * changed when hugepage migrations to device private
1901			 * memory are supported.
1902			 */
1903			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
1904			subpage = &folio->page;
1905		} else {
1906			subpage = folio_page(folio,
1907					pte_pfn(*pvmw.pte) - folio_pfn(folio));
1908		}
1909		address = pvmw.address;
1910		anon_exclusive = folio_test_anon(folio) &&
1911				 PageAnonExclusive(subpage);
1912
1913		if (folio_test_hugetlb(folio)) {
1914			bool anon = folio_test_anon(folio);
1915
1916			/*
1917			 * huge_pmd_unshare may unmap an entire PMD page.
1918			 * There is no way of knowing exactly which PMDs may
1919			 * be cached for this mm, so we must flush them all.
1920			 * start/end were already adjusted above to cover this
1921			 * range.
1922			 */
1923			flush_cache_range(vma, range.start, range.end);
1924
1925			/*
1926			 * To call huge_pmd_unshare, i_mmap_rwsem must be
1927			 * held in write mode. Caller needs to explicitly
1928			 * do this outside rmap routines.
1929			 *
1930			 * We also must hold the hugetlb vma_lock in write mode.
1931			 * Lock order dictates acquiring vma_lock BEFORE
1932			 * i_mmap_rwsem. We can only trylock here and
1933			 * fail if unsuccessful.
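			 * If the trylock fails we end the walk and return
			 * false: the folio simply stays mapped, and this
			 * migration attempt gives up on it rather than risk
			 * a lock inversion.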
1934			 */
1935			if (!anon) {
1936				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1937				if (!hugetlb_vma_trylock_write(vma)) {
1938					page_vma_mapped_walk_done(&pvmw);
1939					ret = false;
1940					break;
1941				}
1942				if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
1943					hugetlb_vma_unlock_write(vma);
1944					flush_tlb_range(vma,
1945						range.start, range.end);
1946					mmu_notifier_invalidate_range(mm,
1947						range.start, range.end);
1948
1949					/*
1950					 * The refcount of the PMD page was
1951					 * dropped, which is part of the way
1952					 * map counting is done for shared PMDs.
1953					 * Return 'true' here. When there is
1954					 * no other sharing, huge_pmd_unshare
1955					 * returns false and we will unmap the
1956					 * actual page and drop the map count
1957					 * to zero.
1958					 */
1959					page_vma_mapped_walk_done(&pvmw);
1960					break;
1961				}
1962				hugetlb_vma_unlock_write(vma);
1963			}
1964			/* Nuke the hugetlb page table entry */
1965			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
1966		} else {
1967			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1968			/* Nuke the page table entry. */
1969			pteval = ptep_clear_flush(vma, address, pvmw.pte);
1970		}
1971
1972		/* Set the dirty flag on the folio now that the pte is gone. */
1973		if (pte_dirty(pteval))
1974			folio_mark_dirty(folio);
1975
1976		/* Update high watermark before we lower rss */
1977		update_hiwater_rss(mm);
1978
1979		if (folio_is_device_private(folio)) {
1980			unsigned long pfn = folio_pfn(folio);
1981			swp_entry_t entry;
1982			pte_t swp_pte;
1983
1984			if (anon_exclusive)
1985				BUG_ON(page_try_share_anon_rmap(subpage));
1986
1987			/*
1988			 * Store the pfn of the page in a special migration
1989			 * pte. do_swap_page() will wait until the migration
1990			 * pte is removed and then restart fault handling.
1991			 */
1992			entry = pte_to_swp_entry(pteval);
1993			if (is_writable_device_private_entry(entry))
1994				entry = make_writable_migration_entry(pfn);
1995			else if (anon_exclusive)
1996				entry = make_readable_exclusive_migration_entry(pfn);
1997			else
1998				entry = make_readable_migration_entry(pfn);
1999			swp_pte = swp_entry_to_pte(entry);
2000
2001			/*
2002			 * pteval maps a zone device page and is therefore
2003			 * a swap pte.
2004			 */
2005			if (pte_swp_soft_dirty(pteval))
2006				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2007			if (pte_swp_uffd_wp(pteval))
2008				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2009			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
2010			trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
2011						compound_order(&folio->page));
2012			/*
2013			 * No need to invalidate here; it will be synchronized
2014			 * against the special swap migration pte.
2015			 */
2016		} else if (PageHWPoison(subpage)) {
2017			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
2018			if (folio_test_hugetlb(folio)) {
2019				hugetlb_count_sub(folio_nr_pages(folio), mm);
2020				set_huge_pte_at(mm, address, pvmw.pte, pteval);
2021			} else {
2022				dec_mm_counter(mm, mm_counter(&folio->page));
2023				set_pte_at(mm, address, pvmw.pte, pteval);
2024			}
2025
2026		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
2027			/*
2028			 * The guest indicated that the page content is of no
2029			 * interest anymore. Simply discard the pte; vmscan
2030			 * will take care of the rest.
2031			 * A future reference will then fault in a new zero
2032			 * page. When userfaultfd is active, we must not drop
2033			 * this page though, as its main user (postcopy
2034			 * migration) will not expect userfaults on already
2035			 * copied pages.
2036			 */
2037			dec_mm_counter(mm, mm_counter(&folio->page));
2038			/* We have to invalidate as we cleared the pte */
2039			mmu_notifier_invalidate_range(mm, address,
2040						      address + PAGE_SIZE);
2041		} else {
2042			swp_entry_t entry;
2043			pte_t swp_pte;
2044
2045			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
2046				if (folio_test_hugetlb(folio))
2047					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2048				else
2049					set_pte_at(mm, address, pvmw.pte, pteval);
2050				ret = false;
2051				page_vma_mapped_walk_done(&pvmw);
2052				break;
2053			}
2054			VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
2055				       !anon_exclusive, subpage);
2056
2057			/* See page_try_share_anon_rmap(): clear PTE first. */
2058			if (anon_exclusive &&
2059			    page_try_share_anon_rmap(subpage)) {
2060				if (folio_test_hugetlb(folio))
2061					set_huge_pte_at(mm, address, pvmw.pte, pteval);
2062				else
2063					set_pte_at(mm, address, pvmw.pte, pteval);
2064				ret = false;
2065				page_vma_mapped_walk_done(&pvmw);
2066				break;
2067			}
2068
2069			/*
2070			 * Store the pfn of the page in a special migration
2071			 * pte. do_swap_page() will wait until the migration
2072			 * pte is removed and then restart fault handling.
2073			 */
2074			if (pte_write(pteval))
2075				entry = make_writable_migration_entry(
2076							page_to_pfn(subpage));
2077			else if (anon_exclusive)
2078				entry = make_readable_exclusive_migration_entry(
2079							page_to_pfn(subpage));
2080			else
2081				entry = make_readable_migration_entry(
2082							page_to_pfn(subpage));
2083			if (pte_young(pteval))
2084				entry = make_migration_entry_young(entry);
2085			if (pte_dirty(pteval))
2086				entry = make_migration_entry_dirty(entry);
2087			swp_pte = swp_entry_to_pte(entry);
2088			if (pte_soft_dirty(pteval))
2089				swp_pte = pte_swp_mksoft_dirty(swp_pte);
2090			if (pte_uffd_wp(pteval))
2091				swp_pte = pte_swp_mkuffd_wp(swp_pte);
2092			if (folio_test_hugetlb(folio))
2093				set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
2094			else
2095				set_pte_at(mm, address, pvmw.pte, swp_pte);
2096			trace_set_migration_pte(address, pte_val(swp_pte),
2097						compound_order(&folio->page));
2098			/*
2099			 * No need to invalidate here; it will be synchronized
2100			 * against the special swap migration pte.
2101			 */
2102		}
2103
2104		/*
2105		 * No need to call mmu_notifier_invalidate_range(); it has been
2106		 * done above for all cases requiring it to happen under the
2107		 * page table lock, before mmu_notifier_invalidate_range_end().
2108		 *
2109		 * See Documentation/mm/mmu_notifier.rst
2110		 */
2111		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
2112		if (vma->vm_flags & VM_LOCKED)
2113			mlock_page_drain_local();
2114		folio_put(folio);
2115	}
2116
2117	mmu_notifier_invalidate_range_end(&range);
2118
2119	return ret;
2120 }
2121
2122 /**
2123  * try_to_migrate - try to replace all page table mappings with swap entries
2124  * @folio: the folio to replace page table entries for
2125  * @flags: action and flags
2126  *
2127  * Tries to remove all the page table entries which are mapping this folio and
2128  * replace them with special swap entries. Caller must hold the folio lock.
2129  */
2130 void try_to_migrate(struct folio *folio, enum ttu_flags flags)
2131 {
2132	struct rmap_walk_control rwc = {
2133		.rmap_one = try_to_migrate_one,
2134		.arg = (void *)flags,
2135		.done = folio_not_mapped,
2136		.anon_lock = folio_lock_anon_vma_read,
2137	};
2138
2139	/*
2140	 * Migration always ignores mlock and only supports the TTU_RMAP_LOCKED,
2141	 * TTU_SPLIT_HUGE_PMD and TTU_SYNC flags.
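	 *
	 * As an illustration only (not a quote of the mm/migrate.c call
	 * chain; "src" stands for whatever locked folio is being migrated),
	 * a caller is expected to look roughly like:
	 *
	 *	folio_lock(src);
	 *	try_to_migrate(src, TTU_SYNC);
	 *	if (!folio_mapped(src)) {
	 *		... copy src to its destination, then
	 *		    remove_migration_ptes() ...
	 *	}
	 *	folio_unlock(src);
	 *
	 * i.e. the caller re-checks folio_mapped() itself, because a false
	 * return from try_to_migrate_one() only ends the rmap walk early.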
2142 */ 2143 if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | 2144 TTU_SYNC))) 2145 return; 2146 2147 if (folio_is_zone_device(folio) && 2148 (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) 2149 return; 2150 2151 /* 2152 * During exec, a temporary VMA is setup and later moved. 2153 * The VMA is moved under the anon_vma lock but not the 2154 * page tables leading to a race where migration cannot 2155 * find the migration ptes. Rather than increasing the 2156 * locking requirements of exec(), migration skips 2157 * temporary VMAs until after exec() completes. 2158 */ 2159 if (!folio_test_ksm(folio) && folio_test_anon(folio)) 2160 rwc.invalid_vma = invalid_migration_vma; 2161 2162 if (flags & TTU_RMAP_LOCKED) 2163 rmap_walk_locked(folio, &rwc); 2164 else 2165 rmap_walk(folio, &rwc); 2166 } 2167 2168 #ifdef CONFIG_DEVICE_PRIVATE 2169 struct make_exclusive_args { 2170 struct mm_struct *mm; 2171 unsigned long address; 2172 void *owner; 2173 bool valid; 2174 }; 2175 2176 static bool page_make_device_exclusive_one(struct folio *folio, 2177 struct vm_area_struct *vma, unsigned long address, void *priv) 2178 { 2179 struct mm_struct *mm = vma->vm_mm; 2180 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); 2181 struct make_exclusive_args *args = priv; 2182 pte_t pteval; 2183 struct page *subpage; 2184 bool ret = true; 2185 struct mmu_notifier_range range; 2186 swp_entry_t entry; 2187 pte_t swp_pte; 2188 2189 mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, 2190 vma->vm_mm, address, min(vma->vm_end, 2191 address + folio_size(folio)), 2192 args->owner); 2193 mmu_notifier_invalidate_range_start(&range); 2194 2195 while (page_vma_mapped_walk(&pvmw)) { 2196 /* Unexpected PMD-mapped THP? */ 2197 VM_BUG_ON_FOLIO(!pvmw.pte, folio); 2198 2199 if (!pte_present(*pvmw.pte)) { 2200 ret = false; 2201 page_vma_mapped_walk_done(&pvmw); 2202 break; 2203 } 2204 2205 subpage = folio_page(folio, 2206 pte_pfn(*pvmw.pte) - folio_pfn(folio)); 2207 address = pvmw.address; 2208 2209 /* Nuke the page table entry. */ 2210 flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); 2211 pteval = ptep_clear_flush(vma, address, pvmw.pte); 2212 2213 /* Set the dirty flag on the folio now the pte is gone. */ 2214 if (pte_dirty(pteval)) 2215 folio_mark_dirty(folio); 2216 2217 /* 2218 * Check that our target page is still mapped at the expected 2219 * address. 2220 */ 2221 if (args->mm == mm && args->address == address && 2222 pte_write(pteval)) 2223 args->valid = true; 2224 2225 /* 2226 * Store the pfn of the page in a special migration 2227 * pte. do_swap_page() will wait until the migration 2228 * pte is removed and then restart fault handling. 2229 */ 2230 if (pte_write(pteval)) 2231 entry = make_writable_device_exclusive_entry( 2232 page_to_pfn(subpage)); 2233 else 2234 entry = make_readable_device_exclusive_entry( 2235 page_to_pfn(subpage)); 2236 swp_pte = swp_entry_to_pte(entry); 2237 if (pte_soft_dirty(pteval)) 2238 swp_pte = pte_swp_mksoft_dirty(swp_pte); 2239 if (pte_uffd_wp(pteval)) 2240 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2241 2242 set_pte_at(mm, address, pvmw.pte, swp_pte); 2243 2244 /* 2245 * There is a reference on the page for the swap entry which has 2246 * been removed, so shouldn't take another. 2247 */ 2248 page_remove_rmap(subpage, vma, false); 2249 } 2250 2251 mmu_notifier_invalidate_range_end(&range); 2252 2253 return ret; 2254 } 2255 2256 /** 2257 * folio_make_device_exclusive - Mark the folio exclusively owned by a device. 
2258 * @folio: The folio to replace page table entries for. 2259 * @mm: The mm_struct where the folio is expected to be mapped. 2260 * @address: Address where the folio is expected to be mapped. 2261 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier callbacks 2262 * 2263 * Tries to remove all the page table entries which are mapping this 2264 * folio and replace them with special device exclusive swap entries to 2265 * grant a device exclusive access to the folio. 2266 * 2267 * Context: Caller must hold the folio lock. 2268 * Return: false if the page is still mapped, or if it could not be unmapped 2269 * from the expected address. Otherwise returns true (success). 2270 */ 2271 static bool folio_make_device_exclusive(struct folio *folio, 2272 struct mm_struct *mm, unsigned long address, void *owner) 2273 { 2274 struct make_exclusive_args args = { 2275 .mm = mm, 2276 .address = address, 2277 .owner = owner, 2278 .valid = false, 2279 }; 2280 struct rmap_walk_control rwc = { 2281 .rmap_one = page_make_device_exclusive_one, 2282 .done = folio_not_mapped, 2283 .anon_lock = folio_lock_anon_vma_read, 2284 .arg = &args, 2285 }; 2286 2287 /* 2288 * Restrict to anonymous folios for now to avoid potential writeback 2289 * issues. 2290 */ 2291 if (!folio_test_anon(folio)) 2292 return false; 2293 2294 rmap_walk(folio, &rwc); 2295 2296 return args.valid && !folio_mapcount(folio); 2297 } 2298 2299 /** 2300 * make_device_exclusive_range() - Mark a range for exclusive use by a device 2301 * @mm: mm_struct of associated target process 2302 * @start: start of the region to mark for exclusive device access 2303 * @end: end address of region 2304 * @pages: returns the pages which were successfully marked for exclusive access 2305 * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering 2306 * 2307 * Returns: number of pages found in the range by GUP. A page is marked for 2308 * exclusive access only if the page pointer is non-NULL. 2309 * 2310 * This function finds ptes mapping page(s) to the given address range, locks 2311 * them and replaces mappings with special swap entries preventing userspace CPU 2312 * access. On fault these entries are replaced with the original mapping after 2313 * calling MMU notifiers. 2314 * 2315 * A driver using this to program access from a device must use a mmu notifier 2316 * critical section to hold a device specific lock during programming. Once 2317 * programming is complete it should drop the page lock and reference after 2318 * which point CPU access to the page will revoke the exclusive access. 
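 *
 * As an illustration only (hypothetical driver code, not an in-tree
 * user; my_driver, driver_lock and program_device_pte() are made-up
 * names), the expected calling pattern for a single page is roughly:
 *
 *	struct page *page;
 *	long ret;
 *
 *	mmap_read_lock(mm);
 *	ret = make_device_exclusive_range(mm, addr, addr + PAGE_SIZE,
 *					  &page, &my_driver);
 *	mmap_read_unlock(mm);
 *	if (ret == 1 && page) {
 *		mutex_lock(&driver_lock);
 *		program_device_pte(page);
 *		mutex_unlock(&driver_lock);
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *
 * where the driver's MMU_NOTIFY_EXCLUSIVE notifier callback filters on
 * the same @owner pointer and takes driver_lock before tearing the
 * device mapping down again.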
2319 */ 2320 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, 2321 unsigned long end, struct page **pages, 2322 void *owner) 2323 { 2324 long npages = (end - start) >> PAGE_SHIFT; 2325 long i; 2326 2327 npages = get_user_pages_remote(mm, start, npages, 2328 FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, 2329 pages, NULL, NULL); 2330 if (npages < 0) 2331 return npages; 2332 2333 for (i = 0; i < npages; i++, start += PAGE_SIZE) { 2334 struct folio *folio = page_folio(pages[i]); 2335 if (PageTail(pages[i]) || !folio_trylock(folio)) { 2336 folio_put(folio); 2337 pages[i] = NULL; 2338 continue; 2339 } 2340 2341 if (!folio_make_device_exclusive(folio, mm, start, owner)) { 2342 folio_unlock(folio); 2343 folio_put(folio); 2344 pages[i] = NULL; 2345 } 2346 } 2347 2348 return npages; 2349 } 2350 EXPORT_SYMBOL_GPL(make_device_exclusive_range); 2351 #endif 2352 2353 void __put_anon_vma(struct anon_vma *anon_vma) 2354 { 2355 struct anon_vma *root = anon_vma->root; 2356 2357 anon_vma_free(anon_vma); 2358 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 2359 anon_vma_free(root); 2360 } 2361 2362 static struct anon_vma *rmap_walk_anon_lock(struct folio *folio, 2363 struct rmap_walk_control *rwc) 2364 { 2365 struct anon_vma *anon_vma; 2366 2367 if (rwc->anon_lock) 2368 return rwc->anon_lock(folio, rwc); 2369 2370 /* 2371 * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() 2372 * because that depends on page_mapped(); but not all its usages 2373 * are holding mmap_lock. Users without mmap_lock are required to 2374 * take a reference count to prevent the anon_vma disappearing 2375 */ 2376 anon_vma = folio_anon_vma(folio); 2377 if (!anon_vma) 2378 return NULL; 2379 2380 if (anon_vma_trylock_read(anon_vma)) 2381 goto out; 2382 2383 if (rwc->try_lock) { 2384 anon_vma = NULL; 2385 rwc->contended = true; 2386 goto out; 2387 } 2388 2389 anon_vma_lock_read(anon_vma); 2390 out: 2391 return anon_vma; 2392 } 2393 2394 /* 2395 * rmap_walk_anon - do something to anonymous page using the object-based 2396 * rmap method 2397 * @page: the page to be handled 2398 * @rwc: control variable according to each walk type 2399 * 2400 * Find all the mappings of a page using the mapping pointer and the vma chains 2401 * contained in the anon_vma struct it points to. 2402 */ 2403 static void rmap_walk_anon(struct folio *folio, 2404 struct rmap_walk_control *rwc, bool locked) 2405 { 2406 struct anon_vma *anon_vma; 2407 pgoff_t pgoff_start, pgoff_end; 2408 struct anon_vma_chain *avc; 2409 2410 if (locked) { 2411 anon_vma = folio_anon_vma(folio); 2412 /* anon_vma disappear under us? 
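		 * It cannot: the caller of rmap_walk_locked() already holds
		 * the anon_vma lock, so a NULL anon_vma here would be a bug
		 * in the caller.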
*/ 2413 VM_BUG_ON_FOLIO(!anon_vma, folio); 2414 } else { 2415 anon_vma = rmap_walk_anon_lock(folio, rwc); 2416 } 2417 if (!anon_vma) 2418 return; 2419 2420 pgoff_start = folio_pgoff(folio); 2421 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2422 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 2423 pgoff_start, pgoff_end) { 2424 struct vm_area_struct *vma = avc->vma; 2425 unsigned long address = vma_address(&folio->page, vma); 2426 2427 VM_BUG_ON_VMA(address == -EFAULT, vma); 2428 cond_resched(); 2429 2430 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2431 continue; 2432 2433 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2434 break; 2435 if (rwc->done && rwc->done(folio)) 2436 break; 2437 } 2438 2439 if (!locked) 2440 anon_vma_unlock_read(anon_vma); 2441 } 2442 2443 /* 2444 * rmap_walk_file - do something to file page using the object-based rmap method 2445 * @page: the page to be handled 2446 * @rwc: control variable according to each walk type 2447 * 2448 * Find all the mappings of a page using the mapping pointer and the vma chains 2449 * contained in the address_space struct it points to. 2450 */ 2451 static void rmap_walk_file(struct folio *folio, 2452 struct rmap_walk_control *rwc, bool locked) 2453 { 2454 struct address_space *mapping = folio_mapping(folio); 2455 pgoff_t pgoff_start, pgoff_end; 2456 struct vm_area_struct *vma; 2457 2458 /* 2459 * The page lock not only makes sure that page->mapping cannot 2460 * suddenly be NULLified by truncation, it makes sure that the 2461 * structure at mapping cannot be freed and reused yet, 2462 * so we can safely take mapping->i_mmap_rwsem. 2463 */ 2464 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 2465 2466 if (!mapping) 2467 return; 2468 2469 pgoff_start = folio_pgoff(folio); 2470 pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; 2471 if (!locked) { 2472 if (i_mmap_trylock_read(mapping)) 2473 goto lookup; 2474 2475 if (rwc->try_lock) { 2476 rwc->contended = true; 2477 return; 2478 } 2479 2480 i_mmap_lock_read(mapping); 2481 } 2482 lookup: 2483 vma_interval_tree_foreach(vma, &mapping->i_mmap, 2484 pgoff_start, pgoff_end) { 2485 unsigned long address = vma_address(&folio->page, vma); 2486 2487 VM_BUG_ON_VMA(address == -EFAULT, vma); 2488 cond_resched(); 2489 2490 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2491 continue; 2492 2493 if (!rwc->rmap_one(folio, vma, address, rwc->arg)) 2494 goto done; 2495 if (rwc->done && rwc->done(folio)) 2496 goto done; 2497 } 2498 2499 done: 2500 if (!locked) 2501 i_mmap_unlock_read(mapping); 2502 } 2503 2504 void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) 2505 { 2506 if (unlikely(folio_test_ksm(folio))) 2507 rmap_walk_ksm(folio, rwc); 2508 else if (folio_test_anon(folio)) 2509 rmap_walk_anon(folio, rwc, false); 2510 else 2511 rmap_walk_file(folio, rwc, false); 2512 } 2513 2514 /* Like rmap_walk, but caller holds relevant rmap lock */ 2515 void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) 2516 { 2517 /* no ksm support for now */ 2518 VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); 2519 if (folio_test_anon(folio)) 2520 rmap_walk_anon(folio, rwc, true); 2521 else 2522 rmap_walk_file(folio, rwc, true); 2523 } 2524 2525 #ifdef CONFIG_HUGETLB_PAGE 2526 /* 2527 * The following two functions are for anonymous (private mapped) hugepages. 2528 * Unlike common anonymous pages, anonymous hugepages have no accounting code 2529 * and no lru code, because we handle hugepages differently from common pages. 
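 * Only the compound mapcount is maintained here: "first" below is true
 * when that count goes from -1 (unmapped) to 0, i.e. on the very first
 * mapping of the hugepage.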
2530 * 2531 * RMAP_COMPOUND is ignored. 2532 */ 2533 void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, 2534 unsigned long address, rmap_t flags) 2535 { 2536 struct anon_vma *anon_vma = vma->anon_vma; 2537 int first; 2538 2539 BUG_ON(!PageLocked(page)); 2540 BUG_ON(!anon_vma); 2541 /* address might be in next vma when migration races vma_adjust */ 2542 first = atomic_inc_and_test(compound_mapcount_ptr(page)); 2543 VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); 2544 VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); 2545 if (first) 2546 __page_set_anon_rmap(page, vma, address, 2547 !!(flags & RMAP_EXCLUSIVE)); 2548 } 2549 2550 void hugepage_add_new_anon_rmap(struct page *page, 2551 struct vm_area_struct *vma, unsigned long address) 2552 { 2553 BUG_ON(address < vma->vm_start || address >= vma->vm_end); 2554 /* increment count (starts at -1) */ 2555 atomic_set(compound_mapcount_ptr(page), 0); 2556 ClearHPageRestoreReserve(page); 2557 __page_set_anon_rmap(page, vma, address, 1); 2558 } 2559 #endif /* CONFIG_HUGETLB_PAGE */ 2560
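
/*
 * Illustrative sketch only (not an in-tree user): a minimal client of
 * the rmap walk machinery above supplies an rmap_one callback and lets
 * rmap_walk() pick the anon, file or KSM flavour of the walk for it.
 * count_one() and count_mappings() are made-up example names:
 *
 *	static bool count_one(struct folio *folio, struct vm_area_struct *vma,
 *			      unsigned long address, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return true;	returning false would stop the walk early
 *	}
 *
 *	static int count_mappings(struct folio *folio)
 *	{
 *		int count = 0;
 *		struct rmap_walk_control rwc = {
 *			.rmap_one = count_one,
 *			.arg = &count,
 *		};
 *
 *		folio_lock(folio);
 *		rmap_walk(folio, &rwc);
 *		folio_unlock(folio);
 *		return count;
 *	}
 */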