/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     i_pages lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

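/*
 * For reference, the inline fast path mentioned above is a trivial wrapper.
 * This is a sketch of the anon_vma_prepare() helper as declared in
 * include/linux/rmap.h around this kernel version, shown for illustration
 * only:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *
 *		return __anon_vma_prepare(vma);
 *	}
 */
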
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy into an endless linear chain in the
 * case of a constantly forking task. On the other hand, an anon_vma with
 * more than one child isn't reused even if there is no live vma, so the rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first
		 * child will always reuse it. The root anon_vma is never
		 * reused: it has a self-parent reference and at least one
		 * child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

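/*
 * For illustration: the typical caller of anon_vma_fork() is the fork path.
 * A simplified sketch of the relevant fragment of dup_mmap() in
 * kernel/fork.c (names and error labels abbreviated, not verbatim code):
 *
 *	tmp = vm_area_dup(mpnt);		// copy the parent's vma
 *	...
 *	if (anon_vma_fork(tmp, mpnt))		// clone + new child anon_vma
 *		goto fail_nomem_anon_vma_fork;
 */
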
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since the anon_vma slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

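/*
 * For illustration, the batching contract above is the pattern that
 * try_to_unmap_one() below follows (simplified pseudo-flow, not verbatim):
 *
 *	// under the pte lock, for each page in the reclaim batch:
 *	pteval = ptep_get_and_clear(mm, address, pte);
 *	set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 *	...
 *	// later, before starting IO on or freeing the pages:
 *	try_to_unmap_flush_dirty();	// or try_to_unmap_flush()
 */
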
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the flush by batching it */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

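/*
 * For reference, __vma_address() above is the usual linear-mapping
 * arithmetic. A sketch matching the helper in mm/internal.h around this
 * kernel version, shown for illustration only:
 *
 *	static inline unsigned long
 *	__vma_address(struct page *page, struct vm_area_struct *vma)
 *	{
 *		pgoff_t pgoff = page_to_pgoff(page);
 *
 *		return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 *	}
 */
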
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find a pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the VMAs which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

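/*
 * For illustration, the main consumer of page_referenced() is page reclaim.
 * A simplified sketch in the spirit of vmscan's page_check_references(),
 * not verbatim kernel code:
 *
 *	unsigned long vm_flags;
 *	int referenced_ptes;
 *
 *	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
 *					  &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;	// mlocked: let try_to_unmap() handle it
 *	if (referenced_ptes)
 *		...			// keep or activate the page
 */
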
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, vma->vm_mm, address,
				min(vma->vm_end, address +
				    (PAGE_SIZE << compound_order(page))));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		unsigned long cstart;
		int ret = 0;

		cstart = address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			cstart &= PMD_MASK;
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

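/*
 * For illustration: writeback uses page_mkclean() to transfer the dirty
 * state from the page tables to the page itself. A simplified sketch in the
 * spirit of clear_page_dirty_for_io() in mm/page-writeback.c, not verbatim:
 *
 *	// page is locked by the caller
 *	if (page_mkclean(page))		// write-protected every mapping
 *		set_page_dirty(page);	// dirtiness now tracked on the page
 */
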
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page or Hugepage to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * the pte lock (a spinlock) is held, which implies preemption
		 * is disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * is disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * is disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page cannot be freed in this function as a call to
	 * try_to_unmap() must hold a reference on the page.
	 */
	mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start,
				min(vma->vm_end, vma->vm_start +
				    (PAGE_SIZE << compound_order(page))));
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 *
		 * If called for a huge page, caller must hold i_mmap_rwsem
		 * in write mode as it is possible to call huge_pmd_unshare.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page)) {
			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page. There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all. start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs. Return 'true'
				 * here. When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop the map
				 * count to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 */
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here; it will synchronize
			 * against the special swap migration pte.
			 */
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to the page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at a new page while a device still is using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock, before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}

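/*
 * For illustration, the classic caller of try_to_unmap() is the pageout
 * path. A simplified sketch in the spirit of shrink_page_list() in
 * mm/vmscan.c, not verbatim kernel code:
 *
 *	if (page_mapped(page)) {
 *		enum ttu_flags flags = TTU_BATCH_FLUSH;
 *
 *		if (!try_to_unmap(page, flags))
 *			goto activate_locked;	// keep the page on the LRU
 *	}
 */
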
1743 */ 1744 1745 void try_to_munlock(struct page *page) 1746 { 1747 struct rmap_walk_control rwc = { 1748 .rmap_one = try_to_unmap_one, 1749 .arg = (void *)TTU_MUNLOCK, 1750 .done = page_not_mapped, 1751 .anon_lock = page_lock_anon_vma_read, 1752 1753 }; 1754 1755 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 1756 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 1757 1758 rmap_walk(page, &rwc); 1759 } 1760 1761 void __put_anon_vma(struct anon_vma *anon_vma) 1762 { 1763 struct anon_vma *root = anon_vma->root; 1764 1765 anon_vma_free(anon_vma); 1766 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 1767 anon_vma_free(root); 1768 } 1769 1770 static struct anon_vma *rmap_walk_anon_lock(struct page *page, 1771 struct rmap_walk_control *rwc) 1772 { 1773 struct anon_vma *anon_vma; 1774 1775 if (rwc->anon_lock) 1776 return rwc->anon_lock(page); 1777 1778 /* 1779 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 1780 * because that depends on page_mapped(); but not all its usages 1781 * are holding mmap_sem. Users without mmap_sem are required to 1782 * take a reference count to prevent the anon_vma disappearing 1783 */ 1784 anon_vma = page_anon_vma(page); 1785 if (!anon_vma) 1786 return NULL; 1787 1788 anon_vma_lock_read(anon_vma); 1789 return anon_vma; 1790 } 1791 1792 /* 1793 * rmap_walk_anon - do something to anonymous page using the object-based 1794 * rmap method 1795 * @page: the page to be handled 1796 * @rwc: control variable according to each walk type 1797 * 1798 * Find all the mappings of a page using the mapping pointer and the vma chains 1799 * contained in the anon_vma struct it points to. 1800 * 1801 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1802 * where the page was found will be held for write. So, we won't recheck 1803 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1804 * LOCKED. 1805 */ 1806 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 1807 bool locked) 1808 { 1809 struct anon_vma *anon_vma; 1810 pgoff_t pgoff_start, pgoff_end; 1811 struct anon_vma_chain *avc; 1812 1813 if (locked) { 1814 anon_vma = page_anon_vma(page); 1815 /* anon_vma disappear under us? */ 1816 VM_BUG_ON_PAGE(!anon_vma, page); 1817 } else { 1818 anon_vma = rmap_walk_anon_lock(page, rwc); 1819 } 1820 if (!anon_vma) 1821 return; 1822 1823 pgoff_start = page_to_pgoff(page); 1824 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; 1825 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 1826 pgoff_start, pgoff_end) { 1827 struct vm_area_struct *vma = avc->vma; 1828 unsigned long address = vma_address(page, vma); 1829 1830 cond_resched(); 1831 1832 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 1833 continue; 1834 1835 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 1836 break; 1837 if (rwc->done && rwc->done(page)) 1838 break; 1839 } 1840 1841 if (!locked) 1842 anon_vma_unlock_read(anon_vma); 1843 } 1844 1845 /* 1846 * rmap_walk_file - do something to file page using the object-based rmap method 1847 * @page: the page to be handled 1848 * @rwc: control variable according to each walk type 1849 * 1850 * Find all the mappings of a page using the mapping pointer and the vma chains 1851 * contained in the address_space struct it points to. 1852 * 1853 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1854 * where the page was found will be held for write. So, we won't recheck 1855 * vm_flags for that VMA. 
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */