/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem, mapping->i_mutex	(memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read().
	 * This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
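 *
 * For reference, the common fast path mentioned above is the inline wrapper
 * declared in <linux/rmap.h>; a minimal sketch, not the authoritative
 * definition:
 *
 *	static inline int anon_vma_prepare(struct vm_area_struct *vma)
 *	{
 *		if (likely(vma->anon_vma))
 *			return 0;
 *		return __anon_vma_prepare(vma);
 *	}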
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;

	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy into an endless linear chain for a
 * constantly forking task. On the other hand, an anon_vma with more than one
 * child isn't reused even if there is no live vma, so the rmap walker has a
 * good chance of avoiding a scan of the whole hierarchy when it searches for
 * where a page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
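
/*
 * A typical caller pattern for page_get_anon_vma() looks roughly like the
 * sketch below (illustrative only, loosely modelled on the migration path,
 * not a verbatim copy of any particular caller):
 *
 *	anon_vma = page_get_anon_vma(page);
 *	if (anon_vma) {
 *		... walk or lock the anon_vma as needed ...
 *		put_anon_vma(anon_vma);
 *	}
 *
 * The reference pins the anon_vma structure itself; it does not pin the
 * page's mapping, so page_mapped() must still be (re)checked as described
 * above.
 */
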
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we have to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. If a PTE
 * was dirty when it was unmapped, it is important that it is flushed
 * before any IO is initiated on the page, to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
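	 *
	 * In the reclaim path this typically pairs up roughly as follows
	 * (sketch only, assuming the shrink_page_list() caller in
	 * mm/vmscan.c):
	 *
	 *	try_to_unmap(page, ttu_flags | TTU_BATCH_FLUSH);
	 *	...
	 *	try_to_unmap_flush_dirty();	// before starting writeback
	 *	...
	 *	try_to_unmap_flush();		// before freeing the batch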
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the batched flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;

	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
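 *
 * A typical reclaim-side use looks roughly like this (illustrative sketch,
 * loosely based on page_check_references() in mm/vmscan.c):
 *
 *	referenced = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		... treat the page as unreclaimable ...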
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	int *cleaned = arg;
	bool invalidation_needed = false;

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, pvmw.address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, pvmw.address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, pvmw.address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, pvmw.address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, pvmw.address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, pvmw.address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret) {
			(*cleaned)++;
			invalidation_needed = true;
		}
	}

	if (invalidation_needed) {
		mmu_notifier_invalidate_range(vma->vm_mm, address,
				address + (PAGE_SIZE << compound_order(page)));
	}

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}
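
/*
 * For reference, the encoding written above is decoded by readers roughly
 * as follows (sketch of the PageAnon()/page_anon_vma() helpers, not their
 * authoritative definitions):
 *
 *	mapping = READ_ONCE(page->mapping);
 *	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)
 *			((unsigned long)mapping - PAGE_MAPPING_ANON);
 */
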
/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;

		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
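
/*
 * Note on the mapcount convention used above and below: both page->_mapcount
 * and the compound mapcount start at -1 for an unmapped page. So
 * atomic_inc_and_test() returning true means this was the first mapping
 * added, and atomic_add_negative(-1, ...) returning true on removal means
 * the last mapping just went away.
 */
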
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true, invalidation_needed = false;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_MIGRATION, page);
	}

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, pvmw.address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, pvmw.address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, pvmw.address,
						    pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);

				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, pvmw.address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & TTU_MIGRATION)) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, pvmw.address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
		invalidation_needed = true;
	}

	if (invalidation_needed)
		mmu_notifier_invalidate_range(mm, address,
				address + (PAGE_SIZE << compound_order(page)));
	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
};

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code. Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
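 *
 * The expected call site is the munlock path; roughly (sketch only, assuming
 * the __munlock_isolated_page() caller in mm/mlock.c):
 *
 *	if (page_mapcount(page) > 1)
 *		try_to_munlock(page);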
1580 */ 1581 1582 void try_to_munlock(struct page *page) 1583 { 1584 struct rmap_walk_control rwc = { 1585 .rmap_one = try_to_unmap_one, 1586 .arg = (void *)TTU_MUNLOCK, 1587 .done = page_not_mapped, 1588 .anon_lock = page_lock_anon_vma_read, 1589 1590 }; 1591 1592 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); 1593 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); 1594 1595 rmap_walk(page, &rwc); 1596 } 1597 1598 void __put_anon_vma(struct anon_vma *anon_vma) 1599 { 1600 struct anon_vma *root = anon_vma->root; 1601 1602 anon_vma_free(anon_vma); 1603 if (root != anon_vma && atomic_dec_and_test(&root->refcount)) 1604 anon_vma_free(root); 1605 } 1606 1607 static struct anon_vma *rmap_walk_anon_lock(struct page *page, 1608 struct rmap_walk_control *rwc) 1609 { 1610 struct anon_vma *anon_vma; 1611 1612 if (rwc->anon_lock) 1613 return rwc->anon_lock(page); 1614 1615 /* 1616 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() 1617 * because that depends on page_mapped(); but not all its usages 1618 * are holding mmap_sem. Users without mmap_sem are required to 1619 * take a reference count to prevent the anon_vma disappearing 1620 */ 1621 anon_vma = page_anon_vma(page); 1622 if (!anon_vma) 1623 return NULL; 1624 1625 anon_vma_lock_read(anon_vma); 1626 return anon_vma; 1627 } 1628 1629 /* 1630 * rmap_walk_anon - do something to anonymous page using the object-based 1631 * rmap method 1632 * @page: the page to be handled 1633 * @rwc: control variable according to each walk type 1634 * 1635 * Find all the mappings of a page using the mapping pointer and the vma chains 1636 * contained in the anon_vma struct it points to. 1637 * 1638 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1639 * where the page was found will be held for write. So, we won't recheck 1640 * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1641 * LOCKED. 1642 */ 1643 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, 1644 bool locked) 1645 { 1646 struct anon_vma *anon_vma; 1647 pgoff_t pgoff_start, pgoff_end; 1648 struct anon_vma_chain *avc; 1649 1650 if (locked) { 1651 anon_vma = page_anon_vma(page); 1652 /* anon_vma disappear under us? */ 1653 VM_BUG_ON_PAGE(!anon_vma, page); 1654 } else { 1655 anon_vma = rmap_walk_anon_lock(page, rwc); 1656 } 1657 if (!anon_vma) 1658 return; 1659 1660 pgoff_start = page_to_pgoff(page); 1661 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; 1662 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, 1663 pgoff_start, pgoff_end) { 1664 struct vm_area_struct *vma = avc->vma; 1665 unsigned long address = vma_address(page, vma); 1666 1667 cond_resched(); 1668 1669 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 1670 continue; 1671 1672 if (!rwc->rmap_one(page, vma, address, rwc->arg)) 1673 break; 1674 if (rwc->done && rwc->done(page)) 1675 break; 1676 } 1677 1678 if (!locked) 1679 anon_vma_unlock_read(anon_vma); 1680 } 1681 1682 /* 1683 * rmap_walk_file - do something to file page using the object-based rmap method 1684 * @page: the page to be handled 1685 * @rwc: control variable according to each walk type 1686 * 1687 * Find all the mappings of a page using the mapping pointer and the vma chains 1688 * contained in the address_space struct it points to. 1689 * 1690 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1691 * where the page was found will be held for write. So, we won't recheck 1692 * vm_flags for that VMA. 
 * LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */