/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hash.h>
#include <linux/freezer.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
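 *
 * As an illustrative walk-through of the flow implemented below: a candidate
 * page is first compared against the stable tree; failing that, it is only
 * inserted into the unstable tree if its checksum is unchanged since the
 * previous pass; and when an identical page is already sitting in the
 * unstable tree, the two are merged into a single write-protected ksm page
 * whose rmap_items move to the stable tree, which later identical pages can
 * then join directly.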
 */

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page
 */
struct stable_node {
	struct rb_node node;
	struct hlist_head hlist;
	unsigned long kpfn;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_SHIFT 10
#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}

static struct page *page_trans_compound_anon(struct page *page)
{
	if (PageTransCompound(page)) {
		struct page *head = compound_trans_head(page);
		/*
		 * head may actually be split and freed from under
		 * us but it's ok here.
		 */
		if (PageAnon(head))
			return head;
	}
	return NULL;
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	rb_erase(&stable_node->node, &root_stable_tree);
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 *
 * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
 * but this is different - made simpler by ksm_thread_mutex being held, but
 * interesting for assuming that no other use of the struct page could ever
 * put our expected_mapping into page->mapping (or a field of the union which
 * coincides with page->mapping).  The RCU calls are not for KSM at all, but
 * to keep the page_count protocol described with page_cache_get_speculative.
 *
 * Note: it is possible that get_ksm_page() will return NULL one moment,
 * then page the next, if the page is in between page_freeze_refs() and
 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node)
{
	struct page *page;
	void *expected_mapping;

	page = pfn_to_page(stable_node->kpfn);
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	rcu_read_lock();
	if (page->mapping != expected_mapping)
		goto stale;
	if (!get_page_unless_zero(page))
		goto stale;
	if (page->mapping != expected_mapping) {
		put_page(page);
		goto stale;
	}
	rcu_read_unlock();
	return page;
stale:
	rcu_read_unlock();
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node);
		if (!page)
			goto out;

		lock_page(page);
		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page, KM_USER0);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr, KM_USER0);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1, KM_USER0);
	addr2 = kmap_atomic(page2, KM_USER1);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2, KM_USER1);
	kunmap_atomic(addr1, KM_USER0);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma: vma that holds the pte pointing to page
 * @page: the page we are replacing by kpage
 * @kpage: the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	BUG_ON(pmd_trans_huge(*pmd));
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}

static int page_trans_compound_anon_split(struct page *page)
{
	int ret = 0;
	struct page *transhuge_head = page_trans_compound_anon(page);
	if (transhuge_head) {
		/* Get the reference on the head to split it. */
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page ran
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page ran from under us. */
			ret = 1;
	}
	return ret;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
		goto out;
	BUG_ON(PageTransCompound(page));
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	struct rb_node *node = root_stable_tree.rb_node;
	struct stable_node *stable_node;

	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return page;
	}

	while (node) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);
		tree_page = get_ksm_page(stable_node);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(page, tree_page);

		if (ret < 0) {
			put_page(tree_page);
			node = node->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			node = node->rb_right;
		} else
			return tree_page;
	}

	return NULL;
}

/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	struct rb_node **new = &root_stable_tree.rb_node;
	struct rb_node *parent = NULL;
	struct stable_node *stable_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree);

	INIT_HLIST_HEAD(&stable_node->hlist);

	stable_node->kpfn = page_to_pfn(kpage);
	set_page_stable_node(kpage, stable_node);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)

{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (IS_ERR_OR_NULL(tree_page))
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_unstable_tree);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage) {
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage));
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
		if (kpage) {
			remove_rmap_item_from_tree(tree_rmap_item);

			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}

static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}

	mm = slot->mm;
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		vma = NULL;
	else
		vma = find_vma(mm, ksm_scan.address);

	for (; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (IS_ERR_OR_NULL(*page)) {
				ksm_scan.address += PAGE_SIZE;
				cond_resched();
				continue;
			}
			if (PageAnon(*page) ||
			    page_trans_compound_anon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
						struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_sem then protects against race with MADV_MERGEABLE).
		 */
		hlist_del(&slot->link);
		list_del(&slot->mm_list);
		spin_unlock(&ksm_mmlist_lock);

		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		up_read(&mm->mmap_sem);
		mmdrop(mm);
	} else {
		spin_unlock(&ksm_mmlist_lock);
		up_read(&mm->mmap_sem);
	}

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	ksm_scan.seqnr++;
	return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages - number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *uninitialized_var(page);

	while (scan_npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		if (!PageKsm(page) || !in_stable_tree(rmap_item))
			cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
	set_freezable();
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		try_to_freeze();

		if (ksmd_should_run()) {
			schedule_timeout_interruptible(
				msecs_to_jiffies(ksm_thread_sleep_millisecs));
		} else {
			wait_event_freezable(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
				 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
			return 0;		/* just ignore the advice */

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}

int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	atomic_inc(&mm->mm_count);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		SetPageSwapBacked(new_page);
		__set_page_locked(new_page);

		if (page_evictable(new_page, vma))
			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(new_page);
	}

	return new_page;
}

int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
			unsigned long *vm_flags)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	unsigned int mapcount = page_mapcount(page);
	int referenced = 0;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return 0;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
				continue;

			referenced += page_referenced_one(page, vma,
				rmap_item->address, &mapcount, vm_flags);
			if (!search_new_forks || !mapcount)
				break;
		}
		anon_vma_unlock(anon_vma);
		if (!mapcount)
			goto out;
	}
	if (!search_new_forks++)
		goto again;
out:
	return referenced;
}

int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	struct stable_node *stable_node;
	struct hlist_node *hlist;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return SWAP_FAIL;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			ret = try_to_unmap_one(page, vma,
					rmap_item->address, flags);
			if (ret != SWAP_AGAIN || !page_mapped(page)) {
				anon_vma_unlock(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct stable_node *stable_node;
	struct hlist_node *hlist;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return ret;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_lock(anon_vma);
		list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			ret = rmap_one(page, vma, rmap_item->address, arg);
			if (ret != SWAP_AGAIN) {
				anon_vma_unlock(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}

void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON(!PageLocked(oldpage));
	VM_BUG_ON(!PageLocked(newpage));
	VM_BUG_ON(newpage->mapping != oldpage->mapping);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
		stable_node->kpfn = page_to_pfn(newpage);
	}
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
						 unsigned long end_pfn)
{
	struct rb_node *node;

	for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
		struct stable_node *stable_node;

		stable_node = rb_entry(node, struct stable_node, node);
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			return stable_node;
	}
	return NULL;
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	struct stable_node *stable_node;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Keep it very simple for now: just lock out ksmd and
		 * MADV_UNMERGEABLE while any memory is going offline.
		 * mutex_lock_nested() is necessary because lockdep was alarmed
		 * that here we take ksm_thread_mutex inside notifier chain
		 * mutex, and later take notifier chain mutex inside
		 * ksm_thread_mutex to unlock it.  But that's safe because both
		 * are inside mem_hotplug_mutex.
		 */
		mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree.
		 */
		while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
					mn->start_pfn + mn->nr_pages)) != NULL)
			remove_node_from_stable_tree(stable_node);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&ksm_thread_mutex);
		break;
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
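	 *
	 * From userspace this is normally driven through sysfs, e.g.
	 * (assuming the usual sysfs mount point):
	 *	echo 1 >/sys/kernel/mm/ksm/run	- start ksmd merging
	 *	echo 0 >/sys/kernel/mm/ksm/run	- stop ksmd
	 *	echo 2 >/sys/kernel/mm/ksm/run	- stop ksmd and unmerge all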
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			int oom_score_adj;

			oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
			err = unmerge_and_remove_all_rmap_items();
			test_set_oom_score_adj(oom_score_adj);
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/*
	 * Choose a high priority since the callback takes ksm_thread_mutex:
	 * later callbacks could only be taking locks which nest within that.
	 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)