/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */

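/*
 * Illustrative sketch (not functional code) of how cmp_and_merge_page()
 * below applies the two trees to one candidate page during a scan pass;
 * helper names are abbreviated:
 *
 *	kpage = stable_tree_search(page);
 *	if (kpage)
 *		merge page into kpage (write-protect page, replace its pte);
 *	else if (checksum(page) changed since the last scan)
 *		just remember the new checksum;
 *	else if ((match = unstable_tree_search_insert(page)))
 *		merge the pair and stable_tree_insert() the new ksm page;
 */
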
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct list_head list;
		};
	};
	struct hlist_head hlist;
	unsigned long kpfn;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

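/*
 * rmap_item->address is always page aligned, so the seqnr and flag bits
 * above fit in its low bits and are cleared with "address &= PAGE_MASK"
 * whenever the bare virtual address is needed again.
 */
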
/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV |
							VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		rb_erase(&stable_node->node,
			 root_stable_tree + NUMA(stable_node->nid));
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * Note that an anon page's mapping is only reset late in the freeing path,
 * so a stale-looking key can still be visible while such a page is on its
 * way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn);
	page = pfn_to_page(kpfn);

	/*
	 * page is computed from kpfn, so on most architectures reading
	 * page->mapping is naturally ordered after reading node->kpfn,
	 * but on Alpha we need to be more careful.
	 */
	smp_read_barrier_depends();
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_freeze_refs().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * but if page is swapcache in migrate_page_move_mapping(), it might
	 * still be our page, in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the freeze_refs section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (lock_it) {
		lock_page(page);
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, true);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, true);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	if (WARN_ON_ONCE(page_mapped(page))) {
		/*
		 * This should not happen: but if it does, just refuse to let
		 * merge_across_nodes be switched - there is no need to panic.
		 */
		err = -EBUSY;
	} else {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node(stable_node)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

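/*
 * Note: the jhash2() checksum below is only a cheap change detector used
 * between scan passes; pages are never merged on checksum equality alone,
 * a full memcmp_pages() comparison always decides identity.
 */
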
static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmun_start = addr;
	mmun_end = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out_mn;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the pagecount against the mapcount is
		 * racy and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush_notify(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;

	mmun_start = addr;
	mmun_end = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr, false);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page, false);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		err = split_huge_page(page);
		if (err)
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_sem */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	int nid;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node;
	struct stable_node *page_node;

	page_node = page_stable_node(page);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_page = get_ksm_page(stable_node, true);
			if (tree_page) {
				unlock_page(tree_page);
				if (get_kpfn_nid(stable_node->kpfn) !=
						NUMA(stable_node->nid)) {
					put_page(tree_page);
					goto replace;
				}
				return tree_page;
			}
			/*
			 * There is now a place for page_node, but the tree may
			 * have been rebalanced, so re-evaluate parent and new.
			 */
			if (page_node)
				goto again;
			return NULL;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
	get_page(page);
	return page;

replace:
	if (page_node) {
		list_del(&page_node->list);
		DO_NUMA(page_node->nid = nid);
		rb_replace_node(&stable_node->node, &page_node->node, root);
		get_page(page);
	} else {
		rb_erase(&stable_node->node, root);
		page = NULL;
	}
	stable_node->head = &migrate_nodes;
	list_add(&stable_node->list, stable_node->head);
	return page;
}

/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	int nid;
	unsigned long kpfn;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node;

	kpfn = page_to_pfn(kpage);
	nid = get_kpfn_nid(kpfn);
	root = root_stable_tree + nid;
again:
	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	INIT_HLIST_HEAD(&stable_node->hlist);
	stable_node->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node);
	DO_NUMA(stable_node->nid = nid);
	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, root);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_unstable_tree + nid;
	new = &root->rb_node;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else if (!ksm_merge_across_nodes &&
			   page_to_nid(tree_page) != nid) {
			/*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put in the right unstable
			 * tree next time: only merge with it when across_nodes.
			 */
			put_page(tree_page);
			return NULL;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	stable_node = page_stable_node(page);
	if (stable_node) {
		if (stable_node->head != &migrate_nodes &&
		    get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) {
			rb_erase(&stable_node->node,
				 root_stable_tree + NUMA(stable_node->nid));
			stable_node->head = &migrate_nodes;
			list_add(&stable_node->list, stable_node->head);
		}
		if (stable_node->head != &migrate_nodes &&
		    rmap_item->head == stable_node)
			return;
	}

	/* We first start with searching the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage == page && rmap_item->head == stable_node) {
		put_page(kpage);
		return;
	}

	remove_rmap_item_from_tree(rmap_item);

	if (kpage) {
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage));
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		if (kpage) {
			/*
			 * The pages were successfully merged: insert new
			 * node in the stable tree and add both rmap_items.
			 */
			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}

static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;
	int nid;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		/*
		 * Whereas stale stable_nodes on the stable_tree itself
		 * get pruned in the regular course of stable_tree_search(),
		 * those moved out to the migrate_nodes list can accumulate:
		 * so prune them once before each full scan.
		 */
		if (!ksm_merge_across_nodes) {
			struct stable_node *stable_node, *next;
			struct page *page;

			list_for_each_entry_safe(stable_node, next,
						 &migrate_nodes, list) {
				page = get_ksm_page(stable_node, false);
				if (page)
					put_page(page);
				cond_resched();
			}
		}

		for (nid = 0; nid < ksm_nr_node_ids; nid++)
			root_unstable_tree[nid] = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
		 */
		if (slot == &ksm_mm_head)
			return NULL;
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}

	mm = slot->mm;
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		vma = NULL;
	else
		vma = find_vma(mm, ksm_scan.address);

	for (; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (IS_ERR_OR_NULL(*page)) {
				ksm_scan.address += PAGE_SIZE;
				cond_resched();
				continue;
			}
			if (PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
						struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_sem then protects against race with MADV_MERGEABLE).
		 */
		hash_del(&slot->link);
		list_del(&slot->mm_list);
		spin_unlock(&ksm_mmlist_lock);

		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		up_read(&mm->mmap_sem);
		mmdrop(mm);
	} else {
		spin_unlock(&ksm_mmlist_lock);
		up_read(&mm->mmap_sem);
	}

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	ksm_scan.seqnr++;
	return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages - number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *uninitialized_var(page);

	while (scan_npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
	set_freezable();
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		wait_while_offlining();
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		try_to_freeze();

		if (ksmd_should_run()) {
			schedule_timeout_interruptible(
				msecs_to_jiffies(ksm_thread_sleep_millisecs));
		} else {
			wait_event_freezable(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}

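/*
 * Illustrative userspace usage (not part of this file): an application
 * opts a region in with madvise(addr, length, MADV_MERGEABLE) and opts it
 * out again with MADV_UNMERGEABLE; both advice values reach ksm_madvise()
 * below through the generic madvise() syscall path.
 */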
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
				 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
				 VM_HUGETLB | VM_MIXEDMAP))
			return 0;		/* just ignore the advice */

#ifdef VM_SAO
		if (*vm_flags & VM_SAO)
			return 0;
#endif

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}

int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
	 * insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 *
	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
	 * scanning cursor, otherwise KSM pages in newly forked mms will be
	 * missed: then we might as well insert at the end of the list.
	 */
	if (ksm_run & KSM_RUN_UNMERGE)
		list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
	else
		list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	atomic_inc(&mm->mm_count);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);
	struct page *new_page;

	if (PageKsm(page)) {
		if (page_stable_node(page) &&
		    !(ksm_run & KSM_RUN_UNMERGE))
			return page;	/* no need to copy it */
	} else if (!anon_vma) {
		return page;		/* no need to copy it */
	} else if (anon_vma->root == vma->anon_vma->root &&
		   page->index == linear_page_index(vma, address)) {
		return page;		/* still no need to copy it */
	}
	if (!PageUptodate(page))
		return page;		/* let do_swap_page report the error */

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__SetPageLocked(new_page);
	}

	return new_page;
}

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON_PAGE(!PageKsm(page), page);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON_PAGE(!PageKsm(page), page);

	/*
	 * Rely on the page lock to protect against concurrent modifications
	 * to that page's node of the stable tree.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	stable_node = page_stable_node(page);
	if (!stable_node)
		return ret;
again:
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		cond_resched();
		anon_vma_lock_read(anon_vma);
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			cond_resched();
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
				continue;

			ret = rwc->rmap_one(page, vma,
					rmap_item->address, rwc->arg);
			if (ret != SWAP_AGAIN) {
				anon_vma_unlock_read(anon_vma);
				goto out;
			}
			if (rwc->done && rwc->done(page)) {
				anon_vma_unlock_read(anon_vma);
				goto out;
			}
		}
		anon_vma_unlock_read(anon_vma);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
	struct stable_node *stable_node;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);

	stable_node = page_stable_node(newpage);
	if (stable_node) {
		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
		stable_node->kpfn = page_to_pfn(newpage);
		/*
		 * newpage->mapping was set in advance; now we need smp_wmb()
		 * to make sure that the new stable_node->kpfn is visible
		 * to get_ksm_page() before it can see that oldpage->mapping
		 * has gone stale (or that PageSwapCache has been cleared).
		 */
		smp_wmb();
		set_page_stable_node(oldpage, NULL);
	}
}
#endif /* CONFIG_MIGRATION */
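
/*
 * Illustrative use of rmap_walk_ksm() (a sketch, not part of this file):
 * callers do not normally invoke it directly; the generic walkers in
 * mm/rmap.c fill in a struct rmap_walk_control and rmap_walk() dispatches
 * here when it sees a PageKsm page.  Roughly:
 *
 *	struct rmap_walk_control rwc = {
 *		.rmap_one = my_rmap_one,	// hypothetical per-vma callback
 *		.arg = my_arg,			// opaque cookie passed through
 *		.done = my_done,		// optional early-exit predicate
 *	};
 *	rmap_walk(page, &rwc);
 *
 * Returning anything other than SWAP_AGAIN from .rmap_one, or true from
 * .done, stops the walk early, as the loop above shows.
 */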

#ifdef CONFIG_MEMORY_HOTREMOVE
static void wait_while_offlining(void)
{
	while (ksm_run & KSM_RUN_OFFLINE) {
		mutex_unlock(&ksm_thread_mutex);
		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
			    TASK_UNINTERRUPTIBLE);
		mutex_lock(&ksm_thread_mutex);
	}
}

static void ksm_check_stable_tree(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	struct stable_node *stable_node, *next;
	struct rb_node *node;
	int nid;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		node = rb_first(root_stable_tree + nid);
		while (node) {
			stable_node = rb_entry(node, struct stable_node, node);
			if (stable_node->kpfn >= start_pfn &&
			    stable_node->kpfn < end_pfn) {
				/*
				 * Don't get_ksm_page, page has already gone:
				 * which is why we keep kpfn instead of page*
				 */
				remove_node_from_stable_tree(stable_node);
				node = rb_first(root_stable_tree + nid);
			} else
				node = rb_next(node);
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (stable_node->kpfn >= start_pfn &&
		    stable_node->kpfn < end_pfn)
			remove_node_from_stable_tree(stable_node);
		cond_resched();
	}
}

static int ksm_memory_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
		 * and remove_all_stable_nodes() while memory is going offline:
		 * it is unsafe for them to touch the stable tree at this time.
		 * But unmerge_ksm_pages(), rmap lookups and other entry points
		 * which do not need the ksm_thread_mutex are all safe.
		 */
		mutex_lock(&ksm_thread_mutex);
		ksm_run |= KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);
		break;

	case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree,
		 * otherwise get_ksm_page() might later try to access a
		 * non-existent struct page.
		 */
		ksm_check_stable_tree(mn->start_pfn,
				      mn->start_pfn + mn->nr_pages);
		/* fallthrough */

	case MEM_CANCEL_OFFLINE:
		mutex_lock(&ksm_thread_mutex);
		ksm_run &= ~KSM_RUN_OFFLINE;
		mutex_unlock(&ksm_thread_mutex);

		smp_mb();	/* wake_up_bit advises this */
		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
		break;
	}
	return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
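
/*
 * Note on the offline handshake above: KSM_RUN_OFFLINE is a single flag bit
 * in ksm_run, so ilog2(KSM_RUN_OFFLINE) is just that bit's number.  ksmd
 * parks in wait_on_bit() on that bit while a hotremove is in flight;
 * ksm_memory_callback() clears the bit and calls wake_up_bit() on the same
 * bit once the offline has completed or been cancelled, with the smp_mb()
 * ordering the clear before any waiter re-checks it.
 */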

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = kstrtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = kstrtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			set_current_oom_origin();
			err = unmerge_and_remove_all_rmap_items();
			clear_current_oom_origin();
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);
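
/*
 * Illustrative sysfs usage (not part of ksm.c): with CONFIG_SYSFS the
 * attributes defined here appear under /sys/kernel/mm/ksm/, e.g. from a
 * root shell:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run	# KSM_RUN_MERGE: start ksmd
 *	cat /sys/kernel/mm/ksm/pages_sharing
 *	echo 2   > /sys/kernel/mm/ksm/run	# KSM_RUN_UNMERGE: unmerge all
 *	echo 0   > /sys/kernel/mm/ksm/run	# KSM_RUN_STOP
 *
 * The values 0/1/2 correspond to KSM_RUN_STOP, KSM_RUN_MERGE and
 * KSM_RUN_UNMERGE handled by run_store() above.
 */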

#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	int err;
	unsigned long knob;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	mutex_lock(&ksm_thread_mutex);
	wait_while_offlining();
	if (ksm_merge_across_nodes != knob) {
		if (ksm_pages_shared || remove_all_stable_nodes())
			err = -EBUSY;
		else if (root_stable_tree == one_stable_tree) {
			struct rb_root *buf;
			/*
			 * This is the first time that we switch away from the
			 * default of merging across nodes: must now allocate
			 * a buffer to hold as many roots as may be needed.
			 * Allocate stable and unstable together:
			 * MAXSMP NODES_SHIFT 10 will use 16kB.
			 */
			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
				      GFP_KERNEL);
			/* Assume that RB_ROOT (a NULL rb_node pointer) is all zeroes */
			if (!buf)
				err = -ENOMEM;
			else {
				root_stable_tree = buf;
				root_unstable_tree = buf + nr_node_ids;
				/* Stable tree is empty but not the unstable */
				root_unstable_tree[0] = one_unstable_tree[0];
			}
		}
		if (!err) {
			ksm_merge_across_nodes = knob;
			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif
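
/*
 * Sizing note for the allocation in merge_across_nodes_store() above: the
 * stable and unstable root arrays are carved out of one kcalloc() of
 * 2 * nr_node_ids rb_roots.  A struct rb_root is a single pointer, so with
 * MAXSMP's NODES_SHIFT of 10 (nr_node_ids up to 1024) that is
 * 2 * 1024 * 8 bytes = 16kB on a 64-bit kernel, matching the "16kB" figure
 * quoted in the comment there.
 */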

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to this priority 100 */
	hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);