xref: /openbmc/linux/mm/ksm.c (revision a86854d0)
1 /*
2  * Memory merging support.
3  *
4  * This code enables dynamic sharing of identical pages found in different
5  * memory areas, even if they are not shared by fork()
6  *
7  * Copyright (C) 2008-2009 Red Hat, Inc.
8  * Authors:
9  *	Izik Eidus
10  *	Andrea Arcangeli
11  *	Chris Wright
12  *	Hugh Dickins
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/mm.h>
19 #include <linux/fs.h>
20 #include <linux/mman.h>
21 #include <linux/sched.h>
22 #include <linux/sched/mm.h>
23 #include <linux/sched/coredump.h>
24 #include <linux/rwsem.h>
25 #include <linux/pagemap.h>
26 #include <linux/rmap.h>
27 #include <linux/spinlock.h>
28 #include <linux/jhash.h>
29 #include <linux/delay.h>
30 #include <linux/kthread.h>
31 #include <linux/wait.h>
32 #include <linux/slab.h>
33 #include <linux/rbtree.h>
34 #include <linux/memory.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/swap.h>
37 #include <linux/ksm.h>
38 #include <linux/hashtable.h>
39 #include <linux/freezer.h>
40 #include <linux/oom.h>
41 #include <linux/numa.h>
42 
43 #include <asm/tlbflush.h>
44 #include "internal.h"
45 
46 #ifdef CONFIG_NUMA
47 #define NUMA(x)		(x)
48 #define DO_NUMA(x)	do { (x); } while (0)
49 #else
50 #define NUMA(x)		(0)
51 #define DO_NUMA(x)	do { } while (0)
52 #endif
53 
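/*
 * For illustration, a minimal sketch of how these wrappers are used (both
 * lines appear verbatim later in this file): they let the NUMA bookkeeping
 * compile away entirely on !CONFIG_NUMA builds.
 *
 *	rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
 *	DO_NUMA(page_node->nid = nid);
 *
 * With CONFIG_NUMA=n, NUMA(x) evaluates to constant 0 (a single tree root)
 * and DO_NUMA(x) expands to nothing, so the nid fields - which only exist
 * under #ifdef CONFIG_NUMA - are never referenced.
 */
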
54 /**
55  * DOC: Overview
56  *
57  * A few notes about the KSM scanning process,
58  * to make it easier to understand the data structures below:
59  *
60  * In order to reduce excessive scanning, KSM sorts the memory pages by their
61  * contents into a data structure that holds pointers to the pages' locations.
62  *
63  * Since the contents of the pages may change at any moment, KSM cannot just
64  * insert the pages into a normal sorted tree and expect it to find anything.
65  * Therefore KSM uses two data structures - the stable and the unstable tree.
66  *
67  * The stable tree holds pointers to all the merged pages (ksm pages), sorted
68  * by their contents.  Because each such page is write-protected, searching
69  * this tree is fully assured to work (except when pages are unmapped),
70  * and therefore this tree is called the stable tree.
71  *
72  * The stable tree node includes information required for reverse
73  * mapping from a KSM page to virtual addresses that map this page.
74  *
75  * In order to avoid large latencies of the rmap walks on KSM pages,
76  * KSM maintains two types of nodes in the stable tree:
77  *
78  * * the regular nodes that keep the reverse mapping structures in a
79  *   linked list
80  * * the "chains" that link nodes ("dups") that represent the same
81  *   write protected memory content, but each "dup" corresponds to a
82  *   different KSM page copy of that content
83  *
84  * Internally, the regular nodes, "dups" and "chains" are represented
85  * using the same :c:type:`struct stable_node` structure.
86  *
87  * In addition to the stable tree, KSM uses a second data structure called the
88  * unstable tree: this tree holds pointers to pages which have been found to
89  * be "unchanged for a period of time".  The unstable tree sorts these pages
90  * by their contents, but since they are not write-protected, KSM cannot rely
91  * upon the unstable tree to work correctly - the unstable tree is liable to
92  * be corrupted as its contents are modified, and so it is called unstable.
93  *
94  * KSM solves this problem by several techniques:
95  *
96  * 1) The unstable tree is flushed every time KSM completes scanning all
97  *    memory areas, and then the tree is rebuilt again from the beginning.
98  * 2) KSM will only insert into the unstable tree pages whose hash value
99  *    has not changed since the previous scan of all memory areas.
100  * 3) The unstable tree is a red-black tree - so its balancing is based on the
101  *    colors of the nodes and not on their contents, assuring that even when
102  *    the tree gets "corrupted" it won't get out of balance, so scanning time
103  *    remains the same (also, searching and inserting nodes in an rbtree uses
104  *    the same algorithm, so we have no overhead when we flush and rebuild).
105  * 4) KSM never flushes the stable tree, which means that even if it were to
106  *    take 10 attempts to find a page in the unstable tree, once it is found,
107  *    it is secured in the stable tree.  (When we scan a new page, we first
108  *    compare it against the stable tree, and then against the unstable tree.)
109  *
110  * If the merge_across_nodes tunable is unset, then KSM maintains multiple
111  * stable trees and multiple unstable trees: one of each for each NUMA node.
112  */
113 
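/*
 * A rough sketch of the per-page flow described above, as implemented by
 * cmp_and_merge_page() further down in this file (error paths, NUMA
 * policy and the zero-page special case are omitted here):
 *
 *	kpage = stable_tree_search(page);	// 1st: the stable tree
 *	if (kpage) {
 *		try_to_merge_with_ksm_page(rmap_item, page, kpage);
 *		return;
 *	}
 *	// 2nd: only a page whose checksum is unchanged since the last
 *	// scan may enter the unstable tree (see calc_checksum() below)
 *	tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page);
 *	if (tree_rmap_item) {
 *		kpage = try_to_merge_two_pages(rmap_item, page,
 *					       tree_rmap_item, tree_page);
 *		if (kpage)
 *			stable_tree_insert(kpage);
 *	}
 */
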
114 /**
115  * struct mm_slot - ksm information per mm that is being scanned
116  * @link: link to the mm_slots hash list
117  * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
118  * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
119  * @mm: the mm that this information is valid for
120  */
121 struct mm_slot {
122 	struct hlist_node link;
123 	struct list_head mm_list;
124 	struct rmap_item *rmap_list;
125 	struct mm_struct *mm;
126 };
127 
128 /**
129  * struct ksm_scan - cursor for scanning
130  * @mm_slot: the current mm_slot we are scanning
131  * @address: the next address inside that to be scanned
132  * @rmap_list: link to the next rmap to be scanned in the rmap_list
133  * @seqnr: count of completed full scans (needed when removing unstable node)
134  *
135  * There is only the one ksm_scan instance of this cursor structure.
136  */
137 struct ksm_scan {
138 	struct mm_slot *mm_slot;
139 	unsigned long address;
140 	struct rmap_item **rmap_list;
141 	unsigned long seqnr;
142 };
143 
144 /**
145  * struct stable_node - node of the stable rbtree
146  * @node: rb node of this ksm page in the stable tree
147  * @head: (overlaying parent) set to &migrate_nodes while temporarily on that list
148  * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
149  * @list: linked into migrate_nodes, pending placement in the proper node tree
150  * @hlist: hlist head of rmap_items using this ksm page
151  * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
152  * @chain_prune_time: time of the last full garbage collection
153  * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
154  * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
155  */
156 struct stable_node {
157 	union {
158 		struct rb_node node;	/* when node of stable tree */
159 		struct {		/* when listed for migration */
160 			struct list_head *head;
161 			struct {
162 				struct hlist_node hlist_dup;
163 				struct list_head list;
164 			};
165 		};
166 	};
167 	struct hlist_head hlist;
168 	union {
169 		unsigned long kpfn;
170 		unsigned long chain_prune_time;
171 	};
172 	/*
173 	 * STABLE_NODE_CHAIN can be any negative number in the
174 	 * rmap_hlist_len negative range, but better not -1, so that
175 	 * underflows can be detected reliably.
176 	 */
177 #define STABLE_NODE_CHAIN -1024
178 	int rmap_hlist_len;
179 #ifdef CONFIG_NUMA
180 	int nid;
181 #endif
182 };
183 
184 /**
185  * struct rmap_item - reverse mapping item for virtual addresses
186  * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
187  * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
188  * @nid: NUMA node id of unstable tree in which linked (may not match page)
189  * @mm: the memory structure this rmap_item is pointing into
190  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
191  * @oldchecksum: previous checksum of the page at that virtual address
192  * @node: rb node of this rmap_item in the unstable tree
193  * @head: pointer to stable_node heading this list in the stable tree
194  * @hlist: link into hlist of rmap_items hanging off that stable_node
195  */
196 struct rmap_item {
197 	struct rmap_item *rmap_list;
198 	union {
199 		struct anon_vma *anon_vma;	/* when stable */
200 #ifdef CONFIG_NUMA
201 		int nid;		/* when node of unstable tree */
202 #endif
203 	};
204 	struct mm_struct *mm;
205 	unsigned long address;		/* + low bits used for flags below */
206 	unsigned int oldchecksum;	/* when unstable */
207 	union {
208 		struct rb_node node;	/* when node of unstable tree */
209 		struct {		/* when listed from stable tree */
210 			struct stable_node *head;
211 			struct hlist_node hlist;
212 		};
213 	};
214 };
215 
216 #define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
217 #define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
218 #define STABLE_FLAG	0x200	/* is listed from the stable tree */
219 
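/*
 * Sketch of the rmap_item->address encoding: addresses are page aligned,
 * so the PAGE_MASK bits hold the virtual address itself and the low bits
 * are free for the flags and sequence number above, e.g.
 *
 *	addr = rmap_item->address & PAGE_MASK;
 *	if (rmap_item->address & UNSTABLE_FLAG)
 *		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
 *
 * The SEQNR_MASK bits, stored when the item is inserted into the unstable
 * tree, record (modulo 256) which full scan inserted it, letting
 * remove_rmap_item_from_tree() tell a node inserted by the current scan
 * from one left over from the scan before.
 */
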
220 /* The stable and unstable tree heads */
221 static struct rb_root one_stable_tree[1] = { RB_ROOT };
222 static struct rb_root one_unstable_tree[1] = { RB_ROOT };
223 static struct rb_root *root_stable_tree = one_stable_tree;
224 static struct rb_root *root_unstable_tree = one_unstable_tree;
225 
226 /* Recently migrated nodes of stable tree, pending proper placement */
227 static LIST_HEAD(migrate_nodes);
228 #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
229 
230 #define MM_SLOTS_HASH_BITS 10
231 static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
232 
233 static struct mm_slot ksm_mm_head = {
234 	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
235 };
236 static struct ksm_scan ksm_scan = {
237 	.mm_slot = &ksm_mm_head,
238 };
239 
240 static struct kmem_cache *rmap_item_cache;
241 static struct kmem_cache *stable_node_cache;
242 static struct kmem_cache *mm_slot_cache;
243 
244 /* The number of nodes in the stable tree */
245 static unsigned long ksm_pages_shared;
246 
247 /* The number of page slots additionally sharing those nodes */
248 static unsigned long ksm_pages_sharing;
249 
250 /* The number of nodes in the unstable tree */
251 static unsigned long ksm_pages_unshared;
252 
253 /* The number of rmap_items in use: to calculate pages_volatile */
254 static unsigned long ksm_rmap_items;
255 
256 /* The number of stable_node chains */
257 static unsigned long ksm_stable_node_chains;
258 
259 /* The number of stable_node dups linked to the stable_node chains */
260 static unsigned long ksm_stable_node_dups;
261 
262 /* Delay in pruning stale stable_node_dups in the stable_node_chains */
263 static int ksm_stable_node_chains_prune_millisecs = 2000;
264 
265 /* Maximum number of page slots sharing a stable node */
266 static int ksm_max_page_sharing = 256;
267 
268 /* Number of pages ksmd should scan in one batch */
269 static unsigned int ksm_thread_pages_to_scan = 100;
270 
271 /* Milliseconds ksmd should sleep between batches */
272 static unsigned int ksm_thread_sleep_millisecs = 20;
273 
274 /* Checksum of an empty (zeroed) page */
275 static unsigned int zero_checksum __read_mostly;
276 
277 /* Whether to merge empty (zeroed) pages with actual zero pages */
278 static bool ksm_use_zero_pages __read_mostly;
279 
280 #ifdef CONFIG_NUMA
281 /* Zeroed when merging across nodes is not allowed */
282 static unsigned int ksm_merge_across_nodes = 1;
283 static int ksm_nr_node_ids = 1;
284 #else
285 #define ksm_merge_across_nodes	1U
286 #define ksm_nr_node_ids		1
287 #endif
288 
289 #define KSM_RUN_STOP	0
290 #define KSM_RUN_MERGE	1
291 #define KSM_RUN_UNMERGE	2
292 #define KSM_RUN_OFFLINE	4
293 static unsigned long ksm_run = KSM_RUN_STOP;
294 static void wait_while_offlining(void);
295 
296 static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
297 static DEFINE_MUTEX(ksm_thread_mutex);
298 static DEFINE_SPINLOCK(ksm_mmlist_lock);
299 
300 #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
301 		sizeof(struct __struct), __alignof__(struct __struct),\
302 		(__flags), NULL)
303 
304 static int __init ksm_slab_init(void)
305 {
306 	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
307 	if (!rmap_item_cache)
308 		goto out;
309 
310 	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
311 	if (!stable_node_cache)
312 		goto out_free1;
313 
314 	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
315 	if (!mm_slot_cache)
316 		goto out_free2;
317 
318 	return 0;
319 
320 out_free2:
321 	kmem_cache_destroy(stable_node_cache);
322 out_free1:
323 	kmem_cache_destroy(rmap_item_cache);
324 out:
325 	return -ENOMEM;
326 }
327 
328 static void __init ksm_slab_free(void)
329 {
330 	kmem_cache_destroy(mm_slot_cache);
331 	kmem_cache_destroy(stable_node_cache);
332 	kmem_cache_destroy(rmap_item_cache);
333 	mm_slot_cache = NULL;
334 }
335 
336 static __always_inline bool is_stable_node_chain(struct stable_node *chain)
337 {
338 	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
339 }
340 
341 static __always_inline bool is_stable_node_dup(struct stable_node *dup)
342 {
343 	return dup->head == STABLE_NODE_DUP_HEAD;
344 }
345 
346 static inline void stable_node_chain_add_dup(struct stable_node *dup,
347 					     struct stable_node *chain)
348 {
349 	VM_BUG_ON(is_stable_node_dup(dup));
350 	dup->head = STABLE_NODE_DUP_HEAD;
351 	VM_BUG_ON(!is_stable_node_chain(chain));
352 	hlist_add_head(&dup->hlist_dup, &chain->hlist);
353 	ksm_stable_node_dups++;
354 }
355 
356 static inline void __stable_node_dup_del(struct stable_node *dup)
357 {
358 	VM_BUG_ON(!is_stable_node_dup(dup));
359 	hlist_del(&dup->hlist_dup);
360 	ksm_stable_node_dups--;
361 }
362 
363 static inline void stable_node_dup_del(struct stable_node *dup)
364 {
365 	VM_BUG_ON(is_stable_node_chain(dup));
366 	if (is_stable_node_dup(dup))
367 		__stable_node_dup_del(dup);
368 	else
369 		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
370 #ifdef CONFIG_DEBUG_VM
371 	dup->head = NULL;
372 #endif
373 }
374 
375 static inline struct rmap_item *alloc_rmap_item(void)
376 {
377 	struct rmap_item *rmap_item;
378 
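	/*
	 * __GFP_NORETRY | __GFP_NOWARN: fail quickly and quietly under
	 * memory pressure rather than stalling or spamming the log; the
	 * caller just skips the page and ksmd retries on a later pass.
	 */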
379 	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
380 						__GFP_NORETRY | __GFP_NOWARN);
381 	if (rmap_item)
382 		ksm_rmap_items++;
383 	return rmap_item;
384 }
385 
386 static inline void free_rmap_item(struct rmap_item *rmap_item)
387 {
388 	ksm_rmap_items--;
389 	rmap_item->mm = NULL;	/* debug safety */
390 	kmem_cache_free(rmap_item_cache, rmap_item);
391 }
392 
393 static inline struct stable_node *alloc_stable_node(void)
394 {
395 	/*
396 	 * The allocation can take too long with GFP_KERNEL when memory is under
397 	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
398 	 * grants access to memory reserves, helping to avoid this problem.
399 	 */
400 	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
401 }
402 
403 static inline void free_stable_node(struct stable_node *stable_node)
404 {
405 	VM_BUG_ON(stable_node->rmap_hlist_len &&
406 		  !is_stable_node_chain(stable_node));
407 	kmem_cache_free(stable_node_cache, stable_node);
408 }
409 
410 static inline struct mm_slot *alloc_mm_slot(void)
411 {
412 	if (!mm_slot_cache)	/* initialization failed */
413 		return NULL;
414 	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
415 }
416 
417 static inline void free_mm_slot(struct mm_slot *mm_slot)
418 {
419 	kmem_cache_free(mm_slot_cache, mm_slot);
420 }
421 
422 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
423 {
424 	struct mm_slot *slot;
425 
426 	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
427 		if (slot->mm == mm)
428 			return slot;
429 
430 	return NULL;
431 }
432 
433 static void insert_to_mm_slots_hash(struct mm_struct *mm,
434 				    struct mm_slot *mm_slot)
435 {
436 	mm_slot->mm = mm;
437 	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
438 }
439 
440 /*
441  * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
442  * page tables after it has passed through ksm_exit() - which, if necessary,
443  * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
444  * a special flag: they can just back out as soon as mm_users goes to zero.
445  * ksm_test_exit() is used throughout to make this test for exit: in some
446  * places for correctness, in some places just to avoid unnecessary work.
447  */
448 static inline bool ksm_test_exit(struct mm_struct *mm)
449 {
450 	return atomic_read(&mm->mm_users) == 0;
451 }
452 
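/*
 * Typical use, e.g. in unmerge_ksm_pages() below: bail out of a scan loop
 * as soon as the mm has started exiting.
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE) {
 *		if (ksm_test_exit(vma->vm_mm))
 *			break;
 *		...
 *	}
 */
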
453 /*
454  * We use break_ksm to break COW on a ksm page: it's a stripped down
455  *
456  *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
457  *		put_page(page);
458  *
459  * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
460  * in case the application has unmapped and remapped mm,addr meanwhile.
461  * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
462  * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
463  *
464  * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
465  * of the process that owns 'vma'.  We also do not want to enforce
466  * protection keys here anyway.
467  */
468 static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
469 {
470 	struct page *page;
471 	int ret = 0;
472 
473 	do {
474 		cond_resched();
475 		page = follow_page(vma, addr,
476 				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
477 		if (IS_ERR_OR_NULL(page))
478 			break;
479 		if (PageKsm(page))
480 			ret = handle_mm_fault(vma, addr,
481 					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
482 		else
483 			ret = VM_FAULT_WRITE;
484 		put_page(page);
485 	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
486 	/*
487 	 * We must loop because handle_mm_fault() may back out if there's
488 	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
489 	 *
490 	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
491 	 * COW has been broken, even if the vma does not permit VM_WRITE;
492 	 * but note that a concurrent fault might break PageKsm for us.
493 	 *
494 	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
495 	 * backing file, which also invalidates anonymous pages: that's
496 	 * okay, that truncation will have unmapped the PageKsm for us.
497 	 *
498 	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
499 	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
500 	 * current task has TIF_MEMDIE set, and will be OOM killed on return
501 	 * to user; and ksmd, having no mm, would never be chosen for that.
502 	 *
503 	 * But if the mm is in a limited mem_cgroup, then the fault may fail
504 	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
505 	 * even ksmd can fail in this way - though it's usually breaking ksm
506 	 * just to undo a merge it made a moment before, so unlikely to oom.
507 	 *
508 	 * That's a pity: we might therefore have more kernel pages allocated
509 	 * than we're counting as nodes in the stable tree; but ksm_do_scan
510 	 * will retry to break_cow on each pass, so should recover the page
511 	 * in due course.  The important thing is to not let VM_MERGEABLE
512 	 * be cleared while any such pages might remain in the area.
513 	 */
514 	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
515 }
516 
517 static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
518 		unsigned long addr)
519 {
520 	struct vm_area_struct *vma;
521 	if (ksm_test_exit(mm))
522 		return NULL;
523 	vma = find_vma(mm, addr);
524 	if (!vma || vma->vm_start > addr)
525 		return NULL;
526 	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
527 		return NULL;
528 	return vma;
529 }
530 
531 static void break_cow(struct rmap_item *rmap_item)
532 {
533 	struct mm_struct *mm = rmap_item->mm;
534 	unsigned long addr = rmap_item->address;
535 	struct vm_area_struct *vma;
536 
537 	/*
538 	 * It is not an accident that whenever we want to break COW
539 	 * to undo, we also need to drop a reference to the anon_vma.
540 	 */
541 	put_anon_vma(rmap_item->anon_vma);
542 
543 	down_read(&mm->mmap_sem);
544 	vma = find_mergeable_vma(mm, addr);
545 	if (vma)
546 		break_ksm(vma, addr);
547 	up_read(&mm->mmap_sem);
548 }
549 
550 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
551 {
552 	struct mm_struct *mm = rmap_item->mm;
553 	unsigned long addr = rmap_item->address;
554 	struct vm_area_struct *vma;
555 	struct page *page;
556 
557 	down_read(&mm->mmap_sem);
558 	vma = find_mergeable_vma(mm, addr);
559 	if (!vma)
560 		goto out;
561 
562 	page = follow_page(vma, addr, FOLL_GET);
563 	if (IS_ERR_OR_NULL(page))
564 		goto out;
565 	if (PageAnon(page)) {
566 		flush_anon_page(vma, page, addr);
567 		flush_dcache_page(page);
568 	} else {
569 		put_page(page);
570 out:
571 		page = NULL;
572 	}
573 	up_read(&mm->mmap_sem);
574 	return page;
575 }
576 
577 /*
578  * This helper is used for getting the right index into the array of tree roots.
579  * When merge_across_nodes knob is set to 1, there are only two rb-trees for
580  * stable and unstable pages from all nodes with roots in index 0. Otherwise,
581  * every node has its own stable and unstable tree.
582  */
583 static inline int get_kpfn_nid(unsigned long kpfn)
584 {
585 	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
586 }
587 
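/*
 * Typical use, as in stable_tree_search() below: pick the per-node tree
 * root for a ksm page's pfn (always index 0 when merge_across_nodes is
 * set).
 *
 *	nid = get_kpfn_nid(page_to_pfn(page));
 *	root = root_stable_tree + nid;
 */
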
588 static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
589 						   struct rb_root *root)
590 {
591 	struct stable_node *chain = alloc_stable_node();
592 	VM_BUG_ON(is_stable_node_chain(dup));
593 	if (likely(chain)) {
594 		INIT_HLIST_HEAD(&chain->hlist);
595 		chain->chain_prune_time = jiffies;
596 		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
597 #if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
598 		chain->nid = -1; /* debug */
599 #endif
600 		ksm_stable_node_chains++;
601 
602 		/*
603 		 * Put the stable node chain in the first dimension of
604 		 * the stable tree and at the same time remove the old
605 		 * stable node.
606 		 */
607 		rb_replace_node(&dup->node, &chain->node, root);
608 
609 		/*
610 		 * Move the old stable node to the second dimension
611 		 * queued in the hlist_dup. The invariant is that all
612 		 * dup stable_nodes in the chain->hlist point to pages
613 		 * that are wrprotected and have the exact same
614 		 * content.
615 		 */
616 		stable_node_chain_add_dup(dup, chain);
617 	}
618 	return chain;
619 }
620 
621 static inline void free_stable_node_chain(struct stable_node *chain,
622 					  struct rb_root *root)
623 {
624 	rb_erase(&chain->node, root);
625 	free_stable_node(chain);
626 	ksm_stable_node_chains--;
627 }
628 
629 static void remove_node_from_stable_tree(struct stable_node *stable_node)
630 {
631 	struct rmap_item *rmap_item;
632 
633 	/* check it's not STABLE_NODE_CHAIN or negative */
634 	BUG_ON(stable_node->rmap_hlist_len < 0);
635 
636 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
637 		if (rmap_item->hlist.next)
638 			ksm_pages_sharing--;
639 		else
640 			ksm_pages_shared--;
641 		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
642 		stable_node->rmap_hlist_len--;
643 		put_anon_vma(rmap_item->anon_vma);
644 		rmap_item->address &= PAGE_MASK;
645 		cond_resched();
646 	}
647 
648 	/*
649 	 * We need the second aligned pointer of the migrate_nodes
650 	 * list_head to stay clear of the rb_parent_color union
651 	 * (aligned and different from any node) and also different
652 	 * from &migrate_nodes. This will verify that future list.h changes
653 	 * don't break STABLE_NODE_DUP_HEAD.
654 	 */
655 #if GCC_VERSION >= 40903 /* only recent gcc can handle it */
656 	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
657 	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
658 #endif
659 
660 	if (stable_node->head == &migrate_nodes)
661 		list_del(&stable_node->list);
662 	else
663 		stable_node_dup_del(stable_node);
664 	free_stable_node(stable_node);
665 }
666 
667 /*
668  * get_ksm_page: checks if the page indicated by the stable node
669  * is still its ksm page, despite having held no reference to it.
670  * If so, we can trust the content of the page, and it
671  * returns the gotten page; but if the page has now been zapped,
672  * remove the stale node from the stable tree and return NULL.
673  * But beware, the stable node's page might be being migrated.
674  *
675  * You would expect the stable_node to hold a reference to the ksm page.
676  * But if it increments the page's count, swapping out has to wait for
677  * ksmd to come around again before it can free the page, which may take
678  * seconds or even minutes: much too unresponsive.  So instead we use a
679  * "keyhole reference": access to the ksm page from the stable node peeps
680  * out through its keyhole to see if that page still holds the right key,
681  * pointing back to this stable node.  This relies on freeing a PageAnon
682  * page to reset its page->mapping to NULL, and relies on no other use of
683  * a page to put something that might look like our key in page->mapping
684  * while it is on its way to being freed; but it is an anomaly to bear in mind.
685  */
686 static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
687 {
688 	struct page *page;
689 	void *expected_mapping;
690 	unsigned long kpfn;
691 
692 	expected_mapping = (void *)((unsigned long)stable_node |
693 					PAGE_MAPPING_KSM);
694 again:
695 	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
696 	page = pfn_to_page(kpfn);
697 	if (READ_ONCE(page->mapping) != expected_mapping)
698 		goto stale;
699 
700 	/*
701 	 * We cannot do anything with the page while its refcount is 0.
702 	 * Usually 0 means free, or tail of a higher-order page: in which
703 	 * case this node is no longer referenced, and should be freed;
704 	 * however, it might mean that the page is under page_freeze_refs().
705 	 * The __remove_mapping() case is easy, again the node is now stale;
706 	 * but if page is swapcache in migrate_page_move_mapping(), it might
707 	 * still be our page, in which case it's essential to keep the node.
708 	 */
709 	while (!get_page_unless_zero(page)) {
710 		/*
711 		 * Another check for page->mapping != expected_mapping would
712 		 * work here too.  We have chosen the !PageSwapCache test to
713 		 * optimize the common case, when the page is or is about to
714 		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
715 		 * in the freeze_refs section of __remove_mapping(); but an Anon
716 		 * page->mapping is reset to NULL later, in free_pages_prepare().
717 		 */
718 		if (!PageSwapCache(page))
719 			goto stale;
720 		cpu_relax();
721 	}
722 
723 	if (READ_ONCE(page->mapping) != expected_mapping) {
724 		put_page(page);
725 		goto stale;
726 	}
727 
728 	if (lock_it) {
729 		lock_page(page);
730 		if (READ_ONCE(page->mapping) != expected_mapping) {
731 			unlock_page(page);
732 			put_page(page);
733 			goto stale;
734 		}
735 	}
736 	return page;
737 
738 stale:
739 	/*
740 	 * We come here from above when page->mapping or !PageSwapCache
741 	 * suggests that the node is stale; but it might be under migration.
742 	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
743 	 * before checking whether node->kpfn has been changed.
744 	 */
745 	smp_rmb();
746 	if (READ_ONCE(stable_node->kpfn) != kpfn)
747 		goto again;
748 	remove_node_from_stable_tree(stable_node);
749 	return NULL;
750 }
751 
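/*
 * A minimal sketch of the caller pattern for get_ksm_page(), as used for
 * instance by remove_rmap_item_from_tree() below: the returned page comes
 * with its count raised (and locked, if lock_it was true), while a NULL
 * return means the node was stale and has already been removed from the
 * stable tree by get_ksm_page() itself.
 *
 *	page = get_ksm_page(stable_node, true);
 *	if (!page)
 *		goto out;
 *	...
 *	unlock_page(page);
 *	put_page(page);
 */
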
752 /*
753  * Removing rmap_item from stable or unstable tree.
754  * This function will clean the information from the stable/unstable tree.
755  */
756 static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
757 {
758 	if (rmap_item->address & STABLE_FLAG) {
759 		struct stable_node *stable_node;
760 		struct page *page;
761 
762 		stable_node = rmap_item->head;
763 		page = get_ksm_page(stable_node, true);
764 		if (!page)
765 			goto out;
766 
767 		hlist_del(&rmap_item->hlist);
768 		unlock_page(page);
769 		put_page(page);
770 
771 		if (!hlist_empty(&stable_node->hlist))
772 			ksm_pages_sharing--;
773 		else
774 			ksm_pages_shared--;
775 		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
776 		stable_node->rmap_hlist_len--;
777 
778 		put_anon_vma(rmap_item->anon_vma);
779 		rmap_item->address &= PAGE_MASK;
780 
781 	} else if (rmap_item->address & UNSTABLE_FLAG) {
782 		unsigned char age;
783 		/*
784 		 * Usually ksmd can and must skip the rb_erase, because
785 		 * root_unstable_tree was already reset to RB_ROOT.
786 		 * But be careful when an mm is exiting: do the rb_erase
787 		 * if this rmap_item was inserted by this scan, rather
788 		 * than left over from before.
789 		 */
790 		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
791 		BUG_ON(age > 1);
792 		if (!age)
793 			rb_erase(&rmap_item->node,
794 				 root_unstable_tree + NUMA(rmap_item->nid));
795 		ksm_pages_unshared--;
796 		rmap_item->address &= PAGE_MASK;
797 	}
798 out:
799 	cond_resched();		/* we're called from many long loops */
800 }
801 
802 static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
803 				       struct rmap_item **rmap_list)
804 {
805 	while (*rmap_list) {
806 		struct rmap_item *rmap_item = *rmap_list;
807 		*rmap_list = rmap_item->rmap_list;
808 		remove_rmap_item_from_tree(rmap_item);
809 		free_rmap_item(rmap_item);
810 	}
811 }
812 
813 /*
814  * Though it's very tempting to unmerge rmap_items from stable tree rather
815  * than check every pte of a given vma, the locking doesn't quite work for
816  * that - an rmap_item is assigned to the stable tree after inserting ksm
817  * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
818  * rmap_items from parent to child at fork time (so as not to waste time
819  * if exit comes before the next scan reaches it).
820  *
821  * Similarly, although we'd like to remove rmap_items (so updating counts
822  * and freeing memory) when unmerging an area, it's easier to leave that
823  * to the next pass of ksmd - consider, for example, how ksmd might be
824  * in cmp_and_merge_page on one of the rmap_items we would be removing.
825  */
826 static int unmerge_ksm_pages(struct vm_area_struct *vma,
827 			     unsigned long start, unsigned long end)
828 {
829 	unsigned long addr;
830 	int err = 0;
831 
832 	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
833 		if (ksm_test_exit(vma->vm_mm))
834 			break;
835 		if (signal_pending(current))
836 			err = -ERESTARTSYS;
837 		else
838 			err = break_ksm(vma, addr);
839 	}
840 	return err;
841 }
842 
843 static inline struct stable_node *page_stable_node(struct page *page)
844 {
845 	return PageKsm(page) ? page_rmapping(page) : NULL;
846 }
847 
848 static inline void set_page_stable_node(struct page *page,
849 					struct stable_node *stable_node)
850 {
851 	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
852 }
853 
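/*
 * Round trip of the page->mapping encoding used above, for a non-NULL
 * stable_node (this same encoding is what get_ksm_page() checks against
 * as expected_mapping):
 *
 *	set_page_stable_node(page, stable_node);
 *	// now PageKsm(page) is true, and
 *	// page_stable_node(page) == stable_node
 */
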
854 #ifdef CONFIG_SYSFS
855 /*
856  * Only called through the sysfs control interface:
857  */
858 static int remove_stable_node(struct stable_node *stable_node)
859 {
860 	struct page *page;
861 	int err;
862 
863 	page = get_ksm_page(stable_node, true);
864 	if (!page) {
865 		/*
866 		 * get_ksm_page did remove_node_from_stable_tree itself.
867 		 */
868 		return 0;
869 	}
870 
871 	if (WARN_ON_ONCE(page_mapped(page))) {
872 		/*
873 		 * This should not happen: but if it does, just refuse to let
874 		 * merge_across_nodes be switched - there is no need to panic.
875 		 */
876 		err = -EBUSY;
877 	} else {
878 		/*
879 		 * The stable node did not yet appear stale to get_ksm_page(),
880 		 * since that allows for an unmapped ksm page to be recognized
881 		 * right up until it is freed; but the node is safe to remove.
882 		 * This page might be in a pagevec waiting to be freed,
883 		 * or it might be PageSwapCache (perhaps under writeback),
884 		 * or it might have been removed from swapcache a moment ago.
885 		 */
886 		set_page_stable_node(page, NULL);
887 		remove_node_from_stable_tree(stable_node);
888 		err = 0;
889 	}
890 
891 	unlock_page(page);
892 	put_page(page);
893 	return err;
894 }
895 
896 static int remove_stable_node_chain(struct stable_node *stable_node,
897 				    struct rb_root *root)
898 {
899 	struct stable_node *dup;
900 	struct hlist_node *hlist_safe;
901 
902 	if (!is_stable_node_chain(stable_node)) {
903 		VM_BUG_ON(is_stable_node_dup(stable_node));
904 		if (remove_stable_node(stable_node))
905 			return true;
906 		else
907 			return false;
908 	}
909 
910 	hlist_for_each_entry_safe(dup, hlist_safe,
911 				  &stable_node->hlist, hlist_dup) {
912 		VM_BUG_ON(!is_stable_node_dup(dup));
913 		if (remove_stable_node(dup))
914 			return true;
915 	}
916 	BUG_ON(!hlist_empty(&stable_node->hlist));
917 	free_stable_node_chain(stable_node, root);
918 	return false;
919 }
920 
921 static int remove_all_stable_nodes(void)
922 {
923 	struct stable_node *stable_node, *next;
924 	int nid;
925 	int err = 0;
926 
927 	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
928 		while (root_stable_tree[nid].rb_node) {
929 			stable_node = rb_entry(root_stable_tree[nid].rb_node,
930 						struct stable_node, node);
931 			if (remove_stable_node_chain(stable_node,
932 						     root_stable_tree + nid)) {
933 				err = -EBUSY;
934 				break;	/* proceed to next nid */
935 			}
936 			cond_resched();
937 		}
938 	}
939 	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
940 		if (remove_stable_node(stable_node))
941 			err = -EBUSY;
942 		cond_resched();
943 	}
944 	return err;
945 }
946 
947 static int unmerge_and_remove_all_rmap_items(void)
948 {
949 	struct mm_slot *mm_slot;
950 	struct mm_struct *mm;
951 	struct vm_area_struct *vma;
952 	int err = 0;
953 
954 	spin_lock(&ksm_mmlist_lock);
955 	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
956 						struct mm_slot, mm_list);
957 	spin_unlock(&ksm_mmlist_lock);
958 
959 	for (mm_slot = ksm_scan.mm_slot;
960 			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
961 		mm = mm_slot->mm;
962 		down_read(&mm->mmap_sem);
963 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
964 			if (ksm_test_exit(mm))
965 				break;
966 			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
967 				continue;
968 			err = unmerge_ksm_pages(vma,
969 						vma->vm_start, vma->vm_end);
970 			if (err)
971 				goto error;
972 		}
973 
974 		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
975 		up_read(&mm->mmap_sem);
976 
977 		spin_lock(&ksm_mmlist_lock);
978 		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
979 						struct mm_slot, mm_list);
980 		if (ksm_test_exit(mm)) {
981 			hash_del(&mm_slot->link);
982 			list_del(&mm_slot->mm_list);
983 			spin_unlock(&ksm_mmlist_lock);
984 
985 			free_mm_slot(mm_slot);
986 			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
987 			mmdrop(mm);
988 		} else
989 			spin_unlock(&ksm_mmlist_lock);
990 	}
991 
992 	/* Clean up stable nodes, but don't worry if some are still busy */
993 	remove_all_stable_nodes();
994 	ksm_scan.seqnr = 0;
995 	return 0;
996 
997 error:
998 	up_read(&mm->mmap_sem);
999 	spin_lock(&ksm_mmlist_lock);
1000 	ksm_scan.mm_slot = &ksm_mm_head;
1001 	spin_unlock(&ksm_mmlist_lock);
1002 	return err;
1003 }
1004 #endif /* CONFIG_SYSFS */
1005 
1006 static u32 calc_checksum(struct page *page)
1007 {
1008 	u32 checksum;
1009 	void *addr = kmap_atomic(page);
1010 	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
1011 	kunmap_atomic(addr);
1012 	return checksum;
1013 }
1014 
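/*
 * The trees are ordered by full memcmp of page contents, not by this
 * checksum: the checksum's main job is to gate entry to the unstable
 * tree. A simplified sketch of that gate, from cmp_and_merge_page()
 * later in this file:
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;	// still volatile: skip
 *		return;
 *	}
 */
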
1015 static int memcmp_pages(struct page *page1, struct page *page2)
1016 {
1017 	char *addr1, *addr2;
1018 	int ret;
1019 
1020 	addr1 = kmap_atomic(page1);
1021 	addr2 = kmap_atomic(page2);
1022 	ret = memcmp(addr1, addr2, PAGE_SIZE);
1023 	kunmap_atomic(addr2);
1024 	kunmap_atomic(addr1);
1025 	return ret;
1026 }
1027 
1028 static inline int pages_identical(struct page *page1, struct page *page2)
1029 {
1030 	return !memcmp_pages(page1, page2);
1031 }
1032 
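/*
 * Note that memcmp_pages() returns the usual tri-state memcmp() result,
 * not just equality: the sign of the result is what keeps the stable and
 * unstable rbtrees sorted by content, e.g. in stable_tree_search() below:
 *
 *	ret = memcmp_pages(page, tree_page);
 *	if (ret < 0)
 *		new = &parent->rb_left;
 *	else if (ret > 0)
 *		new = &parent->rb_right;
 *	else
 *		...	// identical content: found a match
 */
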
1033 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1034 			      pte_t *orig_pte)
1035 {
1036 	struct mm_struct *mm = vma->vm_mm;
1037 	struct page_vma_mapped_walk pvmw = {
1038 		.page = page,
1039 		.vma = vma,
1040 	};
1041 	int swapped;
1042 	int err = -EFAULT;
1043 	unsigned long mmun_start;	/* For mmu_notifiers */
1044 	unsigned long mmun_end;		/* For mmu_notifiers */
1045 
1046 	pvmw.address = page_address_in_vma(page, vma);
1047 	if (pvmw.address == -EFAULT)
1048 		goto out;
1049 
1050 	BUG_ON(PageTransCompound(page));
1051 
1052 	mmun_start = pvmw.address;
1053 	mmun_end   = pvmw.address + PAGE_SIZE;
1054 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1055 
1056 	if (!page_vma_mapped_walk(&pvmw))
1057 		goto out_mn;
1058 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1059 		goto out_unlock;
1060 
1061 	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1062 	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1063 						mm_tlb_flush_pending(mm)) {
1064 		pte_t entry;
1065 
1066 		swapped = PageSwapCache(page);
1067 		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1068 		/*
1069 		 * This is tricky: get_user_pages_fast() doesn't take any
1070 		 * lock when it runs, so the check we are about to make of
1071 		 * the page count against the map count is racy, and
1072 		 * O_DIRECT could start right after the check.
1073 		 * So we clear the pte and flush the TLB before the check;
1074 		 * this assures us that no O_DIRECT can start after the
1075 		 * check or in the middle of the check.
1076 		 *
1077 		 * No need to notify as we are downgrading page table to read
1078 		 * only not changing it to point to a new page.
1079 		 *
1080 		 * See Documentation/vm/mmu_notifier.rst
1081 		 */
1082 		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1083 		/*
1084 		 * Check that no O_DIRECT or similar I/O is in progress on the
1085 		 * page: each pte mapping, our follow_page() reference and any
1086 		 * swapcache entry account for one page reference apiece.
1086 		 */
1087 		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1088 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1089 			goto out_unlock;
1090 		}
1091 		if (pte_dirty(entry))
1092 			set_page_dirty(page);
1093 
1094 		if (pte_protnone(entry))
1095 			entry = pte_mkclean(pte_clear_savedwrite(entry));
1096 		else
1097 			entry = pte_mkclean(pte_wrprotect(entry));
1098 		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
1099 	}
1100 	*orig_pte = *pvmw.pte;
1101 	err = 0;
1102 
1103 out_unlock:
1104 	page_vma_mapped_walk_done(&pvmw);
1105 out_mn:
1106 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1107 out:
1108 	return err;
1109 }
1110 
1111 /**
1112  * replace_page - replace page in vma by new ksm page
1113  * @vma:      vma that holds the pte pointing to page
1114  * @page:     the page we are replacing by kpage
1115  * @kpage:    the ksm page we replace page by
1116  * @orig_pte: the original value of the pte
1117  *
1118  * Returns 0 on success, -EFAULT on failure.
1119  */
1120 static int replace_page(struct vm_area_struct *vma, struct page *page,
1121 			struct page *kpage, pte_t orig_pte)
1122 {
1123 	struct mm_struct *mm = vma->vm_mm;
1124 	pmd_t *pmd;
1125 	pte_t *ptep;
1126 	pte_t newpte;
1127 	spinlock_t *ptl;
1128 	unsigned long addr;
1129 	int err = -EFAULT;
1130 	unsigned long mmun_start;	/* For mmu_notifiers */
1131 	unsigned long mmun_end;		/* For mmu_notifiers */
1132 
1133 	addr = page_address_in_vma(page, vma);
1134 	if (addr == -EFAULT)
1135 		goto out;
1136 
1137 	pmd = mm_find_pmd(mm, addr);
1138 	if (!pmd)
1139 		goto out;
1140 
1141 	mmun_start = addr;
1142 	mmun_end   = addr + PAGE_SIZE;
1143 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1144 
1145 	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1146 	if (!pte_same(*ptep, orig_pte)) {
1147 		pte_unmap_unlock(ptep, ptl);
1148 		goto out_mn;
1149 	}
1150 
1151 	/*
1152 	 * No need to check ksm_use_zero_pages here: we can only have a
1153 	 * zero_page here if ksm_use_zero_pages was enabled already.
1154 	 */
1155 	if (!is_zero_pfn(page_to_pfn(kpage))) {
1156 		get_page(kpage);
1157 		page_add_anon_rmap(kpage, vma, addr, false);
1158 		newpte = mk_pte(kpage, vma->vm_page_prot);
1159 	} else {
1160 		newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
1161 					       vma->vm_page_prot));
1162 		/*
1163 		 * We're replacing an anonymous page with a zero page, which is
1164 		 * not anonymous. We need to do proper accounting otherwise we
1165 		 * will get wrong values in /proc, and a BUG message in dmesg
1166 		 * when tearing down the mm.
1167 		 */
1168 		dec_mm_counter(mm, MM_ANONPAGES);
1169 	}
1170 
1171 	flush_cache_page(vma, addr, pte_pfn(*ptep));
1172 	/*
1173 	 * No need to notify as we are replacing a read only page with another
1174 	 * read only page with the same content.
1175 	 *
1176 	 * See Documentation/vm/mmu_notifier.rst
1177 	 */
1178 	ptep_clear_flush(vma, addr, ptep);
1179 	set_pte_at_notify(mm, addr, ptep, newpte);
1180 
1181 	page_remove_rmap(page, false);
1182 	if (!page_mapped(page))
1183 		try_to_free_swap(page);
1184 	put_page(page);
1185 
1186 	pte_unmap_unlock(ptep, ptl);
1187 	err = 0;
1188 out_mn:
1189 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1190 out:
1191 	return err;
1192 }
1193 
1194 /*
1195  * try_to_merge_one_page - take two pages and merge them into one
1196  * @vma: the vma that holds the pte pointing to page
1197  * @page: the PageAnon page that we want to replace with kpage
1198  * @kpage: the PageKsm page that we want to map instead of page,
1199  *         or NULL the first time when we want to use page as kpage.
1200  *
1201  * This function returns 0 if the pages were merged, -EFAULT otherwise.
1202  */
1203 static int try_to_merge_one_page(struct vm_area_struct *vma,
1204 				 struct page *page, struct page *kpage)
1205 {
1206 	pte_t orig_pte = __pte(0);
1207 	int err = -EFAULT;
1208 
1209 	if (page == kpage)			/* ksm page forked */
1210 		return 0;
1211 
1212 	if (!PageAnon(page))
1213 		goto out;
1214 
1215 	/*
1216 	 * We need the page lock to read a stable PageSwapCache in
1217 	 * write_protect_page().  We use trylock_page() instead of
1218 	 * lock_page() because we don't want to wait here - we
1219 	 * prefer to continue scanning and merging different pages,
1220 	 * then come back to this page when it is unlocked.
1221 	 */
1222 	if (!trylock_page(page))
1223 		goto out;
1224 
1225 	if (PageTransCompound(page)) {
1226 		if (split_huge_page(page))
1227 			goto out_unlock;
1228 	}
1229 
1230 	/*
1231 	 * If this anonymous page is mapped only here, its pte may need
1232 	 * to be write-protected.  If it's mapped elsewhere, all of its
1233 	 * ptes are necessarily already write-protected.  But in either
1234 	 * case, we need to lock and check page_count is not raised.
1235 	 */
1236 	if (write_protect_page(vma, page, &orig_pte) == 0) {
1237 		if (!kpage) {
1238 			/*
1239 			 * While we hold page lock, upgrade page from
1240 			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1241 			 * stable_tree_insert() will update stable_node.
1242 			 */
1243 			set_page_stable_node(page, NULL);
1244 			mark_page_accessed(page);
1245 			/*
1246 			 * Page reclaim just frees a clean page with no dirty
1247 			 * ptes: make sure that the ksm page would be swapped.
1248 			 */
1249 			if (!PageDirty(page))
1250 				SetPageDirty(page);
1251 			err = 0;
1252 		} else if (pages_identical(page, kpage))
1253 			err = replace_page(vma, page, kpage, orig_pte);
1254 	}
1255 
1256 	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1257 		munlock_vma_page(page);
1258 		if (!PageMlocked(kpage)) {
1259 			unlock_page(page);
1260 			lock_page(kpage);
1261 			mlock_vma_page(kpage);
1262 			page = kpage;		/* for final unlock */
1263 		}
1264 	}
1265 
1266 out_unlock:
1267 	unlock_page(page);
1268 out:
1269 	return err;
1270 }
1271 
1272 /*
1273  * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1274  * but no new kernel page is allocated: kpage must already be a ksm page.
1275  *
1276  * This function returns 0 if the pages were merged, -EFAULT otherwise.
1277  */
1278 static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1279 				      struct page *page, struct page *kpage)
1280 {
1281 	struct mm_struct *mm = rmap_item->mm;
1282 	struct vm_area_struct *vma;
1283 	int err = -EFAULT;
1284 
1285 	down_read(&mm->mmap_sem);
1286 	vma = find_mergeable_vma(mm, rmap_item->address);
1287 	if (!vma)
1288 		goto out;
1289 
1290 	err = try_to_merge_one_page(vma, page, kpage);
1291 	if (err)
1292 		goto out;
1293 
1294 	/* Unstable nid is in union with stable anon_vma: remove first */
1295 	remove_rmap_item_from_tree(rmap_item);
1296 
1297 	/* Must get reference to anon_vma while still holding mmap_sem */
1298 	rmap_item->anon_vma = vma->anon_vma;
1299 	get_anon_vma(vma->anon_vma);
1300 out:
1301 	up_read(&mm->mmap_sem);
1302 	return err;
1303 }
1304 
1305 /*
1306  * try_to_merge_two_pages - take two identical pages and prepare them
1307  * to be merged into one page.
1308  *
1309  * This function returns the kpage if we successfully merged two identical
1310  * pages into one ksm page, NULL otherwise.
1311  *
1312  * Note that this function upgrades page to ksm page: if one of the pages
1313  * is already a ksm page, try_to_merge_with_ksm_page should be used.
1314  */
1315 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1316 					   struct page *page,
1317 					   struct rmap_item *tree_rmap_item,
1318 					   struct page *tree_page)
1319 {
1320 	int err;
1321 
1322 	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1323 	if (!err) {
1324 		err = try_to_merge_with_ksm_page(tree_rmap_item,
1325 							tree_page, page);
1326 		/*
1327 		 * If that fails, we have a ksm page with only one pte
1328 		 * pointing to it: so break it.
1329 		 */
1330 		if (err)
1331 			break_cow(rmap_item);
1332 	}
1333 	return err ? NULL : page;
1334 }
1335 
1336 static __always_inline
1337 bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1338 {
1339 	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1340 	/*
1341 	 * Check that at least one mapping still exists, otherwise
1342 	 * there's not much point in merging and sharing with this
1343 	 * stable_node, as the underlying tree_page of the other
1344 	 * sharer is going to be freed soon.
1345 	 */
1346 	return stable_node->rmap_hlist_len &&
1347 		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1348 }
1349 
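/*
 * Worked example with the default ksm_max_page_sharing of 256: a dup with
 * rmap_hlist_len == 255 passes __is_page_sharing_candidate(dup, 0), since
 * 255 + 0 < 256, so one more rmap_item may still share it; with offset 1
 * (asking for room for one merge beyond the one already underway) the
 * same dup fails, since 255 + 1 == 256.
 */
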
1350 static __always_inline
1351 bool is_page_sharing_candidate(struct stable_node *stable_node)
1352 {
1353 	return __is_page_sharing_candidate(stable_node, 0);
1354 }
1355 
1356 static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1357 				    struct stable_node **_stable_node,
1358 				    struct rb_root *root,
1359 				    bool prune_stale_stable_nodes)
1360 {
1361 	struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1362 	struct hlist_node *hlist_safe;
1363 	struct page *_tree_page, *tree_page = NULL;
1364 	int nr = 0;
1365 	int found_rmap_hlist_len;
1366 
1367 	if (!prune_stale_stable_nodes ||
1368 	    time_before(jiffies, stable_node->chain_prune_time +
1369 			msecs_to_jiffies(
1370 				ksm_stable_node_chains_prune_millisecs)))
1371 		prune_stale_stable_nodes = false;
1372 	else
1373 		stable_node->chain_prune_time = jiffies;
1374 
1375 	hlist_for_each_entry_safe(dup, hlist_safe,
1376 				  &stable_node->hlist, hlist_dup) {
1377 		cond_resched();
1378 		/*
1379 		 * We must walk all stable_node_dup to prune the stale
1380 		 * stable nodes during lookup.
1381 		 *
1382 		 * get_ksm_page can drop the nodes from the
1383 		 * stable_node->hlist if they point to freed pages
1384 		 * (that's why we do a _safe walk). The "dup"
1385 		 * stable_node parameter itself will be freed from
1386 		 * under us if it returns NULL.
1387 		 */
1388 		_tree_page = get_ksm_page(dup, false);
1389 		if (!_tree_page)
1390 			continue;
1391 		nr += 1;
1392 		if (is_page_sharing_candidate(dup)) {
1393 			if (!found ||
1394 			    dup->rmap_hlist_len > found_rmap_hlist_len) {
1395 				if (found)
1396 					put_page(tree_page);
1397 				found = dup;
1398 				found_rmap_hlist_len = found->rmap_hlist_len;
1399 				tree_page = _tree_page;
1400 
1401 				/* skip put_page for found dup */
1402 				if (!prune_stale_stable_nodes)
1403 					break;
1404 				continue;
1405 			}
1406 		}
1407 		put_page(_tree_page);
1408 	}
1409 
1410 	if (found) {
1411 		/*
1412 		 * nr is counting all dups in the chain only if
1413 		 * prune_stale_stable_nodes is true, otherwise we may
1414 		 * break the loop at nr == 1 even if there are
1415 		 * multiple entries.
1416 		 */
1417 		if (prune_stale_stable_nodes && nr == 1) {
1418 			/*
1419 			 * If there were more than one entry we would
1420 			 * corrupt memory, so better BUG_ON now. In
1421 			 * ksmd context, with no lock held, it's not
1422 			 * even fatal.
1423 			 */
1424 			BUG_ON(stable_node->hlist.first->next);
1425 
1426 			/*
1427 			 * There's just one entry and it is below the
1428 			 * deduplication limit so drop the chain.
1429 			 */
1430 			rb_replace_node(&stable_node->node, &found->node,
1431 					root);
1432 			free_stable_node(stable_node);
1433 			ksm_stable_node_chains--;
1434 			ksm_stable_node_dups--;
1435 			/*
1436 			 * NOTE: the caller depends on the stable_node
1437 			 * to be equal to stable_node_dup if the chain
1438 			 * was collapsed.
1439 			 */
1440 			*_stable_node = found;
1441 			/*
1442 			 * Just for robustness, as stable_node is
1443 			 * otherwise left as a stale pointer; the
1444 			 * compiler should optimize it away at build
1445 			 * time.
1446 			 */
1447 			stable_node = NULL;
1448 		} else if (stable_node->hlist.first != &found->hlist_dup &&
1449 			   __is_page_sharing_candidate(found, 1)) {
1450 			/*
1451 			 * If the found stable_node dup can accept one
1452 			 * more future merge (in addition to the one
1453 			 * that is underway) and is not at the head of
1454 			 * the chain, put it there so next search will
1455 			 * be quicker in the !prune_stale_stable_nodes
1456 			 * case.
1457 			 *
1458 			 * NOTE: it would be inaccurate to use nr > 1
1459 			 * instead of checking the hlist.first pointer
1460 			 * directly, because in the
1461 			 * prune_stale_stable_nodes case "nr" isn't
1462 			 * the position of the found dup in the chain,
1463 			 * but the total number of dups in the chain.
1464 			 */
1465 			hlist_del(&found->hlist_dup);
1466 			hlist_add_head(&found->hlist_dup,
1467 				       &stable_node->hlist);
1468 		}
1469 	}
1470 
1471 	*_stable_node_dup = found;
1472 	return tree_page;
1473 }
1474 
1475 static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1476 					       struct rb_root *root)
1477 {
1478 	if (!is_stable_node_chain(stable_node))
1479 		return stable_node;
1480 	if (hlist_empty(&stable_node->hlist)) {
1481 		free_stable_node_chain(stable_node, root);
1482 		return NULL;
1483 	}
1484 	return hlist_entry(stable_node->hlist.first,
1485 			   typeof(*stable_node), hlist_dup);
1486 }
1487 
1488 /*
1489  * As with get_ksm_page, this function can free the *_stable_node and
1490  * *_stable_node_dup if the returned tree_page is NULL.
1491  *
1492  * It can also free and overwrite *_stable_node with the found
1493  * stable_node_dup if the chain is collapsed (in which case
1494  * *_stable_node will be equal to *_stable_node_dup, as if the chain
1495  * never existed). It's up to the caller to verify tree_page is not
1496  * NULL before dereferencing *_stable_node or *_stable_node_dup.
1497  *
1498  * *_stable_node_dup is really a second output parameter of this
1499  * function and will be overwritten in all cases, the caller doesn't
1500  * need to initialize it.
1501  */
1502 static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1503 					struct stable_node **_stable_node,
1504 					struct rb_root *root,
1505 					bool prune_stale_stable_nodes)
1506 {
1507 	struct stable_node *stable_node = *_stable_node;
1508 	if (!is_stable_node_chain(stable_node)) {
1509 		if (is_page_sharing_candidate(stable_node)) {
1510 			*_stable_node_dup = stable_node;
1511 			return get_ksm_page(stable_node, false);
1512 		}
1513 		/*
1514 		 * _stable_node_dup set to NULL means the stable_node
1515 		 * reached the ksm_max_page_sharing limit.
1516 		 */
1517 		*_stable_node_dup = NULL;
1518 		return NULL;
1519 	}
1520 	return stable_node_dup(_stable_node_dup, _stable_node, root,
1521 			       prune_stale_stable_nodes);
1522 }
1523 
1524 static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1525 						struct stable_node **s_n,
1526 						struct rb_root *root)
1527 {
1528 	return __stable_node_chain(s_n_d, s_n, root, true);
1529 }
1530 
1531 static __always_inline struct page *chain(struct stable_node **s_n_d,
1532 					  struct stable_node *s_n,
1533 					  struct rb_root *root)
1534 {
1535 	struct stable_node *old_stable_node = s_n;
1536 	struct page *tree_page;
1537 
1538 	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
1539 	/* not pruning dups so s_n cannot have changed */
1540 	VM_BUG_ON(s_n != old_stable_node);
1541 	return tree_page;
1542 }
1543 
1544 /*
1545  * stable_tree_search - search for page inside the stable tree
1546  *
1547  * This function checks if there is a page inside the stable tree
1548  * with identical content to the page that we are scanning right now.
1549  *
1550  * This function returns the stable tree node of identical content if found,
1551  * NULL otherwise.
1552  */
1553 static struct page *stable_tree_search(struct page *page)
1554 {
1555 	int nid;
1556 	struct rb_root *root;
1557 	struct rb_node **new;
1558 	struct rb_node *parent;
1559 	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1560 	struct stable_node *page_node;
1561 
1562 	page_node = page_stable_node(page);
1563 	if (page_node && page_node->head != &migrate_nodes) {
1564 		/* ksm page forked */
1565 		get_page(page);
1566 		return page;
1567 	}
1568 
1569 	nid = get_kpfn_nid(page_to_pfn(page));
1570 	root = root_stable_tree + nid;
1571 again:
1572 	new = &root->rb_node;
1573 	parent = NULL;
1574 
1575 	while (*new) {
1576 		struct page *tree_page;
1577 		int ret;
1578 
1579 		cond_resched();
1580 		stable_node = rb_entry(*new, struct stable_node, node);
1581 		stable_node_any = NULL;
1582 		tree_page = chain_prune(&stable_node_dup, &stable_node,	root);
1583 		/*
1584 		 * NOTE: stable_node may have been freed by
1585 		 * chain_prune() if the returned stable_node_dup is
1586 		 * not NULL. stable_node_dup may have been inserted in
1587 		 * the rbtree instead as a regular stable_node (in
1588 		 * order to collapse the stable_node chain if a single
1589 		 * stable_node dup was found in it). In that case the
1590 		 * stable_node is overwritten by the callee to point
1591 		 * to the stable_node_dup that was collapsed in the
1592 		 * stable rbtree, and stable_node will be equal to
1593 		 * stable_node_dup as if the chain never existed.
1594 		 */
1595 		if (!stable_node_dup) {
1596 			/*
1597 			 * Either all stable_node dups were full in
1598 			 * this stable_node chain, or this chain was
1599 			 * empty and should be rb_erased.
1600 			 */
1601 			stable_node_any = stable_node_dup_any(stable_node,
1602 							      root);
1603 			if (!stable_node_any) {
1604 				/* rb_erase just run */
1605 				goto again;
1606 			}
1607 			/*
1608 			 * Take any of the stable_node dups page of
1609 			 * this stable_node chain to let the tree walk
1610 			 * continue. All KSM pages belonging to the
1611 			 * stable_node dups in a stable_node chain
1612 			 * have the same content and they're
1613 			 * wrprotected at all times. Any will work
1614 			 * fine to continue the walk.
1615 			 */
1616 			tree_page = get_ksm_page(stable_node_any, false);
1617 		}
1618 		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1619 		if (!tree_page) {
1620 			/*
1621 			 * If we walked over a stale stable_node,
1622 			 * get_ksm_page() will call rb_erase() and it
1623 			 * may rebalance the tree from under us. So
1624 			 * restart the search from scratch. Returning
1625 			 * NULL would be safe too, but we'd generate
1626 			 * false negative insertions just because some
1627 			 * stable_node was stale.
1628 			 */
1629 			goto again;
1630 		}
1631 
1632 		ret = memcmp_pages(page, tree_page);
1633 		put_page(tree_page);
1634 
1635 		parent = *new;
1636 		if (ret < 0)
1637 			new = &parent->rb_left;
1638 		else if (ret > 0)
1639 			new = &parent->rb_right;
1640 		else {
1641 			if (page_node) {
1642 				VM_BUG_ON(page_node->head != &migrate_nodes);
1643 				/*
1644 				 * Test if the migrated page should be merged
1645 				 * into a stable node dup. If the mapcount is
1646 				 * 1 we can migrate it with another KSM page
1647 				 * without adding it to the chain.
1648 				 */
1649 				if (page_mapcount(page) > 1)
1650 					goto chain_append;
1651 			}
1652 
1653 			if (!stable_node_dup) {
1654 				/*
1655 				 * If the stable_node is a chain and
1656 				 * we got a payload match in memcmp
1657 				 * but we cannot merge the scanned
1658 				 * page into any of the existing
1659 				 * stable_node dups because they're
1660 				 * all full, we need to wait for the
1661 				 * scanned page to find itself a match
1662 				 * in the unstable tree, to create a
1663 				 * brand new KSM page to add later to
1664 				 * the dups of this stable_node.
1665 				 */
1666 				return NULL;
1667 			}
1668 
1669 			/*
1670 			 * Lock and unlock the stable_node's page (which
1671 			 * might already have been migrated) so that page
1672 			 * migration is sure to notice its raised count.
1673 			 * It would be more elegant to return stable_node
1674 			 * than kpage, but that involves more changes.
1675 			 */
1676 			tree_page = get_ksm_page(stable_node_dup, true);
1677 			if (unlikely(!tree_page))
1678 				/*
1679 				 * The tree may have been rebalanced,
1680 				 * so re-evaluate parent and new.
1681 				 */
1682 				goto again;
1683 			unlock_page(tree_page);
1684 
1685 			if (get_kpfn_nid(stable_node_dup->kpfn) !=
1686 			    NUMA(stable_node_dup->nid)) {
1687 				put_page(tree_page);
1688 				goto replace;
1689 			}
1690 			return tree_page;
1691 		}
1692 	}
1693 
1694 	if (!page_node)
1695 		return NULL;
1696 
1697 	list_del(&page_node->list);
1698 	DO_NUMA(page_node->nid = nid);
1699 	rb_link_node(&page_node->node, parent, new);
1700 	rb_insert_color(&page_node->node, root);
1701 out:
1702 	if (is_page_sharing_candidate(page_node)) {
1703 		get_page(page);
1704 		return page;
1705 	} else
1706 		return NULL;
1707 
1708 replace:
1709 	/*
1710 	 * If stable_node was a chain and chain_prune collapsed it,
1711 	 * stable_node has been updated to be the new regular
1712 	 * stable_node. A collapse of the chain is indistinguishable
1713 	 * from the case there was no chain in the stable
1714 	 * rbtree. Otherwise stable_node is the chain and
1715 	 * stable_node_dup is the dup to replace.
1716 	 */
1717 	if (stable_node_dup == stable_node) {
1718 		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1719 		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1720 		/* there is no chain */
1721 		if (page_node) {
1722 			VM_BUG_ON(page_node->head != &migrate_nodes);
1723 			list_del(&page_node->list);
1724 			DO_NUMA(page_node->nid = nid);
1725 			rb_replace_node(&stable_node_dup->node,
1726 					&page_node->node,
1727 					root);
1728 			if (is_page_sharing_candidate(page_node))
1729 				get_page(page);
1730 			else
1731 				page = NULL;
1732 		} else {
1733 			rb_erase(&stable_node_dup->node, root);
1734 			page = NULL;
1735 		}
1736 	} else {
1737 		VM_BUG_ON(!is_stable_node_chain(stable_node));
1738 		__stable_node_dup_del(stable_node_dup);
1739 		if (page_node) {
1740 			VM_BUG_ON(page_node->head != &migrate_nodes);
1741 			list_del(&page_node->list);
1742 			DO_NUMA(page_node->nid = nid);
1743 			stable_node_chain_add_dup(page_node, stable_node);
1744 			if (is_page_sharing_candidate(page_node))
1745 				get_page(page);
1746 			else
1747 				page = NULL;
1748 		} else {
1749 			page = NULL;
1750 		}
1751 	}
1752 	stable_node_dup->head = &migrate_nodes;
1753 	list_add(&stable_node_dup->list, stable_node_dup->head);
1754 	return page;
1755 
1756 chain_append:
1757 	/* stable_node_dup could be NULL if it reached the limit */
1758 	if (!stable_node_dup)
1759 		stable_node_dup = stable_node_any;
1760 	/*
1761 	 * If stable_node was a chain and chain_prune collapsed it,
1762 	 * stable_node has been updated to be the new regular
1763 	 * stable_node. A collapse of the chain is indistinguishable
1764 	 * from the case there was no chain in the stable
1765 	 * rbtree. Otherwise stable_node is the chain and
1766 	 * stable_node_dup is the dup to replace.
1767 	 */
1768 	if (stable_node_dup == stable_node) {
1769 		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1770 		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1771 		/* chain is missing so create it */
1772 		stable_node = alloc_stable_node_chain(stable_node_dup,
1773 						      root);
1774 		if (!stable_node)
1775 			return NULL;
1776 	}
1777 	/*
1778 	 * Add this stable_node dup that was
1779 	 * migrated to the stable_node chain
1780 	 * of the current nid for this page
1781 	 * content.
1782 	 */
1783 	VM_BUG_ON(!is_stable_node_chain(stable_node));
1784 	VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
1785 	VM_BUG_ON(page_node->head != &migrate_nodes);
1786 	list_del(&page_node->list);
1787 	DO_NUMA(page_node->nid = nid);
1788 	stable_node_chain_add_dup(page_node, stable_node);
1789 	goto out;
1790 }
1791 
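/*
 * A minimal userspace sketch of the chain/dup bookkeeping that
 * stable_tree_search() walks above (illustrative only: the types and
 * helpers below are invented for the example, not kernel API). A
 * "chain" caps how many rmap_items may share one KSM page by spreading
 * them over several "dup" nodes, each backed by its own page copy:
 *
 *	#include <stddef.h>
 *
 *	#define MAX_PAGE_SHARING 256	// mirrors ksm_max_page_sharing
 *
 *	struct dup {
 *		struct dup *next;	// sibling dups, identical content
 *		int rmap_len;		// mappings sharing this page copy
 *	};
 *
 *	// Return a dup that still has room, or NULL if all dups are
 *	// full -- the "return NULL" case in stable_tree_search() where
 *	// the scanned page must go find a match in the unstable tree.
 *	static struct dup *pick_dup_with_room(struct dup *chain)
 *	{
 *		for (struct dup *d = chain; d; d = d->next)
 *			if (d->rmap_len < MAX_PAGE_SHARING)
 *				return d;
 *		return NULL;
 *	}
 */
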
1792 /*
1793  * stable_tree_insert - insert stable tree node pointing to new ksm page
1794  * into the stable tree.
1795  *
1796  * This function returns the stable tree node just allocated on success,
1797  * NULL otherwise.
1798  */
1799 static struct stable_node *stable_tree_insert(struct page *kpage)
1800 {
1801 	int nid;
1802 	unsigned long kpfn;
1803 	struct rb_root *root;
1804 	struct rb_node **new;
1805 	struct rb_node *parent;
1806 	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1807 	bool need_chain = false;
1808 
1809 	kpfn = page_to_pfn(kpage);
1810 	nid = get_kpfn_nid(kpfn);
1811 	root = root_stable_tree + nid;
1812 again:
1813 	parent = NULL;
1814 	new = &root->rb_node;
1815 
1816 	while (*new) {
1817 		struct page *tree_page;
1818 		int ret;
1819 
1820 		cond_resched();
1821 		stable_node = rb_entry(*new, struct stable_node, node);
1822 		stable_node_any = NULL;
1823 		tree_page = chain(&stable_node_dup, stable_node, root);
1824 		if (!stable_node_dup) {
1825 			/*
1826 			 * Either all stable_node dups were full in
1827 			 * this stable_node chain, or this chain was
1828 			 * empty and should be rb_erased.
1829 			 */
1830 			stable_node_any = stable_node_dup_any(stable_node,
1831 							      root);
1832 			if (!stable_node_any) {
1833 				/* rb_erase() was just run */
1834 				goto again;
1835 			}
1836 			/*
1837 			 * Take the page of any of the stable_node dups
1838 			 * in this stable_node chain to let the tree walk
1839 			 * continue. All KSM pages belonging to the
1840 			 * stable_node dups in a stable_node chain
1841 			 * have the same content and they're
1842 			 * write-protected at all times. Any of them will
1843 			 * work fine to continue the walk.
1844 			 */
1845 			tree_page = get_ksm_page(stable_node_any, false);
1846 		}
1847 		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1848 		if (!tree_page) {
1849 			/*
1850 			 * If we walked over a stale stable_node,
1851 			 * get_ksm_page() will call rb_erase() and it
1852 			 * may rebalance the tree from under us. So
1853 			 * restart the search from scratch. Returning
1854 			 * NULL would be safe too, but we'd generate
1855 			 * false negative insertions just because some
1856 			 * stable_node was stale.
1857 			 */
1858 			goto again;
1859 		}
1860 
1861 		ret = memcmp_pages(kpage, tree_page);
1862 		put_page(tree_page);
1863 
1864 		parent = *new;
1865 		if (ret < 0)
1866 			new = &parent->rb_left;
1867 		else if (ret > 0)
1868 			new = &parent->rb_right;
1869 		else {
1870 			need_chain = true;
1871 			break;
1872 		}
1873 	}
1874 
1875 	stable_node_dup = alloc_stable_node();
1876 	if (!stable_node_dup)
1877 		return NULL;
1878 
1879 	INIT_HLIST_HEAD(&stable_node_dup->hlist);
1880 	stable_node_dup->kpfn = kpfn;
1881 	set_page_stable_node(kpage, stable_node_dup);
1882 	stable_node_dup->rmap_hlist_len = 0;
1883 	DO_NUMA(stable_node_dup->nid = nid);
1884 	if (!need_chain) {
1885 		rb_link_node(&stable_node_dup->node, parent, new);
1886 		rb_insert_color(&stable_node_dup->node, root);
1887 	} else {
1888 		if (!is_stable_node_chain(stable_node)) {
1889 			struct stable_node *orig = stable_node;
1890 			/* chain is missing so create it */
1891 			stable_node = alloc_stable_node_chain(orig, root);
1892 			if (!stable_node) {
1893 				free_stable_node(stable_node_dup);
1894 				return NULL;
1895 			}
1896 		}
1897 		stable_node_chain_add_dup(stable_node_dup, stable_node);
1898 	}
1899 
1900 	return stable_node_dup;
1901 }
1902 
1903 /*
1904  * unstable_tree_search_insert - search for identical page,
1905  * else insert rmap_item into the unstable tree.
1906  *
1907  * This function searches for a page in the unstable tree identical to the
1908  * page currently being scanned; and if no identical page is found in the
1909  * tree, we insert rmap_item as a new object into the unstable tree.
1910  *
1911  * This function returns pointer to rmap_item found to be identical
1912  * to the currently scanned page, NULL otherwise.
1913  *
1914  * This function does both searching and inserting, because they share
1915  * the same walking algorithm in an rbtree.
1916  */
1917 static
1918 struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1919 					      struct page *page,
1920 					      struct page **tree_pagep)
1921 {
1922 	struct rb_node **new;
1923 	struct rb_root *root;
1924 	struct rb_node *parent = NULL;
1925 	int nid;
1926 
1927 	nid = get_kpfn_nid(page_to_pfn(page));
1928 	root = root_unstable_tree + nid;
1929 	new = &root->rb_node;
1930 
1931 	while (*new) {
1932 		struct rmap_item *tree_rmap_item;
1933 		struct page *tree_page;
1934 		int ret;
1935 
1936 		cond_resched();
1937 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1938 		tree_page = get_mergeable_page(tree_rmap_item);
1939 		if (!tree_page)
1940 			return NULL;
1941 
1942 		/*
1943 		 * Don't substitute a ksm page for a forked page.
1944 		 */
1945 		if (page == tree_page) {
1946 			put_page(tree_page);
1947 			return NULL;
1948 		}
1949 
1950 		ret = memcmp_pages(page, tree_page);
1951 
1952 		parent = *new;
1953 		if (ret < 0) {
1954 			put_page(tree_page);
1955 			new = &parent->rb_left;
1956 		} else if (ret > 0) {
1957 			put_page(tree_page);
1958 			new = &parent->rb_right;
1959 		} else if (!ksm_merge_across_nodes &&
1960 			   page_to_nid(tree_page) != nid) {
1961 			/*
1962 			 * If tree_page has been migrated to another NUMA node,
1963 			 * it will be flushed out and put in the right unstable
1964 			 * tree next time: only merge with it when across_nodes.
1965 			 */
1966 			put_page(tree_page);
1967 			return NULL;
1968 		} else {
1969 			*tree_pagep = tree_page;
1970 			return tree_rmap_item;
1971 		}
1972 	}
1973 
1974 	rmap_item->address |= UNSTABLE_FLAG;
1975 	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1976 	DO_NUMA(rmap_item->nid = nid);
1977 	rb_link_node(&rmap_item->node, parent, new);
1978 	rb_insert_color(&rmap_item->node, root);
1979 
1980 	ksm_pages_unshared++;
1981 	return NULL;
1982 }
1983 
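/*
 * The search-and-insert walk above follows the classic rb_link_node()
 * pattern: descend while remembering the parent and the link that was
 * taken, then splice the new node into that very link on a miss. A
 * minimal userspace sketch of the same idea with a plain binary tree
 * keyed by memcmp() of the payload (illustrative only, not kernel
 * code):
 *
 *	#include <string.h>
 *	#include <stddef.h>
 *
 *	struct node {
 *		struct node *left, *right;
 *		char payload[64];
 *	};
 *
 *	// Return the matching node, or link 'item' in where the search
 *	// fell off the tree -- one walk does both jobs, exactly as in
 *	// unstable_tree_search_insert().
 *	static struct node *search_insert(struct node **root,
 *					  struct node *item)
 *	{
 *		struct node **new = root;
 *
 *		while (*new) {
 *			int ret = memcmp(item->payload, (*new)->payload,
 *					 sizeof(item->payload));
 *			if (ret < 0)
 *				new = &(*new)->left;
 *			else if (ret > 0)
 *				new = &(*new)->right;
 *			else
 *				return *new;	// identical content found
 *		}
 *		item->left = item->right = NULL;
 *		*new = item;			// insert at the failed link
 *		return NULL;
 *	}
 */
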
1984 /*
1985  * stable_tree_append - add another rmap_item to the linked list of
1986  * rmap_items hanging off a given node of the stable tree, all sharing
1987  * the same ksm page.
1988  */
1989 static void stable_tree_append(struct rmap_item *rmap_item,
1990 			       struct stable_node *stable_node,
1991 			       bool max_page_sharing_bypass)
1992 {
1993 	/*
1994 	 * rmap won't find this mapping if we don't insert the
1995 	 * rmap_item in the right stable_node
1996 	 * duplicate. page_migration could break later if rmap breaks,
1997 	 * so we can as well crash here. We really need to check for
1998 	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
1999 	 * for other negative values as an undeflow if detected here
2000 	 * for the first time (and not when decreasing rmap_hlist_len)
2001 	 * would be sign of memory corruption in the stable_node.
2002 	 */
2003 	BUG_ON(stable_node->rmap_hlist_len < 0);
2004 
2005 	stable_node->rmap_hlist_len++;
2006 	if (!max_page_sharing_bypass)
2007 		/* possibly non-fatal but unexpected overflow, only warn */
2008 		WARN_ON_ONCE(stable_node->rmap_hlist_len >
2009 			     ksm_max_page_sharing);
2010 
2011 	rmap_item->head = stable_node;
2012 	rmap_item->address |= STABLE_FLAG;
2013 	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2014 
2015 	if (rmap_item->hlist.next)
2016 		ksm_pages_sharing++;
2017 	else
2018 		ksm_pages_shared++;
2019 }
2020 
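/*
 * Note the accounting at the end of stable_tree_append(): the first
 * rmap_item on a stable_node makes the page "shared" (one KSM page now
 * exists), every further rmap_item counts as "sharing" (one more page
 * saved). The pages_sharing/pages_shared ratio is thus a direct
 * measure of how profitable KSM is. A hedged sketch of the same
 * head-insert rule (illustrative only; the names are invented for the
 * example):
 *
 *	struct item { struct item *next; };
 *
 *	static unsigned long pages_shared, pages_sharing;
 *
 *	static void append(struct item *it, struct item **head)
 *	{
 *		it->next = *head;
 *		*head = it;
 *		if (it->next)
 *			pages_sharing++;	// joined an existing page
 *		else
 *			pages_shared++;		// first mapping of the page
 *	}
 */
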
2021 /*
2022  * cmp_and_merge_page - first see if page can be merged into the stable tree;
2023  * if not, compare checksum to previous and if it's the same, see if page can
2024  * be inserted into the unstable tree, or merged with a page already there and
2025  * both transferred to the stable tree.
2026  *
2027  * @page: the page for which we are searching an identical page
2028  * @rmap_item: the reverse mapping into the virtual address of this page
2029  */
2030 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2031 {
2032 	struct mm_struct *mm = rmap_item->mm;
2033 	struct rmap_item *tree_rmap_item;
2034 	struct page *tree_page = NULL;
2035 	struct stable_node *stable_node;
2036 	struct page *kpage;
2037 	unsigned int checksum;
2038 	int err;
2039 	bool max_page_sharing_bypass = false;
2040 
2041 	stable_node = page_stable_node(page);
2042 	if (stable_node) {
2043 		if (stable_node->head != &migrate_nodes &&
2044 		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2045 		    NUMA(stable_node->nid)) {
2046 			stable_node_dup_del(stable_node);
2047 			stable_node->head = &migrate_nodes;
2048 			list_add(&stable_node->list, stable_node->head);
2049 		}
2050 		if (stable_node->head != &migrate_nodes &&
2051 		    rmap_item->head == stable_node)
2052 			return;
2053 		/*
2054 		 * If it's a KSM fork, allow it to go over the sharing limit
2055 		 * without warnings.
2056 		 */
2057 		if (!is_page_sharing_candidate(stable_node))
2058 			max_page_sharing_bypass = true;
2059 	}
2060 
2061 	/* We first start with searching the page inside the stable tree */
2062 	kpage = stable_tree_search(page);
2063 	if (kpage == page && rmap_item->head == stable_node) {
2064 		put_page(kpage);
2065 		return;
2066 	}
2067 
2068 	remove_rmap_item_from_tree(rmap_item);
2069 
2070 	if (kpage) {
2071 		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2072 		if (!err) {
2073 			/*
2074 			 * The page was successfully merged:
2075 			 * add its rmap_item to the stable tree.
2076 			 */
2077 			lock_page(kpage);
2078 			stable_tree_append(rmap_item, page_stable_node(kpage),
2079 					   max_page_sharing_bypass);
2080 			unlock_page(kpage);
2081 		}
2082 		put_page(kpage);
2083 		return;
2084 	}
2085 
2086 	/*
2087 	 * If the hash value of the page has changed from the last time
2088 	 * we calculated it, this page is changing frequently: therefore we
2089 	 * don't want to insert it in the unstable tree, and we don't want
2090 	 * to waste our time searching for something identical to it there.
2091 	 */
2092 	checksum = calc_checksum(page);
2093 	if (rmap_item->oldchecksum != checksum) {
2094 		rmap_item->oldchecksum = checksum;
2095 		return;
2096 	}
2097 
2098 	/*
2099 	 * The page has the same checksum as an empty page. Attempt to merge
2100 	 * it with the appropriate zero page if the user enabled this via sysfs.
2101 	 */
2102 	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2103 		struct vm_area_struct *vma;
2104 
2105 		down_read(&mm->mmap_sem);
2106 		vma = find_mergeable_vma(mm, rmap_item->address);
2107 		err = vma ? try_to_merge_one_page(vma, page,
2108 				ZERO_PAGE(rmap_item->address)) : 0;
2109 		up_read(&mm->mmap_sem);
2110 		/*
2111 		 * In case of failure, the page was not really empty, so we
2112 		 * need to continue. Otherwise we're done.
2113 		 */
2114 		if (!err)
2115 			return;
2116 	}
2117 	tree_rmap_item =
2118 		unstable_tree_search_insert(rmap_item, page, &tree_page);
2119 	if (tree_rmap_item) {
2120 		bool split;
2121 
2122 		kpage = try_to_merge_two_pages(rmap_item, page,
2123 						tree_rmap_item, tree_page);
2124 		/*
2125 		 * If both pages we tried to merge belong to the same compound
2126 		 * page, then we actually ended up increasing the reference
2127 		 * count of the same compound page twice, and split_huge_page
2128 		 * failed.
2129 		 * Here we set a flag if that happened, and we use it later to
2130 		 * try split_huge_page again. Since we call put_page right
2131 		 * afterwards, the reference count will be correct and
2132 		 * split_huge_page should succeed.
2133 		 */
2134 		split = PageTransCompound(page)
2135 			&& compound_head(page) == compound_head(tree_page);
2136 		put_page(tree_page);
2137 		if (kpage) {
2138 			/*
2139 			 * The pages were successfully merged: insert new
2140 			 * node in the stable tree and add both rmap_items.
2141 			 */
2142 			lock_page(kpage);
2143 			stable_node = stable_tree_insert(kpage);
2144 			if (stable_node) {
2145 				stable_tree_append(tree_rmap_item, stable_node,
2146 						   false);
2147 				stable_tree_append(rmap_item, stable_node,
2148 						   false);
2149 			}
2150 			unlock_page(kpage);
2151 
2152 			/*
2153 			 * If we fail to insert the page into the stable tree,
2154 			 * we will have 2 virtual addresses that are pointing
2155 			 * to a ksm page left outside the stable tree,
2156 			 * in which case we need to break_cow on both.
2157 			 */
2158 			if (!stable_node) {
2159 				break_cow(tree_rmap_item);
2160 				break_cow(rmap_item);
2161 			}
2162 		} else if (split) {
2163 			/*
2164 			 * We are here if we tried to merge two pages and
2165 			 * failed because they both belonged to the same
2166 			 * compound page. We will split the page now, but no
2167 			 * merging will take place.
2168 			 * We do not want to add the cost of a full lock; if
2169 			 * the page is locked, it is better to skip it and
2170 			 * perhaps try again later.
2171 			 */
2172 			if (!trylock_page(page))
2173 				return;
2174 			split_huge_page(page);
2175 			unlock_page(page);
2176 		}
2177 	}
2178 }
2179 
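/*
 * The checksum test in cmp_and_merge_page() is what keeps volatile
 * pages out of the unstable tree: a page only becomes a merge
 * candidate once two consecutive scans see the same hash. A minimal
 * userspace sketch of that gate (illustrative only; djb2 stands in
 * here for the kernel's jhash2-based calc_checksum()):
 *
 *	#include <stdint.h>
 *	#include <stddef.h>
 *
 *	static uint32_t checksum(const unsigned char *p, size_t len)
 *	{
 *		uint32_t h = 5381;
 *
 *		while (len--)
 *			h = h * 33 + *p++;
 *		return h;
 *	}
 *
 *	// Returns 1 only when the page was unchanged since last scan.
 *	static int stable_enough(const unsigned char *page, size_t len,
 *				 uint32_t *oldsum)
 *	{
 *		uint32_t sum = checksum(page, len);
 *
 *		if (*oldsum != sum) {
 *			*oldsum = sum;	// remember, retry next scan
 *			return 0;
 *		}
 *		return 1;
 *	}
 */
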
2180 static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
2181 					    struct rmap_item **rmap_list,
2182 					    unsigned long addr)
2183 {
2184 	struct rmap_item *rmap_item;
2185 
2186 	while (*rmap_list) {
2187 		rmap_item = *rmap_list;
2188 		if ((rmap_item->address & PAGE_MASK) == addr)
2189 			return rmap_item;
2190 		if (rmap_item->address > addr)
2191 			break;
2192 		*rmap_list = rmap_item->rmap_list;
2193 		remove_rmap_item_from_tree(rmap_item);
2194 		free_rmap_item(rmap_item);
2195 	}
2196 
2197 	rmap_item = alloc_rmap_item();
2198 	if (rmap_item) {
2199 		/* It has already been zeroed */
2200 		rmap_item->mm = mm_slot->mm;
2201 		rmap_item->address = addr;
2202 		rmap_item->rmap_list = *rmap_list;
2203 		*rmap_list = rmap_item;
2204 	}
2205 	return rmap_item;
2206 }
2207 
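/*
 * get_next_rmap_item() keeps the per-mm rmap_list sorted by address by
 * walking it in lockstep with the address scan: reuse an entry that
 * matches addr, free entries the scan has already passed, allocate on
 * a miss. A hedged sketch of the same cursor discipline (illustrative
 * only: free_item() and alloc_at() are invented placeholders):
 *
 *	struct ritem {
 *		struct ritem *next;
 *		unsigned long addr;
 *	};
 *
 *	static struct ritem *next_item(struct ritem **cursor,
 *				       unsigned long addr)
 *	{
 *		while (*cursor) {
 *			struct ritem *it = *cursor;
 *
 *			if (it->addr == addr)
 *				return it;	// reuse in place
 *			if (it->addr > addr)
 *				break;		// new item goes before it
 *			*cursor = it->next;	// stale: unlink and free
 *			free_item(it);
 *		}
 *		return alloc_at(cursor, addr);	// splice a fresh item in
 *	}
 */
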
2208 static struct rmap_item *scan_get_next_rmap_item(struct page **page)
2209 {
2210 	struct mm_struct *mm;
2211 	struct mm_slot *slot;
2212 	struct vm_area_struct *vma;
2213 	struct rmap_item *rmap_item;
2214 	int nid;
2215 
2216 	if (list_empty(&ksm_mm_head.mm_list))
2217 		return NULL;
2218 
2219 	slot = ksm_scan.mm_slot;
2220 	if (slot == &ksm_mm_head) {
2221 		/*
2222 		 * A number of pages can hang around indefinitely on per-cpu
2223 		 * pagevecs, raised page count preventing write_protect_page
2224 		 * from merging them.  Though it doesn't really matter much,
2225 		 * it is puzzling to see some stuck in pages_volatile until
2226 		 * other activity jostles them out, and they also prevented
2227 		 * LTP's KSM test from succeeding deterministically; so drain
2228 		 * them here (here rather than on entry to ksm_do_scan(),
2229 		 * so we don't IPI too often when pages_to_scan is set low).
2230 		 */
2231 		lru_add_drain_all();
2232 
2233 		/*
2234 		 * Whereas stale stable_nodes on the stable_tree itself
2235 		 * get pruned in the regular course of stable_tree_search(),
2236 		 * those moved out to the migrate_nodes list can accumulate:
2237 		 * so prune them once before each full scan.
2238 		 */
2239 		if (!ksm_merge_across_nodes) {
2240 			struct stable_node *stable_node, *next;
2241 			struct page *page;
2242 
2243 			list_for_each_entry_safe(stable_node, next,
2244 						 &migrate_nodes, list) {
2245 				page = get_ksm_page(stable_node, false);
2246 				if (page)
2247 					put_page(page);
2248 				cond_resched();
2249 			}
2250 		}
2251 
2252 		for (nid = 0; nid < ksm_nr_node_ids; nid++)
2253 			root_unstable_tree[nid] = RB_ROOT;
2254 
2255 		spin_lock(&ksm_mmlist_lock);
2256 		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
2257 		ksm_scan.mm_slot = slot;
2258 		spin_unlock(&ksm_mmlist_lock);
2259 		/*
2260 		 * Although we tested list_empty() above, a racing __ksm_exit
2261 		 * of the last mm on the list may have removed it since then.
2262 		 */
2263 		if (slot == &ksm_mm_head)
2264 			return NULL;
2265 next_mm:
2266 		ksm_scan.address = 0;
2267 		ksm_scan.rmap_list = &slot->rmap_list;
2268 	}
2269 
2270 	mm = slot->mm;
2271 	down_read(&mm->mmap_sem);
2272 	if (ksm_test_exit(mm))
2273 		vma = NULL;
2274 	else
2275 		vma = find_vma(mm, ksm_scan.address);
2276 
2277 	for (; vma; vma = vma->vm_next) {
2278 		if (!(vma->vm_flags & VM_MERGEABLE))
2279 			continue;
2280 		if (ksm_scan.address < vma->vm_start)
2281 			ksm_scan.address = vma->vm_start;
2282 		if (!vma->anon_vma)
2283 			ksm_scan.address = vma->vm_end;
2284 
2285 		while (ksm_scan.address < vma->vm_end) {
2286 			if (ksm_test_exit(mm))
2287 				break;
2288 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
2289 			if (IS_ERR_OR_NULL(*page)) {
2290 				ksm_scan.address += PAGE_SIZE;
2291 				cond_resched();
2292 				continue;
2293 			}
2294 			if (PageAnon(*page)) {
2295 				flush_anon_page(vma, *page, ksm_scan.address);
2296 				flush_dcache_page(*page);
2297 				rmap_item = get_next_rmap_item(slot,
2298 					ksm_scan.rmap_list, ksm_scan.address);
2299 				if (rmap_item) {
2300 					ksm_scan.rmap_list =
2301 							&rmap_item->rmap_list;
2302 					ksm_scan.address += PAGE_SIZE;
2303 				} else
2304 					put_page(*page);
2305 				up_read(&mm->mmap_sem);
2306 				return rmap_item;
2307 			}
2308 			put_page(*page);
2309 			ksm_scan.address += PAGE_SIZE;
2310 			cond_resched();
2311 		}
2312 	}
2313 
2314 	if (ksm_test_exit(mm)) {
2315 		ksm_scan.address = 0;
2316 		ksm_scan.rmap_list = &slot->rmap_list;
2317 	}
2318 	/*
2319 	 * Nuke all the rmap_items that are above this current rmap:
2320 	 * because there were no VM_MERGEABLE vmas with such addresses.
2321 	 */
2322 	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
2323 
2324 	spin_lock(&ksm_mmlist_lock);
2325 	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
2326 						struct mm_slot, mm_list);
2327 	if (ksm_scan.address == 0) {
2328 		/*
2329 		 * We've completed a full scan of all vmas, holding mmap_sem
2330 		 * throughout, and found no VM_MERGEABLE: so do the same as
2331 		 * __ksm_exit does to remove this mm from all our lists now.
2332 		 * This applies either when cleaning up after __ksm_exit
2333 		 * (but beware: we can reach here even before __ksm_exit),
2334 		 * or when all VM_MERGEABLE areas have been unmapped (and
2335 		 * mmap_sem then protects against race with MADV_MERGEABLE).
2336 		 */
2337 		hash_del(&slot->link);
2338 		list_del(&slot->mm_list);
2339 		spin_unlock(&ksm_mmlist_lock);
2340 
2341 		free_mm_slot(slot);
2342 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2343 		up_read(&mm->mmap_sem);
2344 		mmdrop(mm);
2345 	} else {
2346 		up_read(&mm->mmap_sem);
2347 		/*
2348 		 * up_read(&mm->mmap_sem) first, because once
2349 		 * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
2350 		 * already have been freed under us by __ksm_exit()
2351 		 * because the "mm_slot" is still hashed and
2352 		 * ksm_scan.mm_slot doesn't point to it anymore.
2353 		 */
2354 		spin_unlock(&ksm_mmlist_lock);
2355 	}
2356 
2357 	/* Repeat until we've completed scanning the whole list */
2358 	slot = ksm_scan.mm_slot;
2359 	if (slot != &ksm_mm_head)
2360 		goto next_mm;
2361 
2362 	ksm_scan.seqnr++;
2363 	return NULL;
2364 }
2365 
2366 /**
2367  * ksm_do_scan  - the ksm scanner main worker function.
2368  * @scan_npages:  number of pages we want to scan before we return.
2369  */
2370 static void ksm_do_scan(unsigned int scan_npages)
2371 {
2372 	struct rmap_item *rmap_item;
2373 	struct page *uninitialized_var(page);
2374 
2375 	while (scan_npages-- && likely(!freezing(current))) {
2376 		cond_resched();
2377 		rmap_item = scan_get_next_rmap_item(&page);
2378 		if (!rmap_item)
2379 			return;
2380 		cmp_and_merge_page(page, rmap_item);
2381 		put_page(page);
2382 	}
2383 }
2384 
2385 static int ksmd_should_run(void)
2386 {
2387 	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
2388 }
2389 
2390 static int ksm_scan_thread(void *nothing)
2391 {
2392 	set_freezable();
2393 	set_user_nice(current, 5);
2394 
2395 	while (!kthread_should_stop()) {
2396 		mutex_lock(&ksm_thread_mutex);
2397 		wait_while_offlining();
2398 		if (ksmd_should_run())
2399 			ksm_do_scan(ksm_thread_pages_to_scan);
2400 		mutex_unlock(&ksm_thread_mutex);
2401 
2402 		try_to_freeze();
2403 
2404 		if (ksmd_should_run()) {
2405 			schedule_timeout_interruptible(
2406 				msecs_to_jiffies(ksm_thread_sleep_millisecs));
2407 		} else {
2408 			wait_event_freezable(ksm_thread_wait,
2409 				ksmd_should_run() || kthread_should_stop());
2410 		}
2411 	}
2412 	return 0;
2413 }
2414 
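/*
 * ksm_scan_thread() above is the standard "batch, then nap or park"
 * daemon shape: scan a batch of pages_to_scan pages, sleep for
 * sleep_millisecs if there is more work, otherwise block until woken.
 * A hedged pthreads rendition of the same shape (illustrative only,
 * not kernel code; do_scan_batch() and nap_millisecs() are invented
 * placeholders):
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
 *	static bool should_run, should_stop;
 *
 *	static void *scan_thread(void *unused)
 *	{
 *		pthread_mutex_lock(&lock);
 *		while (!should_stop) {
 *			if (should_run) {
 *				pthread_mutex_unlock(&lock);
 *				do_scan_batch();	// bounded batch
 *				nap_millisecs(20);	// sleep_millisecs
 *				pthread_mutex_lock(&lock);
 *			} else {
 *				// parked until run/stop state changes
 *				pthread_cond_wait(&wait_cv, &lock);
 *			}
 *		}
 *		pthread_mutex_unlock(&lock);
 *		return NULL;
 *	}
 */
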
2415 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2416 		unsigned long end, int advice, unsigned long *vm_flags)
2417 {
2418 	struct mm_struct *mm = vma->vm_mm;
2419 	int err;
2420 
2421 	switch (advice) {
2422 	case MADV_MERGEABLE:
2423 		/*
2424 		 * Be somewhat over-protective for now!
2425 		 */
2426 		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
2427 				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
2428 				 VM_HUGETLB | VM_MIXEDMAP))
2429 			return 0;		/* just ignore the advice */
2430 
2431 #ifdef VM_SAO
2432 		if (*vm_flags & VM_SAO)
2433 			return 0;
2434 #endif
2435 #ifdef VM_SPARC_ADI
2436 		if (*vm_flags & VM_SPARC_ADI)
2437 			return 0;
2438 #endif
2439 
2440 		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2441 			err = __ksm_enter(mm);
2442 			if (err)
2443 				return err;
2444 		}
2445 
2446 		*vm_flags |= VM_MERGEABLE;
2447 		break;
2448 
2449 	case MADV_UNMERGEABLE:
2450 		if (!(*vm_flags & VM_MERGEABLE))
2451 			return 0;		/* just ignore the advice */
2452 
2453 		if (vma->anon_vma) {
2454 			err = unmerge_ksm_pages(vma, start, end);
2455 			if (err)
2456 				return err;
2457 		}
2458 
2459 		*vm_flags &= ~VM_MERGEABLE;
2460 		break;
2461 	}
2462 
2463 	return 0;
2464 }
2465 
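/*
 * ksm_madvise() is reached from userspace via madvise(2). A minimal,
 * runnable example of opting an anonymous region in and out of KSM
 * (this is the documented MADV_MERGEABLE interface; whether merging
 * actually happens depends on ksmd being enabled via sysfs):
 *
 *	#include <sys/mman.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 * 4096;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return 1;
 *
 *		memset(buf, 0x5a, len);	// identical page contents
 *		if (madvise(buf, len, MADV_MERGEABLE))	// sets VM_MERGEABLE
 *			return 1;
 *		// ... ksmd may now merge these pages ...
 *		if (madvise(buf, len, MADV_UNMERGEABLE)) // break_cow path
 *			return 1;
 *		return 0;
 *	}
 */
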
2466 int __ksm_enter(struct mm_struct *mm)
2467 {
2468 	struct mm_slot *mm_slot;
2469 	int needs_wakeup;
2470 
2471 	mm_slot = alloc_mm_slot();
2472 	if (!mm_slot)
2473 		return -ENOMEM;
2474 
2475 	/* Check ksm_run too?  Would need tighter locking */
2476 	needs_wakeup = list_empty(&ksm_mm_head.mm_list);
2477 
2478 	spin_lock(&ksm_mmlist_lock);
2479 	insert_to_mm_slots_hash(mm, mm_slot);
2480 	/*
2481 	 * When KSM_RUN_MERGE (or KSM_RUN_STOP) is set,
2482 	 * insert just behind the scanning cursor, to let the area settle
2483 	 * down a little; when fork is followed by immediate exec, we don't
2484 	 * want ksmd to waste time setting up and tearing down an rmap_list.
2485 	 *
2486 	 * But when KSM_RUN_UNMERGE is set, it's important to insert ahead
2487 	 * of its scanning cursor, otherwise KSM pages in newly forked mms
2488 	 * will be missed: then we might as well insert at the end of the list.
2489 	 */
2490 	if (ksm_run & KSM_RUN_UNMERGE)
2491 		list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
2492 	else
2493 		list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
2494 	spin_unlock(&ksm_mmlist_lock);
2495 
2496 	set_bit(MMF_VM_MERGEABLE, &mm->flags);
2497 	mmgrab(mm);
2498 
2499 	if (needs_wakeup)
2500 		wake_up_interruptible(&ksm_thread_wait);
2501 
2502 	return 0;
2503 }
2504 
2505 void __ksm_exit(struct mm_struct *mm)
2506 {
2507 	struct mm_slot *mm_slot;
2508 	int easy_to_free = 0;
2509 
2510 	/*
2511 	 * This process is exiting: if it's straightforward (as is the
2512 	 * case when ksmd was never running), free mm_slot immediately.
2513 	 * But if it's at the cursor or has rmap_items linked to it, use
2514 	 * mmap_sem to synchronize with any break_cows before pagetables
2515 	 * are freed, and leave the mm_slot on the list for ksmd to free.
2516 	 * Beware: ksm may already have noticed it exiting and freed the slot.
2517 	 */
2518 
2519 	spin_lock(&ksm_mmlist_lock);
2520 	mm_slot = get_mm_slot(mm);
2521 	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2522 		if (!mm_slot->rmap_list) {
2523 			hash_del(&mm_slot->link);
2524 			list_del(&mm_slot->mm_list);
2525 			easy_to_free = 1;
2526 		} else {
2527 			list_move(&mm_slot->mm_list,
2528 				  &ksm_scan.mm_slot->mm_list);
2529 		}
2530 	}
2531 	spin_unlock(&ksm_mmlist_lock);
2532 
2533 	if (easy_to_free) {
2534 		free_mm_slot(mm_slot);
2535 		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2536 		mmdrop(mm);
2537 	} else if (mm_slot) {
2538 		down_write(&mm->mmap_sem);
2539 		up_write(&mm->mmap_sem);
2540 	}
2541 }
2542 
2543 struct page *ksm_might_need_to_copy(struct page *page,
2544 			struct vm_area_struct *vma, unsigned long address)
2545 {
2546 	struct anon_vma *anon_vma = page_anon_vma(page);
2547 	struct page *new_page;
2548 
2549 	if (PageKsm(page)) {
2550 		if (page_stable_node(page) &&
2551 		    !(ksm_run & KSM_RUN_UNMERGE))
2552 			return page;	/* no need to copy it */
2553 	} else if (!anon_vma) {
2554 		return page;		/* no need to copy it */
2555 	} else if (anon_vma->root == vma->anon_vma->root &&
2556 		 page->index == linear_page_index(vma, address)) {
2557 		return page;		/* still no need to copy it */
2558 	}
2559 	if (!PageUptodate(page))
2560 		return page;		/* let do_swap_page report the error */
2561 
2562 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2563 	if (new_page) {
2564 		copy_user_highpage(new_page, page, address, vma);
2565 
2566 		SetPageDirty(new_page);
2567 		__SetPageUptodate(new_page);
2568 		__SetPageLocked(new_page);
2569 	}
2570 
2571 	return new_page;
2572 }
2573 
2574 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2575 {
2576 	struct stable_node *stable_node;
2577 	struct rmap_item *rmap_item;
2578 	int search_new_forks = 0;
2579 
2580 	VM_BUG_ON_PAGE(!PageKsm(page), page);
2581 
2582 	/*
2583 	 * Rely on the page lock to protect against concurrent modifications
2584 	 * to that page's node of the stable tree.
2585 	 */
2586 	VM_BUG_ON_PAGE(!PageLocked(page), page);
2587 
2588 	stable_node = page_stable_node(page);
2589 	if (!stable_node)
2590 		return;
2591 again:
2592 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2593 		struct anon_vma *anon_vma = rmap_item->anon_vma;
2594 		struct anon_vma_chain *vmac;
2595 		struct vm_area_struct *vma;
2596 
2597 		cond_resched();
2598 		anon_vma_lock_read(anon_vma);
2599 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
2600 					       0, ULONG_MAX) {
2601 			cond_resched();
2602 			vma = vmac->vma;
2603 			if (rmap_item->address < vma->vm_start ||
2604 			    rmap_item->address >= vma->vm_end)
2605 				continue;
2606 			/*
2607 			 * Initially we examine only the vma which covers this
2608 			 * rmap_item; but later, if there is still work to do,
2609 			 * we examine covering vmas in other mms: in case they
2610 			 * were forked from the original since ksmd passed.
2611 			 */
2612 			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2613 				continue;
2614 
2615 			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2616 				continue;
2617 
2618 			if (!rwc->rmap_one(page, vma,
2619 					rmap_item->address, rwc->arg)) {
2620 				anon_vma_unlock_read(anon_vma);
2621 				return;
2622 			}
2623 			if (rwc->done && rwc->done(page)) {
2624 				anon_vma_unlock_read(anon_vma);
2625 				return;
2626 			}
2627 		}
2628 		anon_vma_unlock_read(anon_vma);
2629 	}
2630 	if (!search_new_forks++)
2631 		goto again;
2632 }
2633 
2634 #ifdef CONFIG_MIGRATION
2635 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
2636 {
2637 	struct stable_node *stable_node;
2638 
2639 	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
2640 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
2641 	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
2642 
2643 	stable_node = page_stable_node(newpage);
2644 	if (stable_node) {
2645 		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
2646 		stable_node->kpfn = page_to_pfn(newpage);
2647 		/*
2648 		 * newpage->mapping was set in advance; now we need smp_wmb()
2649 		 * to make sure that the new stable_node->kpfn is visible
2650 		 * to get_ksm_page() before it can see that oldpage->mapping
2651 		 * has gone stale (or that PageSwapCache has been cleared).
2652 		 */
2653 		smp_wmb();
2654 		set_page_stable_node(oldpage, NULL);
2655 	}
2656 }
2657 #endif /* CONFIG_MIGRATION */
2658 
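/*
 * The smp_wmb() in ksm_migrate_page() is a publish barrier: write the
 * new location (kpfn) first, barrier, then retire the old mapping, so
 * any reader that observes the old mapping gone also observes the new
 * kpfn. A hedged C11-atomics rendition of the same ordering
 * (illustrative only, not the kernel's own memory-barrier API):
 *
 *	#include <stdatomic.h>
 *
 *	static _Atomic unsigned long kpfn;
 *	static _Atomic int old_mapping_valid = 1;
 *
 *	static void migrate(unsigned long new_pfn)
 *	{
 *		// publish the new location first...
 *		atomic_store_explicit(&kpfn, new_pfn,
 *				      memory_order_relaxed);
 *		// ...then retire the old one with release ordering, the
 *		// C11 analogue of smp_wmb() before the second store: a
 *		// reader that acquire-loads old_mapping_valid == 0 is
 *		// guaranteed to also see the new kpfn.
 *		atomic_store_explicit(&old_mapping_valid, 0,
 *				      memory_order_release);
 *	}
 */
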
2659 #ifdef CONFIG_MEMORY_HOTREMOVE
2660 static void wait_while_offlining(void)
2661 {
2662 	while (ksm_run & KSM_RUN_OFFLINE) {
2663 		mutex_unlock(&ksm_thread_mutex);
2664 		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
2665 			    TASK_UNINTERRUPTIBLE);
2666 		mutex_lock(&ksm_thread_mutex);
2667 	}
2668 }
2669 
2670 static bool stable_node_dup_remove_range(struct stable_node *stable_node,
2671 					 unsigned long start_pfn,
2672 					 unsigned long end_pfn)
2673 {
2674 	if (stable_node->kpfn >= start_pfn &&
2675 	    stable_node->kpfn < end_pfn) {
2676 		/*
2677 		 * Don't get_ksm_page, page has already gone:
2678 		 * which is why we keep kpfn instead of page*
2679 		 */
2680 		remove_node_from_stable_tree(stable_node);
2681 		return true;
2682 	}
2683 	return false;
2684 }
2685 
2686 static bool stable_node_chain_remove_range(struct stable_node *stable_node,
2687 					   unsigned long start_pfn,
2688 					   unsigned long end_pfn,
2689 					   struct rb_root *root)
2690 {
2691 	struct stable_node *dup;
2692 	struct hlist_node *hlist_safe;
2693 
2694 	if (!is_stable_node_chain(stable_node)) {
2695 		VM_BUG_ON(is_stable_node_dup(stable_node));
2696 		return stable_node_dup_remove_range(stable_node, start_pfn,
2697 						    end_pfn);
2698 	}
2699 
2700 	hlist_for_each_entry_safe(dup, hlist_safe,
2701 				  &stable_node->hlist, hlist_dup) {
2702 		VM_BUG_ON(!is_stable_node_dup(dup));
2703 		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
2704 	}
2705 	if (hlist_empty(&stable_node->hlist)) {
2706 		free_stable_node_chain(stable_node, root);
2707 		return true; /* notify caller that tree was rebalanced */
2708 	} else
2709 		return false;
2710 }
2711 
2712 static void ksm_check_stable_tree(unsigned long start_pfn,
2713 				  unsigned long end_pfn)
2714 {
2715 	struct stable_node *stable_node, *next;
2716 	struct rb_node *node;
2717 	int nid;
2718 
2719 	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
2720 		node = rb_first(root_stable_tree + nid);
2721 		while (node) {
2722 			stable_node = rb_entry(node, struct stable_node, node);
2723 			if (stable_node_chain_remove_range(stable_node,
2724 							   start_pfn, end_pfn,
2725 							   root_stable_tree +
2726 							   nid))
2727 				node = rb_first(root_stable_tree + nid);
2728 			else
2729 				node = rb_next(node);
2730 			cond_resched();
2731 		}
2732 	}
2733 	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2734 		if (stable_node->kpfn >= start_pfn &&
2735 		    stable_node->kpfn < end_pfn)
2736 			remove_node_from_stable_tree(stable_node);
2737 		cond_resched();
2738 	}
2739 }
2740 
2741 static int ksm_memory_callback(struct notifier_block *self,
2742 			       unsigned long action, void *arg)
2743 {
2744 	struct memory_notify *mn = arg;
2745 
2746 	switch (action) {
2747 	case MEM_GOING_OFFLINE:
2748 		/*
2749 		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2750 		 * and remove_all_stable_nodes() while memory is going offline:
2751 		 * it is unsafe for them to touch the stable tree at this time.
2752 		 * But unmerge_ksm_pages(), rmap lookups and other entry points
2753 		 * which do not need the ksm_thread_mutex are all safe.
2754 		 */
2755 		mutex_lock(&ksm_thread_mutex);
2756 		ksm_run |= KSM_RUN_OFFLINE;
2757 		mutex_unlock(&ksm_thread_mutex);
2758 		break;
2759 
2760 	case MEM_OFFLINE:
2761 		/*
2762 		 * Most of the work is done by page migration; but there might
2763 		 * be a few stable_nodes left over, still pointing to struct
2764 		 * pages which have been offlined: prune those from the tree,
2765 		 * otherwise get_ksm_page() might later try to access a
2766 		 * non-existent struct page.
2767 		 */
2768 		ksm_check_stable_tree(mn->start_pfn,
2769 				      mn->start_pfn + mn->nr_pages);
2770 		/* fallthrough */
2771 
2772 	case MEM_CANCEL_OFFLINE:
2773 		mutex_lock(&ksm_thread_mutex);
2774 		ksm_run &= ~KSM_RUN_OFFLINE;
2775 		mutex_unlock(&ksm_thread_mutex);
2776 
2777 		smp_mb();	/* wake_up_bit advises this */
2778 		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
2779 		break;
2780 	}
2781 	return NOTIFY_OK;
2782 }
2783 #else
2784 static void wait_while_offlining(void)
2785 {
2786 }
2787 #endif /* CONFIG_MEMORY_HOTREMOVE */
2788 
2789 #ifdef CONFIG_SYSFS
2790 /*
2791  * This all compiles without CONFIG_SYSFS, but is a waste of space.
2792  */
2793 
2794 #define KSM_ATTR_RO(_name) \
2795 	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2796 #define KSM_ATTR(_name) \
2797 	static struct kobj_attribute _name##_attr = \
2798 		__ATTR(_name, 0644, _name##_show, _name##_store)
2799 
2800 static ssize_t sleep_millisecs_show(struct kobject *kobj,
2801 				    struct kobj_attribute *attr, char *buf)
2802 {
2803 	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
2804 }
2805 
2806 static ssize_t sleep_millisecs_store(struct kobject *kobj,
2807 				     struct kobj_attribute *attr,
2808 				     const char *buf, size_t count)
2809 {
2810 	unsigned long msecs;
2811 	int err;
2812 
2813 	err = kstrtoul(buf, 10, &msecs);
2814 	if (err || msecs > UINT_MAX)
2815 		return -EINVAL;
2816 
2817 	ksm_thread_sleep_millisecs = msecs;
2818 
2819 	return count;
2820 }
2821 KSM_ATTR(sleep_millisecs);
2822 
2823 static ssize_t pages_to_scan_show(struct kobject *kobj,
2824 				  struct kobj_attribute *attr, char *buf)
2825 {
2826 	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
2827 }
2828 
2829 static ssize_t pages_to_scan_store(struct kobject *kobj,
2830 				   struct kobj_attribute *attr,
2831 				   const char *buf, size_t count)
2832 {
2833 	int err;
2834 	unsigned long nr_pages;
2835 
2836 	err = kstrtoul(buf, 10, &nr_pages);
2837 	if (err || nr_pages > UINT_MAX)
2838 		return -EINVAL;
2839 
2840 	ksm_thread_pages_to_scan = nr_pages;
2841 
2842 	return count;
2843 }
2844 KSM_ATTR(pages_to_scan);
2845 
2846 static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
2847 			char *buf)
2848 {
2849 	return sprintf(buf, "%lu\n", ksm_run);
2850 }
2851 
2852 static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
2853 			 const char *buf, size_t count)
2854 {
2855 	int err;
2856 	unsigned long flags;
2857 
2858 	err = kstrtoul(buf, 10, &flags);
2859 	if (err || flags > UINT_MAX)
2860 		return -EINVAL;
2861 	if (flags > KSM_RUN_UNMERGE)
2862 		return -EINVAL;
2863 
2864 	/*
2865 	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
2866 	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2867 	 * breaking COW to free the pages_shared (but leaves mm_slots
2868 	 * on the list for when ksmd may be set running again).
2869 	 */
2870 
2871 	mutex_lock(&ksm_thread_mutex);
2872 	wait_while_offlining();
2873 	if (ksm_run != flags) {
2874 		ksm_run = flags;
2875 		if (flags & KSM_RUN_UNMERGE) {
2876 			set_current_oom_origin();
2877 			err = unmerge_and_remove_all_rmap_items();
2878 			clear_current_oom_origin();
2879 			if (err) {
2880 				ksm_run = KSM_RUN_STOP;
2881 				count = err;
2882 			}
2883 		}
2884 	}
2885 	mutex_unlock(&ksm_thread_mutex);
2886 
2887 	if (flags & KSM_RUN_MERGE)
2888 		wake_up_interruptible(&ksm_thread_wait);
2889 
2890 	return count;
2891 }
2892 KSM_ATTR(run);
2893 
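/*
 * The run, pages_to_scan and sleep_millisecs knobs above live under
 * /sys/kernel/mm/ksm/. A small sketch of how a privileged process
 * would start ksmd from C rather than from a shell (illustrative
 * only; error handling kept minimal):
 *
 *	#include <stdio.h>
 *
 *	static int ksm_sysfs_write(const char *knob, const char *val)
 *	{
 *		char path[128];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/mm/ksm/%s", knob);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		fputs(val, f);
 *		return fclose(f);
 *	}
 *
 *	// e.g. start merging, scanning 100 pages per 20ms wake-up:
 *	//	ksm_sysfs_write("pages_to_scan", "100");
 *	//	ksm_sysfs_write("sleep_millisecs", "20");
 *	//	ksm_sysfs_write("run", "1");	// KSM_RUN_MERGE
 */
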
2894 #ifdef CONFIG_NUMA
2895 static ssize_t merge_across_nodes_show(struct kobject *kobj,
2896 				struct kobj_attribute *attr, char *buf)
2897 {
2898 	return sprintf(buf, "%u\n", ksm_merge_across_nodes);
2899 }
2900 
2901 static ssize_t merge_across_nodes_store(struct kobject *kobj,
2902 				   struct kobj_attribute *attr,
2903 				   const char *buf, size_t count)
2904 {
2905 	int err;
2906 	unsigned long knob;
2907 
2908 	err = kstrtoul(buf, 10, &knob);
2909 	if (err)
2910 		return err;
2911 	if (knob > 1)
2912 		return -EINVAL;
2913 
2914 	mutex_lock(&ksm_thread_mutex);
2915 	wait_while_offlining();
2916 	if (ksm_merge_across_nodes != knob) {
2917 		if (ksm_pages_shared || remove_all_stable_nodes())
2918 			err = -EBUSY;
2919 		else if (root_stable_tree == one_stable_tree) {
2920 			struct rb_root *buf;
2921 			/*
2922 			 * This is the first time that we switch away from the
2923 			 * default of merging across nodes: must now allocate
2924 			 * a buffer to hold as many roots as may be needed.
2925 			 * Allocate stable and unstable together:
2926 			 * MAXSMP NODES_SHIFT 10 will use 16kB.
2927 			 */
2928 			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
2929 				      GFP_KERNEL);
2930 			/* Assume a zeroed rb_root from kcalloc is a valid empty RB_ROOT */
2931 			if (!buf)
2932 				err = -ENOMEM;
2933 			else {
2934 				root_stable_tree = buf;
2935 				root_unstable_tree = buf + nr_node_ids;
2936 				/* Stable tree is empty but not the unstable */
2937 				root_unstable_tree[0] = one_unstable_tree[0];
2938 			}
2939 		}
2940 		if (!err) {
2941 			ksm_merge_across_nodes = knob;
2942 			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
2943 		}
2944 	}
2945 	mutex_unlock(&ksm_thread_mutex);
2946 
2947 	return err ? err : count;
2948 }
2949 KSM_ATTR(merge_across_nodes);
2950 #endif
2951 
2952 static ssize_t use_zero_pages_show(struct kobject *kobj,
2953 				struct kobj_attribute *attr, char *buf)
2954 {
2955 	return sprintf(buf, "%u\n", ksm_use_zero_pages);
2956 }
2957 static ssize_t use_zero_pages_store(struct kobject *kobj,
2958 				   struct kobj_attribute *attr,
2959 				   const char *buf, size_t count)
2960 {
2961 	int err;
2962 	bool value;
2963 
2964 	err = kstrtobool(buf, &value);
2965 	if (err)
2966 		return -EINVAL;
2967 
2968 	ksm_use_zero_pages = value;
2969 
2970 	return count;
2971 }
2972 KSM_ATTR(use_zero_pages);
2973 
2974 static ssize_t max_page_sharing_show(struct kobject *kobj,
2975 				     struct kobj_attribute *attr, char *buf)
2976 {
2977 	return sprintf(buf, "%u\n", ksm_max_page_sharing);
2978 }
2979 
2980 static ssize_t max_page_sharing_store(struct kobject *kobj,
2981 				      struct kobj_attribute *attr,
2982 				      const char *buf, size_t count)
2983 {
2984 	int err;
2985 	int knob;
2986 
2987 	err = kstrtoint(buf, 10, &knob);
2988 	if (err)
2989 		return err;
2990 	/*
2991 	 * When a KSM page is created it is shared by 2 mappings. This
2992 	 * being a signed comparison, it implicitly verifies it's not
2993 	 * negative.
2994 	 */
2995 	if (knob < 2)
2996 		return -EINVAL;
2997 
2998 	if (READ_ONCE(ksm_max_page_sharing) == knob)
2999 		return count;
3000 
3001 	mutex_lock(&ksm_thread_mutex);
3002 	wait_while_offlining();
3003 	if (ksm_max_page_sharing != knob) {
3004 		if (ksm_pages_shared || remove_all_stable_nodes())
3005 			err = -EBUSY;
3006 		else
3007 			ksm_max_page_sharing = knob;
3008 	}
3009 	mutex_unlock(&ksm_thread_mutex);
3010 
3011 	return err ? err : count;
3012 }
3013 KSM_ATTR(max_page_sharing);
3014 
3015 static ssize_t pages_shared_show(struct kobject *kobj,
3016 				 struct kobj_attribute *attr, char *buf)
3017 {
3018 	return sprintf(buf, "%lu\n", ksm_pages_shared);
3019 }
3020 KSM_ATTR_RO(pages_shared);
3021 
3022 static ssize_t pages_sharing_show(struct kobject *kobj,
3023 				  struct kobj_attribute *attr, char *buf)
3024 {
3025 	return sprintf(buf, "%lu\n", ksm_pages_sharing);
3026 }
3027 KSM_ATTR_RO(pages_sharing);
3028 
3029 static ssize_t pages_unshared_show(struct kobject *kobj,
3030 				   struct kobj_attribute *attr, char *buf)
3031 {
3032 	return sprintf(buf, "%lu\n", ksm_pages_unshared);
3033 }
3034 KSM_ATTR_RO(pages_unshared);
3035 
3036 static ssize_t pages_volatile_show(struct kobject *kobj,
3037 				   struct kobj_attribute *attr, char *buf)
3038 {
3039 	long ksm_pages_volatile;
3040 
3041 	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3042 				- ksm_pages_sharing - ksm_pages_unshared;
3043 	/*
3044 	 * It was not worth any locking to calculate that statistic,
3045 	 * but it might therefore sometimes be negative: conceal that.
3046 	 */
3047 	if (ksm_pages_volatile < 0)
3048 		ksm_pages_volatile = 0;
3049 	return sprintf(buf, "%ld\n", ksm_pages_volatile);
3050 }
3051 KSM_ATTR_RO(pages_volatile);
3052 
3053 static ssize_t stable_node_dups_show(struct kobject *kobj,
3054 				     struct kobj_attribute *attr, char *buf)
3055 {
3056 	return sprintf(buf, "%lu\n", ksm_stable_node_dups);
3057 }
3058 KSM_ATTR_RO(stable_node_dups);
3059 
3060 static ssize_t stable_node_chains_show(struct kobject *kobj,
3061 				       struct kobj_attribute *attr, char *buf)
3062 {
3063 	return sprintf(buf, "%lu\n", ksm_stable_node_chains);
3064 }
3065 KSM_ATTR_RO(stable_node_chains);
3066 
3067 static ssize_t
3068 stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3069 					struct kobj_attribute *attr,
3070 					char *buf)
3071 {
3072 	return sprintf(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3073 }
3074 
3075 static ssize_t
3076 stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3077 					 struct kobj_attribute *attr,
3078 					 const char *buf, size_t count)
3079 {
3080 	unsigned long msecs;
3081 	int err;
3082 
3083 	err = kstrtoul(buf, 10, &msecs);
3084 	if (err || msecs > UINT_MAX)
3085 		return -EINVAL;
3086 
3087 	ksm_stable_node_chains_prune_millisecs = msecs;
3088 
3089 	return count;
3090 }
3091 KSM_ATTR(stable_node_chains_prune_millisecs);
3092 
3093 static ssize_t full_scans_show(struct kobject *kobj,
3094 			       struct kobj_attribute *attr, char *buf)
3095 {
3096 	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
3097 }
3098 KSM_ATTR_RO(full_scans);
3099 
3100 static struct attribute *ksm_attrs[] = {
3101 	&sleep_millisecs_attr.attr,
3102 	&pages_to_scan_attr.attr,
3103 	&run_attr.attr,
3104 	&pages_shared_attr.attr,
3105 	&pages_sharing_attr.attr,
3106 	&pages_unshared_attr.attr,
3107 	&pages_volatile_attr.attr,
3108 	&full_scans_attr.attr,
3109 #ifdef CONFIG_NUMA
3110 	&merge_across_nodes_attr.attr,
3111 #endif
3112 	&max_page_sharing_attr.attr,
3113 	&stable_node_chains_attr.attr,
3114 	&stable_node_dups_attr.attr,
3115 	&stable_node_chains_prune_millisecs_attr.attr,
3116 	&use_zero_pages_attr.attr,
3117 	NULL,
3118 };
3119 
3120 static const struct attribute_group ksm_attr_group = {
3121 	.attrs = ksm_attrs,
3122 	.name = "ksm",
3123 };
3124 #endif /* CONFIG_SYSFS */
3125 
3126 static int __init ksm_init(void)
3127 {
3128 	struct task_struct *ksm_thread;
3129 	int err;
3130 
3131 	/* The correct value depends on page size and endianness */
3132 	zero_checksum = calc_checksum(ZERO_PAGE(0));
3133 	/* Default to false for backwards compatibility */
3134 	ksm_use_zero_pages = false;
3135 
3136 	err = ksm_slab_init();
3137 	if (err)
3138 		goto out;
3139 
3140 	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
3141 	if (IS_ERR(ksm_thread)) {
3142 		pr_err("ksm: creating kthread failed\n");
3143 		err = PTR_ERR(ksm_thread);
3144 		goto out_free;
3145 	}
3146 
3147 #ifdef CONFIG_SYSFS
3148 	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
3149 	if (err) {
3150 		pr_err("ksm: register sysfs failed\n");
3151 		kthread_stop(ksm_thread);
3152 		goto out_free;
3153 	}
3154 #else
3155 	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
3156 
3157 #endif /* CONFIG_SYSFS */
3158 
3159 #ifdef CONFIG_MEMORY_HOTREMOVE
3160 	/* There is no significance to this priority 100 */
3161 	hotplug_memory_notifier(ksm_memory_callback, 100);
3162 #endif
3163 	return 0;
3164 
3165 out_free:
3166 	ksm_slab_free();
3167 out:
3168 	return err;
3169 }
3170 subsys_initcall(ksm_init);
3171