xref: /openbmc/linux/mm/ksm.c (revision 3c881e05c814c970e4f9577446a9d3461d134607)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Memory merging support.
4   *
5   * This code enables dynamic sharing of identical pages found in different
6   * memory areas, even if they are not shared by fork()
7   *
8   * Copyright (C) 2008-2009 Red Hat, Inc.
9   * Authors:
10   *	Izik Eidus
11   *	Andrea Arcangeli
12   *	Chris Wright
13   *	Hugh Dickins
14   */
15  
16  #include <linux/errno.h>
17  #include <linux/mm.h>
18  #include <linux/fs.h>
19  #include <linux/mman.h>
20  #include <linux/sched.h>
21  #include <linux/sched/mm.h>
22  #include <linux/sched/coredump.h>
23  #include <linux/rwsem.h>
24  #include <linux/pagemap.h>
25  #include <linux/rmap.h>
26  #include <linux/spinlock.h>
27  #include <linux/xxhash.h>
28  #include <linux/delay.h>
29  #include <linux/kthread.h>
30  #include <linux/wait.h>
31  #include <linux/slab.h>
32  #include <linux/rbtree.h>
33  #include <linux/memory.h>
34  #include <linux/mmu_notifier.h>
35  #include <linux/swap.h>
36  #include <linux/ksm.h>
37  #include <linux/hashtable.h>
38  #include <linux/freezer.h>
39  #include <linux/oom.h>
40  #include <linux/numa.h>
41  
42  #include <asm/tlbflush.h>
43  #include "internal.h"
44  
45  #ifdef CONFIG_NUMA
46  #define NUMA(x)		(x)
47  #define DO_NUMA(x)	do { (x); } while (0)
48  #else
49  #define NUMA(x)		(0)
50  #define DO_NUMA(x)	do { } while (0)
51  #endif
52  
53  /**
54   * DOC: Overview
55   *
56   * A few notes about the KSM scanning process,
57   * to make it easier to understand the data structures below:
58   *
59   * In order to reduce excessive scanning, KSM sorts the memory pages by their
60   * contents into a data structure that holds pointers to the pages' locations.
61   *
62   * Since the contents of the pages may change at any moment, KSM cannot just
63   * insert the pages into a normal sorted tree and expect it to find anything.
64   * Therefore KSM uses two data structures - the stable and the unstable tree.
65   *
66   * The stable tree holds pointers to all the merged pages (ksm pages), sorted
67   * by their contents.  Because each such page is write-protected, searching on
68   * this tree is guaranteed to work (except when pages are being unmapped),
69   * and therefore this tree is called the stable tree.
70   *
71   * The stable tree node includes information required for reverse
72   * mapping from a KSM page to virtual addresses that map this page.
73   *
74   * In order to avoid large latencies of the rmap walks on KSM pages,
75   * KSM maintains two types of nodes in the stable tree:
76   *
77   * * the regular nodes that keep the reverse mapping structures in a
78   *   linked list
79   * * the "chains" that link nodes ("dups") that represent the same
80   *   write protected memory content, but each "dup" corresponds to a
81   *   different KSM page copy of that content
82   *
83   * Internally, the regular nodes, "dups" and "chains" are represented
84   * using the same struct stable_node structure.
85   *
86   * In addition to the stable tree, KSM uses a second data structure called the
87   * unstable tree: this tree holds pointers to pages which have been found to
88   * be "unchanged for a period of time".  The unstable tree sorts these pages
89   * by their contents, but since they are not write-protected, KSM cannot rely
90   * upon the unstable tree to work correctly - the unstable tree is liable to
91   * be corrupted as its contents are modified, and so it is called unstable.
92   *
93   * KSM solves this problem by several techniques:
94   *
95   * 1) The unstable tree is flushed every time KSM completes scanning all
96   *    memory areas, and then the tree is rebuilt again from the beginning.
97   * 2) KSM will only insert into the unstable tree pages whose hash value
98   *    has not changed since the previous scan of all memory areas.
99   * 3) The unstable tree is a red-black tree - so its balancing is based on the
100   *    colors of the nodes and not on their contents, assuring that even when
101   *    the tree gets "corrupted" it won't get out of balance, so scanning time
102   *    remains the same (also, searching and inserting nodes in an rbtree uses
103   *    the same algorithm, so we have no overhead when we flush and rebuild).
104   * 4) KSM never flushes the stable tree, which means that even if it were to
105   *    take 10 attempts to find a page in the unstable tree, once it is found,
106   *    it is secured in the stable tree.  (When we scan a new page, we first
107   *    compare it against the stable tree, and then against the unstable tree.)
108   *
109   * If the merge_across_nodes tunable is unset, then KSM maintains multiple
110   * stable trees and multiple unstable trees: one of each for each NUMA node.
111   */
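/*
 * A minimal userspace sketch (assuming Linux with CONFIG_KSM=y, ksmd
 * enabled via /sys/kernel/mm/ksm/run, and glibc headers) of opting an
 * area in to the scanning described above:
 *
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 * 4096;
 *		void *buf = aligned_alloc(4096, len);	// page-aligned buffer
 *
 *		if (!buf)
 *			return 1;
 *		// Ask KSM to scan this range and merge identical pages.
 *		if (madvise(buf, len, MADV_MERGEABLE))
 *			return 1;
 *		return 0;
 *	}
 */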
112  
113  /**
114   * struct mm_slot - ksm information per mm that is being scanned
115   * @link: link to the mm_slots hash list
116   * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
117   * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
118   * @mm: the mm that this information is valid for
119   */
120  struct mm_slot {
121  	struct hlist_node link;
122  	struct list_head mm_list;
123  	struct rmap_item *rmap_list;
124  	struct mm_struct *mm;
125  };
126  
127  /**
128   * struct ksm_scan - cursor for scanning
129   * @mm_slot: the current mm_slot we are scanning
130   * @address: the next address inside that mm_slot to be scanned
131   * @rmap_list: link to the next rmap to be scanned in the rmap_list
132   * @seqnr: count of completed full scans (needed when removing unstable node)
133   *
134   * There is only the one ksm_scan instance of this cursor structure.
135   */
136  struct ksm_scan {
137  	struct mm_slot *mm_slot;
138  	unsigned long address;
139  	struct rmap_item **rmap_list;
140  	unsigned long seqnr;
141  };
142  
143  /**
144   * struct stable_node - node of the stable rbtree
145   * @node: rb node of this ksm page in the stable tree
146   * @head: (overlaying parent) &migrate_nodes indicates it is temporarily on that list
147   * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
148   * @list: linked into migrate_nodes, pending placement in the proper node tree
149   * @hlist: hlist head of rmap_items using this ksm page
150   * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
151   * @chain_prune_time: time of the last full garbage collection
152   * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
153   * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
154   */
155  struct stable_node {
156  	union {
157  		struct rb_node node;	/* when node of stable tree */
158  		struct {		/* when listed for migration */
159  			struct list_head *head;
160  			struct {
161  				struct hlist_node hlist_dup;
162  				struct list_head list;
163  			};
164  		};
165  	};
166  	struct hlist_head hlist;
167  	union {
168  		unsigned long kpfn;
169  		unsigned long chain_prune_time;
170  	};
171  	/*
172  	 * STABLE_NODE_CHAIN can be any negative number in the
173  	 * rmap_hlist_len negative range, but preferably not -1, so that
174  	 * underflows can be reliably detected.
175  	 */
176  #define STABLE_NODE_CHAIN -1024
177  	int rmap_hlist_len;
178  #ifdef CONFIG_NUMA
179  	int nid;
180  #endif
181  };
182  
183  /**
184   * struct rmap_item - reverse mapping item for virtual addresses
185   * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
186   * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
187   * @nid: NUMA node id of unstable tree in which linked (may not match page)
188   * @mm: the memory structure this rmap_item is pointing into
189   * @address: the virtual address this rmap_item tracks (+ flags in low bits)
190   * @oldchecksum: previous checksum of the page at that virtual address
191   * @node: rb node of this rmap_item in the unstable tree
192   * @head: pointer to stable_node heading this list in the stable tree
193   * @hlist: link into hlist of rmap_items hanging off that stable_node
194   */
195  struct rmap_item {
196  	struct rmap_item *rmap_list;
197  	union {
198  		struct anon_vma *anon_vma;	/* when stable */
199  #ifdef CONFIG_NUMA
200  		int nid;		/* when node of unstable tree */
201  #endif
202  	};
203  	struct mm_struct *mm;
204  	unsigned long address;		/* + low bits used for flags below */
205  	unsigned int oldchecksum;	/* when unstable */
206  	union {
207  		struct rb_node node;	/* when node of unstable tree */
208  		struct {		/* when listed from stable tree */
209  			struct stable_node *head;
210  			struct hlist_node hlist;
211  		};
212  	};
213  };
214  
215  #define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
216  #define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
217  #define STABLE_FLAG	0x200	/* is listed from the stable tree */
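/*
 * Illustrative sketch of how these bits combine with the page-aligned
 * virtual address kept in rmap_item->address (the local variable names
 * here are only examples; see the scan and tree code below):
 *
 *	unsigned long va    = rmap_item->address & PAGE_MASK;
 *	bool unstable       = rmap_item->address & UNSTABLE_FLAG;
 *	bool stable         = rmap_item->address & STABLE_FLAG;
 *	unsigned char seqnr = rmap_item->address & SEQNR_MASK;	// when unstable
 */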
218  
219  /* The stable and unstable tree heads */
220  static struct rb_root one_stable_tree[1] = { RB_ROOT };
221  static struct rb_root one_unstable_tree[1] = { RB_ROOT };
222  static struct rb_root *root_stable_tree = one_stable_tree;
223  static struct rb_root *root_unstable_tree = one_unstable_tree;
224  
225  /* Recently migrated nodes of stable tree, pending proper placement */
226  static LIST_HEAD(migrate_nodes);
227  #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
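/*
 * Note: STABLE_NODE_DUP_HEAD is used only as a sentinel stored in a dup
 * stable_node's ->head (see is_stable_node_dup() below).  It is the address
 * of the second word of migrate_nodes, so it can never equal &migrate_nodes
 * itself nor a value that the rb_node overlay could legitimately hold.
 */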
228  
229  #define MM_SLOTS_HASH_BITS 10
230  static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
231  
232  static struct mm_slot ksm_mm_head = {
233  	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
234  };
235  static struct ksm_scan ksm_scan = {
236  	.mm_slot = &ksm_mm_head,
237  };
238  
239  static struct kmem_cache *rmap_item_cache;
240  static struct kmem_cache *stable_node_cache;
241  static struct kmem_cache *mm_slot_cache;
242  
243  /* The number of nodes in the stable tree */
244  static unsigned long ksm_pages_shared;
245  
246  /* The number of page slots additionally sharing those nodes */
247  static unsigned long ksm_pages_sharing;
248  
249  /* The number of nodes in the unstable tree */
250  static unsigned long ksm_pages_unshared;
251  
252  /* The number of rmap_items in use: to calculate pages_volatile */
253  static unsigned long ksm_rmap_items;
254  
255  /* The number of stable_node chains */
256  static unsigned long ksm_stable_node_chains;
257  
258  /* The number of stable_node dups linked to the stable_node chains */
259  static unsigned long ksm_stable_node_dups;
260  
261  /* Delay in pruning stale stable_node_dups in the stable_node_chains */
262  static int ksm_stable_node_chains_prune_millisecs = 2000;
263  
264  /* Maximum number of page slots sharing a stable node */
265  static int ksm_max_page_sharing = 256;
266  
267  /* Number of pages ksmd should scan in one batch */
268  static unsigned int ksm_thread_pages_to_scan = 100;
269  
270  /* Milliseconds ksmd should sleep between batches */
271  static unsigned int ksm_thread_sleep_millisecs = 20;
272  
273  /* Checksum of an empty (zeroed) page */
274  static unsigned int zero_checksum __read_mostly;
275  
276  /* Whether to merge empty (zeroed) pages with actual zero pages */
277  static bool ksm_use_zero_pages __read_mostly;
278  
279  #ifdef CONFIG_NUMA
280  /* Zeroed when merging across nodes is not allowed */
281  static unsigned int ksm_merge_across_nodes = 1;
282  static int ksm_nr_node_ids = 1;
283  #else
284  #define ksm_merge_across_nodes	1U
285  #define ksm_nr_node_ids		1
286  #endif
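/*
 * A minimal userspace sketch of adjusting the tunables above through the
 * /sys/kernel/mm/ksm/ attributes registered later in this file (the helper
 * name below is only an example, not a kernel or libc API):
 *
 *	#include <stdio.h>
 *
 *	static int ksm_write_knob(const char *knob, unsigned int val)
 *	{
 *		char path[128];
 *		FILE *f;
 *		int ret;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", knob);
 *		f = fopen(path, "w");
 *		if (!f)
 *			return -1;
 *		ret = fprintf(f, "%u\n", val) < 0 ? -1 : 0;
 *		return fclose(f) ? -1 : ret;
 *	}
 *
 *	// e.g. ksm_write_knob("pages_to_scan", 1000);
 *	//      ksm_write_knob("sleep_millisecs", 20);
 */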
287  
288  #define KSM_RUN_STOP	0
289  #define KSM_RUN_MERGE	1
290  #define KSM_RUN_UNMERGE	2
291  #define KSM_RUN_OFFLINE	4
292  static unsigned long ksm_run = KSM_RUN_STOP;
293  static void wait_while_offlining(void);
294  
295  static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
296  static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
297  static DEFINE_MUTEX(ksm_thread_mutex);
298  static DEFINE_SPINLOCK(ksm_mmlist_lock);
299  
300  #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
301  		sizeof(struct __struct), __alignof__(struct __struct),\
302  		(__flags), NULL)
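/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) below expands to:
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 */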
303  
304  static int __init ksm_slab_init(void)
305  {
306  	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
307  	if (!rmap_item_cache)
308  		goto out;
309  
310  	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
311  	if (!stable_node_cache)
312  		goto out_free1;
313  
314  	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
315  	if (!mm_slot_cache)
316  		goto out_free2;
317  
318  	return 0;
319  
320  out_free2:
321  	kmem_cache_destroy(stable_node_cache);
322  out_free1:
323  	kmem_cache_destroy(rmap_item_cache);
324  out:
325  	return -ENOMEM;
326  }
327  
328  static void __init ksm_slab_free(void)
329  {
330  	kmem_cache_destroy(mm_slot_cache);
331  	kmem_cache_destroy(stable_node_cache);
332  	kmem_cache_destroy(rmap_item_cache);
333  	mm_slot_cache = NULL;
334  }
335  
336  static __always_inline bool is_stable_node_chain(struct stable_node *chain)
337  {
338  	return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
339  }
340  
341  static __always_inline bool is_stable_node_dup(struct stable_node *dup)
342  {
343  	return dup->head == STABLE_NODE_DUP_HEAD;
344  }
345  
346  static inline void stable_node_chain_add_dup(struct stable_node *dup,
347  					     struct stable_node *chain)
348  {
349  	VM_BUG_ON(is_stable_node_dup(dup));
350  	dup->head = STABLE_NODE_DUP_HEAD;
351  	VM_BUG_ON(!is_stable_node_chain(chain));
352  	hlist_add_head(&dup->hlist_dup, &chain->hlist);
353  	ksm_stable_node_dups++;
354  }
355  
356  static inline void __stable_node_dup_del(struct stable_node *dup)
357  {
358  	VM_BUG_ON(!is_stable_node_dup(dup));
359  	hlist_del(&dup->hlist_dup);
360  	ksm_stable_node_dups--;
361  }
362  
363  static inline void stable_node_dup_del(struct stable_node *dup)
364  {
365  	VM_BUG_ON(is_stable_node_chain(dup));
366  	if (is_stable_node_dup(dup))
367  		__stable_node_dup_del(dup);
368  	else
369  		rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
370  #ifdef CONFIG_DEBUG_VM
371  	dup->head = NULL;
372  #endif
373  }
374  
375  static inline struct rmap_item *alloc_rmap_item(void)
376  {
377  	struct rmap_item *rmap_item;
378  
379  	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
380  						__GFP_NORETRY | __GFP_NOWARN);
381  	if (rmap_item)
382  		ksm_rmap_items++;
383  	return rmap_item;
384  }
385  
386  static inline void free_rmap_item(struct rmap_item *rmap_item)
387  {
388  	ksm_rmap_items--;
389  	rmap_item->mm = NULL;	/* debug safety */
390  	kmem_cache_free(rmap_item_cache, rmap_item);
391  }
392  
393  static inline struct stable_node *alloc_stable_node(void)
394  {
395  	/*
396  	 * The allocation can take too long with GFP_KERNEL when memory is under
397  	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
398  	 * grants access to memory reserves, helping to avoid this problem.
399  	 */
400  	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
401  }
402  
403  static inline void free_stable_node(struct stable_node *stable_node)
404  {
405  	VM_BUG_ON(stable_node->rmap_hlist_len &&
406  		  !is_stable_node_chain(stable_node));
407  	kmem_cache_free(stable_node_cache, stable_node);
408  }
409  
410  static inline struct mm_slot *alloc_mm_slot(void)
411  {
412  	if (!mm_slot_cache)	/* initialization failed */
413  		return NULL;
414  	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
415  }
416  
417  static inline void free_mm_slot(struct mm_slot *mm_slot)
418  {
419  	kmem_cache_free(mm_slot_cache, mm_slot);
420  }
421  
422  static struct mm_slot *get_mm_slot(struct mm_struct *mm)
423  {
424  	struct mm_slot *slot;
425  
426  	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
427  		if (slot->mm == mm)
428  			return slot;
429  
430  	return NULL;
431  }
432  
433  static void insert_to_mm_slots_hash(struct mm_struct *mm,
434  				    struct mm_slot *mm_slot)
435  {
436  	mm_slot->mm = mm;
437  	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
438  }
439  
440  /*
441   * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
442   * page tables after it has passed through ksm_exit() - which, if necessary,
443   * takes mmap_lock briefly to serialize against them.  ksm_exit() does not set
444   * a special flag: they can just back out as soon as mm_users goes to zero.
445   * ksm_test_exit() is used throughout to make this test for exit: in some
446   * places for correctness, in some places just to avoid unnecessary work.
447   */
448  static inline bool ksm_test_exit(struct mm_struct *mm)
449  {
450  	return atomic_read(&mm->mm_users) == 0;
451  }
452  
453  /*
454   * We use break_ksm to break COW on a ksm page: it's a stripped down
455   *
456   *	if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
457   *		put_page(page);
458   *
459   * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
460   * in case the application has unmapped and remapped mm,addr meanwhile.
461   * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
462   * mmap of /dev/mem, where we would not want to touch it.
463   *
464   * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
465   * of the process that owns 'vma'.  We also do not want to enforce
466   * protection keys here anyway.
467   */
468  static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
469  {
470  	struct page *page;
471  	vm_fault_t ret = 0;
472  
473  	do {
474  		cond_resched();
475  		page = follow_page(vma, addr,
476  				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
477  		if (IS_ERR_OR_NULL(page))
478  			break;
479  		if (PageKsm(page))
480  			ret = handle_mm_fault(vma, addr,
481  					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
482  					      NULL);
483  		else
484  			ret = VM_FAULT_WRITE;
485  		put_page(page);
486  	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
487  	/*
488  	 * We must loop because handle_mm_fault() may back out if there's
489  	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
490  	 *
491  	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
492  	 * COW has been broken, even if the vma does not permit VM_WRITE;
493  	 * but note that a concurrent fault might break PageKsm for us.
494  	 *
495  	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
496  	 * backing file, which also invalidates anonymous pages: that's
497  	 * okay, that truncation will have unmapped the PageKsm for us.
498  	 *
499  	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
500  	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
501  	 * current task has TIF_MEMDIE set, and will be OOM killed on return
502  	 * to user; and ksmd, having no mm, would never be chosen for that.
503  	 *
504  	 * But if the mm is in a limited mem_cgroup, then the fault may fail
505  	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
506  	 * even ksmd can fail in this way - though it's usually breaking ksm
507  	 * just to undo a merge it made a moment before, so unlikely to oom.
508  	 *
509  	 * That's a pity: we might therefore have more kernel pages allocated
510  	 * than we're counting as nodes in the stable tree; but ksm_do_scan
511  	 * will retry to break_cow on each pass, so should recover the page
512  	 * in due course.  The important thing is to not let VM_MERGEABLE
513  	 * be cleared while any such pages might remain in the area.
514  	 */
515  	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
516  }
517  
518  static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
519  		unsigned long addr)
520  {
521  	struct vm_area_struct *vma;
522  	if (ksm_test_exit(mm))
523  		return NULL;
524  	vma = find_vma(mm, addr);
525  	if (!vma || vma->vm_start > addr)
526  		return NULL;
527  	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
528  		return NULL;
529  	return vma;
530  }
531  
532  static void break_cow(struct rmap_item *rmap_item)
533  {
534  	struct mm_struct *mm = rmap_item->mm;
535  	unsigned long addr = rmap_item->address;
536  	struct vm_area_struct *vma;
537  
538  	/*
539  	 * It is not an accident that whenever we want to break COW
540  	 * to undo, we also need to drop a reference to the anon_vma.
541  	 */
542  	put_anon_vma(rmap_item->anon_vma);
543  
544  	mmap_read_lock(mm);
545  	vma = find_mergeable_vma(mm, addr);
546  	if (vma)
547  		break_ksm(vma, addr);
548  	mmap_read_unlock(mm);
549  }
550  
551  static struct page *get_mergeable_page(struct rmap_item *rmap_item)
552  {
553  	struct mm_struct *mm = rmap_item->mm;
554  	unsigned long addr = rmap_item->address;
555  	struct vm_area_struct *vma;
556  	struct page *page;
557  
558  	mmap_read_lock(mm);
559  	vma = find_mergeable_vma(mm, addr);
560  	if (!vma)
561  		goto out;
562  
563  	page = follow_page(vma, addr, FOLL_GET);
564  	if (IS_ERR_OR_NULL(page))
565  		goto out;
566  	if (PageAnon(page)) {
567  		flush_anon_page(vma, page, addr);
568  		flush_dcache_page(page);
569  	} else {
570  		put_page(page);
571  out:
572  		page = NULL;
573  	}
574  	mmap_read_unlock(mm);
575  	return page;
576  }
577  
578  /*
579   * This helper is used for getting the right index into the array of tree roots.
580   * When merge_across_nodes knob is set to 1, there are only two rb-trees for
581   * stable and unstable pages from all nodes with roots in index 0. Otherwise,
582   * every node has its own stable and unstable tree.
583   */
584  static inline int get_kpfn_nid(unsigned long kpfn)
585  {
586  	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
587  }
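/*
 * For example: with merge_across_nodes == 1 every kpfn maps to index 0, so
 * root_stable_tree[0] and root_unstable_tree[0] are shared by all NUMA
 * nodes; with merge_across_nodes == 0 (and CONFIG_NUMA) a KSM page whose
 * pfn lives on node 2 is looked up in root_stable_tree[2] and
 * root_unstable_tree[2].
 */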
588  
589  static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
590  						   struct rb_root *root)
591  {
592  	struct stable_node *chain = alloc_stable_node();
593  	VM_BUG_ON(is_stable_node_chain(dup));
594  	if (likely(chain)) {
595  		INIT_HLIST_HEAD(&chain->hlist);
596  		chain->chain_prune_time = jiffies;
597  		chain->rmap_hlist_len = STABLE_NODE_CHAIN;
598  #if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
599  		chain->nid = NUMA_NO_NODE; /* debug */
600  #endif
601  		ksm_stable_node_chains++;
602  
603  		/*
604  		 * Put the stable node chain in the first dimension of
605  		 * the stable tree and at the same time remove the old
606  		 * stable node.
607  		 */
608  		rb_replace_node(&dup->node, &chain->node, root);
609  
610  		/*
611  		 * Move the old stable node to the second dimension
612  		 * queued in the hlist_dup. The invariant is that all
613  		 * dup stable_nodes in the chain->hlist point to pages
614  		 * that are write protected and have the exact same
615  		 * content.
616  		 */
617  		stable_node_chain_add_dup(dup, chain);
618  	}
619  	return chain;
620  }
621  
622  static inline void free_stable_node_chain(struct stable_node *chain,
623  					  struct rb_root *root)
624  {
625  	rb_erase(&chain->node, root);
626  	free_stable_node(chain);
627  	ksm_stable_node_chains--;
628  }
629  
630  static void remove_node_from_stable_tree(struct stable_node *stable_node)
631  {
632  	struct rmap_item *rmap_item;
633  
634  	/* check it's not STABLE_NODE_CHAIN or negative */
635  	BUG_ON(stable_node->rmap_hlist_len < 0);
636  
637  	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
638  		if (rmap_item->hlist.next)
639  			ksm_pages_sharing--;
640  		else
641  			ksm_pages_shared--;
642  		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
643  		stable_node->rmap_hlist_len--;
644  		put_anon_vma(rmap_item->anon_vma);
645  		rmap_item->address &= PAGE_MASK;
646  		cond_resched();
647  	}
648  
649  	/*
650  	 * We need the second aligned pointer of the migrate_nodes
651  	 * list_head to stay clear from the rb_parent_color union
652  	 * (aligned and different than any node) and also different
653  	 * from &migrate_nodes. This will verify that future list.h changes
654  	 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
655  	 */
656  #if defined(GCC_VERSION) && GCC_VERSION >= 40903
657  	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
658  	BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
659  #endif
660  
661  	if (stable_node->head == &migrate_nodes)
662  		list_del(&stable_node->list);
663  	else
664  		stable_node_dup_del(stable_node);
665  	free_stable_node(stable_node);
666  }
667  
668  enum get_ksm_page_flags {
669  	GET_KSM_PAGE_NOLOCK,
670  	GET_KSM_PAGE_LOCK,
671  	GET_KSM_PAGE_TRYLOCK
672  };
673  
674  /*
675   * get_ksm_page: checks if the page indicated by the stable node
676   * is still its ksm page, despite having held no reference to it.
677   * In which case we can trust the content of the page, and it
678   * returns the gotten page; but if the page has now been zapped,
679   * remove the stale node from the stable tree and return NULL.
680   * But beware, the stable node's page might be being migrated.
681   *
682   * You would expect the stable_node to hold a reference to the ksm page.
683   * But if it increments the page's count, swapping out has to wait for
684   * ksmd to come around again before it can free the page, which may take
685   * seconds or even minutes: much too unresponsive.  So instead we use a
686   * "keyhole reference": access to the ksm page from the stable node peeps
687   * out through its keyhole to see if that page still holds the right key,
688   * pointing back to this stable node.  This relies on freeing a PageAnon
689   * page to reset its page->mapping to NULL, and relies on no other use of
690   * a page to put something that might look like our key in page->mapping
691   * while it is on its way to being freed; but it is an anomaly to bear in mind.
692   */
693  static struct page *get_ksm_page(struct stable_node *stable_node,
694  				 enum get_ksm_page_flags flags)
695  {
696  	struct page *page;
697  	void *expected_mapping;
698  	unsigned long kpfn;
699  
700  	expected_mapping = (void *)((unsigned long)stable_node |
701  					PAGE_MAPPING_KSM);
702  again:
703  	kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
704  	page = pfn_to_page(kpfn);
705  	if (READ_ONCE(page->mapping) != expected_mapping)
706  		goto stale;
707  
708  	/*
709  	 * We cannot do anything with the page while its refcount is 0.
710  	 * Usually 0 means free, or tail of a higher-order page: in which
711  	 * case this node is no longer referenced, and should be freed;
712  	 * however, it might mean that the page is under page_ref_freeze().
713  	 * The __remove_mapping() case is easy, again the node is now stale;
714  	 * the same holds in the reuse_ksm_page() case; but if page is swapcache
715  	 * in migrate_page_move_mapping(), it might still be our page,
716  	 * in which case it's essential to keep the node.
717  	 */
718  	while (!get_page_unless_zero(page)) {
719  		/*
720  		 * Another check for page->mapping != expected_mapping would
721  		 * work here too.  We have chosen the !PageSwapCache test to
722  		 * optimize the common case, when the page is or is about to
723  		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
724  		 * in the ref_freeze section of __remove_mapping(); but an anon
725  		 * page's page->mapping is reset to NULL later, in free_pages_prepare().
726  		 */
727  		if (!PageSwapCache(page))
728  			goto stale;
729  		cpu_relax();
730  	}
731  
732  	if (READ_ONCE(page->mapping) != expected_mapping) {
733  		put_page(page);
734  		goto stale;
735  	}
736  
737  	if (flags == GET_KSM_PAGE_TRYLOCK) {
738  		if (!trylock_page(page)) {
739  			put_page(page);
740  			return ERR_PTR(-EBUSY);
741  		}
742  	} else if (flags == GET_KSM_PAGE_LOCK)
743  		lock_page(page);
744  
745  	if (flags != GET_KSM_PAGE_NOLOCK) {
746  		if (READ_ONCE(page->mapping) != expected_mapping) {
747  			unlock_page(page);
748  			put_page(page);
749  			goto stale;
750  		}
751  	}
752  	return page;
753  
754  stale:
755  	/*
756  	 * We come here from above when page->mapping or !PageSwapCache
757  	 * suggests that the node is stale; but it might be under migration.
758  	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
759  	 * before checking whether node->kpfn has been changed.
760  	 */
761  	smp_rmb();
762  	if (READ_ONCE(stable_node->kpfn) != kpfn)
763  		goto again;
764  	remove_node_from_stable_tree(stable_node);
765  	return NULL;
766  }
767  
768  /*
769   * Removing rmap_item from stable or unstable tree.
770   * This function will clean the information from the stable/unstable tree.
771   */
772  static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
773  {
774  	if (rmap_item->address & STABLE_FLAG) {
775  		struct stable_node *stable_node;
776  		struct page *page;
777  
778  		stable_node = rmap_item->head;
779  		page = get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
780  		if (!page)
781  			goto out;
782  
783  		hlist_del(&rmap_item->hlist);
784  		put_page(page);
785  
786  		if (!hlist_empty(&stable_node->hlist))
787  			ksm_pages_sharing--;
788  		else
789  			ksm_pages_shared--;
790  		VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
791  		stable_node->rmap_hlist_len--;
792  
793  		put_anon_vma(rmap_item->anon_vma);
794  		rmap_item->head = NULL;
795  		rmap_item->address &= PAGE_MASK;
796  
797  	} else if (rmap_item->address & UNSTABLE_FLAG) {
798  		unsigned char age;
799  		/*
800  		 * Usually ksmd can and must skip the rb_erase, because
801  		 * root_unstable_tree was already reset to RB_ROOT.
802  		 * But be careful when an mm is exiting: do the rb_erase
803  		 * if this rmap_item was inserted by this scan, rather
804  		 * than left over from before.
805  		 */
806  		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
807  		BUG_ON(age > 1);
808  		if (!age)
809  			rb_erase(&rmap_item->node,
810  				 root_unstable_tree + NUMA(rmap_item->nid));
811  		ksm_pages_unshared--;
812  		rmap_item->address &= PAGE_MASK;
813  	}
814  out:
815  	cond_resched();		/* we're called from many long loops */
816  }
817  
818  static void remove_trailing_rmap_items(struct rmap_item **rmap_list)
819  {
820  	while (*rmap_list) {
821  		struct rmap_item *rmap_item = *rmap_list;
822  		*rmap_list = rmap_item->rmap_list;
823  		remove_rmap_item_from_tree(rmap_item);
824  		free_rmap_item(rmap_item);
825  	}
826  }
827  
828  /*
829   * Though it's very tempting to unmerge rmap_items from stable tree rather
830   * than check every pte of a given vma, the locking doesn't quite work for
831   * that - an rmap_item is assigned to the stable tree after inserting ksm
832   * page and upping mmap_lock.  Nor does it fit with the way we skip dup'ing
833   * rmap_items from parent to child at fork time (so as not to waste time
834   * if exit comes before the next scan reaches it).
835   *
836   * Similarly, although we'd like to remove rmap_items (so updating counts
837   * and freeing memory) when unmerging an area, it's easier to leave that
838   * to the next pass of ksmd - consider, for example, how ksmd might be
839   * in cmp_and_merge_page on one of the rmap_items we would be removing.
840   */
841  static int unmerge_ksm_pages(struct vm_area_struct *vma,
842  			     unsigned long start, unsigned long end)
843  {
844  	unsigned long addr;
845  	int err = 0;
846  
847  	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
848  		if (ksm_test_exit(vma->vm_mm))
849  			break;
850  		if (signal_pending(current))
851  			err = -ERESTARTSYS;
852  		else
853  			err = break_ksm(vma, addr);
854  	}
855  	return err;
856  }
857  
858  static inline struct stable_node *page_stable_node(struct page *page)
859  {
860  	return PageKsm(page) ? page_rmapping(page) : NULL;
861  }
862  
863  static inline void set_page_stable_node(struct page *page,
864  					struct stable_node *stable_node)
865  {
866  	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
867  }
868  
869  #ifdef CONFIG_SYSFS
870  /*
871   * Only called through the sysfs control interface:
872   */
873  static int remove_stable_node(struct stable_node *stable_node)
874  {
875  	struct page *page;
876  	int err;
877  
878  	page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
879  	if (!page) {
880  		/*
881  		 * get_ksm_page did remove_node_from_stable_tree itself.
882  		 */
883  		return 0;
884  	}
885  
886  	/*
887  	 * The page could still be mapped if this races with __mmput() running
888  	 * between ksm_exit() and exit_mmap(). Just refuse to let
889  	 * merge_across_nodes/max_page_sharing be switched.
890  	 */
891  	err = -EBUSY;
892  	if (!page_mapped(page)) {
893  		/*
894  		 * The stable node did not yet appear stale to get_ksm_page(),
895  		 * since that allows for an unmapped ksm page to be recognized
896  		 * right up until it is freed; but the node is safe to remove.
897  		 * This page might be in a pagevec waiting to be freed,
898  		 * or it might be PageSwapCache (perhaps under writeback),
899  		 * or it might have been removed from swapcache a moment ago.
900  		 */
901  		set_page_stable_node(page, NULL);
902  		remove_node_from_stable_tree(stable_node);
903  		err = 0;
904  	}
905  
906  	unlock_page(page);
907  	put_page(page);
908  	return err;
909  }
910  
911  static int remove_stable_node_chain(struct stable_node *stable_node,
912  				    struct rb_root *root)
913  {
914  	struct stable_node *dup;
915  	struct hlist_node *hlist_safe;
916  
917  	if (!is_stable_node_chain(stable_node)) {
918  		VM_BUG_ON(is_stable_node_dup(stable_node));
919  		if (remove_stable_node(stable_node))
920  			return true;
921  		else
922  			return false;
923  	}
924  
925  	hlist_for_each_entry_safe(dup, hlist_safe,
926  				  &stable_node->hlist, hlist_dup) {
927  		VM_BUG_ON(!is_stable_node_dup(dup));
928  		if (remove_stable_node(dup))
929  			return true;
930  	}
931  	BUG_ON(!hlist_empty(&stable_node->hlist));
932  	free_stable_node_chain(stable_node, root);
933  	return false;
934  }
935  
936  static int remove_all_stable_nodes(void)
937  {
938  	struct stable_node *stable_node, *next;
939  	int nid;
940  	int err = 0;
941  
942  	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
943  		while (root_stable_tree[nid].rb_node) {
944  			stable_node = rb_entry(root_stable_tree[nid].rb_node,
945  						struct stable_node, node);
946  			if (remove_stable_node_chain(stable_node,
947  						     root_stable_tree + nid)) {
948  				err = -EBUSY;
949  				break;	/* proceed to next nid */
950  			}
951  			cond_resched();
952  		}
953  	}
954  	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
955  		if (remove_stable_node(stable_node))
956  			err = -EBUSY;
957  		cond_resched();
958  	}
959  	return err;
960  }
961  
962  static int unmerge_and_remove_all_rmap_items(void)
963  {
964  	struct mm_slot *mm_slot;
965  	struct mm_struct *mm;
966  	struct vm_area_struct *vma;
967  	int err = 0;
968  
969  	spin_lock(&ksm_mmlist_lock);
970  	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
971  						struct mm_slot, mm_list);
972  	spin_unlock(&ksm_mmlist_lock);
973  
974  	for (mm_slot = ksm_scan.mm_slot;
975  			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
976  		mm = mm_slot->mm;
977  		mmap_read_lock(mm);
978  		for (vma = mm->mmap; vma; vma = vma->vm_next) {
979  			if (ksm_test_exit(mm))
980  				break;
981  			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
982  				continue;
983  			err = unmerge_ksm_pages(vma,
984  						vma->vm_start, vma->vm_end);
985  			if (err)
986  				goto error;
987  		}
988  
989  		remove_trailing_rmap_items(&mm_slot->rmap_list);
990  		mmap_read_unlock(mm);
991  
992  		spin_lock(&ksm_mmlist_lock);
993  		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
994  						struct mm_slot, mm_list);
995  		if (ksm_test_exit(mm)) {
996  			hash_del(&mm_slot->link);
997  			list_del(&mm_slot->mm_list);
998  			spin_unlock(&ksm_mmlist_lock);
999  
1000  			free_mm_slot(mm_slot);
1001  			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1002  			mmdrop(mm);
1003  		} else
1004  			spin_unlock(&ksm_mmlist_lock);
1005  	}
1006  
1007  	/* Clean up stable nodes, but don't worry if some are still busy */
1008  	remove_all_stable_nodes();
1009  	ksm_scan.seqnr = 0;
1010  	return 0;
1011  
1012  error:
1013  	mmap_read_unlock(mm);
1014  	spin_lock(&ksm_mmlist_lock);
1015  	ksm_scan.mm_slot = &ksm_mm_head;
1016  	spin_unlock(&ksm_mmlist_lock);
1017  	return err;
1018  }
1019  #endif /* CONFIG_SYSFS */
1020  
1021  static u32 calc_checksum(struct page *page)
1022  {
1023  	u32 checksum;
1024  	void *addr = kmap_atomic(page);
1025  	checksum = xxhash(addr, PAGE_SIZE, 0);
1026  	kunmap_atomic(addr);
1027  	return checksum;
1028  }
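/*
 * Note: this xxhash checksum is only used to detect whether a page's
 * contents changed between two scans (rmap_item->oldchecksum) and to
 * recognize zeroed pages (zero_checksum above); tree lookups compare full
 * page contents with memcmp_pages() instead.
 */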
1029  
1030  static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1031  			      pte_t *orig_pte)
1032  {
1033  	struct mm_struct *mm = vma->vm_mm;
1034  	struct page_vma_mapped_walk pvmw = {
1035  		.page = page,
1036  		.vma = vma,
1037  	};
1038  	int swapped;
1039  	int err = -EFAULT;
1040  	struct mmu_notifier_range range;
1041  
1042  	pvmw.address = page_address_in_vma(page, vma);
1043  	if (pvmw.address == -EFAULT)
1044  		goto out;
1045  
1046  	BUG_ON(PageTransCompound(page));
1047  
1048  	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
1049  				pvmw.address,
1050  				pvmw.address + PAGE_SIZE);
1051  	mmu_notifier_invalidate_range_start(&range);
1052  
1053  	if (!page_vma_mapped_walk(&pvmw))
1054  		goto out_mn;
1055  	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1056  		goto out_unlock;
1057  
1058  	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
1059  	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
1060  						mm_tlb_flush_pending(mm)) {
1061  		pte_t entry;
1062  
1063  		swapped = PageSwapCache(page);
1064  		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1065  		/*
1066  		 * Ok, this is tricky: when get_user_pages_fast() runs it doesn't
1067  		 * take any lock, therefore the check that we are going to make
1068  		 * below, of the pagecount against the mapcount, is racy and
1069  		 * O_DIRECT can happen right after the check.
1070  		 * So we clear the pte and flush the tlb before the check;
1071  		 * this assures us that no O_DIRECT can happen after the check
1072  		 * or in the middle of the check.
1073  		 *
1074  		 * No need to notify as we are downgrading page table to read
1075  		 * only not changing it to point to a new page.
1076  		 *
1077  		 * See Documentation/vm/mmu_notifier.rst
1078  		 */
1079  		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1080  		/*
1081  		 * Check that no O_DIRECT or similar I/O is in progress on the
1082  		 * page
1083  		 */
1084  		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1085  			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1086  			goto out_unlock;
1087  		}
1088  		if (pte_dirty(entry))
1089  			set_page_dirty(page);
1090  
1091  		if (pte_protnone(entry))
1092  			entry = pte_mkclean(pte_clear_savedwrite(entry));
1093  		else
1094  			entry = pte_mkclean(pte_wrprotect(entry));
1095  		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
1096  	}
1097  	*orig_pte = *pvmw.pte;
1098  	err = 0;
1099  
1100  out_unlock:
1101  	page_vma_mapped_walk_done(&pvmw);
1102  out_mn:
1103  	mmu_notifier_invalidate_range_end(&range);
1104  out:
1105  	return err;
1106  }
1107  
1108  /**
1109   * replace_page - replace page in vma by new ksm page
1110   * @vma:      vma that holds the pte pointing to page
1111   * @page:     the page we are replacing by kpage
1112   * @kpage:    the ksm page we replace page by
1113   * @orig_pte: the original value of the pte
1114   *
1115   * Returns 0 on success, -EFAULT on failure.
1116   */
1117  static int replace_page(struct vm_area_struct *vma, struct page *page,
1118  			struct page *kpage, pte_t orig_pte)
1119  {
1120  	struct mm_struct *mm = vma->vm_mm;
1121  	pmd_t *pmd;
1122  	pte_t *ptep;
1123  	pte_t newpte;
1124  	spinlock_t *ptl;
1125  	unsigned long addr;
1126  	int err = -EFAULT;
1127  	struct mmu_notifier_range range;
1128  
1129  	addr = page_address_in_vma(page, vma);
1130  	if (addr == -EFAULT)
1131  		goto out;
1132  
1133  	pmd = mm_find_pmd(mm, addr);
1134  	if (!pmd)
1135  		goto out;
1136  
1137  	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
1138  				addr + PAGE_SIZE);
1139  	mmu_notifier_invalidate_range_start(&range);
1140  
1141  	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1142  	if (!pte_same(*ptep, orig_pte)) {
1143  		pte_unmap_unlock(ptep, ptl);
1144  		goto out_mn;
1145  	}
1146  
1147  	/*
1148  	 * No need to check ksm_use_zero_pages here: we can only have a
1149  	 * zero_page here if ksm_use_zero_pages was enabled already.
1150  	 */
1151  	if (!is_zero_pfn(page_to_pfn(kpage))) {
1152  		get_page(kpage);
1153  		page_add_anon_rmap(kpage, vma, addr, false);
1154  		newpte = mk_pte(kpage, vma->vm_page_prot);
1155  	} else {
1156  		newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
1157  					       vma->vm_page_prot));
1158  		/*
1159  		 * We're replacing an anonymous page with a zero page, which is
1160  		 * not anonymous. We need to do proper accounting otherwise we
1161  		 * will get wrong values in /proc, and a BUG message in dmesg
1162  		 * when tearing down the mm.
1163  		 */
1164  		dec_mm_counter(mm, MM_ANONPAGES);
1165  	}
1166  
1167  	flush_cache_page(vma, addr, pte_pfn(*ptep));
1168  	/*
1169  	 * No need to notify as we are replacing a read only page with another
1170  	 * read only page with the same content.
1171  	 *
1172  	 * See Documentation/vm/mmu_notifier.rst
1173  	 */
1174  	ptep_clear_flush(vma, addr, ptep);
1175  	set_pte_at_notify(mm, addr, ptep, newpte);
1176  
1177  	page_remove_rmap(page, false);
1178  	if (!page_mapped(page))
1179  		try_to_free_swap(page);
1180  	put_page(page);
1181  
1182  	pte_unmap_unlock(ptep, ptl);
1183  	err = 0;
1184  out_mn:
1185  	mmu_notifier_invalidate_range_end(&range);
1186  out:
1187  	return err;
1188  }
1189  
1190  /*
1191   * try_to_merge_one_page - take two pages and merge them into one
1192   * @vma: the vma that holds the pte pointing to page
1193   * @page: the PageAnon page that we want to replace with kpage
1194   * @kpage: the PageKsm page that we want to map instead of page,
1195   *         or NULL the first time when we want to use page as kpage.
1196   *
1197   * This function returns 0 if the pages were merged, -EFAULT otherwise.
1198   */
1199  static int try_to_merge_one_page(struct vm_area_struct *vma,
1200  				 struct page *page, struct page *kpage)
1201  {
1202  	pte_t orig_pte = __pte(0);
1203  	int err = -EFAULT;
1204  
1205  	if (page == kpage)			/* ksm page forked */
1206  		return 0;
1207  
1208  	if (!PageAnon(page))
1209  		goto out;
1210  
1211  	/*
1212  	 * We need the page lock to read a stable PageSwapCache in
1213  	 * write_protect_page().  We use trylock_page() instead of
1214  	 * lock_page() because we don't want to wait here - we
1215  	 * prefer to continue scanning and merging different pages,
1216  	 * then come back to this page when it is unlocked.
1217  	 */
1218  	if (!trylock_page(page))
1219  		goto out;
1220  
1221  	if (PageTransCompound(page)) {
1222  		if (split_huge_page(page))
1223  			goto out_unlock;
1224  	}
1225  
1226  	/*
1227  	 * If this anonymous page is mapped only here, its pte may need
1228  	 * to be write-protected.  If it's mapped elsewhere, all of its
1229  	 * ptes are necessarily already write-protected.  But in either
1230  	 * case, we need to lock and check page_count is not raised.
1231  	 */
1232  	if (write_protect_page(vma, page, &orig_pte) == 0) {
1233  		if (!kpage) {
1234  			/*
1235  			 * While we hold page lock, upgrade page from
1236  			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1237  			 * stable_tree_insert() will update stable_node.
1238  			 */
1239  			set_page_stable_node(page, NULL);
1240  			mark_page_accessed(page);
1241  			/*
1242  			 * Page reclaim just frees a clean page with no dirty
1243  			 * ptes: make sure that the ksm page would be swapped.
1244  			 */
1245  			if (!PageDirty(page))
1246  				SetPageDirty(page);
1247  			err = 0;
1248  		} else if (pages_identical(page, kpage))
1249  			err = replace_page(vma, page, kpage, orig_pte);
1250  	}
1251  
1252  	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1253  		munlock_vma_page(page);
1254  		if (!PageMlocked(kpage)) {
1255  			unlock_page(page);
1256  			lock_page(kpage);
1257  			mlock_vma_page(kpage);
1258  			page = kpage;		/* for final unlock */
1259  		}
1260  	}
1261  
1262  out_unlock:
1263  	unlock_page(page);
1264  out:
1265  	return err;
1266  }
1267  
1268  /*
1269   * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1270   * but no new kernel page is allocated: kpage must already be a ksm page.
1271   *
1272   * This function returns 0 if the pages were merged, -EFAULT otherwise.
1273   */
1274  static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1275  				      struct page *page, struct page *kpage)
1276  {
1277  	struct mm_struct *mm = rmap_item->mm;
1278  	struct vm_area_struct *vma;
1279  	int err = -EFAULT;
1280  
1281  	mmap_read_lock(mm);
1282  	vma = find_mergeable_vma(mm, rmap_item->address);
1283  	if (!vma)
1284  		goto out;
1285  
1286  	err = try_to_merge_one_page(vma, page, kpage);
1287  	if (err)
1288  		goto out;
1289  
1290  	/* Unstable nid is in union with stable anon_vma: remove first */
1291  	remove_rmap_item_from_tree(rmap_item);
1292  
1293  	/* Must get reference to anon_vma while still holding mmap_lock */
1294  	rmap_item->anon_vma = vma->anon_vma;
1295  	get_anon_vma(vma->anon_vma);
1296  out:
1297  	mmap_read_unlock(mm);
1298  	return err;
1299  }
1300  
1301  /*
1302   * try_to_merge_two_pages - take two identical pages and prepare them
1303   * to be merged into one page.
1304   *
1305   * This function returns the kpage if we successfully merged two identical
1306   * pages into one ksm page, NULL otherwise.
1307   *
1308   * Note that this function upgrades page to ksm page: if one of the pages
1309   * is already a ksm page, try_to_merge_with_ksm_page should be used.
1310   */
1311  static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1312  					   struct page *page,
1313  					   struct rmap_item *tree_rmap_item,
1314  					   struct page *tree_page)
1315  {
1316  	int err;
1317  
1318  	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1319  	if (!err) {
1320  		err = try_to_merge_with_ksm_page(tree_rmap_item,
1321  							tree_page, page);
1322  		/*
1323  		 * If that fails, we have a ksm page with only one pte
1324  		 * pointing to it: so break it.
1325  		 */
1326  		if (err)
1327  			break_cow(rmap_item);
1328  	}
1329  	return err ? NULL : page;
1330  }
1331  
1332  static __always_inline
1333  bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
1334  {
1335  	VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1336  	/*
1337  	 * Check that at least one mapping still exists, otherwise
1338  	 * there's not much point in merging and sharing with this
1339  	 * stable_node, as the underlying tree_page of the other
1340  	 * sharer is going to be freed soon.
1341  	 */
1342  	return stable_node->rmap_hlist_len &&
1343  		stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1344  }
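/*
 * For example, with the default ksm_max_page_sharing of 256: a dup whose
 * rmap_hlist_len is 255 is still a sharing candidate at offset 0, but not
 * at offset 1 (used further down to ask whether a dup could take one more
 * merge on top of the one already underway).
 */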
1345  
1346  static __always_inline
1347  bool is_page_sharing_candidate(struct stable_node *stable_node)
1348  {
1349  	return __is_page_sharing_candidate(stable_node, 0);
1350  }
1351  
1352  static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1353  				    struct stable_node **_stable_node,
1354  				    struct rb_root *root,
1355  				    bool prune_stale_stable_nodes)
1356  {
1357  	struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1358  	struct hlist_node *hlist_safe;
1359  	struct page *_tree_page, *tree_page = NULL;
1360  	int nr = 0;
1361  	int found_rmap_hlist_len;
1362  
1363  	if (!prune_stale_stable_nodes ||
1364  	    time_before(jiffies, stable_node->chain_prune_time +
1365  			msecs_to_jiffies(
1366  				ksm_stable_node_chains_prune_millisecs)))
1367  		prune_stale_stable_nodes = false;
1368  	else
1369  		stable_node->chain_prune_time = jiffies;
1370  
1371  	hlist_for_each_entry_safe(dup, hlist_safe,
1372  				  &stable_node->hlist, hlist_dup) {
1373  		cond_resched();
1374  		/*
1375  		 * We must walk all stable_node_dup to prune the stale
1376  		 * stable nodes during lookup.
1377  		 *
1378  		 * get_ksm_page can drop the nodes from the
1379  		 * stable_node->hlist if they point to freed pages
1380  		 * (that's why we do a _safe walk). The "dup"
1381  		 * stable_node parameter itself will be freed from
1382  		 * under us if it returns NULL.
1383  		 */
1384  		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
1385  		if (!_tree_page)
1386  			continue;
1387  		nr += 1;
1388  		if (is_page_sharing_candidate(dup)) {
1389  			if (!found ||
1390  			    dup->rmap_hlist_len > found_rmap_hlist_len) {
1391  				if (found)
1392  					put_page(tree_page);
1393  				found = dup;
1394  				found_rmap_hlist_len = found->rmap_hlist_len;
1395  				tree_page = _tree_page;
1396  
1397  				/* skip put_page for found dup */
1398  				if (!prune_stale_stable_nodes)
1399  					break;
1400  				continue;
1401  			}
1402  		}
1403  		put_page(_tree_page);
1404  	}
1405  
1406  	if (found) {
1407  		/*
1408  		 * nr is counting all dups in the chain only if
1409  		 * prune_stale_stable_nodes is true, otherwise we may
1410  		 * break the loop at nr == 1 even if there are
1411  		 * multiple entries.
1412  		 */
1413  		if (prune_stale_stable_nodes && nr == 1) {
1414  			/*
1415  			 * If there's not just one entry it would
1416  			 * corrupt memory, better BUG_ON. In KSM
1417  			 * context with no lock held it's not even
1418  			 * fatal.
1419  			 */
1420  			BUG_ON(stable_node->hlist.first->next);
1421  
1422  			/*
1423  			 * There's just one entry and it is below the
1424  			 * deduplication limit so drop the chain.
1425  			 */
1426  			rb_replace_node(&stable_node->node, &found->node,
1427  					root);
1428  			free_stable_node(stable_node);
1429  			ksm_stable_node_chains--;
1430  			ksm_stable_node_dups--;
1431  			/*
1432  			 * NOTE: the caller depends on the stable_node
1433  			 * to be equal to stable_node_dup if the chain
1434  			 * was collapsed.
1435  			 */
1436  			*_stable_node = found;
1437  			/*
1438  			 * Just for robustness, as stable_node is
1439  			 * otherwise left as a stale pointer, the
1440  			 * compiler shall optimize it away at build
1441  			 * time.
1442  			 */
1443  			stable_node = NULL;
1444  		} else if (stable_node->hlist.first != &found->hlist_dup &&
1445  			   __is_page_sharing_candidate(found, 1)) {
1446  			/*
1447  			 * If the found stable_node dup can accept one
1448  			 * more future merge (in addition to the one
1449  			 * that is underway) and is not at the head of
1450  			 * the chain, put it there so next search will
1451  			 * be quicker in the !prune_stale_stable_nodes
1452  			 * case.
1453  			 *
1454  			 * NOTE: it would be inaccurate to use nr > 1
1455  			 * instead of checking the hlist.first pointer
1456  			 * directly, because in the
1457  			 * prune_stale_stable_nodes case "nr" isn't
1458  			 * the position of the found dup in the chain,
1459  			 * but the total number of dups in the chain.
1460  			 */
1461  			hlist_del(&found->hlist_dup);
1462  			hlist_add_head(&found->hlist_dup,
1463  				       &stable_node->hlist);
1464  		}
1465  	}
1466  
1467  	*_stable_node_dup = found;
1468  	return tree_page;
1469  }
1470  
1471  static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
1472  					       struct rb_root *root)
1473  {
1474  	if (!is_stable_node_chain(stable_node))
1475  		return stable_node;
1476  	if (hlist_empty(&stable_node->hlist)) {
1477  		free_stable_node_chain(stable_node, root);
1478  		return NULL;
1479  	}
1480  	return hlist_entry(stable_node->hlist.first,
1481  			   typeof(*stable_node), hlist_dup);
1482  }
1483  
1484  /*
1485   * Like for get_ksm_page, this function can free the *_stable_node and
1486   * *_stable_node_dup if the returned tree_page is NULL.
1487   *
1488   * It can also free and overwrite *_stable_node with the found
1489   * stable_node_dup if the chain is collapsed (in which case
1490   * *_stable_node will be equal to *_stable_node_dup like if the chain
1491   * never existed). It's up to the caller to verify tree_page is not
1492   * NULL before dereferencing *_stable_node or *_stable_node_dup.
1493   *
1494   * *_stable_node_dup is really a second output parameter of this
1495   * function and will be overwritten in all cases, the caller doesn't
1496   * need to initialize it.
1497   */
1498  static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1499  					struct stable_node **_stable_node,
1500  					struct rb_root *root,
1501  					bool prune_stale_stable_nodes)
1502  {
1503  	struct stable_node *stable_node = *_stable_node;
1504  	if (!is_stable_node_chain(stable_node)) {
1505  		if (is_page_sharing_candidate(stable_node)) {
1506  			*_stable_node_dup = stable_node;
1507  			return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
1508  		}
1509  		/*
1510  		 * _stable_node_dup set to NULL means the stable_node
1511  		 * reached the ksm_max_page_sharing limit.
1512  		 */
1513  		*_stable_node_dup = NULL;
1514  		return NULL;
1515  	}
1516  	return stable_node_dup(_stable_node_dup, _stable_node, root,
1517  			       prune_stale_stable_nodes);
1518  }
1519  
1520  static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1521  						struct stable_node **s_n,
1522  						struct rb_root *root)
1523  {
1524  	return __stable_node_chain(s_n_d, s_n, root, true);
1525  }
1526  
1527  static __always_inline struct page *chain(struct stable_node **s_n_d,
1528  					  struct stable_node *s_n,
1529  					  struct rb_root *root)
1530  {
1531  	struct stable_node *old_stable_node = s_n;
1532  	struct page *tree_page;
1533  
1534  	tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
1535  	/* not pruning dups so s_n cannot have changed */
1536  	VM_BUG_ON(s_n != old_stable_node);
1537  	return tree_page;
1538  }
1539  
1540  /*
1541   * stable_tree_search - search for page inside the stable tree
1542   *
1543   * This function checks if there is a page inside the stable tree
1544   * with identical content to the page that we are scanning right now.
1545   *
1546   * This function returns the stable tree node of identical content if found,
1547   * NULL otherwise.
1548   */
1549  static struct page *stable_tree_search(struct page *page)
1550  {
1551  	int nid;
1552  	struct rb_root *root;
1553  	struct rb_node **new;
1554  	struct rb_node *parent;
1555  	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1556  	struct stable_node *page_node;
1557  
1558  	page_node = page_stable_node(page);
1559  	if (page_node && page_node->head != &migrate_nodes) {
1560  		/* ksm page forked */
1561  		get_page(page);
1562  		return page;
1563  	}
1564  
1565  	nid = get_kpfn_nid(page_to_pfn(page));
1566  	root = root_stable_tree + nid;
1567  again:
1568  	new = &root->rb_node;
1569  	parent = NULL;
1570  
1571  	while (*new) {
1572  		struct page *tree_page;
1573  		int ret;
1574  
1575  		cond_resched();
1576  		stable_node = rb_entry(*new, struct stable_node, node);
1577  		stable_node_any = NULL;
1578  		tree_page = chain_prune(&stable_node_dup, &stable_node,	root);
1579  		/*
1580  		 * NOTE: stable_node may have been freed by
1581  		 * chain_prune() if the returned stable_node_dup is
1582  		 * not NULL. stable_node_dup may have been inserted in
1583  		 * the rbtree instead as a regular stable_node (in
1584  		 * order to collapse the stable_node chain if a single
1585  		 * stable_node dup was found in it). In such case the
1586  		 * stable_node is overwritten by the callee to point
1587  		 * to the stable_node_dup that was collapsed in the
1588  		 * stable rbtree and stable_node will be equal to
1589  		 * stable_node_dup like if the chain never existed.
1590  		 */
1591  		if (!stable_node_dup) {
1592  			/*
1593  			 * Either all stable_node dups were full in
1594  			 * this stable_node chain, or this chain was
1595  			 * empty and should be rb_erased.
1596  			 */
1597  			stable_node_any = stable_node_dup_any(stable_node,
1598  							      root);
1599  			if (!stable_node_any) {
1600  				/* rb_erase just ran */
1601  				goto again;
1602  			}
1603  			/*
1604  			 * Take the page of any of the stable_node dups of
1605  			 * this stable_node chain to let the tree walk
1606  			 * continue. All KSM pages belonging to the
1607  			 * stable_node dups in a stable_node chain
1608  			 * have the same content and they're
1609  			 * write protected at all times. Any will work
1610  			 * fine to continue the walk.
1611  			 */
1612  			tree_page = get_ksm_page(stable_node_any,
1613  						 GET_KSM_PAGE_NOLOCK);
1614  		}
1615  		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1616  		if (!tree_page) {
1617  			/*
1618  			 * If we walked over a stale stable_node,
1619  			 * get_ksm_page() will call rb_erase() and it
1620  			 * may rebalance the tree from under us. So
1621  			 * restart the search from scratch. Returning
1622  			 * NULL would be safe too, but we'd generate
1623  			 * false negative insertions just because some
1624  			 * stable_node was stale.
1625  			 */
1626  			goto again;
1627  		}
1628  
1629  		ret = memcmp_pages(page, tree_page);
1630  		put_page(tree_page);
1631  
1632  		parent = *new;
1633  		if (ret < 0)
1634  			new = &parent->rb_left;
1635  		else if (ret > 0)
1636  			new = &parent->rb_right;
1637  		else {
1638  			if (page_node) {
1639  				VM_BUG_ON(page_node->head != &migrate_nodes);
1640  				/*
1641  				 * Test if the migrated page should be merged
1642  				 * into a stable node dup. If the mapcount is
1643  				 * 1 we can merge it with the existing KSM
1644  				 * page without adding it to the chain.
1645  				 */
1646  				if (page_mapcount(page) > 1)
1647  					goto chain_append;
1648  			}
1649  
1650  			if (!stable_node_dup) {
1651  				/*
1652  				 * If the stable_node is a chain and
1653  				 * we got a payload match in memcmp
1654  				 * but we cannot merge the scanned
1655  				 * page into any of the existing
1656  				 * stable_node dups because they're
1657  				 * all full, we must wait for the
1658  				 * scanned page to find itself a match
1659  				 * in the unstable tree, creating a
1660  				 * brand new KSM page to add later to
1661  				 * the dups of this stable_node.
1662  				 */
1663  				return NULL;
1664  			}
1665  
1666  			/*
1667  			 * Lock and unlock the stable_node's page (which
1668  			 * might already have been migrated) so that page
1669  			 * migration is sure to notice its raised count.
1670  			 * It would be more elegant to return stable_node
1671  			 * than kpage, but that involves more changes.
1672  			 */
1673  			tree_page = get_ksm_page(stable_node_dup,
1674  						 GET_KSM_PAGE_TRYLOCK);
1675  
1676  			if (PTR_ERR(tree_page) == -EBUSY)
1677  				return ERR_PTR(-EBUSY);
1678  
1679  			if (unlikely(!tree_page))
1680  				/*
1681  				 * The tree may have been rebalanced,
1682  				 * so re-evaluate parent and new.
1683  				 */
1684  				goto again;
1685  			unlock_page(tree_page);
1686  
1687  			if (get_kpfn_nid(stable_node_dup->kpfn) !=
1688  			    NUMA(stable_node_dup->nid)) {
1689  				put_page(tree_page);
1690  				goto replace;
1691  			}
1692  			return tree_page;
1693  		}
1694  	}
1695  
1696  	if (!page_node)
1697  		return NULL;
1698  
1699  	list_del(&page_node->list);
1700  	DO_NUMA(page_node->nid = nid);
1701  	rb_link_node(&page_node->node, parent, new);
1702  	rb_insert_color(&page_node->node, root);
1703  out:
1704  	if (is_page_sharing_candidate(page_node)) {
1705  		get_page(page);
1706  		return page;
1707  	} else
1708  		return NULL;
1709  
1710  replace:
1711  	/*
1712  	 * If stable_node was a chain and chain_prune collapsed it,
1713  	 * stable_node has been updated to be the new regular
1714  	 * stable_node. A collapse of the chain is indistinguishable
1715  	 * from the case where there was no chain in the stable
1716  	 * rbtree. Otherwise stable_node is the chain and
1717  	 * stable_node_dup is the dup to replace.
1718  	 */
1719  	if (stable_node_dup == stable_node) {
1720  		VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1721  		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1722  		/* there is no chain */
1723  		if (page_node) {
1724  			VM_BUG_ON(page_node->head != &migrate_nodes);
1725  			list_del(&page_node->list);
1726  			DO_NUMA(page_node->nid = nid);
1727  			rb_replace_node(&stable_node_dup->node,
1728  					&page_node->node,
1729  					root);
1730  			if (is_page_sharing_candidate(page_node))
1731  				get_page(page);
1732  			else
1733  				page = NULL;
1734  		} else {
1735  			rb_erase(&stable_node_dup->node, root);
1736  			page = NULL;
1737  		}
1738  	} else {
1739  		VM_BUG_ON(!is_stable_node_chain(stable_node));
1740  		__stable_node_dup_del(stable_node_dup);
1741  		if (page_node) {
1742  			VM_BUG_ON(page_node->head != &migrate_nodes);
1743  			list_del(&page_node->list);
1744  			DO_NUMA(page_node->nid = nid);
1745  			stable_node_chain_add_dup(page_node, stable_node);
1746  			if (is_page_sharing_candidate(page_node))
1747  				get_page(page);
1748  			else
1749  				page = NULL;
1750  		} else {
1751  			page = NULL;
1752  		}
1753  	}
1754  	stable_node_dup->head = &migrate_nodes;
1755  	list_add(&stable_node_dup->list, stable_node_dup->head);
1756  	return page;
1757  
1758  chain_append:
1759  	/* stable_node_dup could be NULL if it reached the sharing limit */
1760  	if (!stable_node_dup)
1761  		stable_node_dup = stable_node_any;
1762  	/*
1763  	 * If stable_node was a chain and chain_prune collapsed it,
1764  	 * stable_node has been updated to be the new regular
1765  	 * stable_node. A collapse of the chain is indistinguishable
1766  	 * from the case where there was no chain in the stable
1767  	 * rbtree. Otherwise stable_node is the chain and
1768  	 * stable_node_dup is the dup to replace.
1769  	 */
1770  	if (stable_node_dup == stable_node) {
1771  		VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1772  		/* chain is missing so create it */
1773  		stable_node = alloc_stable_node_chain(stable_node_dup,
1774  						      root);
1775  		if (!stable_node)
1776  			return NULL;
1777  	}
1778  	/*
1779  	 * Add this stable_node dup, which was
1780  	 * migrated, to the stable_node chain
1781  	 * of the current nid for this page
1782  	 * content.
1783  	 */
1784  	VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
1785  	VM_BUG_ON(page_node->head != &migrate_nodes);
1786  	list_del(&page_node->list);
1787  	DO_NUMA(page_node->nid = nid);
1788  	stable_node_chain_add_dup(page_node, stable_node);
1789  	goto out;
1790  }
1791  
1792  /*
1793   * stable_tree_insert - insert stable tree node pointing to new ksm page
1794   * into the stable tree.
1795   *
1796   * This function returns the stable tree node just allocated on success,
1797   * NULL otherwise.
1798   */
1799  static struct stable_node *stable_tree_insert(struct page *kpage)
1800  {
1801  	int nid;
1802  	unsigned long kpfn;
1803  	struct rb_root *root;
1804  	struct rb_node **new;
1805  	struct rb_node *parent;
1806  	struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
1807  	bool need_chain = false;
1808  
1809  	kpfn = page_to_pfn(kpage);
1810  	nid = get_kpfn_nid(kpfn);
1811  	root = root_stable_tree + nid;
1812  again:
1813  	parent = NULL;
1814  	new = &root->rb_node;
1815  
1816  	while (*new) {
1817  		struct page *tree_page;
1818  		int ret;
1819  
1820  		cond_resched();
1821  		stable_node = rb_entry(*new, struct stable_node, node);
1822  		stable_node_any = NULL;
1823  		tree_page = chain(&stable_node_dup, stable_node, root);
1824  		if (!stable_node_dup) {
1825  			/*
1826  			 * Either all stable_node dups were full in
1827  			 * this stable_node chain, or this chain was
1828  			 * empty and should be rb_erased.
1829  			 */
1830  			stable_node_any = stable_node_dup_any(stable_node,
1831  							      root);
1832  			if (!stable_node_any) {
1833  				/* rb_erase has just run */
1834  				goto again;
1835  			}
1836  			/*
1837  			 * Take the page of any stable_node dup in
1838  			 * this stable_node chain to let the tree walk
1839  			 * continue. All KSM pages belonging to the
1840  			 * stable_node dups in a stable_node chain
1841  			 * have the same content and they're
1842  			 * write protected at all times. Any of them
1843  			 * will work fine to continue the walk.
1844  			 */
1845  			tree_page = get_ksm_page(stable_node_any,
1846  						 GET_KSM_PAGE_NOLOCK);
1847  		}
1848  		VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
1849  		if (!tree_page) {
1850  			/*
1851  			 * If we walked over a stale stable_node,
1852  			 * get_ksm_page() will call rb_erase() and it
1853  			 * may rebalance the tree from under us. So
1854  			 * restart the search from scratch. Returning
1855  			 * NULL would be safe too, but we'd generate
1856  			 * false negative insertions just because some
1857  			 * stable_node was stale.
1858  			 */
1859  			goto again;
1860  		}
1861  
1862  		ret = memcmp_pages(kpage, tree_page);
1863  		put_page(tree_page);
1864  
1865  		parent = *new;
1866  		if (ret < 0)
1867  			new = &parent->rb_left;
1868  		else if (ret > 0)
1869  			new = &parent->rb_right;
1870  		else {
1871  			need_chain = true;
1872  			break;
1873  		}
1874  	}
1875  
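      	/*
      	 * Allocate the new stable_node (dup) for kpage.  If no identical
      	 * content was found it becomes a regular node in the rbtree,
      	 * otherwise it is hung off a stable_node chain below.
      	 */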
1876  	stable_node_dup = alloc_stable_node();
1877  	if (!stable_node_dup)
1878  		return NULL;
1879  
1880  	INIT_HLIST_HEAD(&stable_node_dup->hlist);
1881  	stable_node_dup->kpfn = kpfn;
1882  	set_page_stable_node(kpage, stable_node_dup);
1883  	stable_node_dup->rmap_hlist_len = 0;
1884  	DO_NUMA(stable_node_dup->nid = nid);
1885  	if (!need_chain) {
1886  		rb_link_node(&stable_node_dup->node, parent, new);
1887  		rb_insert_color(&stable_node_dup->node, root);
1888  	} else {
1889  		if (!is_stable_node_chain(stable_node)) {
1890  			struct stable_node *orig = stable_node;
1891  			/* chain is missing so create it */
1892  			stable_node = alloc_stable_node_chain(orig, root);
1893  			if (!stable_node) {
1894  				free_stable_node(stable_node_dup);
1895  				return NULL;
1896  			}
1897  		}
1898  		stable_node_chain_add_dup(stable_node_dup, stable_node);
1899  	}
1900  
1901  	return stable_node_dup;
1902  }
1903  
1904  /*
1905   * unstable_tree_search_insert - search for identical page,
1906   * else insert rmap_item into the unstable tree.
1907   *
1908   * This function searches for a page in the unstable tree identical to the
1909   * page currently being scanned; and if no identical page is found in the
1910   * tree, we insert rmap_item as a new object into the unstable tree.
1911   *
1912   * This function returns a pointer to the rmap_item whose page was found
1913   * identical to the currently scanned page, NULL otherwise.
1914   *
1915   * This function does both searching and inserting, because they share
1916   * the same walking algorithm in an rbtree.
1917   */
1918  static
1919  struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1920  					      struct page *page,
1921  					      struct page **tree_pagep)
1922  {
1923  	struct rb_node **new;
1924  	struct rb_root *root;
1925  	struct rb_node *parent = NULL;
1926  	int nid;
1927  
1928  	nid = get_kpfn_nid(page_to_pfn(page));
1929  	root = root_unstable_tree + nid;
1930  	new = &root->rb_node;
1931  
1932  	while (*new) {
1933  		struct rmap_item *tree_rmap_item;
1934  		struct page *tree_page;
1935  		int ret;
1936  
1937  		cond_resched();
1938  		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1939  		tree_page = get_mergeable_page(tree_rmap_item);
1940  		if (!tree_page)
1941  			return NULL;
1942  
1943  		/*
1944  		 * Don't substitute a ksm page for a forked page.
1945  		 */
1946  		if (page == tree_page) {
1947  			put_page(tree_page);
1948  			return NULL;
1949  		}
1950  
1951  		ret = memcmp_pages(page, tree_page);
1952  
1953  		parent = *new;
1954  		if (ret < 0) {
1955  			put_page(tree_page);
1956  			new = &parent->rb_left;
1957  		} else if (ret > 0) {
1958  			put_page(tree_page);
1959  			new = &parent->rb_right;
1960  		} else if (!ksm_merge_across_nodes &&
1961  			   page_to_nid(tree_page) != nid) {
1962  			/*
1963  			 * If tree_page has been migrated to another NUMA node,
1964  			 * it will be flushed out and put in the right unstable
1965  			 * tree next time: only merge with it when across_nodes.
1966  			 */
1967  			put_page(tree_page);
1968  			return NULL;
1969  		} else {
1970  			*tree_pagep = tree_page;
1971  			return tree_rmap_item;
1972  		}
1973  	}
1974  
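      	/*
      	 * No identical page was found: insert rmap_item into the unstable
      	 * tree.  The low bits of the address record the current scan's
      	 * seqnr, so entries left over from a previous full scan can be
      	 * recognized as stale.
      	 */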
1975  	rmap_item->address |= UNSTABLE_FLAG;
1976  	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1977  	DO_NUMA(rmap_item->nid = nid);
1978  	rb_link_node(&rmap_item->node, parent, new);
1979  	rb_insert_color(&rmap_item->node, root);
1980  
1981  	ksm_pages_unshared++;
1982  	return NULL;
1983  }
1984  
1985  /*
1986   * stable_tree_append - add another rmap_item to the linked list of
1987   * rmap_items hanging off a given node of the stable tree, all sharing
1988   * the same ksm page.
1989   */
1990  static void stable_tree_append(struct rmap_item *rmap_item,
1991  			       struct stable_node *stable_node,
1992  			       bool max_page_sharing_bypass)
1993  {
1994  	/*
1995  	 * rmap won't find this mapping if we don't insert the
1996  	 * rmap_item in the right stable_node
1997  	 * duplicate. page migration could break later if rmap breaks,
1998  	 * so we might as well crash here. We really only need to check
1999  	 * for rmap_hlist_len == STABLE_NODE_CHAIN, but catching other
2000  	 * negative values costs nothing: an underflow detected here for
2001  	 * the first time (and not when decreasing rmap_hlist_len)
2002  	 * would be a sign of memory corruption in the stable_node.
2003  	 */
2004  	BUG_ON(stable_node->rmap_hlist_len < 0);
2005  
2006  	stable_node->rmap_hlist_len++;
2007  	if (!max_page_sharing_bypass)
2008  		/* possibly non-fatal but unexpected overflow, only warn */
2009  		WARN_ON_ONCE(stable_node->rmap_hlist_len >
2010  			     ksm_max_page_sharing);
2011  
2012  	rmap_item->head = stable_node;
2013  	rmap_item->address |= STABLE_FLAG;
2014  	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2015  
2016  	if (rmap_item->hlist.next)
2017  		ksm_pages_sharing++;
2018  	else
2019  		ksm_pages_shared++;
2020  }
2021  
2022  /*
2023   * cmp_and_merge_page - first see if page can be merged into the stable tree;
2024   * if not, compare checksum to previous and if it's the same, see if page can
2025   * be inserted into the unstable tree, or merged with a page already there and
2026   * both transferred to the stable tree.
2027   *
2028   * @page: the page that we are searching for an identical page to.
2029   * @rmap_item: the reverse mapping into the virtual address of this page
2030   */
2031  static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2032  {
2033  	struct mm_struct *mm = rmap_item->mm;
2034  	struct rmap_item *tree_rmap_item;
2035  	struct page *tree_page = NULL;
2036  	struct stable_node *stable_node;
2037  	struct page *kpage;
2038  	unsigned int checksum;
2039  	int err;
2040  	bool max_page_sharing_bypass = false;
2041  
2042  	stable_node = page_stable_node(page);
2043  	if (stable_node) {
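      		/*
      		 * The ksm page may have been migrated to another NUMA node
      		 * since this stable_node was created: if so, detach the node
      		 * and park it on migrate_nodes so it can later be placed in
      		 * the stable tree of the right node.
      		 */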
2044  		if (stable_node->head != &migrate_nodes &&
2045  		    get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2046  		    NUMA(stable_node->nid)) {
2047  			stable_node_dup_del(stable_node);
2048  			stable_node->head = &migrate_nodes;
2049  			list_add(&stable_node->list, stable_node->head);
2050  		}
2051  		if (stable_node->head != &migrate_nodes &&
2052  		    rmap_item->head == stable_node)
2053  			return;
2054  		/*
2055  		 * If it's a KSM fork, allow it to go over the sharing limit
2056  		 * without warnings.
2057  		 */
2058  		if (!is_page_sharing_candidate(stable_node))
2059  			max_page_sharing_bypass = true;
2060  	}
2061  
2062  	/* We first start with searching the page inside the stable tree */
2063  	kpage = stable_tree_search(page);
2064  	if (kpage == page && rmap_item->head == stable_node) {
2065  		put_page(kpage);
2066  		return;
2067  	}
2068  
2069  	remove_rmap_item_from_tree(rmap_item);
2070  
2071  	if (kpage) {
2072  		if (PTR_ERR(kpage) == -EBUSY)
2073  			return;
2074  
2075  		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2076  		if (!err) {
2077  			/*
2078  			 * The page was successfully merged:
2079  			 * add its rmap_item to the stable tree.
2080  			 */
2081  			lock_page(kpage);
2082  			stable_tree_append(rmap_item, page_stable_node(kpage),
2083  					   max_page_sharing_bypass);
2084  			unlock_page(kpage);
2085  		}
2086  		put_page(kpage);
2087  		return;
2088  	}
2089  
2090  	/*
2091  	 * If the hash value of the page has changed from the last time
2092  	 * we calculated it, this page is changing frequently: therefore we
2093  	 * don't want to insert it in the unstable tree, and we don't want
2094  	 * to waste our time searching for something identical to it there.
2095  	 */
2096  	checksum = calc_checksum(page);
2097  	if (rmap_item->oldchecksum != checksum) {
2098  		rmap_item->oldchecksum = checksum;
2099  		return;
2100  	}
2101  
2102  	/*
2103  	 * Same checksum as an empty page. We attempt to merge it with the
2104  	 * appropriate zero page if the user enabled this via sysfs.
2105  	 */
2106  	if (ksm_use_zero_pages && (checksum == zero_checksum)) {
2107  		struct vm_area_struct *vma;
2108  
2109  		mmap_read_lock(mm);
2110  		vma = find_mergeable_vma(mm, rmap_item->address);
2111  		if (vma) {
2112  			err = try_to_merge_one_page(vma, page,
2113  					ZERO_PAGE(rmap_item->address));
2114  		} else {
2115  			/*
2116  			 * If the vma is out of date, we do not need to
2117  			 * continue.
2118  			 */
2119  			err = 0;
2120  		}
2121  		mmap_read_unlock(mm);
2122  		/*
2123  		 * In case of failure, the page was not really empty, so we
2124  		 * need to continue. Otherwise we're done.
2125  		 */
2126  		if (!err)
2127  			return;
2128  	}
2129  	tree_rmap_item =
2130  		unstable_tree_search_insert(rmap_item, page, &tree_page);
2131  	if (tree_rmap_item) {
2132  		bool split;
2133  
2134  		kpage = try_to_merge_two_pages(rmap_item, page,
2135  						tree_rmap_item, tree_page);
2136  		/*
2137  		 * If both pages we tried to merge belong to the same compound
2138  		 * page, then we actually ended up increasing the reference
2139  		 * count of the same compound page twice, and split_huge_page
2140  		 * failed.
2141  		 * Here we set a flag if that happened, and we use it later to
2142  		 * try split_huge_page again. Since we call put_page right
2143  		 * afterwards, the reference count will be correct and
2144  		 * split_huge_page should succeed.
2145  		 */
2146  		split = PageTransCompound(page)
2147  			&& compound_head(page) == compound_head(tree_page);
2148  		put_page(tree_page);
2149  		if (kpage) {
2150  			/*
2151  			 * The pages were successfully merged: insert new
2152  			 * node in the stable tree and add both rmap_items.
2153  			 */
2154  			lock_page(kpage);
2155  			stable_node = stable_tree_insert(kpage);
2156  			if (stable_node) {
2157  				stable_tree_append(tree_rmap_item, stable_node,
2158  						   false);
2159  				stable_tree_append(rmap_item, stable_node,
2160  						   false);
2161  			}
2162  			unlock_page(kpage);
2163  
2164  			/*
2165  			 * If we fail to insert the page into the stable tree,
2166  			 * we will have 2 virtual addresses that are pointing
2167  			 * to a ksm page left outside the stable tree,
2168  			 * in which case we need to break_cow on both.
2169  			 */
2170  			if (!stable_node) {
2171  				break_cow(tree_rmap_item);
2172  				break_cow(rmap_item);
2173  			}
2174  		} else if (split) {
2175  			/*
2176  			 * We are here if we tried to merge two pages and
2177  			 * failed because they both belonged to the same
2178  			 * compound page. We will split the page now, but no
2179  			 * merging will take place.
2180  			 * We do not want to add the cost of a full lock; if
2181  			 * the page is locked, it is better to skip it and
2182  			 * perhaps try again later.
2183  			 */
2184  			if (!trylock_page(page))
2185  				return;
2186  			split_huge_page(page);
2187  			unlock_page(page);
2188  		}
2189  	}
2190  }
2191  
2192  static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
2193  					    struct rmap_item **rmap_list,
2194  					    unsigned long addr)
2195  {
2196  	struct rmap_item *rmap_item;
2197  
2198  	while (*rmap_list) {
2199  		rmap_item = *rmap_list;
2200  		if ((rmap_item->address & PAGE_MASK) == addr)
2201  			return rmap_item;
2202  		if (rmap_item->address > addr)
2203  			break;
2204  		*rmap_list = rmap_item->rmap_list;
2205  		remove_rmap_item_from_tree(rmap_item);
2206  		free_rmap_item(rmap_item);
2207  	}
2208  
2209  	rmap_item = alloc_rmap_item();
2210  	if (rmap_item) {
2211  		/* It has already been zeroed */
2212  		rmap_item->mm = mm_slot->mm;
2213  		rmap_item->address = addr;
2214  		rmap_item->rmap_list = *rmap_list;
2215  		*rmap_list = rmap_item;
2216  	}
2217  	return rmap_item;
2218  }
2219  
2220  static struct rmap_item *scan_get_next_rmap_item(struct page **page)
2221  {
2222  	struct mm_struct *mm;
2223  	struct mm_slot *slot;
2224  	struct vm_area_struct *vma;
2225  	struct rmap_item *rmap_item;
2226  	int nid;
2227  
2228  	if (list_empty(&ksm_mm_head.mm_list))
2229  		return NULL;
2230  
2231  	slot = ksm_scan.mm_slot;
2232  	if (slot == &ksm_mm_head) {
2233  		/*
2234  		 * A number of pages can hang around indefinitely on per-cpu
2235  		 * pagevecs, raised page count preventing write_protect_page
2236  		 * from merging them.  Though it doesn't really matter much,
2237  		 * it is puzzling to see some stuck in pages_volatile until
2238  		 * other activity jostles them out, and they also prevented
2239  		 * LTP's KSM test from succeeding deterministically; so drain
2240  		 * them here (here rather than on entry to ksm_do_scan(),
2241  		 * so we don't IPI too often when pages_to_scan is set low).
2242  		 */
2243  		lru_add_drain_all();
2244  
2245  		/*
2246  		 * Whereas stale stable_nodes on the stable_tree itself
2247  		 * get pruned in the regular course of stable_tree_search(),
2248  		 * those moved out to the migrate_nodes list can accumulate:
2249  		 * so prune them once before each full scan.
2250  		 */
2251  		if (!ksm_merge_across_nodes) {
2252  			struct stable_node *stable_node, *next;
2253  			struct page *page;
2254  
2255  			list_for_each_entry_safe(stable_node, next,
2256  						 &migrate_nodes, list) {
2257  				page = get_ksm_page(stable_node,
2258  						    GET_KSM_PAGE_NOLOCK);
2259  				if (page)
2260  					put_page(page);
2261  				cond_resched();
2262  			}
2263  		}
2264  
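      		/* The unstable tree is rebuilt from scratch on every full scan */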
2265  		for (nid = 0; nid < ksm_nr_node_ids; nid++)
2266  			root_unstable_tree[nid] = RB_ROOT;
2267  
2268  		spin_lock(&ksm_mmlist_lock);
2269  		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
2270  		ksm_scan.mm_slot = slot;
2271  		spin_unlock(&ksm_mmlist_lock);
2272  		/*
2273  		 * Although we tested list_empty() above, a racing __ksm_exit
2274  		 * of the last mm on the list may have removed it since then.
2275  		 */
2276  		if (slot == &ksm_mm_head)
2277  			return NULL;
2278  next_mm:
2279  		ksm_scan.address = 0;
2280  		ksm_scan.rmap_list = &slot->rmap_list;
2281  	}
2282  
2283  	mm = slot->mm;
2284  	mmap_read_lock(mm);
2285  	if (ksm_test_exit(mm))
2286  		vma = NULL;
2287  	else
2288  		vma = find_vma(mm, ksm_scan.address);
2289  
2290  	for (; vma; vma = vma->vm_next) {
2291  		if (!(vma->vm_flags & VM_MERGEABLE))
2292  			continue;
2293  		if (ksm_scan.address < vma->vm_start)
2294  			ksm_scan.address = vma->vm_start;
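      		/* A vma without anon_vma has no anonymous pages to merge yet */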
2295  		if (!vma->anon_vma)
2296  			ksm_scan.address = vma->vm_end;
2297  
2298  		while (ksm_scan.address < vma->vm_end) {
2299  			if (ksm_test_exit(mm))
2300  				break;
2301  			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
2302  			if (IS_ERR_OR_NULL(*page)) {
2303  				ksm_scan.address += PAGE_SIZE;
2304  				cond_resched();
2305  				continue;
2306  			}
2307  			if (PageAnon(*page)) {
2308  				flush_anon_page(vma, *page, ksm_scan.address);
2309  				flush_dcache_page(*page);
2310  				rmap_item = get_next_rmap_item(slot,
2311  					ksm_scan.rmap_list, ksm_scan.address);
2312  				if (rmap_item) {
2313  					ksm_scan.rmap_list =
2314  							&rmap_item->rmap_list;
2315  					ksm_scan.address += PAGE_SIZE;
2316  				} else
2317  					put_page(*page);
2318  				mmap_read_unlock(mm);
2319  				return rmap_item;
2320  			}
2321  			put_page(*page);
2322  			ksm_scan.address += PAGE_SIZE;
2323  			cond_resched();
2324  		}
2325  	}
2326  
2327  	if (ksm_test_exit(mm)) {
2328  		ksm_scan.address = 0;
2329  		ksm_scan.rmap_list = &slot->rmap_list;
2330  	}
2331  	/*
2332  	 * Nuke all the rmap_items that are above this current rmap,
2333  	 * because there were no VM_MERGEABLE vmas with such addresses.
2334  	 */
2335  	remove_trailing_rmap_items(ksm_scan.rmap_list);
2336  
2337  	spin_lock(&ksm_mmlist_lock);
2338  	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
2339  						struct mm_slot, mm_list);
2340  	if (ksm_scan.address == 0) {
2341  		/*
2342  		 * We've completed a full scan of all vmas, holding mmap_lock
2343  		 * throughout, and found no VM_MERGEABLE: so do the same as
2344  		 * __ksm_exit does to remove this mm from all our lists now.
2345  		 * This applies either when cleaning up after __ksm_exit
2346  		 * (but beware: we can reach here even before __ksm_exit),
2347  		 * or when all VM_MERGEABLE areas have been unmapped (and
2348  		 * mmap_lock then protects against race with MADV_MERGEABLE).
2349  		 */
2350  		hash_del(&slot->link);
2351  		list_del(&slot->mm_list);
2352  		spin_unlock(&ksm_mmlist_lock);
2353  
2354  		free_mm_slot(slot);
2355  		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2356  		mmap_read_unlock(mm);
2357  		mmdrop(mm);
2358  	} else {
2359  		mmap_read_unlock(mm);
2360  		/*
2361  		 * mmap_read_unlock(mm) must come first because once
2362  		 * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
2363  		 * already have been freed under us by __ksm_exit()
2364  		 * because the "mm_slot" is still hashed and
2365  		 * ksm_scan.mm_slot doesn't point to it anymore.
2366  		 */
2367  		spin_unlock(&ksm_mmlist_lock);
2368  	}
2369  
2370  	/* Repeat until we've completed scanning the whole list */
2371  	slot = ksm_scan.mm_slot;
2372  	if (slot != &ksm_mm_head)
2373  		goto next_mm;
2374  
2375  	ksm_scan.seqnr++;
2376  	return NULL;
2377  }
2378  
2379  /**
2380   * ksm_do_scan  - the ksm scanner main worker function.
2381   * @scan_npages:  number of pages we want to scan before we return.
2382   */
2383  static void ksm_do_scan(unsigned int scan_npages)
2384  {
2385  	struct rmap_item *rmap_item;
2386  	struct page *page;
2387  
2388  	while (scan_npages-- && likely(!freezing(current))) {
2389  		cond_resched();
2390  		rmap_item = scan_get_next_rmap_item(&page);
2391  		if (!rmap_item)
2392  			return;
2393  		cmp_and_merge_page(page, rmap_item);
2394  		put_page(page);
2395  	}
2396  }
2397  
2398  static int ksmd_should_run(void)
2399  {
2400  	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
2401  }
2402  
2403  static int ksm_scan_thread(void *nothing)
2404  {
2405  	unsigned int sleep_ms;
2406  
2407  	set_freezable();
2408  	set_user_nice(current, 5);
2409  
2410  	while (!kthread_should_stop()) {
2411  		mutex_lock(&ksm_thread_mutex);
2412  		wait_while_offlining();
2413  		if (ksmd_should_run())
2414  			ksm_do_scan(ksm_thread_pages_to_scan);
2415  		mutex_unlock(&ksm_thread_mutex);
2416  
2417  		try_to_freeze();
2418  
2419  		if (ksmd_should_run()) {
2420  			sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
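      			/* wake up early if sleep_millisecs is changed via sysfs */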
2421  			wait_event_interruptible_timeout(ksm_iter_wait,
2422  				sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2423  				msecs_to_jiffies(sleep_ms));
2424  		} else {
2425  			wait_event_freezable(ksm_thread_wait,
2426  				ksmd_should_run() || kthread_should_stop());
2427  		}
2428  	}
2429  	return 0;
2430  }
2431  
2432  int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2433  		unsigned long end, int advice, unsigned long *vm_flags)
2434  {
2435  	struct mm_struct *mm = vma->vm_mm;
2436  	int err;
2437  
2438  	switch (advice) {
2439  	case MADV_MERGEABLE:
2440  		/*
2441  		 * Be somewhat over-protective for now!
2442  		 */
2443  		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
2444  				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
2445  				 VM_HUGETLB | VM_MIXEDMAP))
2446  			return 0;		/* just ignore the advice */
2447  
2448  		if (vma_is_dax(vma))
2449  			return 0;
2450  
2451  #ifdef VM_SAO
2452  		if (*vm_flags & VM_SAO)
2453  			return 0;
2454  #endif
2455  #ifdef VM_SPARC_ADI
2456  		if (*vm_flags & VM_SPARC_ADI)
2457  			return 0;
2458  #endif
2459  
2460  		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2461  			err = __ksm_enter(mm);
2462  			if (err)
2463  				return err;
2464  		}
2465  
2466  		*vm_flags |= VM_MERGEABLE;
2467  		break;
2468  
2469  	case MADV_UNMERGEABLE:
2470  		if (!(*vm_flags & VM_MERGEABLE))
2471  			return 0;		/* just ignore the advice */
2472  
2473  		if (vma->anon_vma) {
2474  			err = unmerge_ksm_pages(vma, start, end);
2475  			if (err)
2476  				return err;
2477  		}
2478  
2479  		*vm_flags &= ~VM_MERGEABLE;
2480  		break;
2481  	}
2482  
2483  	return 0;
2484  }
2485  EXPORT_SYMBOL_GPL(ksm_madvise);
2486  
2487  int __ksm_enter(struct mm_struct *mm)
2488  {
2489  	struct mm_slot *mm_slot;
2490  	int needs_wakeup;
2491  
2492  	mm_slot = alloc_mm_slot();
2493  	if (!mm_slot)
2494  		return -ENOMEM;
2495  
2496  	/* Check ksm_run too?  Would need tighter locking */
2497  	needs_wakeup = list_empty(&ksm_mm_head.mm_list);
2498  
2499  	spin_lock(&ksm_mmlist_lock);
2500  	insert_to_mm_slots_hash(mm, mm_slot);
2501  	/*
2502  	 * With KSM_RUN_MERGE (or KSM_RUN_STOP),
2503  	 * insert just behind the scanning cursor, to let the area settle
2504  	 * down a little; when fork is followed by immediate exec, we don't
2505  	 * want ksmd to waste time setting up and tearing down an rmap_list.
2506  	 *
2507  	 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2508  	 * scanning cursor, otherwise KSM pages in newly forked mms will be
2509  	 * missed: then we might as well insert at the end of the list.
2510  	 */
2511  	if (ksm_run & KSM_RUN_UNMERGE)
2512  		list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
2513  	else
2514  		list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
2515  	spin_unlock(&ksm_mmlist_lock);
2516  
2517  	set_bit(MMF_VM_MERGEABLE, &mm->flags);
2518  	mmgrab(mm);
2519  
2520  	if (needs_wakeup)
2521  		wake_up_interruptible(&ksm_thread_wait);
2522  
2523  	return 0;
2524  }
2525  
2526  void __ksm_exit(struct mm_struct *mm)
2527  {
2528  	struct mm_slot *mm_slot;
2529  	int easy_to_free = 0;
2530  
2531  	/*
2532  	 * This process is exiting: if it's straightforward (as is the
2533  	 * case when ksmd was never running), free mm_slot immediately.
2534  	 * But if it's at the cursor or has rmap_items linked to it, use
2535  	 * mmap_lock to synchronize with any break_cows before pagetables
2536  	 * are freed, and leave the mm_slot on the list for ksmd to free.
2537  	 * Beware: ksm may already have noticed it exiting and freed the slot.
2538  	 */
2539  
2540  	spin_lock(&ksm_mmlist_lock);
2541  	mm_slot = get_mm_slot(mm);
2542  	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2543  		if (!mm_slot->rmap_list) {
2544  			hash_del(&mm_slot->link);
2545  			list_del(&mm_slot->mm_list);
2546  			easy_to_free = 1;
2547  		} else {
2548  			list_move(&mm_slot->mm_list,
2549  				  &ksm_scan.mm_slot->mm_list);
2550  		}
2551  	}
2552  	spin_unlock(&ksm_mmlist_lock);
2553  
2554  	if (easy_to_free) {
2555  		free_mm_slot(mm_slot);
2556  		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2557  		mmdrop(mm);
2558  	} else if (mm_slot) {
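      		/*
      		 * Flush out any use of this mm by ksmd: taking mmap_lock for
      		 * write waits for break_cow() and the scanner to drop their
      		 * read lock before exit_mmap() frees the page tables.
      		 */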
2559  		mmap_write_lock(mm);
2560  		mmap_write_unlock(mm);
2561  	}
2562  }
2563  
2564  struct page *ksm_might_need_to_copy(struct page *page,
2565  			struct vm_area_struct *vma, unsigned long address)
2566  {
2567  	struct anon_vma *anon_vma = page_anon_vma(page);
2568  	struct page *new_page;
2569  
2570  	if (PageKsm(page)) {
2571  		if (page_stable_node(page) &&
2572  		    !(ksm_run & KSM_RUN_UNMERGE))
2573  			return page;	/* no need to copy it */
2574  	} else if (!anon_vma) {
2575  		return page;		/* no need to copy it */
2576  	} else if (anon_vma->root == vma->anon_vma->root &&
2577  		 page->index == linear_page_index(vma, address)) {
2578  		return page;		/* still no need to copy it */
2579  	}
2580  	if (!PageUptodate(page))
2581  		return page;		/* let do_swap_page report the error */
2582  
2583  	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
2584  	if (new_page && mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL)) {
2585  		put_page(new_page);
2586  		new_page = NULL;
2587  	}
2588  	if (new_page) {
2589  		copy_user_highpage(new_page, page, address, vma);
2590  
2591  		SetPageDirty(new_page);
2592  		__SetPageUptodate(new_page);
2593  		__SetPageLocked(new_page);
2594  	}
2595  
2596  	return new_page;
2597  }
2598  
2599  void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2600  {
2601  	struct stable_node *stable_node;
2602  	struct rmap_item *rmap_item;
2603  	int search_new_forks = 0;
2604  
2605  	VM_BUG_ON_PAGE(!PageKsm(page), page);
2606  
2607  	/*
2608  	 * Rely on the page lock to protect against concurrent modifications
2609  	 * to that page's node of the stable tree.
2610  	 */
2611  	VM_BUG_ON_PAGE(!PageLocked(page), page);
2612  
2613  	stable_node = page_stable_node(page);
2614  	if (!stable_node)
2615  		return;
2616  again:
2617  	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
2618  		struct anon_vma *anon_vma = rmap_item->anon_vma;
2619  		struct anon_vma_chain *vmac;
2620  		struct vm_area_struct *vma;
2621  
2622  		cond_resched();
2623  		anon_vma_lock_read(anon_vma);
2624  		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
2625  					       0, ULONG_MAX) {
2626  			unsigned long addr;
2627  
2628  			cond_resched();
2629  			vma = vmac->vma;
2630  
2631  			/* Ignore the stable/unstable/seqnr flags */
2632  			addr = rmap_item->address & PAGE_MASK;
2633  
2634  			if (addr < vma->vm_start || addr >= vma->vm_end)
2635  				continue;
2636  			/*
2637  			 * Initially we examine only the vma which covers this
2638  			 * rmap_item; but later, if there is still work to do,
2639  			 * we examine covering vmas in other mms: in case they
2640  			 * were forked from the original since ksmd passed.
2641  			 */
2642  			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
2643  				continue;
2644  
2645  			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2646  				continue;
2647  
2648  			if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
2649  				anon_vma_unlock_read(anon_vma);
2650  				return;
2651  			}
2652  			if (rwc->done && rwc->done(page)) {
2653  				anon_vma_unlock_read(anon_vma);
2654  				return;
2655  			}
2656  		}
2657  		anon_vma_unlock_read(anon_vma);
2658  	}
2659  	if (!search_new_forks++)
2660  		goto again;
2661  }
2662  
2663  #ifdef CONFIG_MIGRATION
2664  void ksm_migrate_page(struct page *newpage, struct page *oldpage)
2665  {
2666  	struct stable_node *stable_node;
2667  
2668  	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
2669  	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
2670  	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
2671  
2672  	stable_node = page_stable_node(newpage);
2673  	if (stable_node) {
2674  		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
2675  		stable_node->kpfn = page_to_pfn(newpage);
2676  		/*
2677  		 * newpage->mapping was set in advance; now we need smp_wmb()
2678  		 * to make sure that the new stable_node->kpfn is visible
2679  		 * to get_ksm_page() before it can see that oldpage->mapping
2680  		 * has gone stale (or that PageSwapCache has been cleared).
2681  		 */
2682  		smp_wmb();
2683  		set_page_stable_node(oldpage, NULL);
2684  	}
2685  }
2686  #endif /* CONFIG_MIGRATION */
2687  
2688  #ifdef CONFIG_MEMORY_HOTREMOVE
2689  static void wait_while_offlining(void)
2690  {
2691  	while (ksm_run & KSM_RUN_OFFLINE) {
2692  		mutex_unlock(&ksm_thread_mutex);
2693  		wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
2694  			    TASK_UNINTERRUPTIBLE);
2695  		mutex_lock(&ksm_thread_mutex);
2696  	}
2697  }
2698  
2699  static bool stable_node_dup_remove_range(struct stable_node *stable_node,
2700  					 unsigned long start_pfn,
2701  					 unsigned long end_pfn)
2702  {
2703  	if (stable_node->kpfn >= start_pfn &&
2704  	    stable_node->kpfn < end_pfn) {
2705  		/*
2706  		 * Don't get_ksm_page, page has already gone:
2707  		 * which is why we keep kpfn instead of page*
2708  		 */
2709  		remove_node_from_stable_tree(stable_node);
2710  		return true;
2711  	}
2712  	return false;
2713  }
2714  
2715  static bool stable_node_chain_remove_range(struct stable_node *stable_node,
2716  					   unsigned long start_pfn,
2717  					   unsigned long end_pfn,
2718  					   struct rb_root *root)
2719  {
2720  	struct stable_node *dup;
2721  	struct hlist_node *hlist_safe;
2722  
2723  	if (!is_stable_node_chain(stable_node)) {
2724  		VM_BUG_ON(is_stable_node_dup(stable_node));
2725  		return stable_node_dup_remove_range(stable_node, start_pfn,
2726  						    end_pfn);
2727  	}
2728  
2729  	hlist_for_each_entry_safe(dup, hlist_safe,
2730  				  &stable_node->hlist, hlist_dup) {
2731  		VM_BUG_ON(!is_stable_node_dup(dup));
2732  		stable_node_dup_remove_range(dup, start_pfn, end_pfn);
2733  	}
2734  	if (hlist_empty(&stable_node->hlist)) {
2735  		free_stable_node_chain(stable_node, root);
2736  		return true; /* notify caller that tree was rebalanced */
2737  	} else
2738  		return false;
2739  }
2740  
2741  static void ksm_check_stable_tree(unsigned long start_pfn,
2742  				  unsigned long end_pfn)
2743  {
2744  	struct stable_node *stable_node, *next;
2745  	struct rb_node *node;
2746  	int nid;
2747  
2748  	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
2749  		node = rb_first(root_stable_tree + nid);
2750  		while (node) {
2751  			stable_node = rb_entry(node, struct stable_node, node);
2752  			if (stable_node_chain_remove_range(stable_node,
2753  							   start_pfn, end_pfn,
2754  							   root_stable_tree +
2755  							   nid))
2756  				node = rb_first(root_stable_tree + nid);
2757  			else
2758  				node = rb_next(node);
2759  			cond_resched();
2760  		}
2761  	}
2762  	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2763  		if (stable_node->kpfn >= start_pfn &&
2764  		    stable_node->kpfn < end_pfn)
2765  			remove_node_from_stable_tree(stable_node);
2766  		cond_resched();
2767  	}
2768  }
2769  
2770  static int ksm_memory_callback(struct notifier_block *self,
2771  			       unsigned long action, void *arg)
2772  {
2773  	struct memory_notify *mn = arg;
2774  
2775  	switch (action) {
2776  	case MEM_GOING_OFFLINE:
2777  		/*
2778  		 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2779  		 * and remove_all_stable_nodes() while memory is going offline:
2780  		 * it is unsafe for them to touch the stable tree at this time.
2781  		 * But unmerge_ksm_pages(), rmap lookups and other entry points
2782  		 * which do not need the ksm_thread_mutex are all safe.
2783  		 */
2784  		mutex_lock(&ksm_thread_mutex);
2785  		ksm_run |= KSM_RUN_OFFLINE;
2786  		mutex_unlock(&ksm_thread_mutex);
2787  		break;
2788  
2789  	case MEM_OFFLINE:
2790  		/*
2791  		 * Most of the work is done by page migration; but there might
2792  		 * be a few stable_nodes left over, still pointing to struct
2793  		 * pages which have been offlined: prune those from the tree,
2794  		 * otherwise get_ksm_page() might later try to access a
2795  		 * non-existent struct page.
2796  		 */
2797  		ksm_check_stable_tree(mn->start_pfn,
2798  				      mn->start_pfn + mn->nr_pages);
2799  		fallthrough;
2800  	case MEM_CANCEL_OFFLINE:
2801  		mutex_lock(&ksm_thread_mutex);
2802  		ksm_run &= ~KSM_RUN_OFFLINE;
2803  		mutex_unlock(&ksm_thread_mutex);
2804  
2805  		smp_mb();	/* wake_up_bit advises this */
2806  		wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
2807  		break;
2808  	}
2809  	return NOTIFY_OK;
2810  }
2811  #else
2812  static void wait_while_offlining(void)
2813  {
2814  }
2815  #endif /* CONFIG_MEMORY_HOTREMOVE */
2816  
2817  #ifdef CONFIG_SYSFS
2818  /*
2819   * This all compiles without CONFIG_SYSFS, but is a waste of space.
2820   */
2821  
2822  #define KSM_ATTR_RO(_name) \
2823  	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2824  #define KSM_ATTR(_name) \
2825  	static struct kobj_attribute _name##_attr = \
2826  		__ATTR(_name, 0644, _name##_show, _name##_store)
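
      /*
       * These attributes are exposed under /sys/kernel/mm/ksm/ via the
       * ksm_attr_group below; e.g. "echo 1 > /sys/kernel/mm/ksm/run" starts
       * ksmd and "echo 2 > /sys/kernel/mm/ksm/run" unmerges all KSM pages.
       */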
2827  
2828  static ssize_t sleep_millisecs_show(struct kobject *kobj,
2829  				    struct kobj_attribute *attr, char *buf)
2830  {
2831  	return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
2832  }
2833  
2834  static ssize_t sleep_millisecs_store(struct kobject *kobj,
2835  				     struct kobj_attribute *attr,
2836  				     const char *buf, size_t count)
2837  {
2838  	unsigned int msecs;
2839  	int err;
2840  
2841  	err = kstrtouint(buf, 10, &msecs);
2842  	if (err)
2843  		return -EINVAL;
2844  
2845  	ksm_thread_sleep_millisecs = msecs;
2846  	wake_up_interruptible(&ksm_iter_wait);
2847  
2848  	return count;
2849  }
2850  KSM_ATTR(sleep_millisecs);
2851  
2852  static ssize_t pages_to_scan_show(struct kobject *kobj,
2853  				  struct kobj_attribute *attr, char *buf)
2854  {
2855  	return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
2856  }
2857  
2858  static ssize_t pages_to_scan_store(struct kobject *kobj,
2859  				   struct kobj_attribute *attr,
2860  				   const char *buf, size_t count)
2861  {
2862  	unsigned int nr_pages;
2863  	int err;
2864  
2865  	err = kstrtouint(buf, 10, &nr_pages);
2866  	if (err)
2867  		return -EINVAL;
2868  
2869  	ksm_thread_pages_to_scan = nr_pages;
2870  
2871  	return count;
2872  }
2873  KSM_ATTR(pages_to_scan);
2874  
2875  static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
2876  			char *buf)
2877  {
2878  	return sysfs_emit(buf, "%lu\n", ksm_run);
2879  }
2880  
2881  static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
2882  			 const char *buf, size_t count)
2883  {
2884  	unsigned int flags;
2885  	int err;
2886  
2887  	err = kstrtouint(buf, 10, &flags);
2888  	if (err)
2889  		return -EINVAL;
2890  	if (flags > KSM_RUN_UNMERGE)
2891  		return -EINVAL;
2892  
2893  	/*
2894  	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
2895  	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2896  	 * breaking COW to free the pages_shared (but leaves mm_slots
2897  	 * on the list for when ksmd may be set running again).
2898  	 */
2899  
2900  	mutex_lock(&ksm_thread_mutex);
2901  	wait_while_offlining();
2902  	if (ksm_run != flags) {
2903  		ksm_run = flags;
2904  		if (flags & KSM_RUN_UNMERGE) {
2905  			set_current_oom_origin();
2906  			err = unmerge_and_remove_all_rmap_items();
2907  			clear_current_oom_origin();
2908  			if (err) {
2909  				ksm_run = KSM_RUN_STOP;
2910  				count = err;
2911  			}
2912  		}
2913  	}
2914  	mutex_unlock(&ksm_thread_mutex);
2915  
2916  	if (flags & KSM_RUN_MERGE)
2917  		wake_up_interruptible(&ksm_thread_wait);
2918  
2919  	return count;
2920  }
2921  KSM_ATTR(run);
2922  
2923  #ifdef CONFIG_NUMA
2924  static ssize_t merge_across_nodes_show(struct kobject *kobj,
2925  				       struct kobj_attribute *attr, char *buf)
2926  {
2927  	return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
2928  }
2929  
2930  static ssize_t merge_across_nodes_store(struct kobject *kobj,
2931  				   struct kobj_attribute *attr,
2932  				   const char *buf, size_t count)
2933  {
2934  	int err;
2935  	unsigned long knob;
2936  
2937  	err = kstrtoul(buf, 10, &knob);
2938  	if (err)
2939  		return err;
2940  	if (knob > 1)
2941  		return -EINVAL;
2942  
2943  	mutex_lock(&ksm_thread_mutex);
2944  	wait_while_offlining();
2945  	if (ksm_merge_across_nodes != knob) {
2946  		if (ksm_pages_shared || remove_all_stable_nodes())
2947  			err = -EBUSY;
2948  		else if (root_stable_tree == one_stable_tree) {
2949  			struct rb_root *buf;
2950  			/*
2951  			 * This is the first time that we switch away from the
2952  			 * default of merging across nodes: must now allocate
2953  			 * a buffer to hold as many roots as may be needed.
2954  			 * Allocate stable and unstable together:
2955  			 * MAXSMP NODES_SHIFT 10 will use 16kB.
2956  			 */
2957  			buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
2958  				      GFP_KERNEL);
2959  			/* Assume a zero-filled rb_root is a valid empty RB_ROOT */
2960  			if (!buf)
2961  				err = -ENOMEM;
2962  			else {
2963  				root_stable_tree = buf;
2964  				root_unstable_tree = buf + nr_node_ids;
2965  				/* Stable tree is empty but not the unstable */
2966  				root_unstable_tree[0] = one_unstable_tree[0];
2967  			}
2968  		}
2969  		if (!err) {
2970  			ksm_merge_across_nodes = knob;
2971  			ksm_nr_node_ids = knob ? 1 : nr_node_ids;
2972  		}
2973  	}
2974  	mutex_unlock(&ksm_thread_mutex);
2975  
2976  	return err ? err : count;
2977  }
2978  KSM_ATTR(merge_across_nodes);
2979  #endif
2980  
2981  static ssize_t use_zero_pages_show(struct kobject *kobj,
2982  				   struct kobj_attribute *attr, char *buf)
2983  {
2984  	return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
2985  }
2986  static ssize_t use_zero_pages_store(struct kobject *kobj,
2987  				   struct kobj_attribute *attr,
2988  				   const char *buf, size_t count)
2989  {
2990  	int err;
2991  	bool value;
2992  
2993  	err = kstrtobool(buf, &value);
2994  	if (err)
2995  		return -EINVAL;
2996  
2997  	ksm_use_zero_pages = value;
2998  
2999  	return count;
3000  }
3001  KSM_ATTR(use_zero_pages);
3002  
3003  static ssize_t max_page_sharing_show(struct kobject *kobj,
3004  				     struct kobj_attribute *attr, char *buf)
3005  {
3006  	return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3007  }
3008  
3009  static ssize_t max_page_sharing_store(struct kobject *kobj,
3010  				      struct kobj_attribute *attr,
3011  				      const char *buf, size_t count)
3012  {
3013  	int err;
3014  	int knob;
3015  
3016  	err = kstrtoint(buf, 10, &knob);
3017  	if (err)
3018  		return err;
3019  	/*
3020  	 * When a KSM page is created it is shared by 2 mappings. This
3021  	 * being a signed comparison, it implicitly verifies it's not
3022  	 * negative.
3023  	 */
3024  	if (knob < 2)
3025  		return -EINVAL;
3026  
3027  	if (READ_ONCE(ksm_max_page_sharing) == knob)
3028  		return count;
3029  
3030  	mutex_lock(&ksm_thread_mutex);
3031  	wait_while_offlining();
3032  	if (ksm_max_page_sharing != knob) {
3033  		if (ksm_pages_shared || remove_all_stable_nodes())
3034  			err = -EBUSY;
3035  		else
3036  			ksm_max_page_sharing = knob;
3037  	}
3038  	mutex_unlock(&ksm_thread_mutex);
3039  
3040  	return err ? err : count;
3041  }
3042  KSM_ATTR(max_page_sharing);
3043  
3044  static ssize_t pages_shared_show(struct kobject *kobj,
3045  				 struct kobj_attribute *attr, char *buf)
3046  {
3047  	return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3048  }
3049  KSM_ATTR_RO(pages_shared);
3050  
3051  static ssize_t pages_sharing_show(struct kobject *kobj,
3052  				  struct kobj_attribute *attr, char *buf)
3053  {
3054  	return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3055  }
3056  KSM_ATTR_RO(pages_sharing);
3057  
3058  static ssize_t pages_unshared_show(struct kobject *kobj,
3059  				   struct kobj_attribute *attr, char *buf)
3060  {
3061  	return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3062  }
3063  KSM_ATTR_RO(pages_unshared);
3064  
3065  static ssize_t pages_volatile_show(struct kobject *kobj,
3066  				   struct kobj_attribute *attr, char *buf)
3067  {
3068  	long ksm_pages_volatile;
3069  
3070  	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3071  				- ksm_pages_sharing - ksm_pages_unshared;
3072  	/*
3073  	 * It was not worth any locking to calculate that statistic,
3074  	 * but it might therefore sometimes be negative: conceal that.
3075  	 */
3076  	if (ksm_pages_volatile < 0)
3077  		ksm_pages_volatile = 0;
3078  	return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3079  }
3080  KSM_ATTR_RO(pages_volatile);
3081  
3082  static ssize_t stable_node_dups_show(struct kobject *kobj,
3083  				     struct kobj_attribute *attr, char *buf)
3084  {
3085  	return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
3086  }
3087  KSM_ATTR_RO(stable_node_dups);
3088  
3089  static ssize_t stable_node_chains_show(struct kobject *kobj,
3090  				       struct kobj_attribute *attr, char *buf)
3091  {
3092  	return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
3093  }
3094  KSM_ATTR_RO(stable_node_chains);
3095  
3096  static ssize_t
3097  stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3098  					struct kobj_attribute *attr,
3099  					char *buf)
3100  {
3101  	return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3102  }
3103  
3104  static ssize_t
3105  stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3106  					 struct kobj_attribute *attr,
3107  					 const char *buf, size_t count)
3108  {
3109  	unsigned long msecs;
3110  	int err;
3111  
3112  	err = kstrtoul(buf, 10, &msecs);
3113  	if (err || msecs > UINT_MAX)
3114  		return -EINVAL;
3115  
3116  	ksm_stable_node_chains_prune_millisecs = msecs;
3117  
3118  	return count;
3119  }
3120  KSM_ATTR(stable_node_chains_prune_millisecs);
3121  
3122  static ssize_t full_scans_show(struct kobject *kobj,
3123  			       struct kobj_attribute *attr, char *buf)
3124  {
3125  	return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3126  }
3127  KSM_ATTR_RO(full_scans);
3128  
3129  static struct attribute *ksm_attrs[] = {
3130  	&sleep_millisecs_attr.attr,
3131  	&pages_to_scan_attr.attr,
3132  	&run_attr.attr,
3133  	&pages_shared_attr.attr,
3134  	&pages_sharing_attr.attr,
3135  	&pages_unshared_attr.attr,
3136  	&pages_volatile_attr.attr,
3137  	&full_scans_attr.attr,
3138  #ifdef CONFIG_NUMA
3139  	&merge_across_nodes_attr.attr,
3140  #endif
3141  	&max_page_sharing_attr.attr,
3142  	&stable_node_chains_attr.attr,
3143  	&stable_node_dups_attr.attr,
3144  	&stable_node_chains_prune_millisecs_attr.attr,
3145  	&use_zero_pages_attr.attr,
3146  	NULL,
3147  };
3148  
3149  static const struct attribute_group ksm_attr_group = {
3150  	.attrs = ksm_attrs,
3151  	.name = "ksm",
3152  };
3153  #endif /* CONFIG_SYSFS */
3154  
3155  static int __init ksm_init(void)
3156  {
3157  	struct task_struct *ksm_thread;
3158  	int err;
3159  
3160  	/* The correct value depends on page size and endianness */
3161  	zero_checksum = calc_checksum(ZERO_PAGE(0));
3162  	/* Default to false for backwards compatibility */
3163  	ksm_use_zero_pages = false;
3164  
3165  	err = ksm_slab_init();
3166  	if (err)
3167  		goto out;
3168  
3169  	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
3170  	if (IS_ERR(ksm_thread)) {
3171  		pr_err("ksm: creating kthread failed\n");
3172  		err = PTR_ERR(ksm_thread);
3173  		goto out_free;
3174  	}
3175  
3176  #ifdef CONFIG_SYSFS
3177  	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
3178  	if (err) {
3179  		pr_err("ksm: register sysfs failed\n");
3180  		kthread_stop(ksm_thread);
3181  		goto out_free;
3182  	}
3183  #else
3184  	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */
3185  
3186  #endif /* CONFIG_SYSFS */
3187  
3188  #ifdef CONFIG_MEMORY_HOTREMOVE
3189  	/* There is no significance to this priority 100 */
3190  	hotplug_memory_notifier(ksm_memory_callback, 100);
3191  #endif
3192  	return 0;
3193  
3194  out_free:
3195  	ksm_slab_free();
3196  out:
3197  	return err;
3198  }
3199  subsys_initcall(ksm_init);
3200