xref: /openbmc/linux/mm/mempolicy.c (revision 76a4f7cc)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a process counter
20  *                is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred      Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * preferred many Try a set of nodes first before normal fallback. This is
35  *                similar to preferred without the special case.
36  *
37  * default        Allocate on the local node first, or when on a VMA
38  *                use the process policy. This is what Linux always did
39  *		  in a NUMA aware kernel and still does by, ahem, default.
40  *
41  * The process policy is applied for most non-interrupt memory allocations
42  * in that process' context. Interrupts ignore the policies and always
43  * try to allocate on the local CPU. The VMA policy is only applied for memory
44  * allocations for a VMA in the VM.
45  *
46  * Currently there are a few corner cases in swapping where the policy
47  * is not applied, but the majority should be handled. When process policy
48  * is used it is not remembered over swap outs/swap ins.
49  *
50  * Only the highest zone in the zone hierarchy gets policied. Allocations
51  * requesting a lower zone just use default policy. This implies that
52  * on systems with highmem, kernel lowmem allocations don't get policied.
53  * Same with GFP_DMA allocations.
54  *
55  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56  * all users and remembered even when nobody has memory mapped.
57  */
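/*
 * Illustrative userspace sketch (not part of this file; assumes libnuma's
 * <numaif.h> syscall wrappers and a machine with nodes 0 and 1 online):
 * the modes above are selected with set_mempolicy(2) for a whole task and
 * with mbind(2) for a single mapping.
 *
 *	#include <numaif.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		unsigned long both = (1UL << 0) | (1UL << 1);
 *		unsigned long node0 = 1UL << 0;
 *		void *buf;
 *
 *		// interleave this task's future allocations over nodes 0-1
 *		set_mempolicy(MPOL_INTERLEAVE, &both, 8 * sizeof(both));
 *
 *		// restrict one page-aligned mapping to node 0 only
 *		buf = aligned_alloc(4096, 1 << 20);
 *		mbind(buf, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *		return 0;
 *	}
 */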
58 
59 /* Notebook:
60    fix mmap readahead to honour policy and enable policy for any page cache
61    object
62    statistics for bigpages
63    global policy for page cache? currently it uses process policy. Requires
64    first item above.
65    handle mremap for shared memory (currently ignored for the policy)
66    grows down?
67    make bind policy root only? It can trigger oom much faster and the
68    kernel is not always graceful with that.
69 */
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/mempolicy.h>
74 #include <linux/pagewalk.h>
75 #include <linux/highmem.h>
76 #include <linux/hugetlb.h>
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/sched/mm.h>
80 #include <linux/sched/numa_balancing.h>
81 #include <linux/sched/task.h>
82 #include <linux/nodemask.h>
83 #include <linux/cpuset.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/export.h>
87 #include <linux/nsproxy.h>
88 #include <linux/interrupt.h>
89 #include <linux/init.h>
90 #include <linux/compat.h>
91 #include <linux/ptrace.h>
92 #include <linux/swap.h>
93 #include <linux/seq_file.h>
94 #include <linux/proc_fs.h>
95 #include <linux/migrate.h>
96 #include <linux/ksm.h>
97 #include <linux/rmap.h>
98 #include <linux/security.h>
99 #include <linux/syscalls.h>
100 #include <linux/ctype.h>
101 #include <linux/mm_inline.h>
102 #include <linux/mmu_notifier.h>
103 #include <linux/printk.h>
104 #include <linux/swapops.h>
105 
106 #include <asm/tlbflush.h>
107 #include <linux/uaccess.h>
108 
109 #include "internal.h"
110 
111 /* Internal flags */
112 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
113 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
114 
115 static struct kmem_cache *policy_cache;
116 static struct kmem_cache *sn_cache;
117 
118 /* Highest zone. A specific allocation for a zone below that is not
119    policied. */
120 enum zone_type policy_zone = 0;
121 
122 /*
123  * run-time system-wide default policy => local allocation
124  */
125 static struct mempolicy default_policy = {
126 	.refcnt = ATOMIC_INIT(1), /* never free it */
127 	.mode = MPOL_LOCAL,
128 };
129 
130 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
131 
132 /**
133  * numa_map_to_online_node - Find closest online node
134  * @node: Node id to start the search
135  *
136  * Look up the next closest node by distance if @node is not online.
137  */
138 int numa_map_to_online_node(int node)
139 {
140 	int min_dist = INT_MAX, dist, n, min_node;
141 
142 	if (node == NUMA_NO_NODE || node_online(node))
143 		return node;
144 
145 	min_node = node;
146 	for_each_online_node(n) {
147 		dist = node_distance(node, n);
148 		if (dist < min_dist) {
149 			min_dist = dist;
150 			min_node = n;
151 		}
152 	}
153 
154 	return min_node;
155 }
156 EXPORT_SYMBOL_GPL(numa_map_to_online_node);
157 
158 struct mempolicy *get_task_policy(struct task_struct *p)
159 {
160 	struct mempolicy *pol = p->mempolicy;
161 	int node;
162 
163 	if (pol)
164 		return pol;
165 
166 	node = numa_node_id();
167 	if (node != NUMA_NO_NODE) {
168 		pol = &preferred_node_policy[node];
169 		/* preferred_node_policy is not initialised early in boot */
170 		if (pol->mode)
171 			return pol;
172 	}
173 
174 	return &default_policy;
175 }
176 
177 static const struct mempolicy_operations {
178 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
179 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
180 } mpol_ops[MPOL_MAX];
181 
182 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
183 {
184 	return pol->flags & MPOL_MODE_FLAGS;
185 }
186 
187 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
188 				   const nodemask_t *rel)
189 {
190 	nodemask_t tmp;
191 	nodes_fold(tmp, *orig, nodes_weight(*rel));
192 	nodes_onto(*ret, tmp, *rel);
193 }
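/*
 * Worked example (illustrative): with MPOL_F_RELATIVE_NODES the user's
 * nodemask is interpreted relative to the allowed set.  Assuming
 * *orig = {0,2} and *rel = {4,5,6} (weight 3), nodes_fold() folds *orig
 * modulo 3 (still {0,2}) and nodes_onto() maps relative bit 0 to node 4
 * and relative bit 2 to node 6, so *ret = {4,6}.
 */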
194 
195 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
196 {
197 	if (nodes_empty(*nodes))
198 		return -EINVAL;
199 	pol->nodes = *nodes;
200 	return 0;
201 }
202 
203 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
204 {
205 	if (nodes_empty(*nodes))
206 		return -EINVAL;
207 
208 	nodes_clear(pol->nodes);
209 	node_set(first_node(*nodes), pol->nodes);
210 	return 0;
211 }
212 
213 /*
214  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
215  * any, for the new policy.  mpol_new() has already validated the nodes
216  * parameter with respect to the policy mode and flags.
217  *
218  * Must be called holding task's alloc_lock to protect task's mems_allowed
219  * and mempolicy.  May also be called holding the mmap_lock for write.
220  */
221 static int mpol_set_nodemask(struct mempolicy *pol,
222 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
223 {
224 	int ret;
225 
226 	/*
227 	 * Default (pol==NULL) and local memory policies are not
228 	 * subject to any remapping. They also do not need any special
229 	 * constructor.
230 	 */
231 	if (!pol || pol->mode == MPOL_LOCAL)
232 		return 0;
233 
234 	/* Check N_MEMORY */
235 	nodes_and(nsc->mask1,
236 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
237 
238 	VM_BUG_ON(!nodes);
239 
240 	if (pol->flags & MPOL_F_RELATIVE_NODES)
241 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
242 	else
243 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
244 
245 	if (mpol_store_user_nodemask(pol))
246 		pol->w.user_nodemask = *nodes;
247 	else
248 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
249 
250 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
251 	return ret;
252 }
253 
254 /*
255  * This function just creates a new policy, does some checks and simple
256  * initialization. You must invoke mpol_set_nodemask() to set nodes.
257  */
258 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 				  nodemask_t *nodes)
260 {
261 	struct mempolicy *policy;
262 
263 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
264 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265 
266 	if (mode == MPOL_DEFAULT) {
267 		if (nodes && !nodes_empty(*nodes))
268 			return ERR_PTR(-EINVAL);
269 		return NULL;
270 	}
271 	VM_BUG_ON(!nodes);
272 
273 	/*
274 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 	 * All other modes require a valid pointer to a non-empty nodemask.
277 	 */
278 	if (mode == MPOL_PREFERRED) {
279 		if (nodes_empty(*nodes)) {
280 			if (((flags & MPOL_F_STATIC_NODES) ||
281 			     (flags & MPOL_F_RELATIVE_NODES)))
282 				return ERR_PTR(-EINVAL);
283 
284 			mode = MPOL_LOCAL;
285 		}
286 	} else if (mode == MPOL_LOCAL) {
287 		if (!nodes_empty(*nodes) ||
288 		    (flags & MPOL_F_STATIC_NODES) ||
289 		    (flags & MPOL_F_RELATIVE_NODES))
290 			return ERR_PTR(-EINVAL);
291 	} else if (nodes_empty(*nodes))
292 		return ERR_PTR(-EINVAL);
293 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
294 	if (!policy)
295 		return ERR_PTR(-ENOMEM);
296 	atomic_set(&policy->refcnt, 1);
297 	policy->mode = mode;
298 	policy->flags = flags;
299 
300 	return policy;
301 }
302 
303 /* Slow path of a mpol destructor. */
304 void __mpol_put(struct mempolicy *p)
305 {
306 	if (!atomic_dec_and_test(&p->refcnt))
307 		return;
308 	kmem_cache_free(policy_cache, p);
309 }
310 
311 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
312 {
313 }
314 
315 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
316 {
317 	nodemask_t tmp;
318 
319 	if (pol->flags & MPOL_F_STATIC_NODES)
320 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
323 	else {
324 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
325 								*nodes);
326 		pol->w.cpuset_mems_allowed = *nodes;
327 	}
328 
329 	if (nodes_empty(tmp))
330 		tmp = *nodes;
331 
332 	pol->nodes = tmp;
333 }
334 
335 static void mpol_rebind_preferred(struct mempolicy *pol,
336 						const nodemask_t *nodes)
337 {
338 	pol->w.cpuset_mems_allowed = *nodes;
339 }
340 
341 /*
342  * mpol_rebind_policy - Migrate a policy to a different set of nodes
343  *
344  * Per-vma policies are protected by mmap_lock. Allocations using per-task
345  * policies are protected by task->mems_allowed_seq to prevent a premature
346  * OOM/allocation failure due to parallel nodemask modification.
347  */
348 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
349 {
350 	if (!pol)
351 		return;
352 	if (!mpol_store_user_nodemask(pol) &&
353 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
354 		return;
355 
356 	mpol_ops[pol->mode].rebind(pol, newmask);
357 }
358 
359 /*
360  * Wrapper for mpol_rebind_policy() that just requires task
361  * pointer, and updates task mempolicy.
362  *
363  * Called with task's alloc_lock held.
364  */
365 
366 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
367 {
368 	mpol_rebind_policy(tsk->mempolicy, new);
369 }
370 
371 /*
372  * Rebind each vma in mm to new nodemask.
373  *
374  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
375  */
376 
377 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
378 {
379 	struct vm_area_struct *vma;
380 
381 	mmap_write_lock(mm);
382 	for (vma = mm->mmap; vma; vma = vma->vm_next)
383 		mpol_rebind_policy(vma->vm_policy, new);
384 	mmap_write_unlock(mm);
385 }
386 
387 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
388 	[MPOL_DEFAULT] = {
389 		.rebind = mpol_rebind_default,
390 	},
391 	[MPOL_INTERLEAVE] = {
392 		.create = mpol_new_nodemask,
393 		.rebind = mpol_rebind_nodemask,
394 	},
395 	[MPOL_PREFERRED] = {
396 		.create = mpol_new_preferred,
397 		.rebind = mpol_rebind_preferred,
398 	},
399 	[MPOL_BIND] = {
400 		.create = mpol_new_nodemask,
401 		.rebind = mpol_rebind_nodemask,
402 	},
403 	[MPOL_LOCAL] = {
404 		.rebind = mpol_rebind_default,
405 	},
406 	[MPOL_PREFERRED_MANY] = {
407 		.create = mpol_new_nodemask,
408 		.rebind = mpol_rebind_preferred,
409 	},
410 };
411 
412 static int migrate_page_add(struct page *page, struct list_head *pagelist,
413 				unsigned long flags);
414 
415 struct queue_pages {
416 	struct list_head *pagelist;
417 	unsigned long flags;
418 	nodemask_t *nmask;
419 	unsigned long start;
420 	unsigned long end;
421 	struct vm_area_struct *first;
422 };
423 
424 /*
425  * Check if the page's nid is in qp->nmask.
426  *
427  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
428  * in the inverse of qp->nmask instead.
429  */
430 static inline bool queue_pages_required(struct page *page,
431 					struct queue_pages *qp)
432 {
433 	int nid = page_to_nid(page);
434 	unsigned long flags = qp->flags;
435 
436 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
437 }
438 
439 /*
440  * queue_pages_pmd() has four possible return values:
441  * 0 - pages are placed on the right node or queued successfully, or
442  *     a special page is met, e.g. the huge zero page.
443  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
444  *     specified.
445  * 2 - THP was split.
446  * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was
447  *        specified and an existing page was already on a node that does
448  *        not follow the policy.
449  */
450 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451 				unsigned long end, struct mm_walk *walk)
452 	__releases(ptl)
453 {
454 	int ret = 0;
455 	struct page *page;
456 	struct queue_pages *qp = walk->private;
457 	unsigned long flags;
458 
459 	if (unlikely(is_pmd_migration_entry(*pmd))) {
460 		ret = -EIO;
461 		goto unlock;
462 	}
463 	page = pmd_page(*pmd);
464 	if (is_huge_zero_page(page)) {
465 		spin_unlock(ptl);
466 		walk->action = ACTION_CONTINUE;
467 		goto out;
468 	}
469 	if (!queue_pages_required(page, qp))
470 		goto unlock;
471 
472 	flags = qp->flags;
473 	/* go to thp migration */
474 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
475 		if (!vma_migratable(walk->vma) ||
476 		    migrate_page_add(page, qp->pagelist, flags)) {
477 			ret = 1;
478 			goto unlock;
479 		}
480 	} else
481 		ret = -EIO;
482 unlock:
483 	spin_unlock(ptl);
484 out:
485 	return ret;
486 }
487 
488 /*
489  * Scan through pages checking if pages follow certain conditions,
490  * and move them to the pagelist if they do.
491  *
492  * queue_pages_pte_range() has three possible return values:
493  * 0 - pages are placed on the right node or queued successfully, or
494  *     a special page is met, e.g. the zero page.
495  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
496  *     specified.
497  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
498  *        on a node that does not follow the policy.
499  */
500 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
501 			unsigned long end, struct mm_walk *walk)
502 {
503 	struct vm_area_struct *vma = walk->vma;
504 	struct page *page;
505 	struct queue_pages *qp = walk->private;
506 	unsigned long flags = qp->flags;
507 	int ret;
508 	bool has_unmovable = false;
509 	pte_t *pte, *mapped_pte;
510 	spinlock_t *ptl;
511 
512 	ptl = pmd_trans_huge_lock(pmd, vma);
513 	if (ptl) {
514 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
515 		if (ret != 2)
516 			return ret;
517 	}
518 	/* THP was split, fall through to pte walk */
519 
520 	if (pmd_trans_unstable(pmd))
521 		return 0;
522 
523 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
524 	for (; addr != end; pte++, addr += PAGE_SIZE) {
525 		if (!pte_present(*pte))
526 			continue;
527 		page = vm_normal_page(vma, addr, *pte);
528 		if (!page)
529 			continue;
530 		/*
531 		 * vm_normal_page() filters out zero pages, but there might
532 		 * still be PageReserved pages to skip, perhaps in a VDSO.
533 		 */
534 		if (PageReserved(page))
535 			continue;
536 		if (!queue_pages_required(page, qp))
537 			continue;
538 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
539 			/* MPOL_MF_STRICT must be specified if we get here */
540 			if (!vma_migratable(vma)) {
541 				has_unmovable = true;
542 				break;
543 			}
544 
545 			/*
546 			 * Do not abort immediately since there may be
547 			 * temporarily off-LRU pages in the range.  We still
548 			 * need to migrate the other LRU pages.
549 			 */
550 			if (migrate_page_add(page, qp->pagelist, flags))
551 				has_unmovable = true;
552 		} else
553 			break;
554 	}
555 	pte_unmap_unlock(mapped_pte, ptl);
556 	cond_resched();
557 
558 	if (has_unmovable)
559 		return 1;
560 
561 	return addr != end ? -EIO : 0;
562 }
563 
564 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
565 			       unsigned long addr, unsigned long end,
566 			       struct mm_walk *walk)
567 {
568 	int ret = 0;
569 #ifdef CONFIG_HUGETLB_PAGE
570 	struct queue_pages *qp = walk->private;
571 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
572 	struct page *page;
573 	spinlock_t *ptl;
574 	pte_t entry;
575 
576 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
577 	entry = huge_ptep_get(pte);
578 	if (!pte_present(entry))
579 		goto unlock;
580 	page = pte_page(entry);
581 	if (!queue_pages_required(page, qp))
582 		goto unlock;
583 
584 	if (flags == MPOL_MF_STRICT) {
585 		/*
586 		 * STRICT alone means only detecting misplaced pages and no
587 		 * need to further check other vmas.
588 		 */
589 		ret = -EIO;
590 		goto unlock;
591 	}
592 
593 	if (!vma_migratable(walk->vma)) {
594 		/*
595 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
596 		 * stopped walking the current vma.
597 		 * Detect the misplaced page but allow migrating pages which
598 		 * have been queued.
599 		 */
600 		ret = 1;
601 		goto unlock;
602 	}
603 
604 	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
605 	if (flags & (MPOL_MF_MOVE_ALL) ||
606 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
607 		if (!isolate_huge_page(page, qp->pagelist) &&
608 			(flags & MPOL_MF_STRICT))
609 			/*
610 			 * Failed to isolate page but allow migrating pages
611 			 * which have been queued.
612 			 */
613 			ret = 1;
614 	}
615 unlock:
616 	spin_unlock(ptl);
617 #else
618 	BUG();
619 #endif
620 	return ret;
621 }
622 
623 #ifdef CONFIG_NUMA_BALANCING
624 /*
625  * This is used to mark a range of virtual addresses to be inaccessible.
626  * These are later cleared by a NUMA hinting fault. Depending on these
627  * faults, pages may be migrated for better NUMA placement.
628  *
629  * This is assuming that NUMA faults are handled using PROT_NONE. If
630  * an architecture makes a different choice, it will need further
631  * changes to the core.
632  */
633 unsigned long change_prot_numa(struct vm_area_struct *vma,
634 			unsigned long addr, unsigned long end)
635 {
636 	int nr_updated;
637 
638 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
639 	if (nr_updated)
640 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
641 
642 	return nr_updated;
643 }
644 #else
645 static unsigned long change_prot_numa(struct vm_area_struct *vma,
646 			unsigned long addr, unsigned long end)
647 {
648 	return 0;
649 }
650 #endif /* CONFIG_NUMA_BALANCING */
651 
652 static int queue_pages_test_walk(unsigned long start, unsigned long end,
653 				struct mm_walk *walk)
654 {
655 	struct vm_area_struct *vma = walk->vma;
656 	struct queue_pages *qp = walk->private;
657 	unsigned long endvma = vma->vm_end;
658 	unsigned long flags = qp->flags;
659 
660 	/* range check first */
661 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
662 
663 	if (!qp->first) {
664 		qp->first = vma;
665 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
666 			(qp->start < vma->vm_start))
667 			/* hole at head side of range */
668 			return -EFAULT;
669 	}
670 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
671 		((vma->vm_end < qp->end) &&
672 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
673 		/* hole at middle or tail of range */
674 		return -EFAULT;
675 
676 	/*
677 	 * Need to check MPOL_MF_STRICT to return -EIO if possible,
678 	 * regardless of vma_migratable.
679 	 */
680 	if (!vma_migratable(vma) &&
681 	    !(flags & MPOL_MF_STRICT))
682 		return 1;
683 
684 	if (endvma > end)
685 		endvma = end;
686 
687 	if (flags & MPOL_MF_LAZY) {
688 		/* Similar to task_numa_work, skip inaccessible VMAs */
689 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
690 			!(vma->vm_flags & VM_MIXEDMAP))
691 			change_prot_numa(vma, start, endvma);
692 		return 1;
693 	}
694 
695 	/* queue pages from current vma */
696 	if (flags & MPOL_MF_VALID)
697 		return 0;
698 	return 1;
699 }
700 
701 static const struct mm_walk_ops queue_pages_walk_ops = {
702 	.hugetlb_entry		= queue_pages_hugetlb,
703 	.pmd_entry		= queue_pages_pte_range,
704 	.test_walk		= queue_pages_test_walk,
705 };
706 
707 /*
708  * Walk through page tables and collect pages to be migrated.
709  *
710  * If pages found in a given range are on a set of nodes (determined by
711  * @nodes and @flags), they are isolated and queued to the pagelist, which
712  * is passed via @private.
713  *
714  * queue_pages_range() has three possible return values:
715  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
716  *     specified.
717  * 0 - queue pages successfully or no misplaced page.
718  * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
719  *         memory range specified by nodemask and maxnode points outside
720  *         your accessible address space (-EFAULT)
721  */
722 static int
723 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
724 		nodemask_t *nodes, unsigned long flags,
725 		struct list_head *pagelist)
726 {
727 	int err;
728 	struct queue_pages qp = {
729 		.pagelist = pagelist,
730 		.flags = flags,
731 		.nmask = nodes,
732 		.start = start,
733 		.end = end,
734 		.first = NULL,
735 	};
736 
737 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
738 
739 	if (!qp.first)
740 		/* whole range in hole */
741 		err = -EFAULT;
742 
743 	return err;
744 }
745 
746 /*
747  * Apply policy to a single VMA
748  * This must be called with the mmap_lock held for writing.
749  */
750 static int vma_replace_policy(struct vm_area_struct *vma,
751 						struct mempolicy *pol)
752 {
753 	int err;
754 	struct mempolicy *old;
755 	struct mempolicy *new;
756 
757 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
758 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
759 		 vma->vm_ops, vma->vm_file,
760 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
761 
762 	new = mpol_dup(pol);
763 	if (IS_ERR(new))
764 		return PTR_ERR(new);
765 
766 	if (vma->vm_ops && vma->vm_ops->set_policy) {
767 		err = vma->vm_ops->set_policy(vma, new);
768 		if (err)
769 			goto err_out;
770 	}
771 
772 	old = vma->vm_policy;
773 	vma->vm_policy = new; /* protected by mmap_lock */
774 	mpol_put(old);
775 
776 	return 0;
777  err_out:
778 	mpol_put(new);
779 	return err;
780 }
781 
782 /* Step 2: apply policy to a range and do splits. */
783 static int mbind_range(struct mm_struct *mm, unsigned long start,
784 		       unsigned long end, struct mempolicy *new_pol)
785 {
786 	struct vm_area_struct *next;
787 	struct vm_area_struct *prev;
788 	struct vm_area_struct *vma;
789 	int err = 0;
790 	pgoff_t pgoff;
791 	unsigned long vmstart;
792 	unsigned long vmend;
793 
794 	vma = find_vma(mm, start);
795 	VM_BUG_ON(!vma);
796 
797 	prev = vma->vm_prev;
798 	if (start > vma->vm_start)
799 		prev = vma;
800 
801 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
802 		next = vma->vm_next;
803 		vmstart = max(start, vma->vm_start);
804 		vmend   = min(end, vma->vm_end);
805 
806 		if (mpol_equal(vma_policy(vma), new_pol))
807 			continue;
808 
809 		pgoff = vma->vm_pgoff +
810 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
811 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
812 				 vma->anon_vma, vma->vm_file, pgoff,
813 				 new_pol, vma->vm_userfaultfd_ctx);
814 		if (prev) {
815 			vma = prev;
816 			next = vma->vm_next;
817 			if (mpol_equal(vma_policy(vma), new_pol))
818 				continue;
819 			/* vma_merge() joined vma && vma->next, case 8 */
820 			goto replace;
821 		}
822 		if (vma->vm_start != vmstart) {
823 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
824 			if (err)
825 				goto out;
826 		}
827 		if (vma->vm_end != vmend) {
828 			err = split_vma(vma->vm_mm, vma, vmend, 0);
829 			if (err)
830 				goto out;
831 		}
832  replace:
833 		err = vma_replace_policy(vma, new_pol);
834 		if (err)
835 			goto out;
836 	}
837 
838  out:
839 	return err;
840 }
841 
842 /* Set the process memory policy */
843 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
844 			     nodemask_t *nodes)
845 {
846 	struct mempolicy *new, *old;
847 	NODEMASK_SCRATCH(scratch);
848 	int ret;
849 
850 	if (!scratch)
851 		return -ENOMEM;
852 
853 	new = mpol_new(mode, flags, nodes);
854 	if (IS_ERR(new)) {
855 		ret = PTR_ERR(new);
856 		goto out;
857 	}
858 
859 	if (flags & MPOL_F_NUMA_BALANCING) {
860 		if (new && new->mode == MPOL_BIND) {
861 			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
862 		} else {
863 			ret = -EINVAL;
864 			mpol_put(new);
865 			goto out;
866 		}
867 	}
868 
869 	ret = mpol_set_nodemask(new, nodes, scratch);
870 	if (ret) {
871 		mpol_put(new);
872 		goto out;
873 	}
874 	task_lock(current);
875 	old = current->mempolicy;
876 	current->mempolicy = new;
877 	if (new && new->mode == MPOL_INTERLEAVE)
878 		current->il_prev = MAX_NUMNODES-1;
879 	task_unlock(current);
880 	mpol_put(old);
881 	ret = 0;
882 out:
883 	NODEMASK_SCRATCH_FREE(scratch);
884 	return ret;
885 }
886 
887 /*
888  * Return nodemask for policy for get_mempolicy() query
889  *
890  * Called with task's alloc_lock held
891  */
892 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
893 {
894 	nodes_clear(*nodes);
895 	if (p == &default_policy)
896 		return;
897 
898 	switch (p->mode) {
899 	case MPOL_BIND:
900 	case MPOL_INTERLEAVE:
901 	case MPOL_PREFERRED:
902 	case MPOL_PREFERRED_MANY:
903 		*nodes = p->nodes;
904 		break;
905 	case MPOL_LOCAL:
906 		/* return empty node mask for local allocation */
907 		break;
908 	default:
909 		BUG();
910 	}
911 }
912 
913 static int lookup_node(struct mm_struct *mm, unsigned long addr)
914 {
915 	struct page *p = NULL;
916 	int err;
917 
918 	int locked = 1;
919 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
920 	if (err > 0) {
921 		err = page_to_nid(p);
922 		put_page(p);
923 	}
924 	if (locked)
925 		mmap_read_unlock(mm);
926 	return err;
927 }
928 
929 /* Retrieve NUMA policy */
930 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
931 			     unsigned long addr, unsigned long flags)
932 {
933 	int err;
934 	struct mm_struct *mm = current->mm;
935 	struct vm_area_struct *vma = NULL;
936 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
937 
938 	if (flags &
939 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
940 		return -EINVAL;
941 
942 	if (flags & MPOL_F_MEMS_ALLOWED) {
943 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
944 			return -EINVAL;
945 		*policy = 0;	/* just so it's initialized */
946 		task_lock(current);
947 		*nmask  = cpuset_current_mems_allowed;
948 		task_unlock(current);
949 		return 0;
950 	}
951 
952 	if (flags & MPOL_F_ADDR) {
953 		/*
954 		 * Do NOT fall back to task policy if the
955 		 * vma/shared policy at addr is NULL.  We
956 		 * want to return MPOL_DEFAULT in this case.
957 		 */
958 		mmap_read_lock(mm);
959 		vma = vma_lookup(mm, addr);
960 		if (!vma) {
961 			mmap_read_unlock(mm);
962 			return -EFAULT;
963 		}
964 		if (vma->vm_ops && vma->vm_ops->get_policy)
965 			pol = vma->vm_ops->get_policy(vma, addr);
966 		else
967 			pol = vma->vm_policy;
968 	} else if (addr)
969 		return -EINVAL;
970 
971 	if (!pol)
972 		pol = &default_policy;	/* indicates default behavior */
973 
974 	if (flags & MPOL_F_NODE) {
975 		if (flags & MPOL_F_ADDR) {
976 			/*
977 			 * Take a refcount on the mpol, lookup_node()
978 			 * will drop the mmap_lock, so after calling
979 			 * lookup_node() only "pol" remains valid, "vma"
980 			 * is stale.
981 			 */
982 			pol_refcount = pol;
983 			vma = NULL;
984 			mpol_get(pol);
985 			err = lookup_node(mm, addr);
986 			if (err < 0)
987 				goto out;
988 			*policy = err;
989 		} else if (pol == current->mempolicy &&
990 				pol->mode == MPOL_INTERLEAVE) {
991 			*policy = next_node_in(current->il_prev, pol->nodes);
992 		} else {
993 			err = -EINVAL;
994 			goto out;
995 		}
996 	} else {
997 		*policy = pol == &default_policy ? MPOL_DEFAULT :
998 						pol->mode;
999 		/*
1000 		 * Internal mempolicy flags must be masked off before exposing
1001 		 * the policy to userspace.
1002 		 */
1003 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1004 	}
1005 
1006 	err = 0;
1007 	if (nmask) {
1008 		if (mpol_store_user_nodemask(pol)) {
1009 			*nmask = pol->w.user_nodemask;
1010 		} else {
1011 			task_lock(current);
1012 			get_policy_nodemask(pol, nmask);
1013 			task_unlock(current);
1014 		}
1015 	}
1016 
1017  out:
1018 	mpol_cond_put(pol);
1019 	if (vma)
1020 		mmap_read_unlock(mm);
1021 	if (pol_refcount)
1022 		mpol_put(pol_refcount);
1023 	return err;
1024 }
1025 
1026 #ifdef CONFIG_MIGRATION
1027 /*
1028  * page migration, thp tail pages can be passed.
1029  */
1030 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1031 				unsigned long flags)
1032 {
1033 	struct page *head = compound_head(page);
1034 	/*
1035 	 * Avoid migrating a page that is shared with others.
1036 	 */
1037 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1038 		if (!isolate_lru_page(head)) {
1039 			list_add_tail(&head->lru, pagelist);
1040 			mod_node_page_state(page_pgdat(head),
1041 				NR_ISOLATED_ANON + page_is_file_lru(head),
1042 				thp_nr_pages(head));
1043 		} else if (flags & MPOL_MF_STRICT) {
1044 			/*
1045 			 * A non-movable page may reach here.  And there may be
1046 			 * temporarily off-LRU pages or non-LRU movable pages.
1047 			 * Treat them as unmovable pages since they can't be
1048 			 * isolated, so they can't be moved at the moment.  We
1049 			 * should return -EIO for this case too.
1050 			 */
1051 			return -EIO;
1052 		}
1053 	}
1054 
1055 	return 0;
1056 }
1057 
1058 /*
1059  * Migrate pages from one node to a target node.
1060  * Returns error or the number of pages not migrated.
1061  */
1062 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1063 			   int flags)
1064 {
1065 	nodemask_t nmask;
1066 	LIST_HEAD(pagelist);
1067 	int err = 0;
1068 	struct migration_target_control mtc = {
1069 		.nid = dest,
1070 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1071 	};
1072 
1073 	nodes_clear(nmask);
1074 	node_set(source, nmask);
1075 
1076 	/*
1077 	 * This does not "check" the range but isolates all pages that
1078 	 * need migration.  Between passing in the full user address
1079 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1080 	 */
1081 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1082 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1083 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1084 
1085 	if (!list_empty(&pagelist)) {
1086 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1087 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1088 		if (err)
1089 			putback_movable_pages(&pagelist);
1090 	}
1091 
1092 	return err;
1093 }
1094 
1095 /*
1096  * Move pages between the two nodesets so as to preserve the physical
1097  * layout as much as possible.
1098  *
1099  * Returns the number of pages that could not be moved.
1100  */
1101 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1102 		     const nodemask_t *to, int flags)
1103 {
1104 	int busy = 0;
1105 	int err = 0;
1106 	nodemask_t tmp;
1107 
1108 	lru_cache_disable();
1109 
1110 	mmap_read_lock(mm);
1111 
1112 	/*
1113 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1114 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1115 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1116 	 * The pair of nodemasks 'to' and 'from' define the map.
1117 	 *
1118 	 * If no pair of bits is found that way, fallback to picking some
1119 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1120 	 * 'source' and 'dest' bits are the same, this represents a node
1121 	 * that will be migrating to itself, so no pages need move.
1122 	 *
1123 	 * If no bits are left in 'tmp', or if all remaining bits left
1124 	 * in 'tmp' correspond to the same bit in 'to', return false
1125 	 * (nothing left to migrate).
1126 	 *
1127 	 * This lets us pick a pair of nodes to migrate between, such that
1128 	 * if possible the dest node is not already occupied by some other
1129 	 * source node, minimizing the risk of overloading the memory on a
1130 	 * node that would happen if we migrated incoming memory to a node
1131 	 * before migrating outgoing memory sourced from that same node.
1132 	 *
1133 	 * A single scan of tmp is sufficient.  As we go, we remember the
1134 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1135 	 * that not only moved, but what's better, moved to an empty slot
1136 	 * (d is not set in tmp), then we break out then, with that pair.
1137 	 * Otherwise when we finish scanning from_tmp, we at least have the
1138 	 * Otherwise when we finish scanning tmp, we at least have the
1139 	 * the scan of tmp without finding any node that moved, much less
1140 	 * moved to an empty node, then there is nothing left worth migrating.
1141 	 */
1142 
1143 	tmp = *from;
1144 	while (!nodes_empty(tmp)) {
1145 		int s, d;
1146 		int source = NUMA_NO_NODE;
1147 		int dest = 0;
1148 
1149 		for_each_node_mask(s, tmp) {
1150 
1151 			/*
1152 			 * do_migrate_pages() tries to maintain the relative
1153 			 * node relationship of the pages established between
1154 			 * threads and memory areas.
1155 			 *
1156 			 * However if the number of source nodes is not equal to
1157 			 * the number of destination nodes we can not preserve
1158 			 * this node relative relationship.  In that case, skip
1159 			 * copying memory from a node that is in the destination
1160 			 * mask.
1161 			 *
1162 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1163 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1164 			 */
1165 
1166 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1167 						(node_isset(s, *to)))
1168 				continue;
1169 
1170 			d = node_remap(s, *from, *to);
1171 			if (s == d)
1172 				continue;
1173 
1174 			source = s;	/* Node moved. Memorize */
1175 			dest = d;
1176 
1177 			/* dest not in remaining from nodes? */
1178 			if (!node_isset(dest, tmp))
1179 				break;
1180 		}
1181 		if (source == NUMA_NO_NODE)
1182 			break;
1183 
1184 		node_clear(source, tmp);
1185 		err = migrate_to_node(mm, source, dest, flags);
1186 		if (err > 0)
1187 			busy += err;
1188 		if (err < 0)
1189 			break;
1190 	}
1191 	mmap_read_unlock(mm);
1192 
1193 	lru_cache_enable();
1194 	if (err < 0)
1195 		return err;
1196 	return busy;
1197 
1198 }
1199 
1200 /*
1201  * Allocate a new page for page migration based on vma policy.
1202  * Start by assuming the page is mapped by the same vma as contains @start.
1203  * Search forward from there, if not.  N.B., this assumes that the
1204  * list of pages handed to migrate_pages()--which is how we get here--
1205  * is in virtual address order.
1206  */
1207 static struct page *new_page(struct page *page, unsigned long start)
1208 {
1209 	struct vm_area_struct *vma;
1210 	unsigned long address;
1211 
1212 	vma = find_vma(current->mm, start);
1213 	while (vma) {
1214 		address = page_address_in_vma(page, vma);
1215 		if (address != -EFAULT)
1216 			break;
1217 		vma = vma->vm_next;
1218 	}
1219 
1220 	if (PageHuge(page)) {
1221 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1222 				vma, address);
1223 	} else if (PageTransHuge(page)) {
1224 		struct page *thp;
1225 
1226 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1227 					 HPAGE_PMD_ORDER);
1228 		if (!thp)
1229 			return NULL;
1230 		prep_transhuge_page(thp);
1231 		return thp;
1232 	}
1233 	/*
1234 	 * if !vma, alloc_page_vma() will use task or system default policy
1235 	 */
1236 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1237 			vma, address);
1238 }
1239 #else
1240 
1241 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1242 				unsigned long flags)
1243 {
1244 	return -EIO;
1245 }
1246 
1247 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1248 		     const nodemask_t *to, int flags)
1249 {
1250 	return -ENOSYS;
1251 }
1252 
1253 static struct page *new_page(struct page *page, unsigned long start)
1254 {
1255 	return NULL;
1256 }
1257 #endif
1258 
1259 static long do_mbind(unsigned long start, unsigned long len,
1260 		     unsigned short mode, unsigned short mode_flags,
1261 		     nodemask_t *nmask, unsigned long flags)
1262 {
1263 	struct mm_struct *mm = current->mm;
1264 	struct mempolicy *new;
1265 	unsigned long end;
1266 	int err;
1267 	int ret;
1268 	LIST_HEAD(pagelist);
1269 
1270 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1271 		return -EINVAL;
1272 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1273 		return -EPERM;
1274 
1275 	if (start & ~PAGE_MASK)
1276 		return -EINVAL;
1277 
1278 	if (mode == MPOL_DEFAULT)
1279 		flags &= ~MPOL_MF_STRICT;
1280 
1281 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1282 	end = start + len;
1283 
1284 	if (end < start)
1285 		return -EINVAL;
1286 	if (end == start)
1287 		return 0;
1288 
1289 	new = mpol_new(mode, mode_flags, nmask);
1290 	if (IS_ERR(new))
1291 		return PTR_ERR(new);
1292 
1293 	if (flags & MPOL_MF_LAZY)
1294 		new->flags |= MPOL_F_MOF;
1295 
1296 	/*
1297 	 * If we are using the default policy then operation
1298 	 * on discontinuous address spaces is okay after all
1299 	 */
1300 	if (!new)
1301 		flags |= MPOL_MF_DISCONTIG_OK;
1302 
1303 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1304 		 start, start + len, mode, mode_flags,
1305 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1306 
1307 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1308 
1309 		lru_cache_disable();
1310 	}
1311 	{
1312 		NODEMASK_SCRATCH(scratch);
1313 		if (scratch) {
1314 			mmap_write_lock(mm);
1315 			err = mpol_set_nodemask(new, nmask, scratch);
1316 			if (err)
1317 				mmap_write_unlock(mm);
1318 		} else
1319 			err = -ENOMEM;
1320 		NODEMASK_SCRATCH_FREE(scratch);
1321 	}
1322 	if (err)
1323 		goto mpol_out;
1324 
1325 	ret = queue_pages_range(mm, start, end, nmask,
1326 			  flags | MPOL_MF_INVERT, &pagelist);
1327 
1328 	if (ret < 0) {
1329 		err = ret;
1330 		goto up_out;
1331 	}
1332 
1333 	err = mbind_range(mm, start, end, new);
1334 
1335 	if (!err) {
1336 		int nr_failed = 0;
1337 
1338 		if (!list_empty(&pagelist)) {
1339 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1340 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1341 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1342 			if (nr_failed)
1343 				putback_movable_pages(&pagelist);
1344 		}
1345 
1346 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1347 			err = -EIO;
1348 	} else {
1349 up_out:
1350 		if (!list_empty(&pagelist))
1351 			putback_movable_pages(&pagelist);
1352 	}
1353 
1354 	mmap_write_unlock(mm);
1355 mpol_out:
1356 	mpol_put(new);
1357 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1358 		lru_cache_enable();
1359 	return err;
1360 }
1361 
1362 /*
1363  * User space interface with variable sized bitmaps for nodelists.
1364  */
1365 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1366 		      unsigned long maxnode)
1367 {
1368 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1369 	int ret;
1370 
1371 	if (in_compat_syscall())
1372 		ret = compat_get_bitmap(mask,
1373 					(const compat_ulong_t __user *)nmask,
1374 					maxnode);
1375 	else
1376 		ret = copy_from_user(mask, nmask,
1377 				     nlongs * sizeof(unsigned long));
1378 
1379 	if (ret)
1380 		return -EFAULT;
1381 
1382 	if (maxnode % BITS_PER_LONG)
1383 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1384 
1385 	return 0;
1386 }
1387 
1388 /* Copy a node mask from user space. */
1389 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1390 		     unsigned long maxnode)
1391 {
1392 	--maxnode;
1393 	nodes_clear(*nodes);
1394 	if (maxnode == 0 || !nmask)
1395 		return 0;
1396 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1397 		return -EINVAL;
1398 
1399 	/*
1400 	 * When the user specified more nodes than supported just check
1401 	 * if the unsupported part is all zero, one word at a time,
1402 	 * starting at the end.
1403 	 */
1404 	while (maxnode > MAX_NUMNODES) {
1405 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1406 		unsigned long t;
1407 
1408 		if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
1409 			return -EFAULT;
1410 
1411 		if (maxnode - bits >= MAX_NUMNODES) {
1412 			maxnode -= bits;
1413 		} else {
1414 			maxnode = MAX_NUMNODES;
1415 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1416 		}
1417 		if (t)
1418 			return -EINVAL;
1419 	}
1420 
1421 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1422 }
1423 
1424 /* Copy a kernel node mask to user space */
1425 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1426 			      nodemask_t *nodes)
1427 {
1428 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1429 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1430 	bool compat = in_compat_syscall();
1431 
1432 	if (compat)
1433 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1434 
1435 	if (copy > nbytes) {
1436 		if (copy > PAGE_SIZE)
1437 			return -EINVAL;
1438 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1439 			return -EFAULT;
1440 		copy = nbytes;
1441 		maxnode = nr_node_ids;
1442 	}
1443 
1444 	if (compat)
1445 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1446 					 nodes_addr(*nodes), maxnode);
1447 
1448 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1449 }
1450 
1451 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1452 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1453 {
1454 	*flags = *mode & MPOL_MODE_FLAGS;
1455 	*mode &= ~MPOL_MODE_FLAGS;
1456 
1457 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1458 		return -EINVAL;
1459 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1460 		return -EINVAL;
1461 
1462 	return 0;
1463 }
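/*
 * Illustrative only: userspace passes the optional mode flags OR-ed into
 * the mode argument, e.g.
 *
 *	set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES, &nodes, maxnode);
 *
 * sanitize_mpol_flags() splits that into *mode == MPOL_INTERLEAVE and
 * *flags == MPOL_F_STATIC_NODES, and rejects the mutually exclusive
 * combination of MPOL_F_STATIC_NODES and MPOL_F_RELATIVE_NODES.
 */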
1464 
1465 static long kernel_mbind(unsigned long start, unsigned long len,
1466 			 unsigned long mode, const unsigned long __user *nmask,
1467 			 unsigned long maxnode, unsigned int flags)
1468 {
1469 	unsigned short mode_flags;
1470 	nodemask_t nodes;
1471 	int lmode = mode;
1472 	int err;
1473 
1474 	start = untagged_addr(start);
1475 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1476 	if (err)
1477 		return err;
1478 
1479 	err = get_nodes(&nodes, nmask, maxnode);
1480 	if (err)
1481 		return err;
1482 
1483 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1484 }
1485 
1486 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1487 		unsigned long, mode, const unsigned long __user *, nmask,
1488 		unsigned long, maxnode, unsigned int, flags)
1489 {
1490 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1491 }
1492 
1493 /* Set the process memory policy */
1494 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1495 				 unsigned long maxnode)
1496 {
1497 	unsigned short mode_flags;
1498 	nodemask_t nodes;
1499 	int lmode = mode;
1500 	int err;
1501 
1502 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1503 	if (err)
1504 		return err;
1505 
1506 	err = get_nodes(&nodes, nmask, maxnode);
1507 	if (err)
1508 		return err;
1509 
1510 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1511 }
1512 
1513 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1514 		unsigned long, maxnode)
1515 {
1516 	return kernel_set_mempolicy(mode, nmask, maxnode);
1517 }
1518 
1519 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1520 				const unsigned long __user *old_nodes,
1521 				const unsigned long __user *new_nodes)
1522 {
1523 	struct mm_struct *mm = NULL;
1524 	struct task_struct *task;
1525 	nodemask_t task_nodes;
1526 	int err;
1527 	nodemask_t *old;
1528 	nodemask_t *new;
1529 	NODEMASK_SCRATCH(scratch);
1530 
1531 	if (!scratch)
1532 		return -ENOMEM;
1533 
1534 	old = &scratch->mask1;
1535 	new = &scratch->mask2;
1536 
1537 	err = get_nodes(old, old_nodes, maxnode);
1538 	if (err)
1539 		goto out;
1540 
1541 	err = get_nodes(new, new_nodes, maxnode);
1542 	if (err)
1543 		goto out;
1544 
1545 	/* Find the mm_struct */
1546 	rcu_read_lock();
1547 	task = pid ? find_task_by_vpid(pid) : current;
1548 	if (!task) {
1549 		rcu_read_unlock();
1550 		err = -ESRCH;
1551 		goto out;
1552 	}
1553 	get_task_struct(task);
1554 
1555 	err = -EINVAL;
1556 
1557 	/*
1558 	 * Check if this process has the right to modify the specified process.
1559 	 * Use the regular "ptrace_may_access()" checks.
1560 	 */
1561 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1562 		rcu_read_unlock();
1563 		err = -EPERM;
1564 		goto out_put;
1565 	}
1566 	rcu_read_unlock();
1567 
1568 	task_nodes = cpuset_mems_allowed(task);
1569 	/* Is the user allowed to access the target nodes? */
1570 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1571 		err = -EPERM;
1572 		goto out_put;
1573 	}
1574 
1575 	task_nodes = cpuset_mems_allowed(current);
1576 	nodes_and(*new, *new, task_nodes);
1577 	if (nodes_empty(*new))
1578 		goto out_put;
1579 
1580 	err = security_task_movememory(task);
1581 	if (err)
1582 		goto out_put;
1583 
1584 	mm = get_task_mm(task);
1585 	put_task_struct(task);
1586 
1587 	if (!mm) {
1588 		err = -EINVAL;
1589 		goto out;
1590 	}
1591 
1592 	err = do_migrate_pages(mm, old, new,
1593 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1594 
1595 	mmput(mm);
1596 out:
1597 	NODEMASK_SCRATCH_FREE(scratch);
1598 
1599 	return err;
1600 
1601 out_put:
1602 	put_task_struct(task);
1603 	goto out;
1604 
1605 }
1606 
1607 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1608 		const unsigned long __user *, old_nodes,
1609 		const unsigned long __user *, new_nodes)
1610 {
1611 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1612 }
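/*
 * Illustrative userspace counterpart (sketch, assuming the libnuma
 * migrate_pages() wrapper from <numaif.h>):
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *
 *	// move as much of task <pid>'s memory as possible from node 0 to 1
 *	long left = migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *	// 'left' is the number of pages that could not be moved, or -1 on error
 */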
1613 
1614 
1615 /* Retrieve NUMA policy */
1616 static int kernel_get_mempolicy(int __user *policy,
1617 				unsigned long __user *nmask,
1618 				unsigned long maxnode,
1619 				unsigned long addr,
1620 				unsigned long flags)
1621 {
1622 	int err;
1623 	int pval;
1624 	nodemask_t nodes;
1625 
1626 	if (nmask != NULL && maxnode < nr_node_ids)
1627 		return -EINVAL;
1628 
1629 	addr = untagged_addr(addr);
1630 
1631 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1632 
1633 	if (err)
1634 		return err;
1635 
1636 	if (policy && put_user(pval, policy))
1637 		return -EFAULT;
1638 
1639 	if (nmask)
1640 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1641 
1642 	return err;
1643 }
1644 
1645 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1646 		unsigned long __user *, nmask, unsigned long, maxnode,
1647 		unsigned long, addr, unsigned long, flags)
1648 {
1649 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1650 }
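/*
 * Illustrative userspace usage (sketch): ask which node currently backs
 * the page at 'addr'; with MPOL_F_NODE | MPOL_F_ADDR the node id comes
 * back through the 'policy' pointer (see lookup_node() above, which may
 * fault the page in via get_user_pages_locked()):
 *
 *	#include <numaif.h>
 *
 *	int node = -1;
 *	long err = get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success (err == 0), 'node' holds the id of the backing node
 */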
1651 
1652 bool vma_migratable(struct vm_area_struct *vma)
1653 {
1654 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1655 		return false;
1656 
1657 	/*
1658 	 * DAX device mappings require predictable access latency, so avoid
1659 	 * incurring periodic faults.
1660 	 */
1661 	if (vma_is_dax(vma))
1662 		return false;
1663 
1664 	if (is_vm_hugetlb_page(vma) &&
1665 		!hugepage_migration_supported(hstate_vma(vma)))
1666 		return false;
1667 
1668 	/*
1669 	 * Migration allocates pages in the highest zone. If we cannot
1670 	 * do so then migration (at least from node to node) is not
1671 	 * possible.
1672 	 */
1673 	if (vma->vm_file &&
1674 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1675 			< policy_zone)
1676 		return false;
1677 	return true;
1678 }
1679 
1680 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1681 						unsigned long addr)
1682 {
1683 	struct mempolicy *pol = NULL;
1684 
1685 	if (vma) {
1686 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1687 			pol = vma->vm_ops->get_policy(vma, addr);
1688 		} else if (vma->vm_policy) {
1689 			pol = vma->vm_policy;
1690 
1691 			/*
1692 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1693 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1694 			 * count on these policies which will be dropped by
1695 			 * mpol_cond_put() later
1696 			 */
1697 			if (mpol_needs_cond_ref(pol))
1698 				mpol_get(pol);
1699 		}
1700 	}
1701 
1702 	return pol;
1703 }
1704 
1705 /*
1706  * get_vma_policy(@vma, @addr)
1707  * @vma: virtual memory area whose policy is sought
1708  * @addr: address in @vma for shared policy lookup
1709  *
1710  * Returns the effective policy for a VMA at the specified address.
1711  * Falls back to current->mempolicy or system default policy, as necessary.
1712  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1713  * count--added by the get_policy() vm_op, as appropriate--to protect against
1714  * freeing by another task.  It is the caller's responsibility to free the
1715  * extra reference for shared policies.
1716  */
1717 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1718 						unsigned long addr)
1719 {
1720 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1721 
1722 	if (!pol)
1723 		pol = get_task_policy(current);
1724 
1725 	return pol;
1726 }
1727 
1728 bool vma_policy_mof(struct vm_area_struct *vma)
1729 {
1730 	struct mempolicy *pol;
1731 
1732 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1733 		bool ret = false;
1734 
1735 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1736 		if (pol && (pol->flags & MPOL_F_MOF))
1737 			ret = true;
1738 		mpol_cond_put(pol);
1739 
1740 		return ret;
1741 	}
1742 
1743 	pol = vma->vm_policy;
1744 	if (!pol)
1745 		pol = get_task_policy(current);
1746 
1747 	return pol->flags & MPOL_F_MOF;
1748 }
1749 
1750 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1751 {
1752 	enum zone_type dynamic_policy_zone = policy_zone;
1753 
1754 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1755 
1756 	/*
1757 	 * If policy->nodes has movable memory only,
1758 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1759 	 *
1760 	 * policy->nodes is intersected with node_states[N_MEMORY],
1761 	 * so if the following test fails, it implies that
1762 	 * policy->nodes has movable memory only.
1763 	 */
1764 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1765 		dynamic_policy_zone = ZONE_MOVABLE;
1766 
1767 	return zone >= dynamic_policy_zone;
1768 }
1769 
1770 /*
1771  * Return a nodemask representing a mempolicy for filtering nodes for
1772  * page allocation
1773  */
1774 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1775 {
1776 	int mode = policy->mode;
1777 
1778 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1779 	if (unlikely(mode == MPOL_BIND) &&
1780 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1781 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1782 		return &policy->nodes;
1783 
1784 	if (mode == MPOL_PREFERRED_MANY)
1785 		return &policy->nodes;
1786 
1787 	return NULL;
1788 }
1789 
1790 /*
1791  * Return the preferred node id for 'prefer' mempolicy, and return
1792  * the given id for all other policies.
1793  *
1794  * policy_node() is always coupled with policy_nodemask(), which
1795  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1796  */
1797 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1798 {
1799 	if (policy->mode == MPOL_PREFERRED) {
1800 		nd = first_node(policy->nodes);
1801 	} else {
1802 		/*
1803 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1804 		 * because we might easily break the expectation to stay on the
1805 		 * requested node and not break the policy.
1806 		 */
1807 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1808 	}
1809 
1810 	return nd;
1811 }
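/*
 * Sketch of the typical pairing (as used by the allocation paths later in
 * this file): policy_nodemask() limits which nodes the zonelist walk may
 * visit, while policy_node() only picks the node the walk starts from.
 *
 *	nodemask_t *nmask = policy_nodemask(gfp, pol);
 *	int nid = policy_node(gfp, pol, numa_node_id());
 *	struct page *page = __alloc_pages(gfp, order, nid, nmask);
 */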
1812 
1813 /* Do dynamic interleaving for a process */
1814 static unsigned interleave_nodes(struct mempolicy *policy)
1815 {
1816 	unsigned next;
1817 	struct task_struct *me = current;
1818 
1819 	next = next_node_in(me->il_prev, policy->nodes);
1820 	if (next < MAX_NUMNODES)
1821 		me->il_prev = next;
1822 	return next;
1823 }
1824 
1825 /*
1826  * Depending on the memory policy provide a node from which to allocate the
1827  * next slab entry.
1828  */
1829 unsigned int mempolicy_slab_node(void)
1830 {
1831 	struct mempolicy *policy;
1832 	int node = numa_mem_id();
1833 
1834 	if (!in_task())
1835 		return node;
1836 
1837 	policy = current->mempolicy;
1838 	if (!policy)
1839 		return node;
1840 
1841 	switch (policy->mode) {
1842 	case MPOL_PREFERRED:
1843 		return first_node(policy->nodes);
1844 
1845 	case MPOL_INTERLEAVE:
1846 		return interleave_nodes(policy);
1847 
1848 	case MPOL_BIND:
1849 	case MPOL_PREFERRED_MANY:
1850 	{
1851 		struct zoneref *z;
1852 
1853 		/*
1854 		 * Follow bind policy behavior and start allocation at the
1855 		 * first node.
1856 		 */
1857 		struct zonelist *zonelist;
1858 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1859 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1860 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1861 							&policy->nodes);
1862 		return z->zone ? zone_to_nid(z->zone) : node;
1863 	}
1864 	case MPOL_LOCAL:
1865 		return node;
1866 
1867 	default:
1868 		BUG();
1869 	}
1870 }
1871 
1872 /*
1873  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1874  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1875  * number of present nodes.
1876  */
1877 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1878 {
1879 	nodemask_t nodemask = pol->nodes;
1880 	unsigned int target, nnodes;
1881 	int i;
1882 	int nid;
1883 	/*
1884 	 * The barrier will stabilize the nodemask in a register or on
1885 	 * the stack so that it will stop changing under the code.
1886 	 *
1887 	 * Between first_node() and next_node(), pol->nodes could be changed
1888 	 * by other threads. So we put pol->nodes in a local stack.
1889 	 */
1890 	barrier();
1891 
1892 	nnodes = nodes_weight(nodemask);
1893 	if (!nnodes)
1894 		return numa_node_id();
1895 	target = (unsigned int)n % nnodes;
1896 	nid = first_node(nodemask);
1897 	for (i = 0; i < target; i++)
1898 		nid = next_node(nid, nodemask);
1899 	return nid;
1900 }
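/*
 * Worked example (illustrative): with pol->nodes = {0,2,5} (nnodes == 3)
 * and n == 7, target = 7 % 3 = 1, so the walk starts at first_node() == 0
 * and takes one next_node() step, returning node 2.
 */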
1901 
1902 /* Determine a node number for interleave */
1903 static inline unsigned interleave_nid(struct mempolicy *pol,
1904 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1905 {
1906 	if (vma) {
1907 		unsigned long off;
1908 
1909 		/*
1910 		 * for small pages, there is no difference between
1911 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1912 		 * for huge pages, since vm_pgoff is in units of small
1913 		 * pages, we need to shift off the always 0 bits to get
1914 		 * a useful offset.
1915 		 */
1916 		BUG_ON(shift < PAGE_SHIFT);
1917 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1918 		off += (addr - vma->vm_start) >> shift;
1919 		return offset_il_node(pol, off);
1920 	} else
1921 		return interleave_nodes(pol);
1922 }
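
/*
 * Worked example (editor's illustration, assuming 4KB base pages and a
 * 2MB huge page, i.e. PAGE_SHIFT == 12 and shift == 21): with
 * vm_pgoff == 0, a fault at vma->vm_start + 4MB gives
 * off = 0 + (4MB >> 21) = 2, and offset_il_node() then picks the node at
 * index 2 % nnodes of the policy's nodemask.
 */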
1923 
1924 #ifdef CONFIG_HUGETLBFS
1925 /*
1926  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1927  * @vma: virtual memory area whose policy is sought
1928  * @addr: address in @vma for shared policy lookup and interleave policy
1929  * @gfp_flags: for requested zone
1930  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1931  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1932  *
1933  * Returns a nid suitable for a huge page allocation and a pointer
1934  * to the struct mempolicy for conditional unref after allocation.
1935  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1936  * to the mempolicy's @nodemask for filtering the zonelist.
1937  *
1938  * Must be protected by read_mems_allowed_begin()
1939  */
1940 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1941 				struct mempolicy **mpol, nodemask_t **nodemask)
1942 {
1943 	int nid;
1944 	int mode;
1945 
1946 	*mpol = get_vma_policy(vma, addr);
1947 	*nodemask = NULL;
1948 	mode = (*mpol)->mode;
1949 
1950 	if (unlikely(mode == MPOL_INTERLEAVE)) {
1951 		nid = interleave_nid(*mpol, vma, addr,
1952 					huge_page_shift(hstate_vma(vma)));
1953 	} else {
1954 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1955 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
1956 			*nodemask = &(*mpol)->nodes;
1957 	}
1958 	return nid;
1959 }
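
/*
 * Typical calling pattern (editor's sketch of a hugetlb allocation site,
 * not part of this file; the gfp helper name is hugetlb's):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid;
 *
 *	nid = huge_node(vma, addr, htlb_alloc_mask(h), &mpol, &nodemask);
 *	... allocate from nid, filtering by nodemask when it is non-NULL ...
 *	mpol_cond_put(mpol);
 *
 * The whole sequence sits inside a read_mems_allowed_begin()/
 * read_mems_allowed_retry() loop, as required by the comment above.
 */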
1960 
1961 /*
1962  * init_nodemask_of_mempolicy
1963  *
1964  * If the current task's mempolicy is "default" [NULL], return 'false'
1965  * to indicate default policy.  Otherwise, extract the policy nodemask
1966  * for 'bind' or 'interleave' policy into the argument nodemask, or
1967  * initialize the argument nodemask to contain the single node for
1968  * 'preferred' or 'local' policy and return 'true' to indicate presence
1969  * of non-default mempolicy.
1970  *
1971  * We don't bother with reference counting the mempolicy [mpol_get/put]
1972  * because the current task is examining its own mempolicy and a task's
1973  * mempolicy is only ever changed by the task itself.
1974  *
1975  * N.B., it is the caller's responsibility to free a returned nodemask.
1976  */
1977 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1978 {
1979 	struct mempolicy *mempolicy;
1980 
1981 	if (!(mask && current->mempolicy))
1982 		return false;
1983 
1984 	task_lock(current);
1985 	mempolicy = current->mempolicy;
1986 	switch (mempolicy->mode) {
1987 	case MPOL_PREFERRED:
1988 	case MPOL_PREFERRED_MANY:
1989 	case MPOL_BIND:
1990 	case MPOL_INTERLEAVE:
1991 		*mask = mempolicy->nodes;
1992 		break;
1993 
1994 	case MPOL_LOCAL:
1995 		init_nodemask_of_node(mask, numa_node_id());
1996 		break;
1997 
1998 	default:
1999 		BUG();
2000 	}
2001 	task_unlock(current);
2002 
2003 	return true;
2004 }
2005 #endif
2006 
2007 /*
2008  * mempolicy_in_oom_domain
2009  *
2010  * If tsk's mempolicy is "bind", check for intersection between mask and
2011  * the policy nodemask. Otherwise, return true for all other policies
2012  * including "interleave", as a tsk with "interleave" policy may have
2013  * memory allocated from all nodes in system.
2014  *
2015  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2016  */
2017 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2018 					const nodemask_t *mask)
2019 {
2020 	struct mempolicy *mempolicy;
2021 	bool ret = true;
2022 
2023 	if (!mask)
2024 		return ret;
2025 
2026 	task_lock(tsk);
2027 	mempolicy = tsk->mempolicy;
2028 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2029 		ret = nodes_intersects(mempolicy->nodes, *mask);
2030 	task_unlock(tsk);
2031 
2032 	return ret;
2033 }
2034 
2035 /* Allocate a page with the interleave policy.
2036    Own path because it needs to do special accounting. */
2037 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2038 					unsigned nid)
2039 {
2040 	struct page *page;
2041 
2042 	page = __alloc_pages(gfp, order, nid, NULL);
2043 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats are disabled */
2044 	if (!static_branch_likely(&vm_numa_stat_key))
2045 		return page;
2046 	if (page && page_to_nid(page) == nid) {
2047 		preempt_disable();
2048 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2049 		preempt_enable();
2050 	}
2051 	return page;
2052 }
2053 
2054 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2055 						int nid, struct mempolicy *pol)
2056 {
2057 	struct page *page;
2058 	gfp_t preferred_gfp;
2059 
2060 	/*
2061 	 * This is a two pass approach. The first pass will only try the
2062 	 * preferred nodes but skip the direct reclaim and allow the
2063 	 * allocation to fail, while the second pass will try all the
2064 	 * nodes in system.
2065 	 */
2066 	preferred_gfp = gfp | __GFP_NOWARN;
2067 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2068 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2069 	if (!page)
2070 		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
2071 
2072 	return page;
2073 }
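
/*
 * Editor's illustration of the gfp adjustment above: for a GFP_HIGHUSER
 * request, pass one effectively uses
 *
 *	preferred_gfp = (GFP_HIGHUSER | __GFP_NOWARN)
 *			& ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 *
 * i.e. an opportunistic, no-direct-reclaim attempt confined to pol->nodes,
 * while pass two retries with the caller's original gfp and no nodemask.
 */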
2074 
2075 /**
2076  * alloc_pages_vma - Allocate a page for a VMA.
2077  * @gfp: GFP flags.
2078  * @order: Order of the GFP allocation.
2079  * @vma: Pointer to VMA or NULL if not available.
2080  * @addr: Virtual address of the allocation.  Must be inside @vma.
2081  * @node: Which node to prefer for allocation (modulo policy).
2082  * @hugepage: For hugepages try only the preferred node if possible.
2083  *
2084  * Allocate a page for a specific address in @vma, using the appropriate
2085  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2086  * of the mm_struct of the VMA to prevent it from going away.  Should be
2087  * used for all allocations for pages that will be mapped into user space.
2088  *
2089  * Return: The page on success or NULL if allocation fails.
2090  */
2091 struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2092 		unsigned long addr, int node, bool hugepage)
2093 {
2094 	struct mempolicy *pol;
2095 	struct page *page;
2096 	int preferred_nid;
2097 	nodemask_t *nmask;
2098 
2099 	pol = get_vma_policy(vma, addr);
2100 
2101 	if (pol->mode == MPOL_INTERLEAVE) {
2102 		unsigned nid;
2103 
2104 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2105 		mpol_cond_put(pol);
2106 		page = alloc_page_interleave(gfp, order, nid);
2107 		goto out;
2108 	}
2109 
2110 	if (pol->mode == MPOL_PREFERRED_MANY) {
2111 		page = alloc_pages_preferred_many(gfp, order, node, pol);
2112 		mpol_cond_put(pol);
2113 		goto out;
2114 	}
2115 
2116 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2117 		int hpage_node = node;
2118 
2119 		/*
2120 		 * For hugepage allocation and non-interleave policy which
2121 		 * allows the current node (or other explicitly preferred
2122 		 * node) we only try to allocate from the current/preferred
2123 		 * node and don't fall back to other nodes, as the cost of
2124 		 * remote accesses would likely offset THP benefits.
2125 		 *
2126 		 * If the policy is interleave or does not allow the current
2127 		 * node in its nodemask, we allocate the standard way.
2128 		 */
2129 		if (pol->mode == MPOL_PREFERRED)
2130 			hpage_node = first_node(pol->nodes);
2131 
2132 		nmask = policy_nodemask(gfp, pol);
2133 		if (!nmask || node_isset(hpage_node, *nmask)) {
2134 			mpol_cond_put(pol);
2135 			/*
2136 			 * First, try to allocate THP only on local node, but
2137 			 * don't reclaim unnecessarily, just compact.
2138 			 */
2139 			page = __alloc_pages_node(hpage_node,
2140 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2141 
2142 			/*
2143 			 * If hugepage allocations are configured to always use
2144 			 * synchronous compaction or the vma has been madvised
2145 			 * to prefer hugepage backing, retry allowing remote
2146 			 * memory with both reclaim and compaction as well.
2147 			 */
2148 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2149 				page = __alloc_pages_node(hpage_node,
2150 								gfp, order);
2151 
2152 			goto out;
2153 		}
2154 	}
2155 
2156 	nmask = policy_nodemask(gfp, pol);
2157 	preferred_nid = policy_node(gfp, pol, node);
2158 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2159 	mpol_cond_put(pol);
2160 out:
2161 	return page;
2162 }
2163 EXPORT_SYMBOL(alloc_pages_vma);
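
/*
 * Usage sketch (editor's illustration of a typical fault-path call; the
 * surrounding code is assumed, not taken from this file):
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *
 * The caller must already hold the mmap_lock for the vma, per the
 * kernel-doc above.
 */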
2164 
2165 /**
2166  * alloc_pages - Allocate pages.
2167  * @gfp: GFP flags.
2168  * @order: Power of two of number of pages to allocate.
2169  *
2170  * Allocate 1 << @order contiguous pages.  The physical address of the
2171  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2172  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2173  * process is honoured when in process context.
2174  *
2175  * Context: Can be called from any context, providing the appropriate GFP
2176  * flags are used.
2177  * Return: The page on success or NULL if allocation fails.
2178  */
2179 struct page *alloc_pages(gfp_t gfp, unsigned order)
2180 {
2181 	struct mempolicy *pol = &default_policy;
2182 	struct page *page;
2183 
2184 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2185 		pol = get_task_policy(current);
2186 
2187 	/*
2188 	 * No reference counting needed for current->mempolicy
2189 	 * nor system default_policy
2190 	 */
2191 	if (pol->mode == MPOL_INTERLEAVE)
2192 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2193 	else if (pol->mode == MPOL_PREFERRED_MANY)
2194 		page = alloc_pages_preferred_many(gfp, order,
2195 				numa_node_id(), pol);
2196 	else
2197 		page = __alloc_pages(gfp, order,
2198 				policy_node(gfp, pol, numa_node_id()),
2199 				policy_nodemask(gfp, pol));
2200 
2201 	return page;
2202 }
2203 EXPORT_SYMBOL(alloc_pages);
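
/*
 * Usage sketch (editor's illustration): an order-2 request returns four
 * contiguous pages whose first page is aligned to 4 * PAGE_SIZE:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *		...
 *		__free_pages(page, 2);
 *	}
 */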
2204 
2205 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2206 {
2207 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2208 
2209 	if (IS_ERR(pol))
2210 		return PTR_ERR(pol);
2211 	dst->vm_policy = pol;
2212 	return 0;
2213 }
2214 
2215 /*
2216  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2217  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2218  * with the mems_allowed returned by cpuset_mems_allowed().  This
2219  * keeps mempolicies cpuset-relative after their cpuset moves.  See
2220  * further kernel/cpuset.c update_nodemask().
2221  *
2222  * current's mempolicy may be rebound by another task (the task that changes
2223  * the cpuset's mems), so we needn't do any rebind work for the current task.
2224  */
2225 
2226 /* Slow path of a mempolicy duplicate */
2227 struct mempolicy *__mpol_dup(struct mempolicy *old)
2228 {
2229 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2230 
2231 	if (!new)
2232 		return ERR_PTR(-ENOMEM);
2233 
2234 	/* task's mempolicy is protected by alloc_lock */
2235 	if (old == current->mempolicy) {
2236 		task_lock(current);
2237 		*new = *old;
2238 		task_unlock(current);
2239 	} else
2240 		*new = *old;
2241 
2242 	if (current_cpuset_is_being_rebound()) {
2243 		nodemask_t mems = cpuset_mems_allowed(current);
2244 		mpol_rebind_policy(new, &mems);
2245 	}
2246 	atomic_set(&new->refcnt, 1);
2247 	return new;
2248 }
2249 
2250 /* Slow path of a mempolicy comparison */
2251 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2252 {
2253 	if (!a || !b)
2254 		return false;
2255 	if (a->mode != b->mode)
2256 		return false;
2257 	if (a->flags != b->flags)
2258 		return false;
2259 	if (mpol_store_user_nodemask(a))
2260 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2261 			return false;
2262 
2263 	switch (a->mode) {
2264 	case MPOL_BIND:
2265 	case MPOL_INTERLEAVE:
2266 	case MPOL_PREFERRED:
2267 	case MPOL_PREFERRED_MANY:
2268 		return !!nodes_equal(a->nodes, b->nodes);
2269 	case MPOL_LOCAL:
2270 		return true;
2271 	default:
2272 		BUG();
2273 		return false;
2274 	}
2275 }
2276 
2277 /*
2278  * Shared memory backing store policy support.
2279  *
2280  * Remember policies even when nobody has shared memory mapped.
2281  * The policies are kept in Red-Black tree linked from the inode.
2282  * They are protected by the sp->lock rwlock, which should be held
2283  * for any accesses to the tree.
2284  */
2285 
2286 /*
2287  * Lookup the first element intersecting start-end.  Caller holds sp->lock
2288  * for reading or for writing.
2289  */
2290 static struct sp_node *
2291 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2292 {
2293 	struct rb_node *n = sp->root.rb_node;
2294 
2295 	while (n) {
2296 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2297 
2298 		if (start >= p->end)
2299 			n = n->rb_right;
2300 		else if (end <= p->start)
2301 			n = n->rb_left;
2302 		else
2303 			break;
2304 	}
2305 	if (!n)
2306 		return NULL;
2307 	for (;;) {
2308 		struct sp_node *w = NULL;
2309 		struct rb_node *prev = rb_prev(n);
2310 		if (!prev)
2311 			break;
2312 		w = rb_entry(prev, struct sp_node, nd);
2313 		if (w->end <= start)
2314 			break;
2315 		n = prev;
2316 	}
2317 	return rb_entry(n, struct sp_node, nd);
2318 }
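
/*
 * Worked example (editor's illustration): with stored ranges [0,4) and
 * [6,10), sp_lookup(sp, 3, 8) first lands on an intersecting node and then
 * walks rb_prev() until w->end <= start, so it returns the [0,4) node --
 * the leftmost range overlapping [3,8).
 */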
2319 
2320 /*
2321  * Insert a new shared policy into the list.  Caller holds sp->lock for
2322  * writing.
2323  */
2324 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2325 {
2326 	struct rb_node **p = &sp->root.rb_node;
2327 	struct rb_node *parent = NULL;
2328 	struct sp_node *nd;
2329 
2330 	while (*p) {
2331 		parent = *p;
2332 		nd = rb_entry(parent, struct sp_node, nd);
2333 		if (new->start < nd->start)
2334 			p = &(*p)->rb_left;
2335 		else if (new->end > nd->end)
2336 			p = &(*p)->rb_right;
2337 		else
2338 			BUG();
2339 	}
2340 	rb_link_node(&new->nd, parent, p);
2341 	rb_insert_color(&new->nd, &sp->root);
2342 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2343 		 new->policy ? new->policy->mode : 0);
2344 }
2345 
2346 /* Find shared policy intersecting idx */
2347 struct mempolicy *
2348 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2349 {
2350 	struct mempolicy *pol = NULL;
2351 	struct sp_node *sn;
2352 
2353 	if (!sp->root.rb_node)
2354 		return NULL;
2355 	read_lock(&sp->lock);
2356 	sn = sp_lookup(sp, idx, idx+1);
2357 	if (sn) {
2358 		mpol_get(sn->policy);
2359 		pol = sn->policy;
2360 	}
2361 	read_unlock(&sp->lock);
2362 	return pol;
2363 }
2364 
2365 static void sp_free(struct sp_node *n)
2366 {
2367 	mpol_put(n->policy);
2368 	kmem_cache_free(sn_cache, n);
2369 }
2370 
2371 /**
2372  * mpol_misplaced - check whether current page node is valid in policy
2373  *
2374  * @page: page to be checked
2375  * @vma: vm area where page mapped
2376  * @addr: virtual address where page mapped
2377  *
2378  * Lookup current policy node id for vma,addr and "compare to" page's
2379  * Lookup current policy node id for vma, addr and "compare to" page's
2380  * Called from fault path where we know the vma and faulting address.
2381  *
2382  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2383  * policy, or a suitable node ID to allocate a replacement page from.
2384  */
2385 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2386 {
2387 	struct mempolicy *pol;
2388 	struct zoneref *z;
2389 	int curnid = page_to_nid(page);
2390 	unsigned long pgoff;
2391 	int thiscpu = raw_smp_processor_id();
2392 	int thisnid = cpu_to_node(thiscpu);
2393 	int polnid = NUMA_NO_NODE;
2394 	int ret = NUMA_NO_NODE;
2395 
2396 	pol = get_vma_policy(vma, addr);
2397 	if (!(pol->flags & MPOL_F_MOF))
2398 		goto out;
2399 
2400 	switch (pol->mode) {
2401 	case MPOL_INTERLEAVE:
2402 		pgoff = vma->vm_pgoff;
2403 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2404 		polnid = offset_il_node(pol, pgoff);
2405 		break;
2406 
2407 	case MPOL_PREFERRED:
2408 		if (node_isset(curnid, pol->nodes))
2409 			goto out;
2410 		polnid = first_node(pol->nodes);
2411 		break;
2412 
2413 	case MPOL_LOCAL:
2414 		polnid = numa_node_id();
2415 		break;
2416 
2417 	case MPOL_BIND:
2418 		/* Optimize placement among multiple nodes via NUMA balancing */
2419 		if (pol->flags & MPOL_F_MORON) {
2420 			if (node_isset(thisnid, pol->nodes))
2421 				break;
2422 			goto out;
2423 		}
2424 		fallthrough;
2425 
2426 	case MPOL_PREFERRED_MANY:
2427 		/*
2428 		 * use current page if in policy nodemask,
2429 		 * else select nearest allowed node, if any.
2430 		 * If no allowed nodes, use current [!misplaced].
2431 		 */
2432 		if (node_isset(curnid, pol->nodes))
2433 			goto out;
2434 		z = first_zones_zonelist(
2435 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2436 				gfp_zone(GFP_HIGHUSER),
2437 				&pol->nodes);
2438 		polnid = zone_to_nid(z->zone);
2439 		break;
2440 
2441 	default:
2442 		BUG();
2443 	}
2444 
2445 	/* Migrate the page towards the node whose CPU is referencing it */
2446 	if (pol->flags & MPOL_F_MORON) {
2447 		polnid = thisnid;
2448 
2449 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2450 			goto out;
2451 	}
2452 
2453 	if (curnid != polnid)
2454 		ret = polnid;
2455 out:
2456 	mpol_cond_put(pol);
2457 
2458 	return ret;
2459 }
2460 
2461 /*
2462  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2463  * dropped after task->mempolicy is set to NULL so that any allocation done as
2464  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2465  * policy.
2466  */
2467 void mpol_put_task_policy(struct task_struct *task)
2468 {
2469 	struct mempolicy *pol;
2470 
2471 	task_lock(task);
2472 	pol = task->mempolicy;
2473 	task->mempolicy = NULL;
2474 	task_unlock(task);
2475 	mpol_put(pol);
2476 }
2477 
2478 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2479 {
2480 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2481 	rb_erase(&n->nd, &sp->root);
2482 	sp_free(n);
2483 }
2484 
2485 static void sp_node_init(struct sp_node *node, unsigned long start,
2486 			unsigned long end, struct mempolicy *pol)
2487 {
2488 	node->start = start;
2489 	node->end = end;
2490 	node->policy = pol;
2491 }
2492 
2493 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2494 				struct mempolicy *pol)
2495 {
2496 	struct sp_node *n;
2497 	struct mempolicy *newpol;
2498 
2499 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2500 	if (!n)
2501 		return NULL;
2502 
2503 	newpol = mpol_dup(pol);
2504 	if (IS_ERR(newpol)) {
2505 		kmem_cache_free(sn_cache, n);
2506 		return NULL;
2507 	}
2508 	newpol->flags |= MPOL_F_SHARED;
2509 	sp_node_init(n, start, end, newpol);
2510 
2511 	return n;
2512 }
2513 
2514 /* Replace a policy range. */
2515 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2516 				 unsigned long end, struct sp_node *new)
2517 {
2518 	struct sp_node *n;
2519 	struct sp_node *n_new = NULL;
2520 	struct mempolicy *mpol_new = NULL;
2521 	int ret = 0;
2522 
2523 restart:
2524 	write_lock(&sp->lock);
2525 	n = sp_lookup(sp, start, end);
2526 	/* Take care of old policies in the same range. */
2527 	while (n && n->start < end) {
2528 		struct rb_node *next = rb_next(&n->nd);
2529 		if (n->start >= start) {
2530 			if (n->end <= end)
2531 				sp_delete(sp, n);
2532 			else
2533 				n->start = end;
2534 		} else {
2535 			/* Old policy spanning whole new range. */
2536 			if (n->end > end) {
2537 				if (!n_new)
2538 					goto alloc_new;
2539 
2540 				*mpol_new = *n->policy;
2541 				atomic_set(&mpol_new->refcnt, 1);
2542 				sp_node_init(n_new, end, n->end, mpol_new);
2543 				n->end = start;
2544 				sp_insert(sp, n_new);
2545 				n_new = NULL;
2546 				mpol_new = NULL;
2547 				break;
2548 			} else
2549 				n->end = start;
2550 		}
2551 		if (!next)
2552 			break;
2553 		n = rb_entry(next, struct sp_node, nd);
2554 	}
2555 	if (new)
2556 		sp_insert(sp, new);
2557 	write_unlock(&sp->lock);
2558 	ret = 0;
2559 
2560 err_out:
2561 	if (mpol_new)
2562 		mpol_put(mpol_new);
2563 	if (n_new)
2564 		kmem_cache_free(sn_cache, n_new);
2565 
2566 	return ret;
2567 
2568 alloc_new:
2569 	write_unlock(&sp->lock);
2570 	ret = -ENOMEM;
2571 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2572 	if (!n_new)
2573 		goto err_out;
2574 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2575 	if (!mpol_new)
2576 		goto err_out;
2577 	goto restart;
2578 }
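
/*
 * Worked example (editor's illustration): replacing [2,5) in a tree that
 * holds the single range [0,10) takes the "old policy spanning whole new
 * range" branch above: the old node is trimmed to [0,2), a copy of its
 * policy is inserted as [5,10) using the preallocated n_new/mpol_new pair,
 * and the new [2,5) node is inserted last.
 */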
2579 
2580 /**
2581  * mpol_shared_policy_init - initialize shared policy for inode
2582  * @sp: pointer to inode shared policy
2583  * @mpol:  struct mempolicy to install
2584  *
2585  * Install non-NULL @mpol in inode's shared policy rb-tree.
2586  * On entry, the current task has a reference on a non-NULL @mpol.
2587  * This must be released on exit.
2588  * This is called during get_inode() calls, so we can use GFP_KERNEL.
2589  */
2590 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2591 {
2592 	int ret;
2593 
2594 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2595 	rwlock_init(&sp->lock);
2596 
2597 	if (mpol) {
2598 		struct vm_area_struct pvma;
2599 		struct mempolicy *new;
2600 		NODEMASK_SCRATCH(scratch);
2601 
2602 		if (!scratch)
2603 			goto put_mpol;
2604 		/* contextualize the tmpfs mount point mempolicy */
2605 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2606 		if (IS_ERR(new))
2607 			goto free_scratch; /* no valid nodemask intersection */
2608 
2609 		task_lock(current);
2610 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2611 		task_unlock(current);
2612 		if (ret)
2613 			goto put_new;
2614 
2615 		/* Create pseudo-vma that contains just the policy */
2616 		vma_init(&pvma, NULL);
2617 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2618 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2619 
2620 put_new:
2621 		mpol_put(new);			/* drop initial ref */
2622 free_scratch:
2623 		NODEMASK_SCRATCH_FREE(scratch);
2624 put_mpol:
2625 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2626 	}
2627 }
2628 
2629 int mpol_set_shared_policy(struct shared_policy *info,
2630 			struct vm_area_struct *vma, struct mempolicy *npol)
2631 {
2632 	int err;
2633 	struct sp_node *new = NULL;
2634 	unsigned long sz = vma_pages(vma);
2635 
2636 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2637 		 vma->vm_pgoff,
2638 		 sz, npol ? npol->mode : -1,
2639 		 npol ? npol->flags : -1,
2640 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2641 
2642 	if (npol) {
2643 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2644 		if (!new)
2645 			return -ENOMEM;
2646 	}
2647 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2648 	if (err && new)
2649 		sp_free(new);
2650 	return err;
2651 }
2652 
2653 /* Free a backing policy store on inode delete. */
2654 void mpol_free_shared_policy(struct shared_policy *p)
2655 {
2656 	struct sp_node *n;
2657 	struct rb_node *next;
2658 
2659 	if (!p->root.rb_node)
2660 		return;
2661 	write_lock(&p->lock);
2662 	next = rb_first(&p->root);
2663 	while (next) {
2664 		n = rb_entry(next, struct sp_node, nd);
2665 		next = rb_next(&n->nd);
2666 		sp_delete(p, n);
2667 	}
2668 	write_unlock(&p->lock);
2669 }
2670 
2671 #ifdef CONFIG_NUMA_BALANCING
2672 static int __initdata numabalancing_override;
2673 
2674 static void __init check_numabalancing_enable(void)
2675 {
2676 	bool numabalancing_default = false;
2677 
2678 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2679 		numabalancing_default = true;
2680 
2681 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2682 	if (numabalancing_override)
2683 		set_numabalancing_state(numabalancing_override == 1);
2684 
2685 	if (num_online_nodes() > 1 && !numabalancing_override) {
2686 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2687 			numabalancing_default ? "Enabling" : "Disabling");
2688 		set_numabalancing_state(numabalancing_default);
2689 	}
2690 }
2691 
2692 static int __init setup_numabalancing(char *str)
2693 {
2694 	int ret = 0;
2695 	if (!str)
2696 		goto out;
2697 
2698 	if (!strcmp(str, "enable")) {
2699 		numabalancing_override = 1;
2700 		ret = 1;
2701 	} else if (!strcmp(str, "disable")) {
2702 		numabalancing_override = -1;
2703 		ret = 1;
2704 	}
2705 out:
2706 	if (!ret)
2707 		pr_warn("Unable to parse numa_balancing=\n");
2708 
2709 	return ret;
2710 }
2711 __setup("numa_balancing=", setup_numabalancing);
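
/*
 * Editor's note: consumed as a kernel command-line parameter, e.g.
 * "numa_balancing=enable" or "numa_balancing=disable"; any other value
 * triggers the "Unable to parse" warning above.
 */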
2712 #else
2713 static inline void __init check_numabalancing_enable(void)
2714 {
2715 }
2716 #endif /* CONFIG_NUMA_BALANCING */
2717 
2718 /* assumes fs == KERNEL_DS */
2719 void __init numa_policy_init(void)
2720 {
2721 	nodemask_t interleave_nodes;
2722 	unsigned long largest = 0;
2723 	int nid, prefer = 0;
2724 
2725 	policy_cache = kmem_cache_create("numa_policy",
2726 					 sizeof(struct mempolicy),
2727 					 0, SLAB_PANIC, NULL);
2728 
2729 	sn_cache = kmem_cache_create("shared_policy_node",
2730 				     sizeof(struct sp_node),
2731 				     0, SLAB_PANIC, NULL);
2732 
2733 	for_each_node(nid) {
2734 		preferred_node_policy[nid] = (struct mempolicy) {
2735 			.refcnt = ATOMIC_INIT(1),
2736 			.mode = MPOL_PREFERRED,
2737 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2738 			.nodes = nodemask_of_node(nid),
2739 		};
2740 	}
2741 
2742 	/*
2743 	 * Set interleaving policy for system init. Interleaving is only
2744 	 * enabled across suitably sized nodes (default is >= 16MB); if they
2745 	 * are all smaller, fall back to the largest node.
2746 	 */
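	/*
	 * Editor's note on the threshold below: with 4KB pages
	 * (PAGE_SHIFT == 12), the 16 << 20 byte cut-off corresponds to a
	 * node with at least 4096 present pages.
	 */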
2747 	nodes_clear(interleave_nodes);
2748 	for_each_node_state(nid, N_MEMORY) {
2749 		unsigned long total_pages = node_present_pages(nid);
2750 
2751 		/* Preserve the largest node */
2752 		if (largest < total_pages) {
2753 			largest = total_pages;
2754 			prefer = nid;
2755 		}
2756 
2757 		/* Interleave this node? */
2758 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2759 			node_set(nid, interleave_nodes);
2760 	}
2761 
2762 	/* All too small, use the largest */
2763 	if (unlikely(nodes_empty(interleave_nodes)))
2764 		node_set(prefer, interleave_nodes);
2765 
2766 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2767 		pr_err("%s: interleaving failed\n", __func__);
2768 
2769 	check_numabalancing_enable();
2770 }
2771 
2772 /* Reset policy of current process to default */
2773 void numa_default_policy(void)
2774 {
2775 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2776 }
2777 
2778 /*
2779  * Parse and format mempolicy from/to strings
2780  */
2781 
2782 static const char * const policy_modes[] =
2783 {
2784 	[MPOL_DEFAULT]    = "default",
2785 	[MPOL_PREFERRED]  = "prefer",
2786 	[MPOL_BIND]       = "bind",
2787 	[MPOL_INTERLEAVE] = "interleave",
2788 	[MPOL_LOCAL]      = "local",
2789 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2790 };
2791 
2792 
2793 #ifdef CONFIG_TMPFS
2794 /**
2795  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2796  * @str:  string containing mempolicy to parse
2797  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2798  *
2799  * Format of input:
2800  *	<mode>[=<flags>][:<nodelist>]
2801  *
2802  * On success, returns 0, else 1
2803  */
2804 int mpol_parse_str(char *str, struct mempolicy **mpol)
2805 {
2806 	struct mempolicy *new = NULL;
2807 	unsigned short mode_flags;
2808 	nodemask_t nodes;
2809 	char *nodelist = strchr(str, ':');
2810 	char *flags = strchr(str, '=');
2811 	int err = 1, mode;
2812 
2813 	if (flags)
2814 		*flags++ = '\0';	/* terminate mode string */
2815 
2816 	if (nodelist) {
2817 		/* NUL-terminate mode or flags string */
2818 		*nodelist++ = '\0';
2819 		if (nodelist_parse(nodelist, nodes))
2820 			goto out;
2821 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2822 			goto out;
2823 	} else
2824 		nodes_clear(nodes);
2825 
2826 	mode = match_string(policy_modes, MPOL_MAX, str);
2827 	if (mode < 0)
2828 		goto out;
2829 
2830 	switch (mode) {
2831 	case MPOL_PREFERRED:
2832 		/*
2833 		 * Insist on a nodelist of exactly one node: we later use
2834 		 * first_node(nodes) to grab that single node, so the nodelist
2835 		 * (and hence nodes) must not be empty here.
2836 		 */
2837 		if (nodelist) {
2838 			char *rest = nodelist;
2839 			while (isdigit(*rest))
2840 				rest++;
2841 			if (*rest)
2842 				goto out;
2843 			if (nodes_empty(nodes))
2844 				goto out;
2845 		}
2846 		break;
2847 	case MPOL_INTERLEAVE:
2848 		/*
2849 		 * Default to online nodes with memory if no nodelist
2850 		 */
2851 		if (!nodelist)
2852 			nodes = node_states[N_MEMORY];
2853 		break;
2854 	case MPOL_LOCAL:
2855 		/*
2856 		 * Don't allow a nodelist;  mpol_new() checks flags
2857 		 */
2858 		if (nodelist)
2859 			goto out;
2860 		break;
2861 	case MPOL_DEFAULT:
2862 		/*
2863 		 * Insist on an empty nodelist
2864 		 */
2865 		if (!nodelist)
2866 			err = 0;
2867 		goto out;
2868 	case MPOL_PREFERRED_MANY:
2869 	case MPOL_BIND:
2870 		/*
2871 		 * Insist on a nodelist
2872 		 */
2873 		if (!nodelist)
2874 			goto out;
2875 	}
2876 
2877 	mode_flags = 0;
2878 	if (flags) {
2879 		/*
2880 		 * Currently, we only support two mutually exclusive
2881 		 * mode flags.
2882 		 */
2883 		if (!strcmp(flags, "static"))
2884 			mode_flags |= MPOL_F_STATIC_NODES;
2885 		else if (!strcmp(flags, "relative"))
2886 			mode_flags |= MPOL_F_RELATIVE_NODES;
2887 		else
2888 			goto out;
2889 	}
2890 
2891 	new = mpol_new(mode, mode_flags, &nodes);
2892 	if (IS_ERR(new))
2893 		goto out;
2894 
2895 	/*
2896 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2897 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2898 	 */
2899 	if (mode != MPOL_PREFERRED) {
2900 		new->nodes = nodes;
2901 	} else if (nodelist) {
2902 		nodes_clear(new->nodes);
2903 		node_set(first_node(nodes), new->nodes);
2904 	} else {
2905 		new->mode = MPOL_LOCAL;
2906 	}
2907 
2908 	/*
2909 	 * Save nodes for contextualization: this will be used to "clone"
2910 	 * the mempolicy in a specific context [cpuset] at a later time.
2911 	 */
2912 	new->w.user_nodemask = nodes;
2913 
2914 	err = 0;
2915 
2916 out:
2917 	/* Restore string for error message */
2918 	if (nodelist)
2919 		*--nodelist = ':';
2920 	if (flags)
2921 		*--flags = '=';
2922 	if (!err)
2923 		*mpol = new;
2924 	return err;
2925 }
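
/*
 * Example inputs accepted by the parser above (editor's illustration, as
 * seen in tmpfs "mpol=" mount options):
 *
 *	"interleave=static:0-3"	MPOL_INTERLEAVE over nodes 0-3, static nodes
 *	"bind:0,2"		MPOL_BIND restricted to nodes 0 and 2
 *	"prefer:1"		MPOL_PREFERRED with the single node 1
 *	"local"			MPOL_LOCAL, no nodelist allowed
 */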
2926 #endif /* CONFIG_TMPFS */
2927 
2928 /**
2929  * mpol_to_str - format a mempolicy structure for printing
2930  * @buffer:  to contain formatted mempolicy string
2931  * @maxlen:  length of @buffer
2932  * @pol:  pointer to mempolicy to be formatted
2933  *
2934  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2935  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2936  * longest flag, "relative", and to display at least a few node ids.
2937  */
2938 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2939 {
2940 	char *p = buffer;
2941 	nodemask_t nodes = NODE_MASK_NONE;
2942 	unsigned short mode = MPOL_DEFAULT;
2943 	unsigned short flags = 0;
2944 
2945 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2946 		mode = pol->mode;
2947 		flags = pol->flags;
2948 	}
2949 
2950 	switch (mode) {
2951 	case MPOL_DEFAULT:
2952 	case MPOL_LOCAL:
2953 		break;
2954 	case MPOL_PREFERRED:
2955 	case MPOL_PREFERRED_MANY:
2956 	case MPOL_BIND:
2957 	case MPOL_INTERLEAVE:
2958 		nodes = pol->nodes;
2959 		break;
2960 	default:
2961 		WARN_ON_ONCE(1);
2962 		snprintf(p, maxlen, "unknown");
2963 		return;
2964 	}
2965 
2966 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2967 
2968 	if (flags & MPOL_MODE_FLAGS) {
2969 		p += snprintf(p, buffer + maxlen - p, "=");
2970 
2971 		/*
2972 		 * Currently, the only defined flags are mutually exclusive
2973 		 */
2974 		if (flags & MPOL_F_STATIC_NODES)
2975 			p += snprintf(p, buffer + maxlen - p, "static");
2976 		else if (flags & MPOL_F_RELATIVE_NODES)
2977 			p += snprintf(p, buffer + maxlen - p, "relative");
2978 	}
2979 
2980 	if (!nodes_empty(nodes))
2981 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2982 			       nodemask_pr_args(&nodes));
2983 }
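
/*
 * Example outputs (editor's illustration): "default", "local",
 * "interleave:0-3", "bind=static:0,2" -- the mode name first, then
 * "=<flag>" when a mode flag is set, then ":<nodelist>" when the policy
 * carries a non-empty nodemask.
 */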
2984 
2985 bool numa_demotion_enabled = false;
2986 
2987 #ifdef CONFIG_SYSFS
2988 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
2989 					  struct kobj_attribute *attr, char *buf)
2990 {
2991 	return sysfs_emit(buf, "%s\n",
2992 			  numa_demotion_enabled ? "true" : "false");
2993 }
2994 
2995 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
2996 					   struct kobj_attribute *attr,
2997 					   const char *buf, size_t count)
2998 {
2999 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3000 		numa_demotion_enabled = true;
3001 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3002 		numa_demotion_enabled = false;
3003 	else
3004 		return -EINVAL;
3005 
3006 	return count;
3007 }
3008 
3009 static struct kobj_attribute numa_demotion_enabled_attr =
3010 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3011 	       numa_demotion_enabled_store);
3012 
3013 static struct attribute *numa_attrs[] = {
3014 	&numa_demotion_enabled_attr.attr,
3015 	NULL,
3016 };
3017 
3018 static const struct attribute_group numa_attr_group = {
3019 	.attrs = numa_attrs,
3020 };
3021 
3022 static int __init numa_init_sysfs(void)
3023 {
3024 	int err;
3025 	struct kobject *numa_kobj;
3026 
3027 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
3028 	if (!numa_kobj) {
3029 		pr_err("failed to create numa kobject\n");
3030 		return -ENOMEM;
3031 	}
3032 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
3033 	if (err) {
3034 		pr_err("failed to register numa group\n");
3035 		goto delete_obj;
3036 	}
3037 	return 0;
3038 
3039 delete_obj:
3040 	kobject_put(numa_kobj);
3041 	return err;
3042 }
3043 subsys_initcall(numa_init_sysfs);
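
/*
 * Editor's note: the attribute group registered above hangs off the mm
 * kobject, so the knob typically appears as
 * /sys/kernel/mm/numa/demotion_enabled and accepts "true"/"1" or
 * "false"/"0", per the store handler above.
 */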
3044 #endif
3045