xref: /openbmc/linux/mm/mempolicy.c (revision c00b6b9610991c042ff4c3153daaa3ea8522c210)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Simple NUMA memory policy for the Linux kernel.
4  *
5  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support multiple memory policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For the process policy, a per-process
20  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                and proceeding to the last. It would be better if bind truly
26  *                restricted the allocation to the specified memory nodes.
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case, NUMA_NO_NODE here means do the allocation
30  *                on the node of the local CPU. This is normally identical to
31  *                default, but useful to set in a VMA when you have a
32  *                non-default process policy.
33  *
34  * preferred many Try a set of nodes first before normal fallback. This is
35  *                similar to preferred without the special case.
36  *
37  * default        Allocate on the local node first, or when on a VMA
38  *                use the process policy. This is what Linux always did
39  *		  in a NUMA aware kernel and still does by, ahem, default.
40  *
41  * The process policy is applied for most non interrupt memory allocations
42  * in that process' context. Interrupts ignore the policies and always
43  * try to allocate on the local CPU. The VMA policy is only applied for memory
44  * allocations for a VMA in the VM.
45  *
46  * Currently there are a few corner cases in swapping where the policy
47  * is not applied, but the majority should be handled. When process policy
48  * is used it is not remembered over swap outs/swap ins.
49  *
50  * Only the highest zone in the zone hierarchy gets policied. Allocations
51  * requesting a lower zone just use default policy. This implies that
52  * on systems with highmem, kernel lowmem allocations don't get policied.
53  * Same with GFP_DMA allocations.
54  *
55  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
56  * all users and remembered even when nobody has the memory mapped.
57  */
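/*
 * An illustrative userspace sketch (arbitrary node numbers and sizes, linked
 * against libnuma for the syscall wrappers) of how the policies described
 * above are typically requested; this is a hedged example, not part of the
 * kernel sources:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	void example(void)
 *	{
 *		unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *		unsigned long node0 = 1UL << 0;
 *		void *buf;
 *
 *		// Process policy: interleave this task's future allocations
 *		// over nodes 0 and 1.
 *		set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01));
 *
 *		// VMA policy: bind one mapping to node 0 only; this overrides
 *		// the process policy for faults in that range.
 *		buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		mbind(buf, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *	}
 */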
58 
59 /* Notebook:
60    fix mmap readahead to honour policy and enable policy for any page cache
61    object
62    statistics for bigpages
63    global policy for page cache? currently it uses process policy. Requires
64    first item above.
65    handle mremap for shared memory (currently ignored for the policy)
66    grows down?
67    make bind policy root only? It can trigger oom much faster and the
68    kernel does not always handle that gracefully.
69 */
70 
71 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72 
73 #include <linux/mempolicy.h>
74 #include <linux/pagewalk.h>
75 #include <linux/highmem.h>
76 #include <linux/hugetlb.h>
77 #include <linux/kernel.h>
78 #include <linux/sched.h>
79 #include <linux/sched/mm.h>
80 #include <linux/sched/numa_balancing.h>
81 #include <linux/sched/task.h>
82 #include <linux/nodemask.h>
83 #include <linux/cpuset.h>
84 #include <linux/slab.h>
85 #include <linux/string.h>
86 #include <linux/export.h>
87 #include <linux/nsproxy.h>
88 #include <linux/interrupt.h>
89 #include <linux/init.h>
90 #include <linux/compat.h>
91 #include <linux/ptrace.h>
92 #include <linux/swap.h>
93 #include <linux/seq_file.h>
94 #include <linux/proc_fs.h>
95 #include <linux/migrate.h>
96 #include <linux/ksm.h>
97 #include <linux/rmap.h>
98 #include <linux/security.h>
99 #include <linux/syscalls.h>
100 #include <linux/ctype.h>
101 #include <linux/mm_inline.h>
102 #include <linux/mmu_notifier.h>
103 #include <linux/printk.h>
104 #include <linux/swapops.h>
105 
106 #include <asm/tlbflush.h>
107 #include <linux/uaccess.h>
108 
109 #include "internal.h"
110 
111 /* Internal flags */
112 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
113 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
114 
115 static struct kmem_cache *policy_cache;
116 static struct kmem_cache *sn_cache;
117 
118 /* Highest zone. A specific allocation for a zone below that is not
119    policied. */
120 enum zone_type policy_zone = 0;
121 
122 /*
123  * run-time system-wide default policy => local allocation
124  */
125 static struct mempolicy default_policy = {
126 	.refcnt = ATOMIC_INIT(1), /* never free it */
127 	.mode = MPOL_LOCAL,
128 };
129 
130 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
131 
132 /**
133  * numa_map_to_online_node - Find closest online node
134  * @node: Node id to start the search
135  *
136  * Look up the closest node by distance if @node is not online.
137  */
138 int numa_map_to_online_node(int node)
139 {
140 	int min_dist = INT_MAX, dist, n, min_node;
141 
142 	if (node == NUMA_NO_NODE || node_online(node))
143 		return node;
144 
145 	min_node = node;
146 	for_each_online_node(n) {
147 		dist = node_distance(node, n);
148 		if (dist < min_dist) {
149 			min_dist = dist;
150 			min_node = n;
151 		}
152 	}
153 
154 	return min_node;
155 }
156 EXPORT_SYMBOL_GPL(numa_map_to_online_node);
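/*
 * For example (illustrative caller and variable name), code that was handed a
 * possibly-offline or memory-less target node can do:
 *
 *	int nid = numa_map_to_online_node(requested_nid);
 *
 * and will get back either the requested node itself (if online) or the
 * online node with the smallest node_distance() to it.
 */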
157 
158 struct mempolicy *get_task_policy(struct task_struct *p)
159 {
160 	struct mempolicy *pol = p->mempolicy;
161 	int node;
162 
163 	if (pol)
164 		return pol;
165 
166 	node = numa_node_id();
167 	if (node != NUMA_NO_NODE) {
168 		pol = &preferred_node_policy[node];
169 		/* preferred_node_policy is not initialised early in boot */
170 		if (pol->mode)
171 			return pol;
172 	}
173 
174 	return &default_policy;
175 }
176 
177 static const struct mempolicy_operations {
178 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
179 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
180 } mpol_ops[MPOL_MAX];
181 
182 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
183 {
184 	return pol->flags & MPOL_MODE_FLAGS;
185 }
186 
187 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
188 				   const nodemask_t *rel)
189 {
190 	nodemask_t tmp;
191 	nodes_fold(tmp, *orig, nodes_weight(*rel));
192 	nodes_onto(*ret, tmp, *rel);
193 }
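/*
 * Worked example (illustrative node numbers) for MPOL_F_RELATIVE_NODES: a
 * user nodemask of {0,2} relative to an allowed set of {4,5,6} is first
 * folded modulo the weight of the allowed set (3), giving {0,2}, and then
 * mapped onto the set bits of the allowed set, yielding {4,6}.
 */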
194 
195 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
196 {
197 	if (nodes_empty(*nodes))
198 		return -EINVAL;
199 	pol->nodes = *nodes;
200 	return 0;
201 }
202 
203 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
204 {
205 	if (nodes_empty(*nodes))
206 		return -EINVAL;
207 
208 	nodes_clear(pol->nodes);
209 	node_set(first_node(*nodes), pol->nodes);
210 	return 0;
211 }
212 
213 /*
214  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
215  * any, for the new policy.  mpol_new() has already validated the nodes
216  * parameter with respect to the policy mode and flags.
217  *
218  * Must be called holding task's alloc_lock to protect task's mems_allowed
219  * and mempolicy.  May also be called holding the mmap_lock for write.
220  */
221 static int mpol_set_nodemask(struct mempolicy *pol,
222 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
223 {
224 	int ret;
225 
226 	/*
227 	 * Default (pol==NULL) and local memory policies are not
228 	 * subject to any remapping. They also do not need any special
229 	 * constructor.
230 	 */
231 	if (!pol || pol->mode == MPOL_LOCAL)
232 		return 0;
233 
234 	/* Check N_MEMORY */
235 	nodes_and(nsc->mask1,
236 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
237 
238 	VM_BUG_ON(!nodes);
239 
240 	if (pol->flags & MPOL_F_RELATIVE_NODES)
241 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
242 	else
243 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
244 
245 	if (mpol_store_user_nodemask(pol))
246 		pol->w.user_nodemask = *nodes;
247 	else
248 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
249 
250 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
251 	return ret;
252 }
253 
254 /*
255  * This function just creates a new policy, does some checks and simple
256  * initialization. You must invoke mpol_set_nodemask() to set nodes.
257  */
258 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259 				  nodemask_t *nodes)
260 {
261 	struct mempolicy *policy;
262 
263 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
264 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265 
266 	if (mode == MPOL_DEFAULT) {
267 		if (nodes && !nodes_empty(*nodes))
268 			return ERR_PTR(-EINVAL);
269 		return NULL;
270 	}
271 	VM_BUG_ON(!nodes);
272 
273 	/*
274 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
275 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
276 	 * All other modes require a valid pointer to a non-empty nodemask.
277 	 */
278 	if (mode == MPOL_PREFERRED) {
279 		if (nodes_empty(*nodes)) {
280 			if (((flags & MPOL_F_STATIC_NODES) ||
281 			     (flags & MPOL_F_RELATIVE_NODES)))
282 				return ERR_PTR(-EINVAL);
283 
284 			mode = MPOL_LOCAL;
285 		}
286 	} else if (mode == MPOL_LOCAL) {
287 		if (!nodes_empty(*nodes) ||
288 		    (flags & MPOL_F_STATIC_NODES) ||
289 		    (flags & MPOL_F_RELATIVE_NODES))
290 			return ERR_PTR(-EINVAL);
291 	} else if (nodes_empty(*nodes))
292 		return ERR_PTR(-EINVAL);
293 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
294 	if (!policy)
295 		return ERR_PTR(-ENOMEM);
296 	atomic_set(&policy->refcnt, 1);
297 	policy->mode = mode;
298 	policy->flags = flags;
299 
300 	return policy;
301 }
302 
303 /* Slow path of a mpol destructor. */
304 void __mpol_put(struct mempolicy *p)
305 {
306 	if (!atomic_dec_and_test(&p->refcnt))
307 		return;
308 	kmem_cache_free(policy_cache, p);
309 }
310 
311 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
312 {
313 }
314 
315 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
316 {
317 	nodemask_t tmp;
318 
319 	if (pol->flags & MPOL_F_STATIC_NODES)
320 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
323 	else {
324 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
325 								*nodes);
326 		pol->w.cpuset_mems_allowed = *nodes;
327 	}
328 
329 	if (nodes_empty(tmp))
330 		tmp = *nodes;
331 
332 	pol->nodes = tmp;
333 }
334 
335 static void mpol_rebind_preferred(struct mempolicy *pol,
336 						const nodemask_t *nodes)
337 {
338 	pol->w.cpuset_mems_allowed = *nodes;
339 }
340 
341 /*
342  * mpol_rebind_policy - Migrate a policy to a different set of nodes
343  *
344  * Per-vma policies are protected by mmap_lock. Allocations using per-task
345  * policies are protected by task->mems_allowed_seq to prevent a premature
346  * OOM/allocation failure due to parallel nodemask modification.
347  */
348 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
349 {
350 	if (!pol)
351 		return;
352 	if (!mpol_store_user_nodemask(pol) &&
353 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
354 		return;
355 
356 	mpol_ops[pol->mode].rebind(pol, newmask);
357 }
358 
359 /*
360  * Wrapper for mpol_rebind_policy() that just requires task
361  * pointer, and updates task mempolicy.
362  *
363  * Called with task's alloc_lock held.
364  */
365 
366 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
367 {
368 	mpol_rebind_policy(tsk->mempolicy, new);
369 }
370 
371 /*
372  * Rebind each vma in mm to new nodemask.
373  *
374  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
375  */
376 
377 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
378 {
379 	struct vm_area_struct *vma;
380 
381 	mmap_write_lock(mm);
382 	for (vma = mm->mmap; vma; vma = vma->vm_next)
383 		mpol_rebind_policy(vma->vm_policy, new);
384 	mmap_write_unlock(mm);
385 }
386 
387 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
388 	[MPOL_DEFAULT] = {
389 		.rebind = mpol_rebind_default,
390 	},
391 	[MPOL_INTERLEAVE] = {
392 		.create = mpol_new_nodemask,
393 		.rebind = mpol_rebind_nodemask,
394 	},
395 	[MPOL_PREFERRED] = {
396 		.create = mpol_new_preferred,
397 		.rebind = mpol_rebind_preferred,
398 	},
399 	[MPOL_BIND] = {
400 		.create = mpol_new_nodemask,
401 		.rebind = mpol_rebind_nodemask,
402 	},
403 	[MPOL_LOCAL] = {
404 		.rebind = mpol_rebind_default,
405 	},
406 	[MPOL_PREFERRED_MANY] = {
407 		.create = mpol_new_nodemask,
408 		.rebind = mpol_rebind_preferred,
409 	},
410 };
411 
412 static int migrate_page_add(struct page *page, struct list_head *pagelist,
413 				unsigned long flags);
414 
415 struct queue_pages {
416 	struct list_head *pagelist;
417 	unsigned long flags;
418 	nodemask_t *nmask;
419 	unsigned long start;
420 	unsigned long end;
421 	struct vm_area_struct *first;
422 };
423 
424 /*
425  * Check if the page's nid is in qp->nmask.
426  *
427  * If MPOL_MF_INVERT is set in qp->flags, check instead if the nid is
428  * NOT in qp->nmask.
429  */
430 static inline bool queue_pages_required(struct page *page,
431 					struct queue_pages *qp)
432 {
433 	int nid = page_to_nid(page);
434 	unsigned long flags = qp->flags;
435 
436 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
437 }
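/*
 * Note: do_mbind() passes MPOL_MF_INVERT, so during an mbind() walk this
 * returns true only for pages that are *not* on one of the requested nodes,
 * i.e. exactly the misplaced pages that may need to be migrated.
 */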
438 
439 /*
440  * queue_pages_pmd() has four possible return values:
441  * 0 - pages are placed on the right node or queued successfully, or
442  *     a special page was encountered, i.e. the huge zero page.
443  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
444  *     specified.
445  * 2 - THP was split.
446  * -EIO - the PMD is a migration entry, or only MPOL_MF_STRICT was specified
447  *        and an existing page was already on a node that does not follow
448  *        the policy.
449  */
450 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451 				unsigned long end, struct mm_walk *walk)
452 	__releases(ptl)
453 {
454 	int ret = 0;
455 	struct page *page;
456 	struct queue_pages *qp = walk->private;
457 	unsigned long flags;
458 
459 	if (unlikely(is_pmd_migration_entry(*pmd))) {
460 		ret = -EIO;
461 		goto unlock;
462 	}
463 	page = pmd_page(*pmd);
464 	if (is_huge_zero_page(page)) {
465 		spin_unlock(ptl);
466 		walk->action = ACTION_CONTINUE;
467 		goto out;
468 	}
469 	if (!queue_pages_required(page, qp))
470 		goto unlock;
471 
472 	flags = qp->flags;
473 	/* go to thp migration */
474 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
475 		if (!vma_migratable(walk->vma) ||
476 		    migrate_page_add(page, qp->pagelist, flags)) {
477 			ret = 1;
478 			goto unlock;
479 		}
480 	} else
481 		ret = -EIO;
482 unlock:
483 	spin_unlock(ptl);
484 out:
485 	return ret;
486 }
487 
488 /*
489  * Scan through the pages, checking if they satisfy certain conditions,
490  * and move them to the pagelist if they do.
491  *
492  * queue_pages_pte_range() has three possible return values:
493  * 0 - pages are placed on the right node or queued successfully, or
494  *     a special page was encountered, i.e. the zero page.
495  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
496  *     specified.
497  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
498  *        on a node that does not follow the policy.
499  */
500 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
501 			unsigned long end, struct mm_walk *walk)
502 {
503 	struct vm_area_struct *vma = walk->vma;
504 	struct page *page;
505 	struct queue_pages *qp = walk->private;
506 	unsigned long flags = qp->flags;
507 	int ret;
508 	bool has_unmovable = false;
509 	pte_t *pte, *mapped_pte;
510 	spinlock_t *ptl;
511 
512 	ptl = pmd_trans_huge_lock(pmd, vma);
513 	if (ptl) {
514 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
515 		if (ret != 2)
516 			return ret;
517 	}
518 	/* THP was split, fall through to pte walk */
519 
520 	if (pmd_trans_unstable(pmd))
521 		return 0;
522 
523 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
524 	for (; addr != end; pte++, addr += PAGE_SIZE) {
525 		if (!pte_present(*pte))
526 			continue;
527 		page = vm_normal_page(vma, addr, *pte);
528 		if (!page)
529 			continue;
530 		/*
531 		 * vm_normal_page() filters out zero pages, but there might
532 		 * still be PageReserved pages to skip, perhaps in a VDSO.
533 		 */
534 		if (PageReserved(page))
535 			continue;
536 		if (!queue_pages_required(page, qp))
537 			continue;
538 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
539 			/* MPOL_MF_STRICT must be specified if we get here */
540 			if (!vma_migratable(vma)) {
541 				has_unmovable = true;
542 				break;
543 			}
544 
545 			/*
546 			 * Do not abort immediately since there may be
547 			 * pages temporarily off the LRU in the range.  We
548 			 * still need to migrate the other LRU pages.
549 			 */
550 			if (migrate_page_add(page, qp->pagelist, flags))
551 				has_unmovable = true;
552 		} else
553 			break;
554 	}
555 	pte_unmap_unlock(mapped_pte, ptl);
556 	cond_resched();
557 
558 	if (has_unmovable)
559 		return 1;
560 
561 	return addr != end ? -EIO : 0;
562 }
563 
564 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
565 			       unsigned long addr, unsigned long end,
566 			       struct mm_walk *walk)
567 {
568 	int ret = 0;
569 #ifdef CONFIG_HUGETLB_PAGE
570 	struct queue_pages *qp = walk->private;
571 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
572 	struct page *page;
573 	spinlock_t *ptl;
574 	pte_t entry;
575 
576 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
577 	entry = huge_ptep_get(pte);
578 	if (!pte_present(entry))
579 		goto unlock;
580 	page = pte_page(entry);
581 	if (!queue_pages_required(page, qp))
582 		goto unlock;
583 
584 	if (flags == MPOL_MF_STRICT) {
585 		/*
586 		 * STRICT alone means only detecting misplaced pages and there
587 		 * is no need to check any further vmas.
588 		 */
589 		ret = -EIO;
590 		goto unlock;
591 	}
592 
593 	if (!vma_migratable(walk->vma)) {
594 		/*
595 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
596 		 * stopped walking the current vma.
597 		 * Detect the misplaced page, but allow migrating pages which
598 		 * have already been queued.
599 		 */
600 		ret = 1;
601 		goto unlock;
602 	}
603 
604 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
605 	if (flags & (MPOL_MF_MOVE_ALL) ||
606 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
607 		if (!isolate_huge_page(page, qp->pagelist) &&
608 			(flags & MPOL_MF_STRICT))
609 			/*
610 			 * Failed to isolate the page, but allow migrating
611 			 * pages which have already been queued.
612 			 */
613 			ret = 1;
614 	}
615 unlock:
616 	spin_unlock(ptl);
617 #else
618 	BUG();
619 #endif
620 	return ret;
621 }
622 
623 #ifdef CONFIG_NUMA_BALANCING
624 /*
625  * This is used to mark a range of virtual addresses as inaccessible.
626  * These are later cleared by a NUMA hinting fault. Depending on these
627  * faults, pages may be migrated for better NUMA placement.
628  *
629  * This is assuming that NUMA faults are handled using PROT_NONE. If
630  * an architecture makes a different choice, it will need further
631  * changes to the core.
632  */
633 unsigned long change_prot_numa(struct vm_area_struct *vma,
634 			unsigned long addr, unsigned long end)
635 {
636 	int nr_updated;
637 
638 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
639 	if (nr_updated)
640 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
641 
642 	return nr_updated;
643 }
644 #else
645 static unsigned long change_prot_numa(struct vm_area_struct *vma,
646 			unsigned long addr, unsigned long end)
647 {
648 	return 0;
649 }
650 #endif /* CONFIG_NUMA_BALANCING */
651 
652 static int queue_pages_test_walk(unsigned long start, unsigned long end,
653 				struct mm_walk *walk)
654 {
655 	struct vm_area_struct *vma = walk->vma;
656 	struct queue_pages *qp = walk->private;
657 	unsigned long endvma = vma->vm_end;
658 	unsigned long flags = qp->flags;
659 
660 	/* range check first */
661 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
662 
663 	if (!qp->first) {
664 		qp->first = vma;
665 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
666 			(qp->start < vma->vm_start))
667 			/* hole at head side of range */
668 			return -EFAULT;
669 	}
670 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
671 		((vma->vm_end < qp->end) &&
672 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
673 		/* hole at middle or tail of range */
674 		return -EFAULT;
675 
676 	/*
677 	 * Need to check MPOL_MF_STRICT to return -EIO if possible,
678 	 * regardless of vma_migratable().
679 	 */
680 	if (!vma_migratable(vma) &&
681 	    !(flags & MPOL_MF_STRICT))
682 		return 1;
683 
684 	if (endvma > end)
685 		endvma = end;
686 
687 	if (flags & MPOL_MF_LAZY) {
688 		/* Similar to task_numa_work, skip inaccessible VMAs */
689 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
690 			!(vma->vm_flags & VM_MIXEDMAP))
691 			change_prot_numa(vma, start, endvma);
692 		return 1;
693 	}
694 
695 	/* queue pages from current vma */
696 	if (flags & MPOL_MF_VALID)
697 		return 0;
698 	return 1;
699 }
700 
701 static const struct mm_walk_ops queue_pages_walk_ops = {
702 	.hugetlb_entry		= queue_pages_hugetlb,
703 	.pmd_entry		= queue_pages_pte_range,
704 	.test_walk		= queue_pages_test_walk,
705 };
706 
707 /*
708  * Walk through page tables and collect pages to be migrated.
709  *
710  * If pages found in a given range are on a set of nodes (determined by
711  * @nodes and @flags,) it's isolated and queued to the pagelist which is
712  * passed via @private.
713  *
714  * queue_pages_range() has three possible return values:
715  * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
716  *     specified.
717  * 0 - queue pages successfully or no misplaced page.
718  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
719  *         memory range specified by nodemask and maxnode points outside
720  *         your accessible address space (-EFAULT)
721  */
722 static int
723 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
724 		nodemask_t *nodes, unsigned long flags,
725 		struct list_head *pagelist)
726 {
727 	int err;
728 	struct queue_pages qp = {
729 		.pagelist = pagelist,
730 		.flags = flags,
731 		.nmask = nodes,
732 		.start = start,
733 		.end = end,
734 		.first = NULL,
735 	};
736 
737 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
738 
739 	if (!qp.first)
740 		/* whole range in hole */
741 		err = -EFAULT;
742 
743 	return err;
744 }
745 
746 /*
747  * Apply policy to a single VMA
748  * This must be called with the mmap_lock held for writing.
749  */
750 static int vma_replace_policy(struct vm_area_struct *vma,
751 						struct mempolicy *pol)
752 {
753 	int err;
754 	struct mempolicy *old;
755 	struct mempolicy *new;
756 
757 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
758 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
759 		 vma->vm_ops, vma->vm_file,
760 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
761 
762 	new = mpol_dup(pol);
763 	if (IS_ERR(new))
764 		return PTR_ERR(new);
765 
766 	if (vma->vm_ops && vma->vm_ops->set_policy) {
767 		err = vma->vm_ops->set_policy(vma, new);
768 		if (err)
769 			goto err_out;
770 	}
771 
772 	old = vma->vm_policy;
773 	vma->vm_policy = new; /* protected by mmap_lock */
774 	mpol_put(old);
775 
776 	return 0;
777  err_out:
778 	mpol_put(new);
779 	return err;
780 }
781 
782 /* Step 2: apply policy to a range and do splits. */
783 static int mbind_range(struct mm_struct *mm, unsigned long start,
784 		       unsigned long end, struct mempolicy *new_pol)
785 {
786 	struct vm_area_struct *next;
787 	struct vm_area_struct *prev;
788 	struct vm_area_struct *vma;
789 	int err = 0;
790 	pgoff_t pgoff;
791 	unsigned long vmstart;
792 	unsigned long vmend;
793 
794 	vma = find_vma(mm, start);
795 	VM_BUG_ON(!vma);
796 
797 	prev = vma->vm_prev;
798 	if (start > vma->vm_start)
799 		prev = vma;
800 
801 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
802 		next = vma->vm_next;
803 		vmstart = max(start, vma->vm_start);
804 		vmend   = min(end, vma->vm_end);
805 
806 		if (mpol_equal(vma_policy(vma), new_pol))
807 			continue;
808 
809 		pgoff = vma->vm_pgoff +
810 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
811 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
812 				 vma->anon_vma, vma->vm_file, pgoff,
813 				 new_pol, vma->vm_userfaultfd_ctx);
814 		if (prev) {
815 			vma = prev;
816 			next = vma->vm_next;
817 			if (mpol_equal(vma_policy(vma), new_pol))
818 				continue;
819 			/* vma_merge() joined vma && vma->next, case 8 */
820 			goto replace;
821 		}
822 		if (vma->vm_start != vmstart) {
823 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
824 			if (err)
825 				goto out;
826 		}
827 		if (vma->vm_end != vmend) {
828 			err = split_vma(vma->vm_mm, vma, vmend, 0);
829 			if (err)
830 				goto out;
831 		}
832  replace:
833 		err = vma_replace_policy(vma, new_pol);
834 		if (err)
835 			goto out;
836 	}
837 
838  out:
839 	return err;
840 }
841 
842 /* Set the process memory policy */
843 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
844 			     nodemask_t *nodes)
845 {
846 	struct mempolicy *new, *old;
847 	NODEMASK_SCRATCH(scratch);
848 	int ret;
849 
850 	if (!scratch)
851 		return -ENOMEM;
852 
853 	new = mpol_new(mode, flags, nodes);
854 	if (IS_ERR(new)) {
855 		ret = PTR_ERR(new);
856 		goto out;
857 	}
858 
859 	ret = mpol_set_nodemask(new, nodes, scratch);
860 	if (ret) {
861 		mpol_put(new);
862 		goto out;
863 	}
864 	task_lock(current);
865 	old = current->mempolicy;
866 	current->mempolicy = new;
867 	if (new && new->mode == MPOL_INTERLEAVE)
868 		current->il_prev = MAX_NUMNODES-1;
869 	task_unlock(current);
870 	mpol_put(old);
871 	ret = 0;
872 out:
873 	NODEMASK_SCRATCH_FREE(scratch);
874 	return ret;
875 }
876 
877 /*
878  * Return nodemask for policy for get_mempolicy() query
879  *
880  * Called with task's alloc_lock held
881  */
882 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
883 {
884 	nodes_clear(*nodes);
885 	if (p == &default_policy)
886 		return;
887 
888 	switch (p->mode) {
889 	case MPOL_BIND:
890 	case MPOL_INTERLEAVE:
891 	case MPOL_PREFERRED:
892 	case MPOL_PREFERRED_MANY:
893 		*nodes = p->nodes;
894 		break;
895 	case MPOL_LOCAL:
896 		/* return empty node mask for local allocation */
897 		break;
898 	default:
899 		BUG();
900 	}
901 }
902 
903 static int lookup_node(struct mm_struct *mm, unsigned long addr)
904 {
905 	struct page *p = NULL;
906 	int err;
907 
908 	int locked = 1;
909 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
910 	if (err > 0) {
911 		err = page_to_nid(p);
912 		put_page(p);
913 	}
914 	if (locked)
915 		mmap_read_unlock(mm);
916 	return err;
917 }
918 
919 /* Retrieve NUMA policy */
920 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
921 			     unsigned long addr, unsigned long flags)
922 {
923 	int err;
924 	struct mm_struct *mm = current->mm;
925 	struct vm_area_struct *vma = NULL;
926 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
927 
928 	if (flags &
929 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
930 		return -EINVAL;
931 
932 	if (flags & MPOL_F_MEMS_ALLOWED) {
933 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
934 			return -EINVAL;
935 		*policy = 0;	/* just so it's initialized */
936 		task_lock(current);
937 		*nmask  = cpuset_current_mems_allowed;
938 		task_unlock(current);
939 		return 0;
940 	}
941 
942 	if (flags & MPOL_F_ADDR) {
943 		/*
944 		 * Do NOT fall back to task policy if the
945 		 * vma/shared policy at addr is NULL.  We
946 		 * want to return MPOL_DEFAULT in this case.
947 		 */
948 		mmap_read_lock(mm);
949 		vma = vma_lookup(mm, addr);
950 		if (!vma) {
951 			mmap_read_unlock(mm);
952 			return -EFAULT;
953 		}
954 		if (vma->vm_ops && vma->vm_ops->get_policy)
955 			pol = vma->vm_ops->get_policy(vma, addr);
956 		else
957 			pol = vma->vm_policy;
958 	} else if (addr)
959 		return -EINVAL;
960 
961 	if (!pol)
962 		pol = &default_policy;	/* indicates default behavior */
963 
964 	if (flags & MPOL_F_NODE) {
965 		if (flags & MPOL_F_ADDR) {
966 			/*
967 			 * Take a refcount on the mpol, lookup_node()
968 			 * will drop the mmap_lock, so after calling
969 			 * lookup_node() only "pol" remains valid, "vma"
970 			 * is stale.
971 			 */
972 			pol_refcount = pol;
973 			vma = NULL;
974 			mpol_get(pol);
975 			err = lookup_node(mm, addr);
976 			if (err < 0)
977 				goto out;
978 			*policy = err;
979 		} else if (pol == current->mempolicy &&
980 				pol->mode == MPOL_INTERLEAVE) {
981 			*policy = next_node_in(current->il_prev, pol->nodes);
982 		} else {
983 			err = -EINVAL;
984 			goto out;
985 		}
986 	} else {
987 		*policy = pol == &default_policy ? MPOL_DEFAULT :
988 						pol->mode;
989 		/*
990 		 * Internal mempolicy flags must be masked off before exposing
991 		 * the policy to userspace.
992 		 */
993 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
994 	}
995 
996 	err = 0;
997 	if (nmask) {
998 		if (mpol_store_user_nodemask(pol)) {
999 			*nmask = pol->w.user_nodemask;
1000 		} else {
1001 			task_lock(current);
1002 			get_policy_nodemask(pol, nmask);
1003 			task_unlock(current);
1004 		}
1005 	}
1006 
1007  out:
1008 	mpol_cond_put(pol);
1009 	if (vma)
1010 		mmap_read_unlock(mm);
1011 	if (pol_refcount)
1012 		mpol_put(pol_refcount);
1013 	return err;
1014 }
1015 
1016 #ifdef CONFIG_MIGRATION
1017 /*
1018  * page migration, thp tail pages can be passed.
1019  */
1020 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1021 				unsigned long flags)
1022 {
1023 	struct page *head = compound_head(page);
1024 	/*
1025 	 * Avoid migrating a page that is shared with others.
1026 	 */
1027 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1028 		if (!isolate_lru_page(head)) {
1029 			list_add_tail(&head->lru, pagelist);
1030 			mod_node_page_state(page_pgdat(head),
1031 				NR_ISOLATED_ANON + page_is_file_lru(head),
1032 				thp_nr_pages(head));
1033 		} else if (flags & MPOL_MF_STRICT) {
1034 			/*
1035 			 * Non-movable page may reach here.  And, there may be
1036 			 * temporary off LRU pages or non-LRU movable pages.
1037 			 * Treat them as unmovable pages since they can't be
1038 			 * isolated, so they can't be moved at the moment.  It
1039 			 * should return -EIO for this case too.
1040 			 */
1041 			return -EIO;
1042 		}
1043 	}
1044 
1045 	return 0;
1046 }
1047 
1048 /*
1049  * Migrate pages from one node to a target node.
1050  * Returns error or the number of pages not migrated.
1051  */
1052 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1053 			   int flags)
1054 {
1055 	nodemask_t nmask;
1056 	LIST_HEAD(pagelist);
1057 	int err = 0;
1058 	struct migration_target_control mtc = {
1059 		.nid = dest,
1060 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1061 	};
1062 
1063 	nodes_clear(nmask);
1064 	node_set(source, nmask);
1065 
1066 	/*
1067 	 * This does not "check" the range but isolates all pages that
1068 	 * need migration.  Between passing in the full user address
1069 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1070 	 */
1071 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1072 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1073 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1074 
1075 	if (!list_empty(&pagelist)) {
1076 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1077 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1078 		if (err)
1079 			putback_movable_pages(&pagelist);
1080 	}
1081 
1082 	return err;
1083 }
1084 
1085 /*
1086  * Move pages between the two nodesets so as to preserve the physical
1087  * layout as much as possible.
1088  *
1089  * Returns the number of pages that could not be moved.
1090  */
1091 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1092 		     const nodemask_t *to, int flags)
1093 {
1094 	int busy = 0;
1095 	int err = 0;
1096 	nodemask_t tmp;
1097 
1098 	lru_cache_disable();
1099 
1100 	mmap_read_lock(mm);
1101 
1102 	/*
1103 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1104 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1105 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1106 	 * The pair of nodemasks 'to' and 'from' define the map.
1107 	 *
1108 	 * If no pair of bits is found that way, fall back to picking some
1109 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1110 	 * 'source' and 'dest' bits are the same, this represents a node
1111 	 * that will be migrating to itself, so no pages need move.
1112 	 *
1113 	 * If no bits are left in 'tmp', or if all remaining bits left
1114 	 * in 'tmp' correspond to the same bit in 'to', return false
1115 	 * (nothing left to migrate).
1116 	 *
1117 	 * This lets us pick a pair of nodes to migrate between, such that
1118 	 * if possible the dest node is not already occupied by some other
1119 	 * source node, minimizing the risk of overloading the memory on a
1120 	 * node, which would happen if we migrated incoming memory to a node
1121 	 * before migrating outgoing memory off of that same node.
1122 	 *
1123 	 * A single scan of tmp is sufficient.  As we go, we remember the
1124 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1125 	 * that not only moved, but what's better, moved to an empty slot
1126 	 * (d is not set in tmp), then we break out then, with that pair.
1127 	 * Otherwise when we finish scanning tmp, we at least have the
1128 	 * most recent <s, d> pair that moved.  If we get all the way through
1129 	 * the scan of tmp without finding any node that moved, much less
1130 	 * moved to an empty node, then there is nothing left worth migrating.
1131 	 */
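	/*
	 * Worked example (illustrative): migrating from={0,1} to={1,2}.
	 * The first scan picks <1,2> because dest 2 is not in the remaining
	 * source set, so node 1 is emptied onto node 2 first; the second
	 * scan then picks <0,1>, so node 0's pages land on node 1 only
	 * after node 1's original pages have been moved off of it.
	 */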
1132 
1133 	tmp = *from;
1134 	while (!nodes_empty(tmp)) {
1135 		int s, d;
1136 		int source = NUMA_NO_NODE;
1137 		int dest = 0;
1138 
1139 		for_each_node_mask(s, tmp) {
1140 
1141 			/*
1142 			 * do_migrate_pages() tries to maintain the relative
1143 			 * node relationship of the pages established between
1144 			 * threads and memory areas.
1145 			 *
1146 			 * However if the number of source nodes is not equal to
1147 			 * the number of destination nodes we can not preserve
1148 			 * this node relative relationship.  In that case, skip
1149 			 * copying memory from a node that is in the destination
1150 			 * mask.
1151 			 *
1152 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1153 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1154 			 */
1155 
1156 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1157 						(node_isset(s, *to)))
1158 				continue;
1159 
1160 			d = node_remap(s, *from, *to);
1161 			if (s == d)
1162 				continue;
1163 
1164 			source = s;	/* Node moved. Memorize */
1165 			dest = d;
1166 
1167 			/* dest not in remaining from nodes? */
1168 			if (!node_isset(dest, tmp))
1169 				break;
1170 		}
1171 		if (source == NUMA_NO_NODE)
1172 			break;
1173 
1174 		node_clear(source, tmp);
1175 		err = migrate_to_node(mm, source, dest, flags);
1176 		if (err > 0)
1177 			busy += err;
1178 		if (err < 0)
1179 			break;
1180 	}
1181 	mmap_read_unlock(mm);
1182 
1183 	lru_cache_enable();
1184 	if (err < 0)
1185 		return err;
1186 	return busy;
1187 
1188 }
1189 
1190 /*
1191  * Allocate a new page for page migration based on vma policy.
1192  * Start by assuming the page is mapped by the same vma as contains @start.
1193  * Search forward from there, if not.  N.B., this assumes that the
1194  * list of pages handed to migrate_pages()--which is how we get here--
1195  * is in virtual address order.
1196  */
1197 static struct page *new_page(struct page *page, unsigned long start)
1198 {
1199 	struct vm_area_struct *vma;
1200 	unsigned long address;
1201 
1202 	vma = find_vma(current->mm, start);
1203 	while (vma) {
1204 		address = page_address_in_vma(page, vma);
1205 		if (address != -EFAULT)
1206 			break;
1207 		vma = vma->vm_next;
1208 	}
1209 
1210 	if (PageHuge(page)) {
1211 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1212 				vma, address);
1213 	} else if (PageTransHuge(page)) {
1214 		struct page *thp;
1215 
1216 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1217 					 HPAGE_PMD_ORDER);
1218 		if (!thp)
1219 			return NULL;
1220 		prep_transhuge_page(thp);
1221 		return thp;
1222 	}
1223 	/*
1224 	 * if !vma, alloc_page_vma() will use task or system default policy
1225 	 */
1226 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1227 			vma, address);
1228 }
1229 #else
1230 
1231 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1232 				unsigned long flags)
1233 {
1234 	return -EIO;
1235 }
1236 
1237 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1238 		     const nodemask_t *to, int flags)
1239 {
1240 	return -ENOSYS;
1241 }
1242 
1243 static struct page *new_page(struct page *page, unsigned long start)
1244 {
1245 	return NULL;
1246 }
1247 #endif
1248 
1249 static long do_mbind(unsigned long start, unsigned long len,
1250 		     unsigned short mode, unsigned short mode_flags,
1251 		     nodemask_t *nmask, unsigned long flags)
1252 {
1253 	struct mm_struct *mm = current->mm;
1254 	struct mempolicy *new;
1255 	unsigned long end;
1256 	int err;
1257 	int ret;
1258 	LIST_HEAD(pagelist);
1259 
1260 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1261 		return -EINVAL;
1262 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1263 		return -EPERM;
1264 
1265 	if (start & ~PAGE_MASK)
1266 		return -EINVAL;
1267 
1268 	if (mode == MPOL_DEFAULT)
1269 		flags &= ~MPOL_MF_STRICT;
1270 
1271 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1272 	end = start + len;
1273 
1274 	if (end < start)
1275 		return -EINVAL;
1276 	if (end == start)
1277 		return 0;
1278 
1279 	new = mpol_new(mode, mode_flags, nmask);
1280 	if (IS_ERR(new))
1281 		return PTR_ERR(new);
1282 
1283 	if (flags & MPOL_MF_LAZY)
1284 		new->flags |= MPOL_F_MOF;
1285 
1286 	/*
1287 	 * If we are using the default policy then operation
1288 	 * on discontinuous address spaces is okay after all
1289 	 */
1290 	if (!new)
1291 		flags |= MPOL_MF_DISCONTIG_OK;
1292 
1293 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1294 		 start, start + len, mode, mode_flags,
1295 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1296 
1297 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1298 
1299 		lru_cache_disable();
1300 	}
1301 	{
1302 		NODEMASK_SCRATCH(scratch);
1303 		if (scratch) {
1304 			mmap_write_lock(mm);
1305 			err = mpol_set_nodemask(new, nmask, scratch);
1306 			if (err)
1307 				mmap_write_unlock(mm);
1308 		} else
1309 			err = -ENOMEM;
1310 		NODEMASK_SCRATCH_FREE(scratch);
1311 	}
1312 	if (err)
1313 		goto mpol_out;
1314 
1315 	ret = queue_pages_range(mm, start, end, nmask,
1316 			  flags | MPOL_MF_INVERT, &pagelist);
1317 
1318 	if (ret < 0) {
1319 		err = ret;
1320 		goto up_out;
1321 	}
1322 
1323 	err = mbind_range(mm, start, end, new);
1324 
1325 	if (!err) {
1326 		int nr_failed = 0;
1327 
1328 		if (!list_empty(&pagelist)) {
1329 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1330 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1331 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1332 			if (nr_failed)
1333 				putback_movable_pages(&pagelist);
1334 		}
1335 
1336 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1337 			err = -EIO;
1338 	} else {
1339 up_out:
1340 		if (!list_empty(&pagelist))
1341 			putback_movable_pages(&pagelist);
1342 	}
1343 
1344 	mmap_write_unlock(mm);
1345 mpol_out:
1346 	mpol_put(new);
1347 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1348 		lru_cache_enable();
1349 	return err;
1350 }
1351 
1352 /*
1353  * User space interface with variable sized bitmaps for nodelists.
1354  */
1355 static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1356 		      unsigned long maxnode)
1357 {
1358 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1359 	int ret;
1360 
1361 	if (in_compat_syscall())
1362 		ret = compat_get_bitmap(mask,
1363 					(const compat_ulong_t __user *)nmask,
1364 					maxnode);
1365 	else
1366 		ret = copy_from_user(mask, nmask,
1367 				     nlongs * sizeof(unsigned long));
1368 
1369 	if (ret)
1370 		return -EFAULT;
1371 
1372 	if (maxnode % BITS_PER_LONG)
1373 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1374 
1375 	return 0;
1376 }
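/*
 * Worked example (assumed values): with maxnode == 10 on a 64-bit kernel,
 * nlongs == 1 and the final word is masked with (1UL << 10) - 1, so bits at
 * or above maxnode that were copied in as part of the whole long, but never
 * actually requested by the user, are cleared.
 */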
1377 
1378 /* Copy a node mask from user space. */
1379 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1380 		     unsigned long maxnode)
1381 {
1382 	--maxnode;
1383 	nodes_clear(*nodes);
1384 	if (maxnode == 0 || !nmask)
1385 		return 0;
1386 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1387 		return -EINVAL;
1388 
1389 	/*
1390 	 * When the user specified more nodes than supported, just check
1391 	 * that the unsupported part is all zero, one word at a time,
1392 	 * starting at the end.
1393 	 */
1394 	while (maxnode > MAX_NUMNODES) {
1395 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1396 		unsigned long t;
1397 
1398 		if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
1399 			return -EFAULT;
1400 
1401 		if (maxnode - bits >= MAX_NUMNODES) {
1402 			maxnode -= bits;
1403 		} else {
1404 			maxnode = MAX_NUMNODES;
1405 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1406 		}
1407 		if (t)
1408 			return -EINVAL;
1409 	}
1410 
1411 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1412 }
1413 
1414 /* Copy a kernel node mask to user space */
1415 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1416 			      nodemask_t *nodes)
1417 {
1418 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1419 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1420 	bool compat = in_compat_syscall();
1421 
1422 	if (compat)
1423 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1424 
1425 	if (copy > nbytes) {
1426 		if (copy > PAGE_SIZE)
1427 			return -EINVAL;
1428 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1429 			return -EFAULT;
1430 		copy = nbytes;
1431 		maxnode = nr_node_ids;
1432 	}
1433 
1434 	if (compat)
1435 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1436 					 nodes_addr(*nodes), maxnode);
1437 
1438 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1439 }
1440 
1441 /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1442 static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1443 {
1444 	*flags = *mode & MPOL_MODE_FLAGS;
1445 	*mode &= ~MPOL_MODE_FLAGS;
1446 
1447 	if ((unsigned int)(*mode) >=  MPOL_MAX)
1448 		return -EINVAL;
1449 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1450 		return -EINVAL;
1451 	if (*flags & MPOL_F_NUMA_BALANCING) {
1452 		if (*mode != MPOL_BIND)
1453 			return -EINVAL;
1454 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
1455 	}
1456 	return 0;
1457 }
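/*
 * Example of the packing this unpacks (illustrative): userspace passes a
 * single mode argument such as MPOL_BIND | MPOL_F_STATIC_NODES to
 * set_mempolicy()/mbind(); after sanitize_mpol_flags(), *mode holds
 * MPOL_BIND and *flags holds MPOL_F_STATIC_NODES.
 */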
1458 
1459 static long kernel_mbind(unsigned long start, unsigned long len,
1460 			 unsigned long mode, const unsigned long __user *nmask,
1461 			 unsigned long maxnode, unsigned int flags)
1462 {
1463 	unsigned short mode_flags;
1464 	nodemask_t nodes;
1465 	int lmode = mode;
1466 	int err;
1467 
1468 	start = untagged_addr(start);
1469 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1470 	if (err)
1471 		return err;
1472 
1473 	err = get_nodes(&nodes, nmask, maxnode);
1474 	if (err)
1475 		return err;
1476 
1477 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1478 }
1479 
1480 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1481 		unsigned long, mode, const unsigned long __user *, nmask,
1482 		unsigned long, maxnode, unsigned int, flags)
1483 {
1484 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1485 }
1486 
1487 /* Set the process memory policy */
1488 static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1489 				 unsigned long maxnode)
1490 {
1491 	unsigned short mode_flags;
1492 	nodemask_t nodes;
1493 	int lmode = mode;
1494 	int err;
1495 
1496 	err = sanitize_mpol_flags(&lmode, &mode_flags);
1497 	if (err)
1498 		return err;
1499 
1500 	err = get_nodes(&nodes, nmask, maxnode);
1501 	if (err)
1502 		return err;
1503 
1504 	return do_set_mempolicy(lmode, mode_flags, &nodes);
1505 }
1506 
1507 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1508 		unsigned long, maxnode)
1509 {
1510 	return kernel_set_mempolicy(mode, nmask, maxnode);
1511 }
1512 
1513 static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1514 				const unsigned long __user *old_nodes,
1515 				const unsigned long __user *new_nodes)
1516 {
1517 	struct mm_struct *mm = NULL;
1518 	struct task_struct *task;
1519 	nodemask_t task_nodes;
1520 	int err;
1521 	nodemask_t *old;
1522 	nodemask_t *new;
1523 	NODEMASK_SCRATCH(scratch);
1524 
1525 	if (!scratch)
1526 		return -ENOMEM;
1527 
1528 	old = &scratch->mask1;
1529 	new = &scratch->mask2;
1530 
1531 	err = get_nodes(old, old_nodes, maxnode);
1532 	if (err)
1533 		goto out;
1534 
1535 	err = get_nodes(new, new_nodes, maxnode);
1536 	if (err)
1537 		goto out;
1538 
1539 	/* Find the mm_struct */
1540 	rcu_read_lock();
1541 	task = pid ? find_task_by_vpid(pid) : current;
1542 	if (!task) {
1543 		rcu_read_unlock();
1544 		err = -ESRCH;
1545 		goto out;
1546 	}
1547 	get_task_struct(task);
1548 
1549 	err = -EINVAL;
1550 
1551 	/*
1552 	 * Check if this process has the right to modify the specified process.
1553 	 * Use the regular "ptrace_may_access()" checks.
1554 	 */
1555 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1556 		rcu_read_unlock();
1557 		err = -EPERM;
1558 		goto out_put;
1559 	}
1560 	rcu_read_unlock();
1561 
1562 	task_nodes = cpuset_mems_allowed(task);
1563 	/* Is the user allowed to access the target nodes? */
1564 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1565 		err = -EPERM;
1566 		goto out_put;
1567 	}
1568 
1569 	task_nodes = cpuset_mems_allowed(current);
1570 	nodes_and(*new, *new, task_nodes);
1571 	if (nodes_empty(*new))
1572 		goto out_put;
1573 
1574 	err = security_task_movememory(task);
1575 	if (err)
1576 		goto out_put;
1577 
1578 	mm = get_task_mm(task);
1579 	put_task_struct(task);
1580 
1581 	if (!mm) {
1582 		err = -EINVAL;
1583 		goto out;
1584 	}
1585 
1586 	err = do_migrate_pages(mm, old, new,
1587 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1588 
1589 	mmput(mm);
1590 out:
1591 	NODEMASK_SCRATCH_FREE(scratch);
1592 
1593 	return err;
1594 
1595 out_put:
1596 	put_task_struct(task);
1597 	goto out;
1598 
1599 }
1600 
1601 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1602 		const unsigned long __user *, old_nodes,
1603 		const unsigned long __user *, new_nodes)
1604 {
1605 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1606 }
1607 
1608 
1609 /* Retrieve NUMA policy */
1610 static int kernel_get_mempolicy(int __user *policy,
1611 				unsigned long __user *nmask,
1612 				unsigned long maxnode,
1613 				unsigned long addr,
1614 				unsigned long flags)
1615 {
1616 	int err;
1617 	int pval;
1618 	nodemask_t nodes;
1619 
1620 	if (nmask != NULL && maxnode < nr_node_ids)
1621 		return -EINVAL;
1622 
1623 	addr = untagged_addr(addr);
1624 
1625 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1626 
1627 	if (err)
1628 		return err;
1629 
1630 	if (policy && put_user(pval, policy))
1631 		return -EFAULT;
1632 
1633 	if (nmask)
1634 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1635 
1636 	return err;
1637 }
1638 
1639 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1640 		unsigned long __user *, nmask, unsigned long, maxnode,
1641 		unsigned long, addr, unsigned long, flags)
1642 {
1643 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1644 }
1645 
1646 bool vma_migratable(struct vm_area_struct *vma)
1647 {
1648 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1649 		return false;
1650 
1651 	/*
1652 	 * DAX device mappings require predictable access latency, so avoid
1653 	 * incurring periodic faults.
1654 	 */
1655 	if (vma_is_dax(vma))
1656 		return false;
1657 
1658 	if (is_vm_hugetlb_page(vma) &&
1659 		!hugepage_migration_supported(hstate_vma(vma)))
1660 		return false;
1661 
1662 	/*
1663 	 * Migration allocates pages in the highest zone. If we cannot
1664 	 * do so then migration (at least from node to node) is not
1665 	 * possible.
1666 	 */
1667 	if (vma->vm_file &&
1668 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1669 			< policy_zone)
1670 		return false;
1671 	return true;
1672 }
1673 
1674 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1675 						unsigned long addr)
1676 {
1677 	struct mempolicy *pol = NULL;
1678 
1679 	if (vma) {
1680 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1681 			pol = vma->vm_ops->get_policy(vma, addr);
1682 		} else if (vma->vm_policy) {
1683 			pol = vma->vm_policy;
1684 
1685 			/*
1686 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1687 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1688 			 * count on these policies which will be dropped by
1689 			 * mpol_cond_put() later
1690 			 */
1691 			if (mpol_needs_cond_ref(pol))
1692 				mpol_get(pol);
1693 		}
1694 	}
1695 
1696 	return pol;
1697 }
1698 
1699 /*
1700  * get_vma_policy(@vma, @addr)
1701  * @vma: virtual memory area whose policy is sought
1702  * @addr: address in @vma for shared policy lookup
1703  *
1704  * Returns effective policy for a VMA at specified address.
1705  * Falls back to current->mempolicy or system default policy, as necessary.
1706  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1707  * count--added by the get_policy() vm_op, as appropriate--to protect against
1708  * freeing by another task.  It is the caller's responsibility to free the
1709  * extra reference for shared policies.
1710  */
1711 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1712 						unsigned long addr)
1713 {
1714 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1715 
1716 	if (!pol)
1717 		pol = get_task_policy(current);
1718 
1719 	return pol;
1720 }
1721 
1722 bool vma_policy_mof(struct vm_area_struct *vma)
1723 {
1724 	struct mempolicy *pol;
1725 
1726 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1727 		bool ret = false;
1728 
1729 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1730 		if (pol && (pol->flags & MPOL_F_MOF))
1731 			ret = true;
1732 		mpol_cond_put(pol);
1733 
1734 		return ret;
1735 	}
1736 
1737 	pol = vma->vm_policy;
1738 	if (!pol)
1739 		pol = get_task_policy(current);
1740 
1741 	return pol->flags & MPOL_F_MOF;
1742 }
1743 
1744 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1745 {
1746 	enum zone_type dynamic_policy_zone = policy_zone;
1747 
1748 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1749 
1750 	/*
1751 	 * If policy->nodes has movable memory only,
1752 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1753 	 *
1754 	 * policy->nodes is intersected with node_states[N_MEMORY],
1755 	 * so if the following test fails, it implies
1756 	 * policy->nodes has movable memory only.
1757 	 */
1758 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1759 		dynamic_policy_zone = ZONE_MOVABLE;
1760 
1761 	return zone >= dynamic_policy_zone;
1762 }
1763 
1764 /*
1765  * Return a nodemask representing a mempolicy for filtering nodes for
1766  * page allocation
1767  */
1768 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1769 {
1770 	int mode = policy->mode;
1771 
1772 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1773 	if (unlikely(mode == MPOL_BIND) &&
1774 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1775 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1776 		return &policy->nodes;
1777 
1778 	if (mode == MPOL_PREFERRED_MANY)
1779 		return &policy->nodes;
1780 
1781 	return NULL;
1782 }
1783 
1784 /*
1785  * Return the preferred node id for 'prefer' mempolicy, and return
1786  * the given id for all other policies.
1787  *
1788  * policy_node() is always coupled with policy_nodemask(), which
1789  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1790  */
1791 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1792 {
1793 	if (policy->mode == MPOL_PREFERRED) {
1794 		nd = first_node(policy->nodes);
1795 	} else {
1796 		/*
1797 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1798 		 * because we might easily break the expectation to stay on the
1799 		 * requested node and not break the policy.
1800 		 */
1801 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1802 	}
1803 
1804 	return nd;
1805 }
1806 
1807 /* Do dynamic interleaving for a process */
1808 static unsigned interleave_nodes(struct mempolicy *policy)
1809 {
1810 	unsigned next;
1811 	struct task_struct *me = current;
1812 
1813 	next = next_node_in(me->il_prev, policy->nodes);
1814 	if (next < MAX_NUMNODES)
1815 		me->il_prev = next;
1816 	return next;
1817 }
1818 
1819 /*
1820  * Depending on the memory policy provide a node from which to allocate the
1821  * next slab entry.
1822  */
1823 unsigned int mempolicy_slab_node(void)
1824 {
1825 	struct mempolicy *policy;
1826 	int node = numa_mem_id();
1827 
1828 	if (!in_task())
1829 		return node;
1830 
1831 	policy = current->mempolicy;
1832 	if (!policy)
1833 		return node;
1834 
1835 	switch (policy->mode) {
1836 	case MPOL_PREFERRED:
1837 		return first_node(policy->nodes);
1838 
1839 	case MPOL_INTERLEAVE:
1840 		return interleave_nodes(policy);
1841 
1842 	case MPOL_BIND:
1843 	case MPOL_PREFERRED_MANY:
1844 	{
1845 		struct zoneref *z;
1846 
1847 		/*
1848 		 * Follow bind policy behavior and start allocation at the
1849 		 * first node.
1850 		 */
1851 		struct zonelist *zonelist;
1852 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1853 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1854 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1855 							&policy->nodes);
1856 		return z->zone ? zone_to_nid(z->zone) : node;
1857 	}
1858 	case MPOL_LOCAL:
1859 		return node;
1860 
1861 	default:
1862 		BUG();
1863 	}
1864 }
1865 
1866 /*
1867  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1868  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1869  * number of present nodes.
1870  */
1871 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1872 {
1873 	nodemask_t nodemask = pol->nodes;
1874 	unsigned int target, nnodes;
1875 	int i;
1876 	int nid;
1877 	/*
1878 	 * The barrier will stabilize the nodemask in a register or on
1879 	 * the stack so that it will stop changing under the code.
1880 	 *
1881 	 * Between first_node() and next_node(), pol->nodes could be changed
1882 	 * by other threads. So we copy pol->nodes onto the local stack.
1883 	 */
1884 	barrier();
1885 
1886 	nnodes = nodes_weight(nodemask);
1887 	if (!nnodes)
1888 		return numa_node_id();
1889 	target = (unsigned int)n % nnodes;
1890 	nid = first_node(nodemask);
1891 	for (i = 0; i < target; i++)
1892 		nid = next_node(nid, nodemask);
1893 	return nid;
1894 }
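/*
 * Worked example (illustrative): with pol->nodes == {1,3,5} and n == 7,
 * nnodes == 3 and target == 7 % 3 == 1, so the walk starts at node 1 and
 * advances once, returning node 3.
 */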
1895 
1896 /* Determine a node number for interleave */
1897 static inline unsigned interleave_nid(struct mempolicy *pol,
1898 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1899 {
1900 	if (vma) {
1901 		unsigned long off;
1902 
1903 		/*
1904 		 * for small pages, there is no difference between
1905 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1906 		 * for huge pages, since vm_pgoff is in units of small
1907 		 * pages, we need to shift off the always 0 bits to get
1908 		 * a useful offset.
1909 		 */
1910 		BUG_ON(shift < PAGE_SHIFT);
1911 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1912 		off += (addr - vma->vm_start) >> shift;
1913 		return offset_il_node(pol, off);
1914 	} else
1915 		return interleave_nodes(pol);
1916 }
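
/*
 * Worked example (illustrative, assuming 4KB base pages and 2MB hugepages,
 * i.e. shift = 21): for a VMA with vm_pgoff = 1024 and a fault 6MB past
 * vm_start, off = (1024 >> 9) + (6MB >> 21) = 2 + 3 = 5, and the interleave
 * node is offset_il_node(pol, 5).
 */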
1917 
1918 #ifdef CONFIG_HUGETLBFS
1919 /*
1920  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1921  * @vma: virtual memory area whose policy is sought
1922  * @addr: address in @vma for shared policy lookup and interleave policy
1923  * @gfp_flags: for requested zone
1924  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1925  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1926  *
1927  * Returns a nid suitable for a huge page allocation and a pointer
1928  * to the struct mempolicy for conditional unref after allocation.
1929  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1930  * to the mempolicy's @nodemask for filtering the zonelist.
1931  *
1932  * Must be protected by read_mems_allowed_begin()
1933  */
1934 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1935 				struct mempolicy **mpol, nodemask_t **nodemask)
1936 {
1937 	int nid;
1938 	int mode;
1939 
1940 	*mpol = get_vma_policy(vma, addr);
1941 	*nodemask = NULL;
1942 	mode = (*mpol)->mode;
1943 
1944 	if (unlikely(mode == MPOL_INTERLEAVE)) {
1945 		nid = interleave_nid(*mpol, vma, addr,
1946 					huge_page_shift(hstate_vma(vma)));
1947 	} else {
1948 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1949 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
1950 			*nodemask = &(*mpol)->nodes;
1951 	}
1952 	return nid;
1953 }
1954 
1955 /*
1956  * init_nodemask_of_mempolicy
1957  *
1958  * If the current task's mempolicy is "default" [NULL], return 'false'
1959  * to indicate default policy.  Otherwise, extract the policy nodemask
1960  * for 'bind' or 'interleave' policy into the argument nodemask, or
1961  * initialize the argument nodemask to contain the single node for
1962  * 'preferred' or 'local' policy and return 'true' to indicate presence
1963  * of non-default mempolicy.
1964  *
1965  * We don't bother with reference counting the mempolicy [mpol_get/put]
1966  * because the current task is examining its own mempolicy and a task's
1967  * mempolicy is only ever changed by the task itself.
1968  *
1969  * N.B., it is the caller's responsibility to free a returned nodemask.
1970  */
1971 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1972 {
1973 	struct mempolicy *mempolicy;
1974 
1975 	if (!(mask && current->mempolicy))
1976 		return false;
1977 
1978 	task_lock(current);
1979 	mempolicy = current->mempolicy;
1980 	switch (mempolicy->mode) {
1981 	case MPOL_PREFERRED:
1982 	case MPOL_PREFERRED_MANY:
1983 	case MPOL_BIND:
1984 	case MPOL_INTERLEAVE:
1985 		*mask = mempolicy->nodes;
1986 		break;
1987 
1988 	case MPOL_LOCAL:
1989 		init_nodemask_of_node(mask, numa_node_id());
1990 		break;
1991 
1992 	default:
1993 		BUG();
1994 	}
1995 	task_unlock(current);
1996 
1997 	return true;
1998 }
1999 #endif
2000 
2001 /*
2002  * mempolicy_in_oom_domain
2003  *
2004  * If tsk's mempolicy is "bind", check for intersection between mask and
2005  * the policy nodemask. Otherwise, return true for all other policies
2006  * including "interleave", as a tsk with "interleave" policy may have
2007  * memory allocated from all nodes in the system.
2008  *
2009  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2010  */
2011 bool mempolicy_in_oom_domain(struct task_struct *tsk,
2012 					const nodemask_t *mask)
2013 {
2014 	struct mempolicy *mempolicy;
2015 	bool ret = true;
2016 
2017 	if (!mask)
2018 		return ret;
2019 
2020 	task_lock(tsk);
2021 	mempolicy = tsk->mempolicy;
2022 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2023 		ret = nodes_intersects(mempolicy->nodes, *mask);
2024 	task_unlock(tsk);
2025 
2026 	return ret;
2027 }
2028 
2029 /* Allocate a page under the interleave policy. This needs its own
2030    path because it does special NUMA_INTERLEAVE_HIT accounting. */
2031 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2032 					unsigned nid)
2033 {
2034 	struct page *page;
2035 
2036 	page = __alloc_pages(gfp, order, nid, NULL);
2037 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2038 	if (!static_branch_likely(&vm_numa_stat_key))
2039 		return page;
2040 	if (page && page_to_nid(page) == nid) {
2041 		preempt_disable();
2042 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2043 		preempt_enable();
2044 	}
2045 	return page;
2046 }
2047 
2048 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2049 						int nid, struct mempolicy *pol)
2050 {
2051 	struct page *page;
2052 	gfp_t preferred_gfp;
2053 
2054 	/*
2055 	 * This is a two pass approach. The first pass will only try the
2056 	 * preferred nodes but skip the direct reclaim and allow the
2057 	 * allocation to fail, while the second pass will try all the
2058 	 * nodes in system.
2059 	 */
2060 	preferred_gfp = gfp | __GFP_NOWARN;
2061 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2062 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2063 	if (!page)
2064 		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
2065 
2066 	return page;
2067 }
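
/*
 * Example (illustrative): for a GFP_KERNEL request the first pass uses
 * (GFP_KERNEL | __GFP_NOWARN) with __GFP_DIRECT_RECLAIM and __GFP_NOFAIL
 * cleared, restricted to pol->nodes; only if that fails is the original
 * GFP_KERNEL retried preferring the local node, with no nodemask restriction.
 */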
2068 
2069 /**
2070  * alloc_pages_vma - Allocate a page for a VMA.
2071  * @gfp: GFP flags.
2072  * @order: Order of the GFP allocation.
2073  * @vma: Pointer to VMA or NULL if not available.
2074  * @addr: Virtual address of the allocation.  Must be inside @vma.
2075  * @node: Which node to prefer for allocation (modulo policy).
2076  * @hugepage: For hugepages try only the preferred node if possible.
2077  *
2078  * Allocate a page for a specific address in @vma, using the appropriate
2079  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2080  * of the mm_struct of the VMA to prevent it from going away.  Should be
2081  * used for all allocations for pages that will be mapped into user space.
2082  *
2083  * Return: The page on success or NULL if allocation fails.
2084  */
2085 struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2086 		unsigned long addr, int node, bool hugepage)
2087 {
2088 	struct mempolicy *pol;
2089 	struct page *page;
2090 	int preferred_nid;
2091 	nodemask_t *nmask;
2092 
2093 	pol = get_vma_policy(vma, addr);
2094 
2095 	if (pol->mode == MPOL_INTERLEAVE) {
2096 		unsigned nid;
2097 
2098 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2099 		mpol_cond_put(pol);
2100 		page = alloc_page_interleave(gfp, order, nid);
2101 		goto out;
2102 	}
2103 
2104 	if (pol->mode == MPOL_PREFERRED_MANY) {
2105 		page = alloc_pages_preferred_many(gfp, order, node, pol);
2106 		mpol_cond_put(pol);
2107 		goto out;
2108 	}
2109 
2110 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2111 		int hpage_node = node;
2112 
2113 		/*
2114 		 * For hugepage allocation and non-interleave policy which
2115 		 * allows the current node (or other explicitly preferred
2116 		 * node) we only try to allocate from the current/preferred
2117 		 * node and don't fall back to other nodes, as the cost of
2118 		 * remote accesses would likely offset THP benefits.
2119 		 *
2120 		 * If the policy is interleave or does not allow the current
2121 		 * node in its nodemask, we allocate the standard way.
2122 		 */
2123 		if (pol->mode == MPOL_PREFERRED)
2124 			hpage_node = first_node(pol->nodes);
2125 
2126 		nmask = policy_nodemask(gfp, pol);
2127 		if (!nmask || node_isset(hpage_node, *nmask)) {
2128 			mpol_cond_put(pol);
2129 			/*
2130 			 * First, try to allocate THP only on local node, but
2131 			 * don't reclaim unnecessarily, just compact.
2132 			 */
2133 			page = __alloc_pages_node(hpage_node,
2134 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2135 
2136 			/*
2137 			 * If hugepage allocations are configured to always
2138 			 * use synchronous compaction, or the vma has been
2139 			 * madvised to prefer hugepage backing, retry allowing
2140 			 * remote memory with both reclaim and compaction.
2141 			 */
2142 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2143 				page = __alloc_pages_node(hpage_node,
2144 								gfp, order);
2145 
2146 			goto out;
2147 		}
2148 	}
2149 
2150 	nmask = policy_nodemask(gfp, pol);
2151 	preferred_nid = policy_node(gfp, pol, node);
2152 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2153 	mpol_cond_put(pol);
2154 out:
2155 	return page;
2156 }
2157 EXPORT_SYMBOL(alloc_pages_vma);
2158 
2159 /**
2160  * alloc_pages - Allocate pages.
2161  * @gfp: GFP flags.
2162  * @order: Power of two of number of pages to allocate.
2163  *
2164  * Allocate 1 << @order contiguous pages.  The physical address of the
2165  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2166  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2167  * process is honoured when in process context.
2168  *
2169  * Context: Can be called from any context, providing the appropriate GFP
2170  * flags are used.
2171  * Return: The page on success or NULL if allocation fails.
2172  */
2173 struct page *alloc_pages(gfp_t gfp, unsigned order)
2174 {
2175 	struct mempolicy *pol = &default_policy;
2176 	struct page *page;
2177 
2178 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2179 		pol = get_task_policy(current);
2180 
2181 	/*
2182 	 * No reference counting needed for current->mempolicy
2183 	 * nor system default_policy
2184 	 */
2185 	if (pol->mode == MPOL_INTERLEAVE)
2186 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2187 	else if (pol->mode == MPOL_PREFERRED_MANY)
2188 		page = alloc_pages_preferred_many(gfp, order,
2189 				numa_node_id(), pol);
2190 	else
2191 		page = __alloc_pages(gfp, order,
2192 				policy_node(gfp, pol, numa_node_id()),
2193 				policy_nodemask(gfp, pol));
2194 
2195 	return page;
2196 }
2197 EXPORT_SYMBOL(alloc_pages);
2198 
2199 static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2200 		struct mempolicy *pol, unsigned long nr_pages,
2201 		struct page **page_array)
2202 {
2203 	int nodes;
2204 	unsigned long nr_pages_per_node;
2205 	int delta;
2206 	int i;
2207 	unsigned long nr_allocated;
2208 	unsigned long total_allocated = 0;
2209 
2210 	nodes = nodes_weight(pol->nodes);
2211 	nr_pages_per_node = nr_pages / nodes;
2212 	delta = nr_pages - nodes * nr_pages_per_node;
2213 
2214 	for (i = 0; i < nodes; i++) {
2215 		if (delta) {
2216 			nr_allocated = __alloc_pages_bulk(gfp,
2217 					interleave_nodes(pol), NULL,
2218 					nr_pages_per_node + 1, NULL,
2219 					page_array);
2220 			delta--;
2221 		} else {
2222 			nr_allocated = __alloc_pages_bulk(gfp,
2223 					interleave_nodes(pol), NULL,
2224 					nr_pages_per_node, NULL, page_array);
2225 		}
2226 
2227 		page_array += nr_allocated;
2228 		total_allocated += nr_allocated;
2229 	}
2230 
2231 	return total_allocated;
2232 }
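
/*
 * Worked example (illustrative): nr_pages = 10 across a 4-node interleave
 * set gives nr_pages_per_node = 2 and delta = 2, so the first two nodes
 * returned by interleave_nodes() get 3 pages each and the remaining two
 * get 2 pages each.
 */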
2233 
2234 static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2235 		struct mempolicy *pol, unsigned long nr_pages,
2236 		struct page **page_array)
2237 {
2238 	gfp_t preferred_gfp;
2239 	unsigned long nr_allocated = 0;
2240 
2241 	preferred_gfp = gfp | __GFP_NOWARN;
2242 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2243 
2244 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2245 					   nr_pages, NULL, page_array);
2246 
2247 	if (nr_allocated < nr_pages)
2248 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2249 				nr_pages - nr_allocated, NULL,
2250 				page_array + nr_allocated);
2251 	return nr_allocated;
2252 }
2253 
2254 /* Bulk page allocation and the mempolicy need to be considered at the
2255  * same time in some situations, such as vmalloc.
2256  *
2257  * It can accelerate memory allocation, especially for interleaved
2258  * allocations.
2259  */
2260 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2261 		unsigned long nr_pages, struct page **page_array)
2262 {
2263 	struct mempolicy *pol = &default_policy;
2264 
2265 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2266 		pol = get_task_policy(current);
2267 
2268 	if (pol->mode == MPOL_INTERLEAVE)
2269 		return alloc_pages_bulk_array_interleave(gfp, pol,
2270 							 nr_pages, page_array);
2271 
2272 	if (pol->mode == MPOL_PREFERRED_MANY)
2273 		return alloc_pages_bulk_array_preferred_many(gfp,
2274 				numa_node_id(), pol, nr_pages, page_array);
2275 
2276 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2277 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2278 				  page_array);
2279 }
2280 
2281 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2282 {
2283 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2284 
2285 	if (IS_ERR(pol))
2286 		return PTR_ERR(pol);
2287 	dst->vm_policy = pol;
2288 	return 0;
2289 }
2290 
2291 /*
2292  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2293  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2294  * with the mems_allowed returned by cpuset_mems_allowed().  This
2295  * keeps mempolicies cpuset-relative after their cpuset moves.  See
2296  * further kernel/cpuset.c update_nodemask().
2297  *
2298  * current's mempolicy may be rebound by another task (the task that changes
2299  * the cpuset's mems), so we needn't do rebind work for the current task.
2300  */
2301 
2302 /* Slow path of a mempolicy duplicate */
2303 struct mempolicy *__mpol_dup(struct mempolicy *old)
2304 {
2305 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2306 
2307 	if (!new)
2308 		return ERR_PTR(-ENOMEM);
2309 
2310 	/* task's mempolicy is protected by alloc_lock */
2311 	if (old == current->mempolicy) {
2312 		task_lock(current);
2313 		*new = *old;
2314 		task_unlock(current);
2315 	} else
2316 		*new = *old;
2317 
2318 	if (current_cpuset_is_being_rebound()) {
2319 		nodemask_t mems = cpuset_mems_allowed(current);
2320 		mpol_rebind_policy(new, &mems);
2321 	}
2322 	atomic_set(&new->refcnt, 1);
2323 	return new;
2324 }
2325 
2326 /* Slow path of a mempolicy comparison */
2327 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2328 {
2329 	if (!a || !b)
2330 		return false;
2331 	if (a->mode != b->mode)
2332 		return false;
2333 	if (a->flags != b->flags)
2334 		return false;
2335 	if (mpol_store_user_nodemask(a))
2336 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2337 			return false;
2338 
2339 	switch (a->mode) {
2340 	case MPOL_BIND:
2341 	case MPOL_INTERLEAVE:
2342 	case MPOL_PREFERRED:
2343 	case MPOL_PREFERRED_MANY:
2344 		return !!nodes_equal(a->nodes, b->nodes);
2345 	case MPOL_LOCAL:
2346 		return true;
2347 	default:
2348 		BUG();
2349 		return false;
2350 	}
2351 }
2352 
2353 /*
2354  * Shared memory backing store policy support.
2355  *
2356  * Remember policies even when nobody has shared memory mapped.
2357  * The policies are kept in Red-Black tree linked from the inode.
2358  * They are protected by the sp->lock rwlock, which should be held
2359  * for any accesses to the tree.
2360  */
2361 
2362 /*
2363  * Look up the first element intersecting start-end.  Caller holds sp->lock for
2364  * reading or for writing
2365  */
2366 static struct sp_node *
2367 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2368 {
2369 	struct rb_node *n = sp->root.rb_node;
2370 
2371 	while (n) {
2372 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2373 
2374 		if (start >= p->end)
2375 			n = n->rb_right;
2376 		else if (end <= p->start)
2377 			n = n->rb_left;
2378 		else
2379 			break;
2380 	}
2381 	if (!n)
2382 		return NULL;
2383 	for (;;) {
2384 		struct sp_node *w = NULL;
2385 		struct rb_node *prev = rb_prev(n);
2386 		if (!prev)
2387 			break;
2388 		w = rb_entry(prev, struct sp_node, nd);
2389 		if (w->end <= start)
2390 			break;
2391 		n = prev;
2392 	}
2393 	return rb_entry(n, struct sp_node, nd);
2394 }
2395 
2396 /*
2397  * Insert a new shared policy into the list.  Caller holds sp->lock for
2398  * writing.
2399  */
2400 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2401 {
2402 	struct rb_node **p = &sp->root.rb_node;
2403 	struct rb_node *parent = NULL;
2404 	struct sp_node *nd;
2405 
2406 	while (*p) {
2407 		parent = *p;
2408 		nd = rb_entry(parent, struct sp_node, nd);
2409 		if (new->start < nd->start)
2410 			p = &(*p)->rb_left;
2411 		else if (new->end > nd->end)
2412 			p = &(*p)->rb_right;
2413 		else
2414 			BUG();
2415 	}
2416 	rb_link_node(&new->nd, parent, p);
2417 	rb_insert_color(&new->nd, &sp->root);
2418 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2419 		 new->policy ? new->policy->mode : 0);
2420 }
2421 
2422 /* Find shared policy intersecting idx */
2423 struct mempolicy *
2424 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2425 {
2426 	struct mempolicy *pol = NULL;
2427 	struct sp_node *sn;
2428 
2429 	if (!sp->root.rb_node)
2430 		return NULL;
2431 	read_lock(&sp->lock);
2432 	sn = sp_lookup(sp, idx, idx+1);
2433 	if (sn) {
2434 		mpol_get(sn->policy);
2435 		pol = sn->policy;
2436 	}
2437 	read_unlock(&sp->lock);
2438 	return pol;
2439 }
2440 
2441 static void sp_free(struct sp_node *n)
2442 {
2443 	mpol_put(n->policy);
2444 	kmem_cache_free(sn_cache, n);
2445 }
2446 
2447 /**
2448  * mpol_misplaced - check whether the page's current node is valid under the policy
2449  *
2450  * @page: page to be checked
2451  * @vma: vm area where page mapped
2452  * @addr: virtual address where page mapped
2453  *
2454  * Look up the current policy node id for vma,addr and "compare" it to the
2455  * page's node id.  Policy determination "mimics" alloc_page_vma().
2456  * Called from the fault path, where we know the vma and the faulting address.
2457  *
2458  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2459  * policy, or a suitable node ID to allocate a replacement page from.
2460  */
2461 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2462 {
2463 	struct mempolicy *pol;
2464 	struct zoneref *z;
2465 	int curnid = page_to_nid(page);
2466 	unsigned long pgoff;
2467 	int thiscpu = raw_smp_processor_id();
2468 	int thisnid = cpu_to_node(thiscpu);
2469 	int polnid = NUMA_NO_NODE;
2470 	int ret = NUMA_NO_NODE;
2471 
2472 	pol = get_vma_policy(vma, addr);
2473 	if (!(pol->flags & MPOL_F_MOF))
2474 		goto out;
2475 
2476 	switch (pol->mode) {
2477 	case MPOL_INTERLEAVE:
2478 		pgoff = vma->vm_pgoff;
2479 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2480 		polnid = offset_il_node(pol, pgoff);
2481 		break;
2482 
2483 	case MPOL_PREFERRED:
2484 		if (node_isset(curnid, pol->nodes))
2485 			goto out;
2486 		polnid = first_node(pol->nodes);
2487 		break;
2488 
2489 	case MPOL_LOCAL:
2490 		polnid = numa_node_id();
2491 		break;
2492 
2493 	case MPOL_BIND:
2494 		/* Optimize placement among multiple nodes via NUMA balancing */
2495 		if (pol->flags & MPOL_F_MORON) {
2496 			if (node_isset(thisnid, pol->nodes))
2497 				break;
2498 			goto out;
2499 		}
2500 		fallthrough;
2501 
2502 	case MPOL_PREFERRED_MANY:
2503 		/*
2504 		 * use current page if in policy nodemask,
2505 		 * else select nearest allowed node, if any.
2506 		 * If no allowed nodes, use current [!misplaced].
2507 		 */
2508 		if (node_isset(curnid, pol->nodes))
2509 			goto out;
2510 		z = first_zones_zonelist(
2511 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2512 				gfp_zone(GFP_HIGHUSER),
2513 				&pol->nodes);
2514 		polnid = zone_to_nid(z->zone);
2515 		break;
2516 
2517 	default:
2518 		BUG();
2519 	}
2520 
2521 	/* Migrate the page towards the node whose CPU is referencing it */
2522 	if (pol->flags & MPOL_F_MORON) {
2523 		polnid = thisnid;
2524 
2525 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2526 			goto out;
2527 	}
2528 
2529 	if (curnid != polnid)
2530 		ret = polnid;
2531 out:
2532 	mpol_cond_put(pol);
2533 
2534 	return ret;
2535 }
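
/*
 * Example (illustrative): under an MPOL_PREFERRED policy on node 0 with
 * MPOL_F_MOF set, a page currently on node 2 is reported as misplaced and
 * node 0 is returned as the target; a page already on node 0 (or any page
 * under a policy without MPOL_F_MOF) yields NUMA_NO_NODE.
 */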
2536 
2537 /*
2538  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2539  * dropped after task->mempolicy is set to NULL so that any allocation done as
2540  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2541  * policy.
2542  */
2543 void mpol_put_task_policy(struct task_struct *task)
2544 {
2545 	struct mempolicy *pol;
2546 
2547 	task_lock(task);
2548 	pol = task->mempolicy;
2549 	task->mempolicy = NULL;
2550 	task_unlock(task);
2551 	mpol_put(pol);
2552 }
2553 
2554 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2555 {
2556 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2557 	rb_erase(&n->nd, &sp->root);
2558 	sp_free(n);
2559 }
2560 
2561 static void sp_node_init(struct sp_node *node, unsigned long start,
2562 			unsigned long end, struct mempolicy *pol)
2563 {
2564 	node->start = start;
2565 	node->end = end;
2566 	node->policy = pol;
2567 }
2568 
2569 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2570 				struct mempolicy *pol)
2571 {
2572 	struct sp_node *n;
2573 	struct mempolicy *newpol;
2574 
2575 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2576 	if (!n)
2577 		return NULL;
2578 
2579 	newpol = mpol_dup(pol);
2580 	if (IS_ERR(newpol)) {
2581 		kmem_cache_free(sn_cache, n);
2582 		return NULL;
2583 	}
2584 	newpol->flags |= MPOL_F_SHARED;
2585 	sp_node_init(n, start, end, newpol);
2586 
2587 	return n;
2588 }
2589 
2590 /* Replace a policy range. */
2591 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2592 				 unsigned long end, struct sp_node *new)
2593 {
2594 	struct sp_node *n;
2595 	struct sp_node *n_new = NULL;
2596 	struct mempolicy *mpol_new = NULL;
2597 	int ret = 0;
2598 
2599 restart:
2600 	write_lock(&sp->lock);
2601 	n = sp_lookup(sp, start, end);
2602 	/* Take care of old policies in the same range. */
2603 	while (n && n->start < end) {
2604 		struct rb_node *next = rb_next(&n->nd);
2605 		if (n->start >= start) {
2606 			if (n->end <= end)
2607 				sp_delete(sp, n);
2608 			else
2609 				n->start = end;
2610 		} else {
2611 			/* Old policy spanning whole new range. */
2612 			if (n->end > end) {
2613 				if (!n_new)
2614 					goto alloc_new;
2615 
2616 				*mpol_new = *n->policy;
2617 				atomic_set(&mpol_new->refcnt, 1);
2618 				sp_node_init(n_new, end, n->end, mpol_new);
2619 				n->end = start;
2620 				sp_insert(sp, n_new);
2621 				n_new = NULL;
2622 				mpol_new = NULL;
2623 				break;
2624 			} else
2625 				n->end = start;
2626 		}
2627 		if (!next)
2628 			break;
2629 		n = rb_entry(next, struct sp_node, nd);
2630 	}
2631 	if (new)
2632 		sp_insert(sp, new);
2633 	write_unlock(&sp->lock);
2634 	ret = 0;
2635 
2636 err_out:
2637 	if (mpol_new)
2638 		mpol_put(mpol_new);
2639 	if (n_new)
2640 		kmem_cache_free(sn_cache, n_new);
2641 
2642 	return ret;
2643 
2644 alloc_new:
2645 	write_unlock(&sp->lock);
2646 	ret = -ENOMEM;
2647 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2648 	if (!n_new)
2649 		goto err_out;
2650 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2651 	if (!mpol_new)
2652 		goto err_out;
2653 	goto restart;
2654 }
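
/*
 * Worked example (illustrative ranges): replacing [4,8) in a tree holding a
 * single node covering [0,10) trims the old node to [0,4), inserts a newly
 * allocated copy of its policy covering [8,10), and then inserts the new
 * policy node for [4,8) in between.
 */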
2655 
2656 /**
2657  * mpol_shared_policy_init - initialize shared policy for inode
2658  * @sp: pointer to inode shared policy
2659  * @mpol:  struct mempolicy to install
2660  *
2661  * Install non-NULL @mpol in inode's shared policy rb-tree.
2662  * On entry, the current task has a reference on a non-NULL @mpol.
2663  * This must be released on exit.
2664  * This is called at get_inode() time, so we can use GFP_KERNEL.
2665  */
2666 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2667 {
2668 	int ret;
2669 
2670 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2671 	rwlock_init(&sp->lock);
2672 
2673 	if (mpol) {
2674 		struct vm_area_struct pvma;
2675 		struct mempolicy *new;
2676 		NODEMASK_SCRATCH(scratch);
2677 
2678 		if (!scratch)
2679 			goto put_mpol;
2680 		/* contextualize the tmpfs mount point mempolicy */
2681 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2682 		if (IS_ERR(new))
2683 			goto free_scratch; /* no valid nodemask intersection */
2684 
2685 		task_lock(current);
2686 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2687 		task_unlock(current);
2688 		if (ret)
2689 			goto put_new;
2690 
2691 		/* Create pseudo-vma that contains just the policy */
2692 		vma_init(&pvma, NULL);
2693 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2694 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2695 
2696 put_new:
2697 		mpol_put(new);			/* drop initial ref */
2698 free_scratch:
2699 		NODEMASK_SCRATCH_FREE(scratch);
2700 put_mpol:
2701 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2702 	}
2703 }
2704 
2705 int mpol_set_shared_policy(struct shared_policy *info,
2706 			struct vm_area_struct *vma, struct mempolicy *npol)
2707 {
2708 	int err;
2709 	struct sp_node *new = NULL;
2710 	unsigned long sz = vma_pages(vma);
2711 
2712 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2713 		 vma->vm_pgoff,
2714 		 sz, npol ? npol->mode : -1,
2715 		 npol ? npol->flags : -1,
2716 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2717 
2718 	if (npol) {
2719 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2720 		if (!new)
2721 			return -ENOMEM;
2722 	}
2723 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2724 	if (err && new)
2725 		sp_free(new);
2726 	return err;
2727 }
2728 
2729 /* Free a backing policy store on inode delete. */
2730 void mpol_free_shared_policy(struct shared_policy *p)
2731 {
2732 	struct sp_node *n;
2733 	struct rb_node *next;
2734 
2735 	if (!p->root.rb_node)
2736 		return;
2737 	write_lock(&p->lock);
2738 	next = rb_first(&p->root);
2739 	while (next) {
2740 		n = rb_entry(next, struct sp_node, nd);
2741 		next = rb_next(&n->nd);
2742 		sp_delete(p, n);
2743 	}
2744 	write_unlock(&p->lock);
2745 }
2746 
2747 #ifdef CONFIG_NUMA_BALANCING
2748 static int __initdata numabalancing_override;
2749 
2750 static void __init check_numabalancing_enable(void)
2751 {
2752 	bool numabalancing_default = false;
2753 
2754 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2755 		numabalancing_default = true;
2756 
2757 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2758 	if (numabalancing_override)
2759 		set_numabalancing_state(numabalancing_override == 1);
2760 
2761 	if (num_online_nodes() > 1 && !numabalancing_override) {
2762 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2763 			numabalancing_default ? "Enabling" : "Disabling");
2764 		set_numabalancing_state(numabalancing_default);
2765 	}
2766 }
2767 
2768 static int __init setup_numabalancing(char *str)
2769 {
2770 	int ret = 0;
2771 	if (!str)
2772 		goto out;
2773 
2774 	if (!strcmp(str, "enable")) {
2775 		numabalancing_override = 1;
2776 		ret = 1;
2777 	} else if (!strcmp(str, "disable")) {
2778 		numabalancing_override = -1;
2779 		ret = 1;
2780 	}
2781 out:
2782 	if (!ret)
2783 		pr_warn("Unable to parse numa_balancing=\n");
2784 
2785 	return ret;
2786 }
2787 __setup("numa_balancing=", setup_numabalancing);
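
/*
 * E.g. booting with "numa_balancing=disable" on the kernel command line sets
 * numabalancing_override to -1, which check_numabalancing_enable() applies
 * regardless of the CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default.
 */
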
2788 #else
2789 static inline void __init check_numabalancing_enable(void)
2790 {
2791 }
2792 #endif /* CONFIG_NUMA_BALANCING */
2793 
2794 /* assumes fs == KERNEL_DS */
2795 void __init numa_policy_init(void)
2796 {
2797 	nodemask_t interleave_nodes;
2798 	unsigned long largest = 0;
2799 	int nid, prefer = 0;
2800 
2801 	policy_cache = kmem_cache_create("numa_policy",
2802 					 sizeof(struct mempolicy),
2803 					 0, SLAB_PANIC, NULL);
2804 
2805 	sn_cache = kmem_cache_create("shared_policy_node",
2806 				     sizeof(struct sp_node),
2807 				     0, SLAB_PANIC, NULL);
2808 
2809 	for_each_node(nid) {
2810 		preferred_node_policy[nid] = (struct mempolicy) {
2811 			.refcnt = ATOMIC_INIT(1),
2812 			.mode = MPOL_PREFERRED,
2813 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2814 			.nodes = nodemask_of_node(nid),
2815 		};
2816 	}
2817 
2818 	/*
2819 	 * Set interleaving policy for system init. Interleaving is only
2820 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2821 	 * back to the largest node if they're all smaller.
2822 	 */
2823 	nodes_clear(interleave_nodes);
2824 	for_each_node_state(nid, N_MEMORY) {
2825 		unsigned long total_pages = node_present_pages(nid);
2826 
2827 		/* Preserve the largest node */
2828 		if (largest < total_pages) {
2829 			largest = total_pages;
2830 			prefer = nid;
2831 		}
2832 
2833 		/* Interleave this node? */
2834 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2835 			node_set(nid, interleave_nodes);
2836 	}
2837 
2838 	/* All too small, use the largest */
2839 	if (unlikely(nodes_empty(interleave_nodes)))
2840 		node_set(prefer, interleave_nodes);
2841 
2842 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2843 		pr_err("%s: interleaving failed\n", __func__);
2844 
2845 	check_numabalancing_enable();
2846 }
2847 
2848 /* Reset policy of current process to default */
2849 void numa_default_policy(void)
2850 {
2851 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2852 }
2853 
2854 /*
2855  * Parse and format mempolicy from/to strings
2856  */
2857 
2858 static const char * const policy_modes[] =
2859 {
2860 	[MPOL_DEFAULT]    = "default",
2861 	[MPOL_PREFERRED]  = "prefer",
2862 	[MPOL_BIND]       = "bind",
2863 	[MPOL_INTERLEAVE] = "interleave",
2864 	[MPOL_LOCAL]      = "local",
2865 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2866 };
2867 
2868 
2869 #ifdef CONFIG_TMPFS
2870 /**
2871  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2872  * @str:  string containing mempolicy to parse
2873  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2874  *
2875  * Format of input:
2876  *	<mode>[=<flags>][:<nodelist>]
2877  *
2878  * On success, returns 0, else 1
2879  */
2880 int mpol_parse_str(char *str, struct mempolicy **mpol)
2881 {
2882 	struct mempolicy *new = NULL;
2883 	unsigned short mode_flags;
2884 	nodemask_t nodes;
2885 	char *nodelist = strchr(str, ':');
2886 	char *flags = strchr(str, '=');
2887 	int err = 1, mode;
2888 
2889 	if (flags)
2890 		*flags++ = '\0';	/* terminate mode string */
2891 
2892 	if (nodelist) {
2893 		/* NUL-terminate mode or flags string */
2894 		*nodelist++ = '\0';
2895 		if (nodelist_parse(nodelist, nodes))
2896 			goto out;
2897 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2898 			goto out;
2899 	} else
2900 		nodes_clear(nodes);
2901 
2902 	mode = match_string(policy_modes, MPOL_MAX, str);
2903 	if (mode < 0)
2904 		goto out;
2905 
2906 	switch (mode) {
2907 	case MPOL_PREFERRED:
2908 		/*
2909 		 * Insist on a nodelist of one node only; later we use
2910 		 * first_node(nodes) to grab that single node, so here the
2911 		 * nodelist (and hence nodes) cannot be empty.
2912 		 */
2913 		if (nodelist) {
2914 			char *rest = nodelist;
2915 			while (isdigit(*rest))
2916 				rest++;
2917 			if (*rest)
2918 				goto out;
2919 			if (nodes_empty(nodes))
2920 				goto out;
2921 		}
2922 		break;
2923 	case MPOL_INTERLEAVE:
2924 		/*
2925 		 * Default to online nodes with memory if no nodelist
2926 		 */
2927 		if (!nodelist)
2928 			nodes = node_states[N_MEMORY];
2929 		break;
2930 	case MPOL_LOCAL:
2931 		/*
2932 		 * Don't allow a nodelist;  mpol_new() checks flags
2933 		 */
2934 		if (nodelist)
2935 			goto out;
2936 		break;
2937 	case MPOL_DEFAULT:
2938 		/*
2939 		 * Insist on an empty nodelist
2940 		 */
2941 		if (!nodelist)
2942 			err = 0;
2943 		goto out;
2944 	case MPOL_PREFERRED_MANY:
2945 	case MPOL_BIND:
2946 		/*
2947 		 * Insist on a nodelist
2948 		 */
2949 		if (!nodelist)
2950 			goto out;
2951 	}
2952 
2953 	mode_flags = 0;
2954 	if (flags) {
2955 		/*
2956 		 * Currently, we only support two mutually exclusive
2957 		 * mode flags.
2958 		 */
2959 		if (!strcmp(flags, "static"))
2960 			mode_flags |= MPOL_F_STATIC_NODES;
2961 		else if (!strcmp(flags, "relative"))
2962 			mode_flags |= MPOL_F_RELATIVE_NODES;
2963 		else
2964 			goto out;
2965 	}
2966 
2967 	new = mpol_new(mode, mode_flags, &nodes);
2968 	if (IS_ERR(new))
2969 		goto out;
2970 
2971 	/*
2972 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2973 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2974 	 */
2975 	if (mode != MPOL_PREFERRED) {
2976 		new->nodes = nodes;
2977 	} else if (nodelist) {
2978 		nodes_clear(new->nodes);
2979 		node_set(first_node(nodes), new->nodes);
2980 	} else {
2981 		new->mode = MPOL_LOCAL;
2982 	}
2983 
2984 	/*
2985 	 * Save nodes for contextualization: this will be used to "clone"
2986 	 * the mempolicy in a specific context [cpuset] at a later time.
2987 	 */
2988 	new->w.user_nodemask = nodes;
2989 
2990 	err = 0;
2991 
2992 out:
2993 	/* Restore string for error message */
2994 	if (nodelist)
2995 		*--nodelist = ':';
2996 	if (flags)
2997 		*--flags = '=';
2998 	if (!err)
2999 		*mpol = new;
3000 	return err;
3001 }
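
/*
 * Example (illustrative, assuming nodes 0-3 have memory): the tmpfs mount
 * option "interleave=relative:0-3" parses to MPOL_INTERLEAVE with
 * MPOL_F_RELATIVE_NODES over nodes 0-3, while a bare "local" yields
 * MPOL_LOCAL with no nodelist.
 */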
3002 #endif /* CONFIG_TMPFS */
3003 
3004 /**
3005  * mpol_to_str - format a mempolicy structure for printing
3006  * @buffer:  to contain formatted mempolicy string
3007  * @maxlen:  length of @buffer
3008  * @pol:  pointer to mempolicy to be formatted
3009  *
3010  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3011  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3012  * longest flag, "relative", and to display at least a few node ids.
3013  */
3014 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3015 {
3016 	char *p = buffer;
3017 	nodemask_t nodes = NODE_MASK_NONE;
3018 	unsigned short mode = MPOL_DEFAULT;
3019 	unsigned short flags = 0;
3020 
3021 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3022 		mode = pol->mode;
3023 		flags = pol->flags;
3024 	}
3025 
3026 	switch (mode) {
3027 	case MPOL_DEFAULT:
3028 	case MPOL_LOCAL:
3029 		break;
3030 	case MPOL_PREFERRED:
3031 	case MPOL_PREFERRED_MANY:
3032 	case MPOL_BIND:
3033 	case MPOL_INTERLEAVE:
3034 		nodes = pol->nodes;
3035 		break;
3036 	default:
3037 		WARN_ON_ONCE(1);
3038 		snprintf(p, maxlen, "unknown");
3039 		return;
3040 	}
3041 
3042 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3043 
3044 	if (flags & MPOL_MODE_FLAGS) {
3045 		p += snprintf(p, buffer + maxlen - p, "=");
3046 
3047 		/*
3048 		 * Currently, the only defined flags are mutually exclusive
3049 		 */
3050 		if (flags & MPOL_F_STATIC_NODES)
3051 			p += snprintf(p, buffer + maxlen - p, "static");
3052 		else if (flags & MPOL_F_RELATIVE_NODES)
3053 			p += snprintf(p, buffer + maxlen - p, "relative");
3054 	}
3055 
3056 	if (!nodes_empty(nodes))
3057 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3058 			       nodemask_pr_args(&nodes));
3059 }
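
/*
 * Example output (illustrative): an MPOL_INTERLEAVE policy with
 * MPOL_F_STATIC_NODES over nodes 0-3 is formatted as "interleave=static:0-3";
 * the default policy prints just "default".
 */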
3060 
3061 bool numa_demotion_enabled = false;
3062 
3063 #ifdef CONFIG_SYSFS
3064 static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
3065 					  struct kobj_attribute *attr, char *buf)
3066 {
3067 	return sysfs_emit(buf, "%s\n",
3068 			  numa_demotion_enabled ? "true" : "false");
3069 }
3070 
3071 static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
3072 					   struct kobj_attribute *attr,
3073 					   const char *buf, size_t count)
3074 {
3075 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
3076 		numa_demotion_enabled = true;
3077 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
3078 		numa_demotion_enabled = false;
3079 	else
3080 		return -EINVAL;
3081 
3082 	return count;
3083 }
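
/*
 * With the attribute group registered below, demotion can be toggled from
 * userspace, e.g.:
 *
 *	echo true > /sys/kernel/mm/numa/demotion_enabled
 *
 * (the "numa" kobject is created under mm_kobj, i.e. /sys/kernel/mm, in
 * numa_init_sysfs()).
 */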
3084 
3085 static struct kobj_attribute numa_demotion_enabled_attr =
3086 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3087 	       numa_demotion_enabled_store);
3088 
3089 static struct attribute *numa_attrs[] = {
3090 	&numa_demotion_enabled_attr.attr,
3091 	NULL,
3092 };
3093 
3094 static const struct attribute_group numa_attr_group = {
3095 	.attrs = numa_attrs,
3096 };
3097 
3098 static int __init numa_init_sysfs(void)
3099 {
3100 	int err;
3101 	struct kobject *numa_kobj;
3102 
3103 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
3104 	if (!numa_kobj) {
3105 		pr_err("failed to create numa kobject\n");
3106 		return -ENOMEM;
3107 	}
3108 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
3109 	if (err) {
3110 		pr_err("failed to register numa group\n");
3111 		goto delete_obj;
3112 	}
3113 	return 0;
3114 
3115 delete_obj:
3116 	kobject_put(numa_kobj);
3117 	return err;
3118 }
3119 subsys_initcall(numa_init_sysfs);
3120 #endif
3121