xref: /openbmc/linux/mm/mempolicy.c (revision dea54fba)
1 /*
2  * Simple NUMA memory policy for the Linux kernel.
3  *
4  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6  * Subject to the GNU Public License, version 2.
7  *
8  * NUMA policy allows the user to give hints about which node(s) memory should
9  * be allocated on.
10  *
11  * Supports four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a per-process
20  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind truly restricted
26  *                the allocation to the given memory nodes instead.
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the node of the local CPU. This is normally identical
31  *                to default, but useful to set in a VMA when you have a
32  *                non-default process policy.
33  *
34  * default        Allocate on the local node first, or for a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA-aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non-interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use the default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has the memory mapped.
54  */
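/*
 * Illustrative userspace sketch (not part of this file): the policies above
 * are normally requested through the set_mempolicy()/mbind() wrappers from
 * libnuma's <numaif.h>.  Node numbers below are hypothetical.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	// Interleave all future allocations of this task over nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	// Bind one mapping to node 0 only, no fallback.
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node0 = 1UL << 0;
 *	mbind(p, 1 << 20, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 */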
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always graceful about that.
66 */
67 
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69 
70 #include <linux/mempolicy.h>
71 #include <linux/mm.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/sched/mm.h>
77 #include <linux/sched/numa_balancing.h>
78 #include <linux/sched/task.h>
79 #include <linux/nodemask.h>
80 #include <linux/cpuset.h>
81 #include <linux/slab.h>
82 #include <linux/string.h>
83 #include <linux/export.h>
84 #include <linux/nsproxy.h>
85 #include <linux/interrupt.h>
86 #include <linux/init.h>
87 #include <linux/compat.h>
88 #include <linux/swap.h>
89 #include <linux/seq_file.h>
90 #include <linux/proc_fs.h>
91 #include <linux/migrate.h>
92 #include <linux/ksm.h>
93 #include <linux/rmap.h>
94 #include <linux/security.h>
95 #include <linux/syscalls.h>
96 #include <linux/ctype.h>
97 #include <linux/mm_inline.h>
98 #include <linux/mmu_notifier.h>
99 #include <linux/printk.h>
100 
101 #include <asm/tlbflush.h>
102 #include <linux/uaccess.h>
103 
104 #include "internal.h"
105 
106 /* Internal flags */
107 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
108 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
109 
110 static struct kmem_cache *policy_cache;
111 static struct kmem_cache *sn_cache;
112 
113 /* Highest zone. A specific allocation for a zone below that is not
114    policied. */
115 enum zone_type policy_zone = 0;
116 
117 /*
118  * run-time system-wide default policy => local allocation
119  */
120 static struct mempolicy default_policy = {
121 	.refcnt = ATOMIC_INIT(1), /* never free it */
122 	.mode = MPOL_PREFERRED,
123 	.flags = MPOL_F_LOCAL,
124 };
125 
126 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
127 
128 struct mempolicy *get_task_policy(struct task_struct *p)
129 {
130 	struct mempolicy *pol = p->mempolicy;
131 	int node;
132 
133 	if (pol)
134 		return pol;
135 
136 	node = numa_node_id();
137 	if (node != NUMA_NO_NODE) {
138 		pol = &preferred_node_policy[node];
139 		/* preferred_node_policy is not initialised early in boot */
140 		if (pol->mode)
141 			return pol;
142 	}
143 
144 	return &default_policy;
145 }
146 
147 static const struct mempolicy_operations {
148 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
149 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
150 } mpol_ops[MPOL_MAX];
151 
152 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
153 {
154 	return pol->flags & MPOL_MODE_FLAGS;
155 }
156 
157 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
158 				   const nodemask_t *rel)
159 {
160 	nodemask_t tmp;
161 	nodes_fold(tmp, *orig, nodes_weight(*rel));
162 	nodes_onto(*ret, tmp, *rel);
163 }
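/*
 * Worked example (illustrative, not from the original source): with a
 * user-supplied relative mask of {0,2} and an allowed mask *rel of {4,5,6},
 * nodes_fold() first wraps the user mask modulo nodes_weight(*rel) == 3,
 * giving {0,2}; nodes_onto() then maps those relative positions onto the
 * set bits of *rel, so position 0 -> node 4 and position 2 -> node 6,
 * and *ret becomes {4,6}.
 */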
164 
165 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
166 {
167 	if (nodes_empty(*nodes))
168 		return -EINVAL;
169 	pol->v.nodes = *nodes;
170 	return 0;
171 }
172 
173 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
174 {
175 	if (!nodes)
176 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
177 	else if (nodes_empty(*nodes))
178 		return -EINVAL;			/*  no allowed nodes */
179 	else
180 		pol->v.preferred_node = first_node(*nodes);
181 	return 0;
182 }
183 
184 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
185 {
186 	if (nodes_empty(*nodes))
187 		return -EINVAL;
188 	pol->v.nodes = *nodes;
189 	return 0;
190 }
191 
192 /*
193  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
194  * any, for the new policy.  mpol_new() has already validated the nodes
195  * parameter with respect to the policy mode and flags.  But, we need to
196  * handle an empty nodemask with MPOL_PREFERRED here.
197  *
198  * Must be called holding task's alloc_lock to protect task's mems_allowed
199  * and mempolicy.  May also be called holding the mmap_semaphore for write.
200  */
201 static int mpol_set_nodemask(struct mempolicy *pol,
202 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
203 {
204 	int ret;
205 
206 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
207 	if (pol == NULL)
208 		return 0;
209 	/* Check N_MEMORY */
210 	nodes_and(nsc->mask1,
211 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
212 
213 	VM_BUG_ON(!nodes);
214 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
215 		nodes = NULL;	/* explicit local allocation */
216 	else {
217 		if (pol->flags & MPOL_F_RELATIVE_NODES)
218 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
219 		else
220 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
221 
222 		if (mpol_store_user_nodemask(pol))
223 			pol->w.user_nodemask = *nodes;
224 		else
225 			pol->w.cpuset_mems_allowed =
226 						cpuset_current_mems_allowed;
227 	}
228 
229 	if (nodes)
230 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
231 	else
232 		ret = mpol_ops[pol->mode].create(pol, NULL);
233 	return ret;
234 }
235 
236 /*
237  * This function just creates a new policy, does some checks and simple
238  * initialization. You must invoke mpol_set_nodemask() to set nodes.
239  */
240 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
241 				  nodemask_t *nodes)
242 {
243 	struct mempolicy *policy;
244 
245 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
246 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
247 
248 	if (mode == MPOL_DEFAULT) {
249 		if (nodes && !nodes_empty(*nodes))
250 			return ERR_PTR(-EINVAL);
251 		return NULL;
252 	}
253 	VM_BUG_ON(!nodes);
254 
255 	/*
256 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
257 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
258 	 * All other modes require a valid pointer to a non-empty nodemask.
259 	 */
260 	if (mode == MPOL_PREFERRED) {
261 		if (nodes_empty(*nodes)) {
262 			if (((flags & MPOL_F_STATIC_NODES) ||
263 			     (flags & MPOL_F_RELATIVE_NODES)))
264 				return ERR_PTR(-EINVAL);
265 		}
266 	} else if (mode == MPOL_LOCAL) {
267 		if (!nodes_empty(*nodes) ||
268 		    (flags & MPOL_F_STATIC_NODES) ||
269 		    (flags & MPOL_F_RELATIVE_NODES))
270 			return ERR_PTR(-EINVAL);
271 		mode = MPOL_PREFERRED;
272 	} else if (nodes_empty(*nodes))
273 		return ERR_PTR(-EINVAL);
274 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 	if (!policy)
276 		return ERR_PTR(-ENOMEM);
277 	atomic_set(&policy->refcnt, 1);
278 	policy->mode = mode;
279 	policy->flags = flags;
280 
281 	return policy;
282 }
283 
284 /* Slow path of a mpol destructor. */
285 void __mpol_put(struct mempolicy *p)
286 {
287 	if (!atomic_dec_and_test(&p->refcnt))
288 		return;
289 	kmem_cache_free(policy_cache, p);
290 }
291 
292 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
293 {
294 }
295 
296 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
297 {
298 	nodemask_t tmp;
299 
300 	if (pol->flags & MPOL_F_STATIC_NODES)
301 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
302 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
303 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
304 	else {
305 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
306 								*nodes);
307 		pol->w.cpuset_mems_allowed = tmp;
308 	}
309 
310 	if (nodes_empty(tmp))
311 		tmp = *nodes;
312 
313 	pol->v.nodes = tmp;
314 }
315 
316 static void mpol_rebind_preferred(struct mempolicy *pol,
317 						const nodemask_t *nodes)
318 {
319 	nodemask_t tmp;
320 
321 	if (pol->flags & MPOL_F_STATIC_NODES) {
322 		int node = first_node(pol->w.user_nodemask);
323 
324 		if (node_isset(node, *nodes)) {
325 			pol->v.preferred_node = node;
326 			pol->flags &= ~MPOL_F_LOCAL;
327 		} else
328 			pol->flags |= MPOL_F_LOCAL;
329 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
330 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
331 		pol->v.preferred_node = first_node(tmp);
332 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
333 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
334 						   pol->w.cpuset_mems_allowed,
335 						   *nodes);
336 		pol->w.cpuset_mems_allowed = *nodes;
337 	}
338 }
339 
340 /*
341  * mpol_rebind_policy - Migrate a policy to a different set of nodes
342  *
343  * Per-vma policies are protected by mmap_sem. Allocations using per-task
344  * policies are protected by task->mems_allowed_seq to prevent a premature
345  * OOM/allocation failure due to parallel nodemask modification.
346  */
347 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
348 {
349 	if (!pol)
350 		return;
351 	if (!mpol_store_user_nodemask(pol) &&
352 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
353 		return;
354 
355 	mpol_ops[pol->mode].rebind(pol, newmask);
356 }
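/*
 * Worked example (illustrative): an MPOL_INTERLEAVE policy built over
 * nodes {0,1} while the cpuset allowed {0,1}, with the cpuset then moved
 * to {2,3}:
 *
 *   - no mode flags: the nodes are remapped 0->2, 1->3, so the policy now
 *     interleaves over {2,3};
 *   - MPOL_F_STATIC_NODES: the user mask {0,1} is intersected with {2,3},
 *     which is empty, so the policy falls back to the new mems {2,3};
 *   - MPOL_F_RELATIVE_NODES: the user mask is folded onto {2,3}, again
 *     giving {2,3} here, but preserving relative positions for wider masks.
 */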
357 
358 /*
359  * Wrapper for mpol_rebind_policy() that just requires task
360  * pointer, and updates task mempolicy.
361  *
362  * Called with task's alloc_lock held.
363  */
364 
365 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
366 {
367 	mpol_rebind_policy(tsk->mempolicy, new);
368 }
369 
370 /*
371  * Rebind each vma in mm to new nodemask.
372  *
373  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
374  */
375 
376 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
377 {
378 	struct vm_area_struct *vma;
379 
380 	down_write(&mm->mmap_sem);
381 	for (vma = mm->mmap; vma; vma = vma->vm_next)
382 		mpol_rebind_policy(vma->vm_policy, new);
383 	up_write(&mm->mmap_sem);
384 }
385 
386 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
387 	[MPOL_DEFAULT] = {
388 		.rebind = mpol_rebind_default,
389 	},
390 	[MPOL_INTERLEAVE] = {
391 		.create = mpol_new_interleave,
392 		.rebind = mpol_rebind_nodemask,
393 	},
394 	[MPOL_PREFERRED] = {
395 		.create = mpol_new_preferred,
396 		.rebind = mpol_rebind_preferred,
397 	},
398 	[MPOL_BIND] = {
399 		.create = mpol_new_bind,
400 		.rebind = mpol_rebind_nodemask,
401 	},
402 };
403 
404 static void migrate_page_add(struct page *page, struct list_head *pagelist,
405 				unsigned long flags);
406 
407 struct queue_pages {
408 	struct list_head *pagelist;
409 	unsigned long flags;
410 	nodemask_t *nmask;
411 	struct vm_area_struct *prev;
412 };
413 
414 /*
415  * Scan through pages, checking whether they satisfy certain conditions,
416  * and move them to the pagelist if they do.
417  */
418 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
419 			unsigned long end, struct mm_walk *walk)
420 {
421 	struct vm_area_struct *vma = walk->vma;
422 	struct page *page;
423 	struct queue_pages *qp = walk->private;
424 	unsigned long flags = qp->flags;
425 	int nid, ret;
426 	pte_t *pte;
427 	spinlock_t *ptl;
428 
429 	if (pmd_trans_huge(*pmd)) {
430 		ptl = pmd_lock(walk->mm, pmd);
431 		if (pmd_trans_huge(*pmd)) {
432 			page = pmd_page(*pmd);
433 			if (is_huge_zero_page(page)) {
434 				spin_unlock(ptl);
435 				__split_huge_pmd(vma, pmd, addr, false, NULL);
436 			} else {
437 				get_page(page);
438 				spin_unlock(ptl);
439 				lock_page(page);
440 				ret = split_huge_page(page);
441 				unlock_page(page);
442 				put_page(page);
443 				if (ret)
444 					return 0;
445 			}
446 		} else {
447 			spin_unlock(ptl);
448 		}
449 	}
450 
451 	if (pmd_trans_unstable(pmd))
452 		return 0;
453 retry:
454 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
455 	for (; addr != end; pte++, addr += PAGE_SIZE) {
456 		if (!pte_present(*pte))
457 			continue;
458 		page = vm_normal_page(vma, addr, *pte);
459 		if (!page)
460 			continue;
461 		/*
462 		 * vm_normal_page() filters out zero pages, but there might
463 		 * still be PageReserved pages to skip, perhaps in a VDSO.
464 		 */
465 		if (PageReserved(page))
466 			continue;
467 		nid = page_to_nid(page);
468 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
469 			continue;
470 		if (PageTransCompound(page)) {
471 			get_page(page);
472 			pte_unmap_unlock(pte, ptl);
473 			lock_page(page);
474 			ret = split_huge_page(page);
475 			unlock_page(page);
476 			put_page(page);
477 			/* Failed to split -- skip. */
478 			if (ret) {
479 				pte = pte_offset_map_lock(walk->mm, pmd,
480 						addr, &ptl);
481 				continue;
482 			}
483 			goto retry;
484 		}
485 
486 		migrate_page_add(page, qp->pagelist, flags);
487 	}
488 	pte_unmap_unlock(pte - 1, ptl);
489 	cond_resched();
490 	return 0;
491 }
492 
493 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
494 			       unsigned long addr, unsigned long end,
495 			       struct mm_walk *walk)
496 {
497 #ifdef CONFIG_HUGETLB_PAGE
498 	struct queue_pages *qp = walk->private;
499 	unsigned long flags = qp->flags;
500 	int nid;
501 	struct page *page;
502 	spinlock_t *ptl;
503 	pte_t entry;
504 
505 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
506 	entry = huge_ptep_get(pte);
507 	if (!pte_present(entry))
508 		goto unlock;
509 	page = pte_page(entry);
510 	nid = page_to_nid(page);
511 	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
512 		goto unlock;
513 	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
514 	if (flags & (MPOL_MF_MOVE_ALL) ||
515 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
516 		isolate_huge_page(page, qp->pagelist);
517 unlock:
518 	spin_unlock(ptl);
519 #else
520 	BUG();
521 #endif
522 	return 0;
523 }
524 
525 #ifdef CONFIG_NUMA_BALANCING
526 /*
527  * This is used to mark a range of virtual addresses as inaccessible.
528  * These are later cleared by a NUMA hinting fault. Depending on these
529  * faults, pages may be migrated for better NUMA placement.
530  *
531  * This is assuming that NUMA faults are handled using PROT_NONE. If
532  * an architecture makes a different choice, it will need further
533  * changes to the core.
534  */
535 unsigned long change_prot_numa(struct vm_area_struct *vma,
536 			unsigned long addr, unsigned long end)
537 {
538 	int nr_updated;
539 
540 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
541 	if (nr_updated)
542 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
543 
544 	return nr_updated;
545 }
546 #else
547 static unsigned long change_prot_numa(struct vm_area_struct *vma,
548 			unsigned long addr, unsigned long end)
549 {
550 	return 0;
551 }
552 #endif /* CONFIG_NUMA_BALANCING */
553 
554 static int queue_pages_test_walk(unsigned long start, unsigned long end,
555 				struct mm_walk *walk)
556 {
557 	struct vm_area_struct *vma = walk->vma;
558 	struct queue_pages *qp = walk->private;
559 	unsigned long endvma = vma->vm_end;
560 	unsigned long flags = qp->flags;
561 
562 	if (!vma_migratable(vma))
563 		return 1;
564 
565 	if (endvma > end)
566 		endvma = end;
567 	if (vma->vm_start > start)
568 		start = vma->vm_start;
569 
570 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
571 		if (!vma->vm_next && vma->vm_end < end)
572 			return -EFAULT;
573 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
574 			return -EFAULT;
575 	}
576 
577 	qp->prev = vma;
578 
579 	if (flags & MPOL_MF_LAZY) {
580 		/* Similar to task_numa_work, skip inaccessible VMAs */
581 		if (!is_vm_hugetlb_page(vma) &&
582 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
583 			!(vma->vm_flags & VM_MIXEDMAP))
584 			change_prot_numa(vma, start, endvma);
585 		return 1;
586 	}
587 
588 	/* queue pages from current vma */
589 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
590 		return 0;
591 	return 1;
592 }
593 
594 /*
595  * Walk through page tables and collect pages to be migrated.
596  *
597  * If pages found in a given range are on a set of nodes (determined by
598  * @nodes and @flags), they are isolated and queued to the pagelist, which
599  * is passed via @private.
600  */
601 static int
602 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
603 		nodemask_t *nodes, unsigned long flags,
604 		struct list_head *pagelist)
605 {
606 	struct queue_pages qp = {
607 		.pagelist = pagelist,
608 		.flags = flags,
609 		.nmask = nodes,
610 		.prev = NULL,
611 	};
612 	struct mm_walk queue_pages_walk = {
613 		.hugetlb_entry = queue_pages_hugetlb,
614 		.pmd_entry = queue_pages_pte_range,
615 		.test_walk = queue_pages_test_walk,
616 		.mm = mm,
617 		.private = &qp,
618 	};
619 
620 	return walk_page_range(start, end, &queue_pages_walk);
621 }
622 
623 /*
624  * Apply policy to a single VMA.
625  * This must be called with the mmap_sem held for writing.
626  */
627 static int vma_replace_policy(struct vm_area_struct *vma,
628 						struct mempolicy *pol)
629 {
630 	int err;
631 	struct mempolicy *old;
632 	struct mempolicy *new;
633 
634 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
635 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
636 		 vma->vm_ops, vma->vm_file,
637 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
638 
639 	new = mpol_dup(pol);
640 	if (IS_ERR(new))
641 		return PTR_ERR(new);
642 
643 	if (vma->vm_ops && vma->vm_ops->set_policy) {
644 		err = vma->vm_ops->set_policy(vma, new);
645 		if (err)
646 			goto err_out;
647 	}
648 
649 	old = vma->vm_policy;
650 	vma->vm_policy = new; /* protected by mmap_sem */
651 	mpol_put(old);
652 
653 	return 0;
654  err_out:
655 	mpol_put(new);
656 	return err;
657 }
658 
659 /* Step 2: apply policy to a range and do splits. */
660 static int mbind_range(struct mm_struct *mm, unsigned long start,
661 		       unsigned long end, struct mempolicy *new_pol)
662 {
663 	struct vm_area_struct *next;
664 	struct vm_area_struct *prev;
665 	struct vm_area_struct *vma;
666 	int err = 0;
667 	pgoff_t pgoff;
668 	unsigned long vmstart;
669 	unsigned long vmend;
670 
671 	vma = find_vma(mm, start);
672 	if (!vma || vma->vm_start > start)
673 		return -EFAULT;
674 
675 	prev = vma->vm_prev;
676 	if (start > vma->vm_start)
677 		prev = vma;
678 
679 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
680 		next = vma->vm_next;
681 		vmstart = max(start, vma->vm_start);
682 		vmend   = min(end, vma->vm_end);
683 
684 		if (mpol_equal(vma_policy(vma), new_pol))
685 			continue;
686 
687 		pgoff = vma->vm_pgoff +
688 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
689 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
690 				 vma->anon_vma, vma->vm_file, pgoff,
691 				 new_pol, vma->vm_userfaultfd_ctx);
692 		if (prev) {
693 			vma = prev;
694 			next = vma->vm_next;
695 			if (mpol_equal(vma_policy(vma), new_pol))
696 				continue;
697 			/* vma_merge() joined vma && vma->next, case 8 */
698 			goto replace;
699 		}
700 		if (vma->vm_start != vmstart) {
701 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
702 			if (err)
703 				goto out;
704 		}
705 		if (vma->vm_end != vmend) {
706 			err = split_vma(vma->vm_mm, vma, vmend, 0);
707 			if (err)
708 				goto out;
709 		}
710  replace:
711 		err = vma_replace_policy(vma, new_pol);
712 		if (err)
713 			goto out;
714 	}
715 
716  out:
717 	return err;
718 }
719 
720 /* Set the process memory policy */
721 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
722 			     nodemask_t *nodes)
723 {
724 	struct mempolicy *new, *old;
725 	NODEMASK_SCRATCH(scratch);
726 	int ret;
727 
728 	if (!scratch)
729 		return -ENOMEM;
730 
731 	new = mpol_new(mode, flags, nodes);
732 	if (IS_ERR(new)) {
733 		ret = PTR_ERR(new);
734 		goto out;
735 	}
736 
737 	task_lock(current);
738 	ret = mpol_set_nodemask(new, nodes, scratch);
739 	if (ret) {
740 		task_unlock(current);
741 		mpol_put(new);
742 		goto out;
743 	}
744 	old = current->mempolicy;
745 	current->mempolicy = new;
746 	if (new && new->mode == MPOL_INTERLEAVE)
747 		current->il_prev = MAX_NUMNODES-1;
748 	task_unlock(current);
749 	mpol_put(old);
750 	ret = 0;
751 out:
752 	NODEMASK_SCRATCH_FREE(scratch);
753 	return ret;
754 }
755 
756 /*
757  * Return nodemask for policy for get_mempolicy() query
758  *
759  * Called with task's alloc_lock held
760  */
761 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
762 {
763 	nodes_clear(*nodes);
764 	if (p == &default_policy)
765 		return;
766 
767 	switch (p->mode) {
768 	case MPOL_BIND:
769 		/* Fall through */
770 	case MPOL_INTERLEAVE:
771 		*nodes = p->v.nodes;
772 		break;
773 	case MPOL_PREFERRED:
774 		if (!(p->flags & MPOL_F_LOCAL))
775 			node_set(p->v.preferred_node, *nodes);
776 		/* else return empty node mask for local allocation */
777 		break;
778 	default:
779 		BUG();
780 	}
781 }
782 
783 static int lookup_node(unsigned long addr)
784 {
785 	struct page *p;
786 	int err;
787 
788 	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
789 	if (err >= 0) {
790 		err = page_to_nid(p);
791 		put_page(p);
792 	}
793 	return err;
794 }
795 
796 /* Retrieve NUMA policy */
797 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
798 			     unsigned long addr, unsigned long flags)
799 {
800 	int err;
801 	struct mm_struct *mm = current->mm;
802 	struct vm_area_struct *vma = NULL;
803 	struct mempolicy *pol = current->mempolicy;
804 
805 	if (flags &
806 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
807 		return -EINVAL;
808 
809 	if (flags & MPOL_F_MEMS_ALLOWED) {
810 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
811 			return -EINVAL;
812 		*policy = 0;	/* just so it's initialized */
813 		task_lock(current);
814 		*nmask  = cpuset_current_mems_allowed;
815 		task_unlock(current);
816 		return 0;
817 	}
818 
819 	if (flags & MPOL_F_ADDR) {
820 		/*
821 		 * Do NOT fall back to task policy if the
822 		 * vma/shared policy at addr is NULL.  We
823 		 * want to return MPOL_DEFAULT in this case.
824 		 */
825 		down_read(&mm->mmap_sem);
826 		vma = find_vma_intersection(mm, addr, addr+1);
827 		if (!vma) {
828 			up_read(&mm->mmap_sem);
829 			return -EFAULT;
830 		}
831 		if (vma->vm_ops && vma->vm_ops->get_policy)
832 			pol = vma->vm_ops->get_policy(vma, addr);
833 		else
834 			pol = vma->vm_policy;
835 	} else if (addr)
836 		return -EINVAL;
837 
838 	if (!pol)
839 		pol = &default_policy;	/* indicates default behavior */
840 
841 	if (flags & MPOL_F_NODE) {
842 		if (flags & MPOL_F_ADDR) {
843 			err = lookup_node(addr);
844 			if (err < 0)
845 				goto out;
846 			*policy = err;
847 		} else if (pol == current->mempolicy &&
848 				pol->mode == MPOL_INTERLEAVE) {
849 			*policy = next_node_in(current->il_prev, pol->v.nodes);
850 		} else {
851 			err = -EINVAL;
852 			goto out;
853 		}
854 	} else {
855 		*policy = pol == &default_policy ? MPOL_DEFAULT :
856 						pol->mode;
857 		/*
858 		 * Internal mempolicy flags must be masked off before exposing
859 		 * the policy to userspace.
860 		 */
861 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
862 	}
863 
864 	err = 0;
865 	if (nmask) {
866 		if (mpol_store_user_nodemask(pol)) {
867 			*nmask = pol->w.user_nodemask;
868 		} else {
869 			task_lock(current);
870 			get_policy_nodemask(pol, nmask);
871 			task_unlock(current);
872 		}
873 	}
874 
875  out:
876 	mpol_cond_put(pol);
877 	if (vma)
878 		up_read(&current->mm->mmap_sem);
879 	return err;
880 }
881 
882 #ifdef CONFIG_MIGRATION
883 /*
884  * page migration
885  */
886 static void migrate_page_add(struct page *page, struct list_head *pagelist,
887 				unsigned long flags)
888 {
889 	/*
890 	 * Avoid migrating a page that is shared with others.
891 	 */
892 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
893 		if (!isolate_lru_page(page)) {
894 			list_add_tail(&page->lru, pagelist);
895 			inc_node_page_state(page, NR_ISOLATED_ANON +
896 					    page_is_file_cache(page));
897 		}
898 	}
899 }
900 
901 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
902 {
903 	if (PageHuge(page))
904 		return alloc_huge_page_node(page_hstate(compound_head(page)),
905 					node);
906 	else
907 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
908 						    __GFP_THISNODE, 0);
909 }
910 
911 /*
912  * Migrate pages from one node to a target node.
913  * Returns error or the number of pages not migrated.
914  */
915 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
916 			   int flags)
917 {
918 	nodemask_t nmask;
919 	LIST_HEAD(pagelist);
920 	int err = 0;
921 
922 	nodes_clear(nmask);
923 	node_set(source, nmask);
924 
925 	/*
926 	 * This does not "check" the range but isolates all pages that
927 	 * need migration.  Between passing in the full user address
928 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
929 	 */
930 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
931 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
932 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
933 
934 	if (!list_empty(&pagelist)) {
935 		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
936 					MIGRATE_SYNC, MR_SYSCALL);
937 		if (err)
938 			putback_movable_pages(&pagelist);
939 	}
940 
941 	return err;
942 }
943 
944 /*
945  * Move pages between the two nodesets so as to preserve the physical
946  * layout as much as possible.
947  *
948  * Returns the number of pages that could not be moved.
949  */
950 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
951 		     const nodemask_t *to, int flags)
952 {
953 	int busy = 0;
954 	int err;
955 	nodemask_t tmp;
956 
957 	err = migrate_prep();
958 	if (err)
959 		return err;
960 
961 	down_read(&mm->mmap_sem);
962 
963 	/*
964 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
965 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
966 	 * bit in 'tmp', and return that <source, dest> pair for migration.
967 	 * The pair of nodemasks 'to' and 'from' define the map.
968 	 *
969 	 * If no pair of bits is found that way, fall back to picking some
970 	 * pair of 'source' and 'dest' bits that are not the same.  If the
971 	 * 'source' and 'dest' bits are the same, this represents a node
972 	 * that will be migrating to itself, so no pages need move.
973 	 *
974 	 * If no bits are left in 'tmp', or if all remaining bits left
975 	 * in 'tmp' correspond to the same bit in 'to', return false
976 	 * (nothing left to migrate).
977 	 *
978 	 * This lets us pick a pair of nodes to migrate between, such that
979 	 * if possible the dest node is not already occupied by some other
980 	 * source node, minimizing the risk of overloading the memory on a
981 	 * node that would happen if we migrated incoming memory to a node
982 	 * before migrating outgoing memory sourced from that same node.
983 	 *
984 	 * A single scan of tmp is sufficient.  As we go, we remember the
985 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
986 	 * that not only moved, but what's better, moved to an empty slot
987 	 * (d is not set in tmp), then we break out with that pair.
988 	 * Otherwise when we finish scanning tmp, we at least have the
989 	 * most recent <s, d> pair that moved.  If we get all the way through
990 	 * the scan of tmp without finding any node that moved, much less
991 	 * moved to an empty node, then there is nothing left worth migrating.
992 	 */
993 
994 	tmp = *from;
995 	while (!nodes_empty(tmp)) {
996 		int s,d;
997 		int source = NUMA_NO_NODE;
998 		int dest = 0;
999 
1000 		for_each_node_mask(s, tmp) {
1001 
1002 			/*
1003 			 * do_migrate_pages() tries to maintain the relative
1004 			 * node relationship of the pages established between
1005 			 * threads and memory areas.
1006 			 *
1007 			 * However if the number of source nodes is not equal to
1008 			 * the number of destination nodes we can not preserve
1009 			 * this node relative relationship.  In that case, skip
1010 			 * copying memory from a node that is in the destination
1011 			 * mask.
1012 			 *
1013 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1014 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1015 			 */
1016 
1017 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1018 						(node_isset(s, *to)))
1019 				continue;
1020 
1021 			d = node_remap(s, *from, *to);
1022 			if (s == d)
1023 				continue;
1024 
1025 			source = s;	/* Node moved. Memorize */
1026 			dest = d;
1027 
1028 			/* dest not in remaining from nodes? */
1029 			if (!node_isset(dest, tmp))
1030 				break;
1031 		}
1032 		if (source == NUMA_NO_NODE)
1033 			break;
1034 
1035 		node_clear(source, tmp);
1036 		err = migrate_to_node(mm, source, dest, flags);
1037 		if (err > 0)
1038 			busy += err;
1039 		if (err < 0)
1040 			break;
1041 	}
1042 	up_read(&mm->mmap_sem);
1043 	if (err < 0)
1044 		return err;
1045 	return busy;
1046 
1047 }
1048 
1049 /*
1050  * Allocate a new page for page migration based on vma policy.
1051  * Start by assuming the page is mapped by the same vma that contains @start.
1052  * Search forward from there, if not.  N.B., this assumes that the
1053  * list of pages handed to migrate_pages()--which is how we get here--
1054  * is in virtual address order.
1055  */
1056 static struct page *new_page(struct page *page, unsigned long start, int **x)
1057 {
1058 	struct vm_area_struct *vma;
1059 	unsigned long uninitialized_var(address);
1060 
1061 	vma = find_vma(current->mm, start);
1062 	while (vma) {
1063 		address = page_address_in_vma(page, vma);
1064 		if (address != -EFAULT)
1065 			break;
1066 		vma = vma->vm_next;
1067 	}
1068 
1069 	if (PageHuge(page)) {
1070 		BUG_ON(!vma);
1071 		return alloc_huge_page_noerr(vma, address, 1);
1072 	}
1073 	/*
1074 	 * if !vma, alloc_page_vma() will use task or system default policy
1075 	 */
1076 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1077 			vma, address);
1078 }
1079 #else
1080 
1081 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1082 				unsigned long flags)
1083 {
1084 }
1085 
1086 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1087 		     const nodemask_t *to, int flags)
1088 {
1089 	return -ENOSYS;
1090 }
1091 
1092 static struct page *new_page(struct page *page, unsigned long start, int **x)
1093 {
1094 	return NULL;
1095 }
1096 #endif
1097 
1098 static long do_mbind(unsigned long start, unsigned long len,
1099 		     unsigned short mode, unsigned short mode_flags,
1100 		     nodemask_t *nmask, unsigned long flags)
1101 {
1102 	struct mm_struct *mm = current->mm;
1103 	struct mempolicy *new;
1104 	unsigned long end;
1105 	int err;
1106 	LIST_HEAD(pagelist);
1107 
1108 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1109 		return -EINVAL;
1110 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1111 		return -EPERM;
1112 
1113 	if (start & ~PAGE_MASK)
1114 		return -EINVAL;
1115 
1116 	if (mode == MPOL_DEFAULT)
1117 		flags &= ~MPOL_MF_STRICT;
1118 
1119 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1120 	end = start + len;
1121 
1122 	if (end < start)
1123 		return -EINVAL;
1124 	if (end == start)
1125 		return 0;
1126 
1127 	new = mpol_new(mode, mode_flags, nmask);
1128 	if (IS_ERR(new))
1129 		return PTR_ERR(new);
1130 
1131 	if (flags & MPOL_MF_LAZY)
1132 		new->flags |= MPOL_F_MOF;
1133 
1134 	/*
1135 	 * If we are using the default policy then operating
1136 	 * on discontiguous address ranges is okay after all.
1137 	 */
1138 	if (!new)
1139 		flags |= MPOL_MF_DISCONTIG_OK;
1140 
1141 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1142 		 start, start + len, mode, mode_flags,
1143 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1144 
1145 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1146 
1147 		err = migrate_prep();
1148 		if (err)
1149 			goto mpol_out;
1150 	}
1151 	{
1152 		NODEMASK_SCRATCH(scratch);
1153 		if (scratch) {
1154 			down_write(&mm->mmap_sem);
1155 			task_lock(current);
1156 			err = mpol_set_nodemask(new, nmask, scratch);
1157 			task_unlock(current);
1158 			if (err)
1159 				up_write(&mm->mmap_sem);
1160 		} else
1161 			err = -ENOMEM;
1162 		NODEMASK_SCRATCH_FREE(scratch);
1163 	}
1164 	if (err)
1165 		goto mpol_out;
1166 
1167 	err = queue_pages_range(mm, start, end, nmask,
1168 			  flags | MPOL_MF_INVERT, &pagelist);
1169 	if (!err)
1170 		err = mbind_range(mm, start, end, new);
1171 
1172 	if (!err) {
1173 		int nr_failed = 0;
1174 
1175 		if (!list_empty(&pagelist)) {
1176 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1177 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1178 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1179 			if (nr_failed)
1180 				putback_movable_pages(&pagelist);
1181 		}
1182 
1183 		if (nr_failed && (flags & MPOL_MF_STRICT))
1184 			err = -EIO;
1185 	} else
1186 		putback_movable_pages(&pagelist);
1187 
1188 	up_write(&mm->mmap_sem);
1189  mpol_out:
1190 	mpol_put(new);
1191 	return err;
1192 }
1193 
1194 /*
1195  * User space interface with variable sized bitmaps for nodelists.
1196  */
1197 
1198 /* Copy a node mask from user space. */
1199 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1200 		     unsigned long maxnode)
1201 {
1202 	unsigned long k;
1203 	unsigned long nlongs;
1204 	unsigned long endmask;
1205 
1206 	--maxnode;
1207 	nodes_clear(*nodes);
1208 	if (maxnode == 0 || !nmask)
1209 		return 0;
1210 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1211 		return -EINVAL;
1212 
1213 	nlongs = BITS_TO_LONGS(maxnode);
1214 	if ((maxnode % BITS_PER_LONG) == 0)
1215 		endmask = ~0UL;
1216 	else
1217 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1218 
1219 	/* When the user specified more nodes than supported just check
1220 	   if the unsupported part is all zero. */
1221 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1222 		if (nlongs > PAGE_SIZE/sizeof(long))
1223 			return -EINVAL;
1224 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1225 			unsigned long t;
1226 			if (get_user(t, nmask + k))
1227 				return -EFAULT;
1228 			if (k == nlongs - 1) {
1229 				if (t & endmask)
1230 					return -EINVAL;
1231 			} else if (t)
1232 				return -EINVAL;
1233 		}
1234 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1235 		endmask = ~0UL;
1236 	}
1237 
1238 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1239 		return -EFAULT;
1240 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1241 	return 0;
1242 }
1243 
1244 /* Copy a kernel node mask to user space */
1245 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1246 			      nodemask_t *nodes)
1247 {
1248 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1249 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1250 
1251 	if (copy > nbytes) {
1252 		if (copy > PAGE_SIZE)
1253 			return -EINVAL;
1254 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1255 			return -EFAULT;
1256 		copy = nbytes;
1257 	}
1258 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1259 }
1260 
1261 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1262 		unsigned long, mode, const unsigned long __user *, nmask,
1263 		unsigned long, maxnode, unsigned, flags)
1264 {
1265 	nodemask_t nodes;
1266 	int err;
1267 	unsigned short mode_flags;
1268 
1269 	mode_flags = mode & MPOL_MODE_FLAGS;
1270 	mode &= ~MPOL_MODE_FLAGS;
1271 	if (mode >= MPOL_MAX)
1272 		return -EINVAL;
1273 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1274 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1275 		return -EINVAL;
1276 	err = get_nodes(&nodes, nmask, maxnode);
1277 	if (err)
1278 		return err;
1279 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1280 }
1281 
1282 /* Set the process memory policy */
1283 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1284 		unsigned long, maxnode)
1285 {
1286 	int err;
1287 	nodemask_t nodes;
1288 	unsigned short flags;
1289 
1290 	flags = mode & MPOL_MODE_FLAGS;
1291 	mode &= ~MPOL_MODE_FLAGS;
1292 	if ((unsigned int)mode >= MPOL_MAX)
1293 		return -EINVAL;
1294 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1295 		return -EINVAL;
1296 	err = get_nodes(&nodes, nmask, maxnode);
1297 	if (err)
1298 		return err;
1299 	return do_set_mempolicy(mode, flags, &nodes);
1300 }
1301 
1302 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1303 		const unsigned long __user *, old_nodes,
1304 		const unsigned long __user *, new_nodes)
1305 {
1306 	const struct cred *cred = current_cred(), *tcred;
1307 	struct mm_struct *mm = NULL;
1308 	struct task_struct *task;
1309 	nodemask_t task_nodes;
1310 	int err;
1311 	nodemask_t *old;
1312 	nodemask_t *new;
1313 	NODEMASK_SCRATCH(scratch);
1314 
1315 	if (!scratch)
1316 		return -ENOMEM;
1317 
1318 	old = &scratch->mask1;
1319 	new = &scratch->mask2;
1320 
1321 	err = get_nodes(old, old_nodes, maxnode);
1322 	if (err)
1323 		goto out;
1324 
1325 	err = get_nodes(new, new_nodes, maxnode);
1326 	if (err)
1327 		goto out;
1328 
1329 	/* Find the mm_struct */
1330 	rcu_read_lock();
1331 	task = pid ? find_task_by_vpid(pid) : current;
1332 	if (!task) {
1333 		rcu_read_unlock();
1334 		err = -ESRCH;
1335 		goto out;
1336 	}
1337 	get_task_struct(task);
1338 
1339 	err = -EINVAL;
1340 
1341 	/*
1342 	 * Check if this process has the right to modify the specified
1343 	 * process. The right exists if the process has administrative
1344 	 * capabilities, superuser privileges or the same
1345 	 * userid as the target process.
1346 	 */
1347 	tcred = __task_cred(task);
1348 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1349 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1350 	    !capable(CAP_SYS_NICE)) {
1351 		rcu_read_unlock();
1352 		err = -EPERM;
1353 		goto out_put;
1354 	}
1355 	rcu_read_unlock();
1356 
1357 	task_nodes = cpuset_mems_allowed(task);
1358 	/* Is the user allowed to access the target nodes? */
1359 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1360 		err = -EPERM;
1361 		goto out_put;
1362 	}
1363 
1364 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
1365 		err = -EINVAL;
1366 		goto out_put;
1367 	}
1368 
1369 	err = security_task_movememory(task);
1370 	if (err)
1371 		goto out_put;
1372 
1373 	mm = get_task_mm(task);
1374 	put_task_struct(task);
1375 
1376 	if (!mm) {
1377 		err = -EINVAL;
1378 		goto out;
1379 	}
1380 
1381 	err = do_migrate_pages(mm, old, new,
1382 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1383 
1384 	mmput(mm);
1385 out:
1386 	NODEMASK_SCRATCH_FREE(scratch);
1387 
1388 	return err;
1389 
1390 out_put:
1391 	put_task_struct(task);
1392 	goto out;
1393 
1394 }
1395 
1396 
1397 /* Retrieve NUMA policy */
1398 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1399 		unsigned long __user *, nmask, unsigned long, maxnode,
1400 		unsigned long, addr, unsigned long, flags)
1401 {
1402 	int err;
1403 	int uninitialized_var(pval);
1404 	nodemask_t nodes;
1405 
1406 	if (nmask != NULL && maxnode < MAX_NUMNODES)
1407 		return -EINVAL;
1408 
1409 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1410 
1411 	if (err)
1412 		return err;
1413 
1414 	if (policy && put_user(pval, policy))
1415 		return -EFAULT;
1416 
1417 	if (nmask)
1418 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1419 
1420 	return err;
1421 }
1422 
1423 #ifdef CONFIG_COMPAT
1424 
1425 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1426 		       compat_ulong_t __user *, nmask,
1427 		       compat_ulong_t, maxnode,
1428 		       compat_ulong_t, addr, compat_ulong_t, flags)
1429 {
1430 	long err;
1431 	unsigned long __user *nm = NULL;
1432 	unsigned long nr_bits, alloc_size;
1433 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1434 
1435 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1436 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1437 
1438 	if (nmask)
1439 		nm = compat_alloc_user_space(alloc_size);
1440 
1441 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1442 
1443 	if (!err && nmask) {
1444 		unsigned long copy_size;
1445 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1446 		err = copy_from_user(bm, nm, copy_size);
1447 		/* ensure entire bitmap is zeroed */
1448 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1449 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1450 	}
1451 
1452 	return err;
1453 }
1454 
1455 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1456 		       compat_ulong_t, maxnode)
1457 {
1458 	unsigned long __user *nm = NULL;
1459 	unsigned long nr_bits, alloc_size;
1460 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1461 
1462 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1463 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1464 
1465 	if (nmask) {
1466 		if (compat_get_bitmap(bm, nmask, nr_bits))
1467 			return -EFAULT;
1468 		nm = compat_alloc_user_space(alloc_size);
1469 		if (copy_to_user(nm, bm, alloc_size))
1470 			return -EFAULT;
1471 	}
1472 
1473 	return sys_set_mempolicy(mode, nm, nr_bits+1);
1474 }
1475 
1476 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1477 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1478 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1479 {
1480 	unsigned long __user *nm = NULL;
1481 	unsigned long nr_bits, alloc_size;
1482 	nodemask_t bm;
1483 
1484 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1485 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1486 
1487 	if (nmask) {
1488 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1489 			return -EFAULT;
1490 		nm = compat_alloc_user_space(alloc_size);
1491 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1492 			return -EFAULT;
1493 	}
1494 
1495 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1496 }
1497 
1498 #endif
1499 
1500 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1501 						unsigned long addr)
1502 {
1503 	struct mempolicy *pol = NULL;
1504 
1505 	if (vma) {
1506 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1507 			pol = vma->vm_ops->get_policy(vma, addr);
1508 		} else if (vma->vm_policy) {
1509 			pol = vma->vm_policy;
1510 
1511 			/*
1512 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1513 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1514 			 * count on these policies which will be dropped by
1515 			 * mpol_cond_put() later
1516 			 */
1517 			if (mpol_needs_cond_ref(pol))
1518 				mpol_get(pol);
1519 		}
1520 	}
1521 
1522 	return pol;
1523 }
1524 
1525 /*
1526  * get_vma_policy(@vma, @addr)
1527  * @vma: virtual memory area whose policy is sought
1528  * @addr: address in @vma for shared policy lookup
1529  *
1530  * Returns effective policy for a VMA at specified address.
1531  * Falls back to current->mempolicy or system default policy, as necessary.
1532  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1533  * count--added by the get_policy() vm_op, as appropriate--to protect against
1534  * freeing by another task.  It is the caller's responsibility to free the
1535  * extra reference for shared policies.
1536  */
1537 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1538 						unsigned long addr)
1539 {
1540 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1541 
1542 	if (!pol)
1543 		pol = get_task_policy(current);
1544 
1545 	return pol;
1546 }
1547 
1548 bool vma_policy_mof(struct vm_area_struct *vma)
1549 {
1550 	struct mempolicy *pol;
1551 
1552 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1553 		bool ret = false;
1554 
1555 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1556 		if (pol && (pol->flags & MPOL_F_MOF))
1557 			ret = true;
1558 		mpol_cond_put(pol);
1559 
1560 		return ret;
1561 	}
1562 
1563 	pol = vma->vm_policy;
1564 	if (!pol)
1565 		pol = get_task_policy(current);
1566 
1567 	return pol->flags & MPOL_F_MOF;
1568 }
1569 
1570 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1571 {
1572 	enum zone_type dynamic_policy_zone = policy_zone;
1573 
1574 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1575 
1576 	/*
1577 	 * If policy->v.nodes has movable memory only,
1578 	 * we apply policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1579 	 *
1580 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1581 	 * so if the following test fails, it implies that
1582 	 * policy->v.nodes has movable memory only.
1583 	 */
1584 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1585 		dynamic_policy_zone = ZONE_MOVABLE;
1586 
1587 	return zone >= dynamic_policy_zone;
1588 }
1589 
1590 /*
1591  * Return a nodemask representing a mempolicy for filtering nodes for
1592  * page allocation
1593  */
1594 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1595 {
1596 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1597 	if (unlikely(policy->mode == MPOL_BIND) &&
1598 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1599 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1600 		return &policy->v.nodes;
1601 
1602 	return NULL;
1603 }
1604 
1605 /* Return the node id preferred by the given mempolicy, or the given id */
1606 static int policy_node(gfp_t gfp, struct mempolicy *policy,
1607 								int nd)
1608 {
1609 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1610 		nd = policy->v.preferred_node;
1611 	else {
1612 		/*
1613 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1614 		 * because we might easily break the expectation to stay on the
1615 		 * requested node and not break the policy.
1616 		 */
1617 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1618 	}
1619 
1620 	return nd;
1621 }
1622 
1623 /* Do dynamic interleaving for a process */
1624 static unsigned interleave_nodes(struct mempolicy *policy)
1625 {
1626 	unsigned next;
1627 	struct task_struct *me = current;
1628 
1629 	next = next_node_in(me->il_prev, policy->v.nodes);
1630 	if (next < MAX_NUMNODES)
1631 		me->il_prev = next;
1632 	return next;
1633 }
1634 
1635 /*
1636  * Depending on the memory policy provide a node from which to allocate the
1637  * next slab entry.
1638  */
1639 unsigned int mempolicy_slab_node(void)
1640 {
1641 	struct mempolicy *policy;
1642 	int node = numa_mem_id();
1643 
1644 	if (in_interrupt())
1645 		return node;
1646 
1647 	policy = current->mempolicy;
1648 	if (!policy || policy->flags & MPOL_F_LOCAL)
1649 		return node;
1650 
1651 	switch (policy->mode) {
1652 	case MPOL_PREFERRED:
1653 		/*
1654 		 * handled MPOL_F_LOCAL above
1655 		 */
1656 		return policy->v.preferred_node;
1657 
1658 	case MPOL_INTERLEAVE:
1659 		return interleave_nodes(policy);
1660 
1661 	case MPOL_BIND: {
1662 		struct zoneref *z;
1663 
1664 		/*
1665 		 * Follow bind policy behavior and start allocation at the
1666 		 * first node.
1667 		 */
1668 		struct zonelist *zonelist;
1669 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1670 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1671 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1672 							&policy->v.nodes);
1673 		return z->zone ? z->zone->node : node;
1674 	}
1675 
1676 	default:
1677 		BUG();
1678 	}
1679 }
1680 
1681 /*
1682  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1683  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1684  * number of present nodes.
1685  */
1686 static unsigned offset_il_node(struct mempolicy *pol,
1687 			       struct vm_area_struct *vma, unsigned long n)
1688 {
1689 	unsigned nnodes = nodes_weight(pol->v.nodes);
1690 	unsigned target;
1691 	int i;
1692 	int nid;
1693 
1694 	if (!nnodes)
1695 		return numa_node_id();
1696 	target = (unsigned int)n % nnodes;
1697 	nid = first_node(pol->v.nodes);
1698 	for (i = 0; i < target; i++)
1699 		nid = next_node(nid, pol->v.nodes);
1700 	return nid;
1701 }
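/*
 * Worked example (illustrative): with pol->v.nodes = {1,3,5} (nnodes == 3)
 * and n == 7, target is 7 % 3 == 1, so the walk starts at node 1 and
 * advances once, returning node 3.
 */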
1702 
1703 /* Determine a node number for interleave */
1704 static inline unsigned interleave_nid(struct mempolicy *pol,
1705 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1706 {
1707 	if (vma) {
1708 		unsigned long off;
1709 
1710 		/*
1711 		 * for small pages, there is no difference between
1712 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1713 		 * for huge pages, since vm_pgoff is in units of small
1714 		 * pages, we need to shift off the always 0 bits to get
1715 		 * a useful offset.
1716 		 */
1717 		BUG_ON(shift < PAGE_SHIFT);
1718 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1719 		off += (addr - vma->vm_start) >> shift;
1720 		return offset_il_node(pol, vma, off);
1721 	} else
1722 		return interleave_nodes(pol);
1723 }
1724 
1725 #ifdef CONFIG_HUGETLBFS
1726 /*
1727  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1728  * @vma: virtual memory area whose policy is sought
1729  * @addr: address in @vma for shared policy lookup and interleave policy
1730  * @gfp_flags: for requested zone
1731  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1732  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1733  *
1734  * Returns a nid suitable for a huge page allocation and a pointer
1735  * to the struct mempolicy for conditional unref after allocation.
1736  * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
1737  * @nodemask for filtering the zonelist.
1738  *
1739  * Must be protected by read_mems_allowed_begin()
1740  */
1741 int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1742 				struct mempolicy **mpol, nodemask_t **nodemask)
1743 {
1744 	int nid;
1745 
1746 	*mpol = get_vma_policy(vma, addr);
1747 	*nodemask = NULL;	/* assume !MPOL_BIND */
1748 
1749 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1750 		nid = interleave_nid(*mpol, vma, addr,
1751 					huge_page_shift(hstate_vma(vma)));
1752 	} else {
1753 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1754 		if ((*mpol)->mode == MPOL_BIND)
1755 			*nodemask = &(*mpol)->v.nodes;
1756 	}
1757 	return nid;
1758 }
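/*
 * Minimal sketch of the expected caller pattern (illustrative; gfp, order
 * and page are placeholders, and the real hugetlb fault path in
 * mm/hugetlb.c also handles reservations and retries):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid;
 *
 *	nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *	page = __alloc_pages_nodemask(gfp, order, nid, nodemask);
 *	mpol_cond_put(mpol);
 *
 * mpol_cond_put() drops the reference taken for shared policies.
 */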
1759 
1760 /*
1761  * init_nodemask_of_mempolicy
1762  *
1763  * If the current task's mempolicy is "default" [NULL], return 'false'
1764  * to indicate default policy.  Otherwise, extract the policy nodemask
1765  * for 'bind' or 'interleave' policy into the argument nodemask, or
1766  * initialize the argument nodemask to contain the single node for
1767  * 'preferred' or 'local' policy and return 'true' to indicate presence
1768  * of non-default mempolicy.
1769  *
1770  * We don't bother with reference counting the mempolicy [mpol_get/put]
1771  * because the current task is examining its own mempolicy and a task's
1772  * mempolicy is only ever changed by the task itself.
1773  *
1774  * N.B., it is the caller's responsibility to free a returned nodemask.
1775  */
1776 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1777 {
1778 	struct mempolicy *mempolicy;
1779 	int nid;
1780 
1781 	if (!(mask && current->mempolicy))
1782 		return false;
1783 
1784 	task_lock(current);
1785 	mempolicy = current->mempolicy;
1786 	switch (mempolicy->mode) {
1787 	case MPOL_PREFERRED:
1788 		if (mempolicy->flags & MPOL_F_LOCAL)
1789 			nid = numa_node_id();
1790 		else
1791 			nid = mempolicy->v.preferred_node;
1792 		init_nodemask_of_node(mask, nid);
1793 		break;
1794 
1795 	case MPOL_BIND:
1796 		/* Fall through */
1797 	case MPOL_INTERLEAVE:
1798 		*mask =  mempolicy->v.nodes;
1799 		break;
1800 
1801 	default:
1802 		BUG();
1803 	}
1804 	task_unlock(current);
1805 
1806 	return true;
1807 }
1808 #endif
1809 
1810 /*
1811  * mempolicy_nodemask_intersects
1812  *
1813  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1814  * policy.  Otherwise, check for intersection between mask and the policy
1815  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1816  * policy, always return true since it may allocate elsewhere on fallback.
1817  *
1818  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1819  */
1820 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1821 					const nodemask_t *mask)
1822 {
1823 	struct mempolicy *mempolicy;
1824 	bool ret = true;
1825 
1826 	if (!mask)
1827 		return ret;
1828 	task_lock(tsk);
1829 	mempolicy = tsk->mempolicy;
1830 	if (!mempolicy)
1831 		goto out;
1832 
1833 	switch (mempolicy->mode) {
1834 	case MPOL_PREFERRED:
1835 		/*
1836 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1837 		 * allocate from; they may fall back to other nodes when OOM.
1838 		 * Thus, it's possible for tsk to have allocated memory from
1839 		 * nodes in mask.
1840 		 */
1841 		break;
1842 	case MPOL_BIND:
1843 	case MPOL_INTERLEAVE:
1844 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1845 		break;
1846 	default:
1847 		BUG();
1848 	}
1849 out:
1850 	task_unlock(tsk);
1851 	return ret;
1852 }
1853 
1854 /* Allocate a page with the interleave policy.
1855    It has its own path because it needs to do special accounting. */
1856 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1857 					unsigned nid)
1858 {
1859 	struct page *page;
1860 
1861 	page = __alloc_pages(gfp, order, nid);
1862 	if (page && page_to_nid(page) == nid)
1863 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1864 	return page;
1865 }
1866 
1867 /**
1868  * 	alloc_pages_vma	- Allocate a page for a VMA.
1869  *
1870  * 	@gfp:
1871  *      %GFP_USER    user allocation.
1872  *      %GFP_KERNEL  kernel allocations,
1873  *      %GFP_HIGHMEM highmem/user allocations,
1874  *      %GFP_FS      allocation should not call back into a file system.
1875  *      %GFP_ATOMIC  don't sleep.
1876  *
1877  *	@order: Order of the GFP allocation.
1878  * 	@vma:  Pointer to VMA or NULL if not available.
1879  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1880  *	@node: Which node to prefer for allocation (modulo policy).
1881  *	@hugepage: for hugepages try only the preferred node if possible
1882  *
1883  * 	This function allocates a page from the kernel page pool and applies
1884  *	a NUMA policy associated with the VMA or the current process.
1885  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
1886  *	mm_struct of the VMA to prevent it from going away. Should be used for
1887  *	all allocations for pages that will be mapped into user space. Returns
1888  *	NULL when no page can be allocated.
1889  */
1890 struct page *
1891 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1892 		unsigned long addr, int node, bool hugepage)
1893 {
1894 	struct mempolicy *pol;
1895 	struct page *page;
1896 	int preferred_nid;
1897 	nodemask_t *nmask;
1898 
1899 	pol = get_vma_policy(vma, addr);
1900 
1901 	if (pol->mode == MPOL_INTERLEAVE) {
1902 		unsigned nid;
1903 
1904 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1905 		mpol_cond_put(pol);
1906 		page = alloc_page_interleave(gfp, order, nid);
1907 		goto out;
1908 	}
1909 
1910 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1911 		int hpage_node = node;
1912 
1913 		/*
1914 		 * For hugepage allocation and non-interleave policy which
1915 		 * allows the current node (or other explicitly preferred
1916 		 * node) we only try to allocate from the current/preferred
1917 		 * node and don't fall back to other nodes, as the cost of
1918 		 * remote accesses would likely offset THP benefits.
1919 		 *
1920 		 * If the policy is interleave, or does not allow the current
1921 		 * node in its nodemask, we allocate the standard way.
1922 		 */
1923 		if (pol->mode == MPOL_PREFERRED &&
1924 						!(pol->flags & MPOL_F_LOCAL))
1925 			hpage_node = pol->v.preferred_node;
1926 
1927 		nmask = policy_nodemask(gfp, pol);
1928 		if (!nmask || node_isset(hpage_node, *nmask)) {
1929 			mpol_cond_put(pol);
1930 			page = __alloc_pages_node(hpage_node,
1931 						gfp | __GFP_THISNODE, order);
1932 			goto out;
1933 		}
1934 	}
1935 
1936 	nmask = policy_nodemask(gfp, pol);
1937 	preferred_nid = policy_node(gfp, pol, node);
1938 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
1939 	mpol_cond_put(pol);
1940 out:
1941 	return page;
1942 }
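
/*
 * Usage sketch: anonymous fault handlers typically go through the
 * alloc_page_vma() wrapper, which boils down to an order-0 call here,
 * roughly:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */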
1943 
1944 /**
1945  * 	alloc_pages_current - Allocate pages.
1946  *
1947  *	@gfp:
1948  *		%GFP_USER   user allocation,
1949  *      	%GFP_KERNEL kernel allocation,
1950  *      	%GFP_HIGHMEM highmem allocation,
1951  *      	%GFP_FS     don't call back into a file system.
1952  *      	%GFP_ATOMIC don't sleep.
1953  *	@order: Power of two of allocation size in pages. 0 is a single page.
1954  *
1955  *	Allocate a page from the kernel page pool.  When not in
1956  *	interrupt context, apply the current process' NUMA policy.
1957  *	Returns NULL when no page can be allocated.
1958  */
1959 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1960 {
1961 	struct mempolicy *pol = &default_policy;
1962 	struct page *page;
1963 
1964 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
1965 		pol = get_task_policy(current);
1966 
1967 	/*
1968 	 * No reference counting needed for current->mempolicy
1969 	 * nor system default_policy
1970 	 */
1971 	if (pol->mode == MPOL_INTERLEAVE)
1972 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1973 	else
1974 		page = __alloc_pages_nodemask(gfp, order,
1975 				policy_node(gfp, pol, numa_node_id()),
1976 				policy_nodemask(gfp, pol));
1977 
1978 	return page;
1979 }
1980 EXPORT_SYMBOL(alloc_pages_current);
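
/*
 * Usage sketch: on NUMA kernels the generic alloc_pages() helper maps to
 * alloc_pages_current(), so an ordinary allocation of four contiguous
 * pages, e.g.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 * is placed according to the calling task's mempolicy, or the system
 * default policy when called from interrupt context.
 */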
1981 
1982 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
1983 {
1984 	struct mempolicy *pol = mpol_dup(vma_policy(src));
1985 
1986 	if (IS_ERR(pol))
1987 		return PTR_ERR(pol);
1988 	dst->vm_policy = pol;
1989 	return 0;
1990 }
1991 
1992 /*
1993  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1994  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
1995  * with the mems_allowed returned by cpuset_mems_allowed().  This
1996  * keeps mempolicies cpuset-relative after its cpuset moves.  See
1997  * further kernel/cpuset.c update_nodemask().
1998  *
1999  * current's mempolicy may be rebound by another task (the one that changes
2000  * the cpuset's mems), so we needn't do the rebind work for the current task.
2001  */
2002 
2003 /* Slow path of a mempolicy duplicate */
2004 struct mempolicy *__mpol_dup(struct mempolicy *old)
2005 {
2006 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2007 
2008 	if (!new)
2009 		return ERR_PTR(-ENOMEM);
2010 
2011 	/* task's mempolicy is protected by alloc_lock */
2012 	if (old == current->mempolicy) {
2013 		task_lock(current);
2014 		*new = *old;
2015 		task_unlock(current);
2016 	} else
2017 		*new = *old;
2018 
2019 	if (current_cpuset_is_being_rebound()) {
2020 		nodemask_t mems = cpuset_mems_allowed(current);
2021 		mpol_rebind_policy(new, &mems);
2022 	}
2023 	atomic_set(&new->refcnt, 1);
2024 	return new;
2025 }
2026 
2027 /* Slow path of a mempolicy comparison */
2028 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2029 {
2030 	if (!a || !b)
2031 		return false;
2032 	if (a->mode != b->mode)
2033 		return false;
2034 	if (a->flags != b->flags)
2035 		return false;
2036 	if (mpol_store_user_nodemask(a))
2037 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2038 			return false;
2039 
2040 	switch (a->mode) {
2041 	case MPOL_BIND:
2042 		/* Fall through */
2043 	case MPOL_INTERLEAVE:
2044 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2045 	case MPOL_PREFERRED:
2046 		return a->v.preferred_node == b->v.preferred_node;
2047 	default:
2048 		BUG();
2049 		return false;
2050 	}
2051 }
2052 
2053 /*
2054  * Shared memory backing store policy support.
2055  *
2056  * Remember policies even when nobody has shared memory mapped.
2057  * The policies are kept in Red-Black tree linked from the inode.
2058  * They are protected by the sp->lock rwlock, which should be held
2059  * for any accesses to the tree.
2060  */
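
/*
 * Wiring sketch (modelled on tmpfs; "foofs" and FOOFS_I() are hypothetical):
 * a filesystem embeds a struct shared_policy in its inode info and exposes
 * it through its vm_operations_struct:
 *
 *	static int foofs_set_policy(struct vm_area_struct *vma,
 *				    struct mempolicy *mpol)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		return mpol_set_shared_policy(&FOOFS_I(inode)->policy,
 *					      vma, mpol);
 *	}
 *
 * with ->get_policy similarly calling mpol_shared_policy_lookup() on the
 * faulting index.
 */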
2061 
2062 /*
2063  * lookup first element intersecting start-end.  Caller holds sp->lock for
2064  * reading or for writing
2065  */
2066 static struct sp_node *
2067 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2068 {
2069 	struct rb_node *n = sp->root.rb_node;
2070 
2071 	while (n) {
2072 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2073 
2074 		if (start >= p->end)
2075 			n = n->rb_right;
2076 		else if (end <= p->start)
2077 			n = n->rb_left;
2078 		else
2079 			break;
2080 	}
2081 	if (!n)
2082 		return NULL;
2083 	for (;;) {
2084 		struct sp_node *w = NULL;
2085 		struct rb_node *prev = rb_prev(n);
2086 		if (!prev)
2087 			break;
2088 		w = rb_entry(prev, struct sp_node, nd);
2089 		if (w->end <= start)
2090 			break;
2091 		n = prev;
2092 	}
2093 	return rb_entry(n, struct sp_node, nd);
2094 }
2095 
2096 /*
2097  * Insert a new shared policy into the tree.  Caller holds sp->lock for
2098  * writing.
2099  */
2100 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2101 {
2102 	struct rb_node **p = &sp->root.rb_node;
2103 	struct rb_node *parent = NULL;
2104 	struct sp_node *nd;
2105 
2106 	while (*p) {
2107 		parent = *p;
2108 		nd = rb_entry(parent, struct sp_node, nd);
2109 		if (new->start < nd->start)
2110 			p = &(*p)->rb_left;
2111 		else if (new->end > nd->end)
2112 			p = &(*p)->rb_right;
2113 		else
2114 			BUG();
2115 	}
2116 	rb_link_node(&new->nd, parent, p);
2117 	rb_insert_color(&new->nd, &sp->root);
2118 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2119 		 new->policy ? new->policy->mode : 0);
2120 }
2121 
2122 /* Find shared policy intersecting idx */
2123 struct mempolicy *
2124 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2125 {
2126 	struct mempolicy *pol = NULL;
2127 	struct sp_node *sn;
2128 
2129 	if (!sp->root.rb_node)
2130 		return NULL;
2131 	read_lock(&sp->lock);
2132 	sn = sp_lookup(sp, idx, idx+1);
2133 	if (sn) {
2134 		mpol_get(sn->policy);
2135 		pol = sn->policy;
2136 	}
2137 	read_unlock(&sp->lock);
2138 	return pol;
2139 }
2140 
2141 static void sp_free(struct sp_node *n)
2142 {
2143 	mpol_put(n->policy);
2144 	kmem_cache_free(sn_cache, n);
2145 }
2146 
2147 /**
2148  * mpol_misplaced - check whether current page node is valid in policy
2149  *
2150  * @page: page to be checked
2151  * @vma: vm area where page mapped
2152  * @addr: virtual address where page mapped
2153  *
2154  * Look up the current policy node id for vma,addr and compare it to the
2155  * page's node id.
2156  *
2157  * Returns:
2158  *	-1	- not misplaced, page is in the right node
2159  *	node	- node id where the page should be
2160  *
2161  * Policy determination "mimics" alloc_page_vma().
2162  * Called from fault path where we know the vma and faulting address.
2163  */
2164 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2165 {
2166 	struct mempolicy *pol;
2167 	struct zoneref *z;
2168 	int curnid = page_to_nid(page);
2169 	unsigned long pgoff;
2170 	int thiscpu = raw_smp_processor_id();
2171 	int thisnid = cpu_to_node(thiscpu);
2172 	int polnid = -1;
2173 	int ret = -1;
2174 
2175 	BUG_ON(!vma);
2176 
2177 	pol = get_vma_policy(vma, addr);
2178 	if (!(pol->flags & MPOL_F_MOF))
2179 		goto out;
2180 
2181 	switch (pol->mode) {
2182 	case MPOL_INTERLEAVE:
2183 		BUG_ON(addr >= vma->vm_end);
2184 		BUG_ON(addr < vma->vm_start);
2185 
2186 		pgoff = vma->vm_pgoff;
2187 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2188 		polnid = offset_il_node(pol, vma, pgoff);
2189 		break;
2190 
2191 	case MPOL_PREFERRED:
2192 		if (pol->flags & MPOL_F_LOCAL)
2193 			polnid = numa_node_id();
2194 		else
2195 			polnid = pol->v.preferred_node;
2196 		break;
2197 
2198 	case MPOL_BIND:
2199 
2200 		/*
2201 		 * MPOL_BIND allows binding to multiple nodes. Use the
2202 		 * current page's node if it is in the policy nodemask,
2203 		 * else select the nearest allowed node, if any. If there
2204 		 * are no allowed nodes, use the current node [!misplaced].
2205 		 */
2206 		if (node_isset(curnid, pol->v.nodes))
2207 			goto out;
2208 		z = first_zones_zonelist(
2209 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2210 				gfp_zone(GFP_HIGHUSER),
2211 				&pol->v.nodes);
2212 		polnid = z->zone->node;
2213 		break;
2214 
2215 	default:
2216 		BUG();
2217 	}
2218 
2219 	/* Migrate the page towards the node whose CPU is referencing it */
2220 	if (pol->flags & MPOL_F_MORON) {
2221 		polnid = thisnid;
2222 
2223 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2224 			goto out;
2225 	}
2226 
2227 	if (curnid != polnid)
2228 		ret = polnid;
2229 out:
2230 	mpol_cond_put(pol);
2231 
2232 	return ret;
2233 }
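
/*
 * Usage sketch: the NUMA-hinting fault path uses the return value to decide
 * whether to attempt migration, roughly:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 */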
2234 
2235 /*
2236  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2237  * dropped after task->mempolicy is set to NULL so that any allocation done as
2238  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2239  * policy.
2240  */
2241 void mpol_put_task_policy(struct task_struct *task)
2242 {
2243 	struct mempolicy *pol;
2244 
2245 	task_lock(task);
2246 	pol = task->mempolicy;
2247 	task->mempolicy = NULL;
2248 	task_unlock(task);
2249 	mpol_put(pol);
2250 }
2251 
2252 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2253 {
2254 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2255 	rb_erase(&n->nd, &sp->root);
2256 	sp_free(n);
2257 }
2258 
2259 static void sp_node_init(struct sp_node *node, unsigned long start,
2260 			unsigned long end, struct mempolicy *pol)
2261 {
2262 	node->start = start;
2263 	node->end = end;
2264 	node->policy = pol;
2265 }
2266 
2267 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2268 				struct mempolicy *pol)
2269 {
2270 	struct sp_node *n;
2271 	struct mempolicy *newpol;
2272 
2273 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2274 	if (!n)
2275 		return NULL;
2276 
2277 	newpol = mpol_dup(pol);
2278 	if (IS_ERR(newpol)) {
2279 		kmem_cache_free(sn_cache, n);
2280 		return NULL;
2281 	}
2282 	newpol->flags |= MPOL_F_SHARED;
2283 	sp_node_init(n, start, end, newpol);
2284 
2285 	return n;
2286 }
2287 
2288 /* Replace a policy range. */
2289 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2290 				 unsigned long end, struct sp_node *new)
2291 {
2292 	struct sp_node *n;
2293 	struct sp_node *n_new = NULL;
2294 	struct mempolicy *mpol_new = NULL;
2295 	int ret = 0;
2296 
2297 restart:
2298 	write_lock(&sp->lock);
2299 	n = sp_lookup(sp, start, end);
2300 	/* Take care of old policies in the same range. */
2301 	while (n && n->start < end) {
2302 		struct rb_node *next = rb_next(&n->nd);
2303 		if (n->start >= start) {
2304 			if (n->end <= end)
2305 				sp_delete(sp, n);
2306 			else
2307 				n->start = end;
2308 		} else {
2309 			/* Old policy spanning whole new range. */
2310 			if (n->end > end) {
2311 				if (!n_new)
2312 					goto alloc_new;
2313 
2314 				*mpol_new = *n->policy;
2315 				atomic_set(&mpol_new->refcnt, 1);
2316 				sp_node_init(n_new, end, n->end, mpol_new);
2317 				n->end = start;
2318 				sp_insert(sp, n_new);
2319 				n_new = NULL;
2320 				mpol_new = NULL;
2321 				break;
2322 			} else
2323 				n->end = start;
2324 		}
2325 		if (!next)
2326 			break;
2327 		n = rb_entry(next, struct sp_node, nd);
2328 	}
2329 	if (new)
2330 		sp_insert(sp, new);
2331 	write_unlock(&sp->lock);
2332 	ret = 0;
2333 
2334 err_out:
2335 	if (mpol_new)
2336 		mpol_put(mpol_new);
2337 	if (n_new)
2338 		kmem_cache_free(sn_cache, n_new);
2339 
2340 	return ret;
2341 
2342 alloc_new:
2343 	write_unlock(&sp->lock);
2344 	ret = -ENOMEM;
2345 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2346 	if (!n_new)
2347 		goto err_out;
2348 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2349 	if (!mpol_new)
2350 		goto err_out;
2351 	goto restart;
2352 }
2353 
2354 /**
2355  * mpol_shared_policy_init - initialize shared policy for inode
2356  * @sp: pointer to inode shared policy
2357  * @mpol:  struct mempolicy to install
2358  *
2359  * Install non-NULL @mpol in inode's shared policy rb-tree.
2360  * On entry, the current task has a reference on a non-NULL @mpol.
2361  * This must be released on exit.
2362  * This is called at get_inode() time, so we can use GFP_KERNEL.
2363  */
2364 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2365 {
2366 	int ret;
2367 
2368 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2369 	rwlock_init(&sp->lock);
2370 
2371 	if (mpol) {
2372 		struct vm_area_struct pvma;
2373 		struct mempolicy *new;
2374 		NODEMASK_SCRATCH(scratch);
2375 
2376 		if (!scratch)
2377 			goto put_mpol;
2378 		/* contextualize the tmpfs mount point mempolicy */
2379 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2380 		if (IS_ERR(new))
2381 			goto free_scratch; /* no valid nodemask intersection */
2382 
2383 		task_lock(current);
2384 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2385 		task_unlock(current);
2386 		if (ret)
2387 			goto put_new;
2388 
2389 		/* Create pseudo-vma that contains just the policy */
2390 		memset(&pvma, 0, sizeof(struct vm_area_struct));
2391 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2392 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2393 
2394 put_new:
2395 		mpol_put(new);			/* drop initial ref */
2396 free_scratch:
2397 		NODEMASK_SCRATCH_FREE(scratch);
2398 put_mpol:
2399 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2400 	}
2401 }
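
/*
 * Usage sketch: tmpfs calls this while setting up a new inode, handing over
 * its reference on the mount's "mpol=" mempolicy, roughly:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * Passing a NULL mpol simply leaves the tree empty (default policy).
 */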
2402 
2403 int mpol_set_shared_policy(struct shared_policy *info,
2404 			struct vm_area_struct *vma, struct mempolicy *npol)
2405 {
2406 	int err;
2407 	struct sp_node *new = NULL;
2408 	unsigned long sz = vma_pages(vma);
2409 
2410 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2411 		 vma->vm_pgoff,
2412 		 sz, npol ? npol->mode : -1,
2413 		 npol ? npol->flags : -1,
2414 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2415 
2416 	if (npol) {
2417 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2418 		if (!new)
2419 			return -ENOMEM;
2420 	}
2421 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2422 	if (err && new)
2423 		sp_free(new);
2424 	return err;
2425 }
2426 
2427 /* Free a backing policy store on inode delete. */
2428 void mpol_free_shared_policy(struct shared_policy *p)
2429 {
2430 	struct sp_node *n;
2431 	struct rb_node *next;
2432 
2433 	if (!p->root.rb_node)
2434 		return;
2435 	write_lock(&p->lock);
2436 	next = rb_first(&p->root);
2437 	while (next) {
2438 		n = rb_entry(next, struct sp_node, nd);
2439 		next = rb_next(&n->nd);
2440 		sp_delete(p, n);
2441 	}
2442 	write_unlock(&p->lock);
2443 }
2444 
2445 #ifdef CONFIG_NUMA_BALANCING
2446 static int __initdata numabalancing_override;
2447 
2448 static void __init check_numabalancing_enable(void)
2449 {
2450 	bool numabalancing_default = false;
2451 
2452 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2453 		numabalancing_default = true;
2454 
2455 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2456 	if (numabalancing_override)
2457 		set_numabalancing_state(numabalancing_override == 1);
2458 
2459 	if (num_online_nodes() > 1 && !numabalancing_override) {
2460 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2461 			numabalancing_default ? "Enabling" : "Disabling");
2462 		set_numabalancing_state(numabalancing_default);
2463 	}
2464 }
2465 
2466 static int __init setup_numabalancing(char *str)
2467 {
2468 	int ret = 0;
2469 	if (!str)
2470 		goto out;
2471 
2472 	if (!strcmp(str, "enable")) {
2473 		numabalancing_override = 1;
2474 		ret = 1;
2475 	} else if (!strcmp(str, "disable")) {
2476 		numabalancing_override = -1;
2477 		ret = 1;
2478 	}
2479 out:
2480 	if (!ret)
2481 		pr_warn("Unable to parse numa_balancing=\n");
2482 
2483 	return ret;
2484 }
2485 __setup("numa_balancing=", setup_numabalancing);
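
/*
 * Example: booting with "numa_balancing=disable" forces automatic NUMA
 * balancing off regardless of CONFIG_NUMA_BALANCING_DEFAULT_ENABLED, and
 * "numa_balancing=enable" forces it on; without the parameter,
 * check_numabalancing_enable() applies the Kconfig default on systems with
 * more than one online node.
 */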
2486 #else
2487 static inline void __init check_numabalancing_enable(void)
2488 {
2489 }
2490 #endif /* CONFIG_NUMA_BALANCING */
2491 
2492 /* assumes fs == KERNEL_DS */
2493 void __init numa_policy_init(void)
2494 {
2495 	nodemask_t interleave_nodes;
2496 	unsigned long largest = 0;
2497 	int nid, prefer = 0;
2498 
2499 	policy_cache = kmem_cache_create("numa_policy",
2500 					 sizeof(struct mempolicy),
2501 					 0, SLAB_PANIC, NULL);
2502 
2503 	sn_cache = kmem_cache_create("shared_policy_node",
2504 				     sizeof(struct sp_node),
2505 				     0, SLAB_PANIC, NULL);
2506 
2507 	for_each_node(nid) {
2508 		preferred_node_policy[nid] = (struct mempolicy) {
2509 			.refcnt = ATOMIC_INIT(1),
2510 			.mode = MPOL_PREFERRED,
2511 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2512 			.v = { .preferred_node = nid, },
2513 		};
2514 	}
2515 
2516 	/*
2517 	 * Set interleaving policy for system init. Interleaving is only
2518 	 * enabled across suitably sized nodes (default is >= 16MB), or
2519 	 * fall back to the largest node if they're all smaller.
2520 	 */
2521 	nodes_clear(interleave_nodes);
2522 	for_each_node_state(nid, N_MEMORY) {
2523 		unsigned long total_pages = node_present_pages(nid);
2524 
2525 		/* Preserve the largest node */
2526 		if (largest < total_pages) {
2527 			largest = total_pages;
2528 			prefer = nid;
2529 		}
2530 
2531 		/* Interleave this node? */
2532 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2533 			node_set(nid, interleave_nodes);
2534 	}
2535 
2536 	/* All too small, use the largest */
2537 	if (unlikely(nodes_empty(interleave_nodes)))
2538 		node_set(prefer, interleave_nodes);
2539 
2540 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2541 		pr_err("%s: interleaving failed\n", __func__);
2542 
2543 	check_numabalancing_enable();
2544 }
2545 
2546 /* Reset policy of current process to default */
2547 void numa_default_policy(void)
2548 {
2549 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2550 }
2551 
2552 /*
2553  * Parse and format mempolicy from/to strings
2554  */
2555 
2556 /*
2557  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2558  */
2559 static const char * const policy_modes[] =
2560 {
2561 	[MPOL_DEFAULT]    = "default",
2562 	[MPOL_PREFERRED]  = "prefer",
2563 	[MPOL_BIND]       = "bind",
2564 	[MPOL_INTERLEAVE] = "interleave",
2565 	[MPOL_LOCAL]      = "local",
2566 };
2567 
2568 
2569 #ifdef CONFIG_TMPFS
2570 /**
2571  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2572  * @str:  string containing mempolicy to parse
2573  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2574  *
2575  * Format of input:
2576  *	<mode>[=<flags>][:<nodelist>]
2577  *
2578  * On success, returns 0, else 1
2579  */
2580 int mpol_parse_str(char *str, struct mempolicy **mpol)
2581 {
2582 	struct mempolicy *new = NULL;
2583 	unsigned short mode;
2584 	unsigned short mode_flags;
2585 	nodemask_t nodes;
2586 	char *nodelist = strchr(str, ':');
2587 	char *flags = strchr(str, '=');
2588 	int err = 1;
2589 
2590 	if (nodelist) {
2591 		/* NUL-terminate mode or flags string */
2592 		*nodelist++ = '\0';
2593 		if (nodelist_parse(nodelist, nodes))
2594 			goto out;
2595 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2596 			goto out;
2597 	} else
2598 		nodes_clear(nodes);
2599 
2600 	if (flags)
2601 		*flags++ = '\0';	/* terminate mode string */
2602 
2603 	for (mode = 0; mode < MPOL_MAX; mode++) {
2604 		if (!strcmp(str, policy_modes[mode])) {
2605 			break;
2606 		}
2607 	}
2608 	if (mode >= MPOL_MAX)
2609 		goto out;
2610 
2611 	switch (mode) {
2612 	case MPOL_PREFERRED:
2613 		/*
2614 		 * Insist on a nodelist of one node only
2615 		 */
2616 		if (nodelist) {
2617 			char *rest = nodelist;
2618 			while (isdigit(*rest))
2619 				rest++;
2620 			if (*rest)
2621 				goto out;
2622 		}
2623 		break;
2624 	case MPOL_INTERLEAVE:
2625 		/*
2626 		 * Default to online nodes with memory if no nodelist
2627 		 */
2628 		if (!nodelist)
2629 			nodes = node_states[N_MEMORY];
2630 		break;
2631 	case MPOL_LOCAL:
2632 		/*
2633 		 * Don't allow a nodelist;  mpol_new() checks flags
2634 		 */
2635 		if (nodelist)
2636 			goto out;
2637 		mode = MPOL_PREFERRED;
2638 		break;
2639 	case MPOL_DEFAULT:
2640 		/*
2641 		 * Insist on a empty nodelist
2642 		 */
2643 		if (!nodelist)
2644 			err = 0;
2645 		goto out;
2646 	case MPOL_BIND:
2647 		/*
2648 		 * Insist on a nodelist
2649 		 */
2650 		if (!nodelist)
2651 			goto out;
2652 	}
2653 
2654 	mode_flags = 0;
2655 	if (flags) {
2656 		/*
2657 		 * Currently, we only support two mutually exclusive
2658 		 * mode flags.
2659 		 */
2660 		if (!strcmp(flags, "static"))
2661 			mode_flags |= MPOL_F_STATIC_NODES;
2662 		else if (!strcmp(flags, "relative"))
2663 			mode_flags |= MPOL_F_RELATIVE_NODES;
2664 		else
2665 			goto out;
2666 	}
2667 
2668 	new = mpol_new(mode, mode_flags, &nodes);
2669 	if (IS_ERR(new))
2670 		goto out;
2671 
2672 	/*
2673 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2674 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2675 	 */
2676 	if (mode != MPOL_PREFERRED)
2677 		new->v.nodes = nodes;
2678 	else if (nodelist)
2679 		new->v.preferred_node = first_node(nodes);
2680 	else
2681 		new->flags |= MPOL_F_LOCAL;
2682 
2683 	/*
2684 	 * Save nodes for contextualization: this will be used to "clone"
2685 	 * the mempolicy in a specific context [cpuset] at a later time.
2686 	 */
2687 	new->w.user_nodemask = nodes;
2688 
2689 	err = 0;
2690 
2691 out:
2692 	/* Restore string for error message */
2693 	if (nodelist)
2694 		*--nodelist = ':';
2695 	if (flags)
2696 		*--flags = '=';
2697 	if (!err)
2698 		*mpol = new;
2699 	return err;
2700 }
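
/*
 * Usage sketch: parsing a tmpfs "mpol=" mount option. Note @str is modified
 * in place, so it must be writable:
 *
 *	struct mempolicy *mpol;
 *	char str[] = "interleave=static:0-3";
 *	int err = mpol_parse_str(str, &mpol);
 *
 * On success (err == 0, assuming nodes 0-3 are online with memory), mpol is
 * an MPOL_INTERLEAVE policy with MPOL_F_STATIC_NODES set over nodes 0-3;
 * the caller owns the reference and eventually drops it with mpol_put().
 */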
2701 #endif /* CONFIG_TMPFS */
2702 
2703 /**
2704  * mpol_to_str - format a mempolicy structure for printing
2705  * @buffer:  to contain formatted mempolicy string
2706  * @maxlen:  length of @buffer
2707  * @pol:  pointer to mempolicy to be formatted
2708  *
2709  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2710  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2711  * longest flag, "relative", and to display at least a few node ids.
2712  */
2713 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2714 {
2715 	char *p = buffer;
2716 	nodemask_t nodes = NODE_MASK_NONE;
2717 	unsigned short mode = MPOL_DEFAULT;
2718 	unsigned short flags = 0;
2719 
2720 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2721 		mode = pol->mode;
2722 		flags = pol->flags;
2723 	}
2724 
2725 	switch (mode) {
2726 	case MPOL_DEFAULT:
2727 		break;
2728 	case MPOL_PREFERRED:
2729 		if (flags & MPOL_F_LOCAL)
2730 			mode = MPOL_LOCAL;
2731 		else
2732 			node_set(pol->v.preferred_node, nodes);
2733 		break;
2734 	case MPOL_BIND:
2735 	case MPOL_INTERLEAVE:
2736 		nodes = pol->v.nodes;
2737 		break;
2738 	default:
2739 		WARN_ON_ONCE(1);
2740 		snprintf(p, maxlen, "unknown");
2741 		return;
2742 	}
2743 
2744 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2745 
2746 	if (flags & MPOL_MODE_FLAGS) {
2747 		p += snprintf(p, buffer + maxlen - p, "=");
2748 
2749 		/*
2750 		 * Currently, the only defined flags are mutually exclusive
2751 		 */
2752 		if (flags & MPOL_F_STATIC_NODES)
2753 			p += snprintf(p, buffer + maxlen - p, "static");
2754 		else if (flags & MPOL_F_RELATIVE_NODES)
2755 			p += snprintf(p, buffer + maxlen - p, "relative");
2756 	}
2757 
2758 	if (!nodes_empty(nodes))
2759 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2760 			       nodemask_pr_args(&nodes));
2761 }
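
/*
 * Usage sketch: formatting a policy for display, as done for
 * /proc/<pid>/numa_maps and the tmpfs "mpol=" mount option:
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *
 * For an interleave policy over nodes 0-3, buf reads "interleave:0-3"; for
 * the local policy it reads "local".
 */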
2762