xref: /openbmc/linux/mm/mempolicy.c (revision 8b036556)
1 /*
2  * Simple NUMA memory policy for the Linux kernel.
3  *
4  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6  * Subject to the GNU Public License, version 2.
7  *
8  * NUMA policy allows the user to give hints in which node(s) memory should
9  * be allocated.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
 19  *                for anonymous memory. For process policy a per-process
 20  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                to the last. It would be better if bind would truly restrict
26  *                the allocation to memory nodes instead
27  *
28  * preferred       Try a specific node first before normal fallback.
29  *                As a special case NUMA_NO_NODE here means do the allocation
30  *                on the local CPU. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
 38  * The process policy is applied for most non-interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local CPU. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
 49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
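
/*
 * Illustrative sketch, not part of this file: from user space these
 * policies are normally set with the set_mempolicy(2) and mbind(2)
 * system calls, e.g. through the libnuma <numaif.h> wrappers.
 * Assuming a machine with nodes 0 and 1:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = 0x3;		   bits 0 and 1 set
 *	size_t len = 16 * 4096;
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0);
 */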
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always grateful with that.
66 */
67 
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69 
70 #include <linux/mempolicy.h>
71 #include <linux/mm.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/export.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/ksm.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
92 #include <linux/syscalls.h>
93 #include <linux/ctype.h>
94 #include <linux/mm_inline.h>
95 #include <linux/mmu_notifier.h>
96 #include <linux/printk.h>
97 
98 #include <asm/tlbflush.h>
99 #include <asm/uaccess.h>
100 #include <linux/random.h>
101 
102 #include "internal.h"
103 
104 /* Internal flags */
105 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
106 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
107 
108 static struct kmem_cache *policy_cache;
109 static struct kmem_cache *sn_cache;
110 
111 /* Highest zone. A specific allocation for a zone below that is not
112    policied. */
113 enum zone_type policy_zone = 0;
114 
115 /*
116  * run-time system-wide default policy => local allocation
117  */
118 static struct mempolicy default_policy = {
119 	.refcnt = ATOMIC_INIT(1), /* never free it */
120 	.mode = MPOL_PREFERRED,
121 	.flags = MPOL_F_LOCAL,
122 };
123 
124 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
125 
126 struct mempolicy *get_task_policy(struct task_struct *p)
127 {
128 	struct mempolicy *pol = p->mempolicy;
129 	int node;
130 
131 	if (pol)
132 		return pol;
133 
134 	node = numa_node_id();
135 	if (node != NUMA_NO_NODE) {
136 		pol = &preferred_node_policy[node];
137 		/* preferred_node_policy is not initialised early in boot */
138 		if (pol->mode)
139 			return pol;
140 	}
141 
142 	return &default_policy;
143 }
144 
145 static const struct mempolicy_operations {
146 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
147 	/*
148 	 * If the read-side task has no lock to protect task->mempolicy, the
149 	 * write-side task rebinds task->mempolicy in two steps. The first step
150 	 * is setting all the newly allowed nodes, and the second step is
151 	 * clearing all the disallowed nodes. This way we avoid ending up with
152 	 * no node to allocate pages from.
153 	 * If we have a lock to protect task->mempolicy on the read side, we
154 	 * rebind directly.
155 	 *
156 	 * step:
157 	 * 	MPOL_REBIND_ONCE - do rebind work at once
158 	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
159 	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
160 	 */
161 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162 			enum mpol_rebind_step step);
163 } mpol_ops[MPOL_MAX];
164 
165 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
166 {
167 	return pol->flags & MPOL_MODE_FLAGS;
168 }
169 
170 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
171 				   const nodemask_t *rel)
172 {
173 	nodemask_t tmp;
174 	nodes_fold(tmp, *orig, nodes_weight(*rel));
175 	nodes_onto(*ret, tmp, *rel);
176 }
177 
178 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
179 {
180 	if (nodes_empty(*nodes))
181 		return -EINVAL;
182 	pol->v.nodes = *nodes;
183 	return 0;
184 }
185 
186 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
187 {
188 	if (!nodes)
189 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
190 	else if (nodes_empty(*nodes))
191 		return -EINVAL;			/*  no allowed nodes */
192 	else
193 		pol->v.preferred_node = first_node(*nodes);
194 	return 0;
195 }
196 
197 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
198 {
199 	if (nodes_empty(*nodes))
200 		return -EINVAL;
201 	pol->v.nodes = *nodes;
202 	return 0;
203 }
204 
205 /*
206  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
207  * any, for the new policy.  mpol_new() has already validated the nodes
208  * parameter with respect to the policy mode and flags.  But, we need to
209  * handle an empty nodemask with MPOL_PREFERRED here.
210  *
211  * Must be called holding task's alloc_lock to protect task's mems_allowed
212  * and mempolicy.  May also be called holding the mmap_semaphore for write.
213  */
214 static int mpol_set_nodemask(struct mempolicy *pol,
215 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
216 {
217 	int ret;
218 
219 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
220 	if (pol == NULL)
221 		return 0;
222 	/* Check N_MEMORY */
223 	nodes_and(nsc->mask1,
224 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
225 
226 	VM_BUG_ON(!nodes);
227 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
228 		nodes = NULL;	/* explicit local allocation */
229 	else {
230 		if (pol->flags & MPOL_F_RELATIVE_NODES)
231 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
232 		else
233 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
234 
235 		if (mpol_store_user_nodemask(pol))
236 			pol->w.user_nodemask = *nodes;
237 		else
238 			pol->w.cpuset_mems_allowed =
239 						cpuset_current_mems_allowed;
240 	}
241 
242 	if (nodes)
243 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
244 	else
245 		ret = mpol_ops[pol->mode].create(pol, NULL);
246 	return ret;
247 }
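
/*
 * Typical calling sequence, a condensed sketch of what do_set_mempolicy()
 * below does (error handling elided); the scratch nodemask avoids putting
 * a large nodemask_t on the stack:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */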
248 
249 /*
250  * This function just creates a new policy, does some checks and simple
251  * initialization. You must invoke mpol_set_nodemask() to set nodes.
252  */
253 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
254 				  nodemask_t *nodes)
255 {
256 	struct mempolicy *policy;
257 
258 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
259 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
260 
261 	if (mode == MPOL_DEFAULT) {
262 		if (nodes && !nodes_empty(*nodes))
263 			return ERR_PTR(-EINVAL);
264 		return NULL;
265 	}
266 	VM_BUG_ON(!nodes);
267 
268 	/*
269 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
270 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
271 	 * All other modes require a valid pointer to a non-empty nodemask.
272 	 */
273 	if (mode == MPOL_PREFERRED) {
274 		if (nodes_empty(*nodes)) {
275 			if (((flags & MPOL_F_STATIC_NODES) ||
276 			     (flags & MPOL_F_RELATIVE_NODES)))
277 				return ERR_PTR(-EINVAL);
278 		}
279 	} else if (mode == MPOL_LOCAL) {
280 		if (!nodes_empty(*nodes))
281 			return ERR_PTR(-EINVAL);
282 		mode = MPOL_PREFERRED;
283 	} else if (nodes_empty(*nodes))
284 		return ERR_PTR(-EINVAL);
285 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
286 	if (!policy)
287 		return ERR_PTR(-ENOMEM);
288 	atomic_set(&policy->refcnt, 1);
289 	policy->mode = mode;
290 	policy->flags = flags;
291 
292 	return policy;
293 }
294 
295 /* Slow path of a mpol destructor. */
296 void __mpol_put(struct mempolicy *p)
297 {
298 	if (!atomic_dec_and_test(&p->refcnt))
299 		return;
300 	kmem_cache_free(policy_cache, p);
301 }
302 
303 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
304 				enum mpol_rebind_step step)
305 {
306 }
307 
308 /*
309  * step:
310  * 	MPOL_REBIND_ONCE  - do rebind work at once
311  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
312  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
313  */
314 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
315 				 enum mpol_rebind_step step)
316 {
317 	nodemask_t tmp;
318 
319 	if (pol->flags & MPOL_F_STATIC_NODES)
320 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
323 	else {
324 		/*
325 		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
326 		 * result
327 		 */
328 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
329 			nodes_remap(tmp, pol->v.nodes,
330 					pol->w.cpuset_mems_allowed, *nodes);
331 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
332 		} else if (step == MPOL_REBIND_STEP2) {
333 			tmp = pol->w.cpuset_mems_allowed;
334 			pol->w.cpuset_mems_allowed = *nodes;
335 		} else
336 			BUG();
337 	}
338 
339 	if (nodes_empty(tmp))
340 		tmp = *nodes;
341 
342 	if (step == MPOL_REBIND_STEP1)
343 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
344 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
345 		pol->v.nodes = tmp;
346 	else
347 		BUG();
348 
349 	if (!node_isset(current->il_next, tmp)) {
350 		current->il_next = next_node(current->il_next, tmp);
351 		if (current->il_next >= MAX_NUMNODES)
352 			current->il_next = first_node(tmp);
353 		if (current->il_next >= MAX_NUMNODES)
354 			current->il_next = numa_node_id();
355 	}
356 }
357 
358 static void mpol_rebind_preferred(struct mempolicy *pol,
359 				  const nodemask_t *nodes,
360 				  enum mpol_rebind_step step)
361 {
362 	nodemask_t tmp;
363 
364 	if (pol->flags & MPOL_F_STATIC_NODES) {
365 		int node = first_node(pol->w.user_nodemask);
366 
367 		if (node_isset(node, *nodes)) {
368 			pol->v.preferred_node = node;
369 			pol->flags &= ~MPOL_F_LOCAL;
370 		} else
371 			pol->flags |= MPOL_F_LOCAL;
372 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
373 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
374 		pol->v.preferred_node = first_node(tmp);
375 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
376 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
377 						   pol->w.cpuset_mems_allowed,
378 						   *nodes);
379 		pol->w.cpuset_mems_allowed = *nodes;
380 	}
381 }
382 
383 /*
384  * mpol_rebind_policy - Migrate a policy to a different set of nodes
385  *
386  * If the read-side task has no lock to protect task->mempolicy, the
387  * write-side task rebinds task->mempolicy in two steps. The first step is
388  * setting all the newly allowed nodes, and the second step is clearing all
389  * the disallowed nodes. This way we avoid ending up with no node to
390  * allocate pages from.
391  * If we have a lock to protect task->mempolicy on the read side, we rebind
392  * directly.
393  *
394  * step:
395  * 	MPOL_REBIND_ONCE  - do rebind work at once
396  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
397  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
398  */
399 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
400 				enum mpol_rebind_step step)
401 {
402 	if (!pol)
403 		return;
404 	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
405 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
406 		return;
407 
408 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
409 		return;
410 
411 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
412 		BUG();
413 
414 	if (step == MPOL_REBIND_STEP1)
415 		pol->flags |= MPOL_F_REBINDING;
416 	else if (step == MPOL_REBIND_STEP2)
417 		pol->flags &= ~MPOL_F_REBINDING;
418 	else if (step >= MPOL_REBIND_NSTEP)
419 		BUG();
420 
421 	mpol_ops[pol->mode].rebind(pol, newmask, step);
422 }
423 
424 /*
425  * Wrapper for mpol_rebind_policy() that just requires task
426  * pointer, and updates task mempolicy.
427  *
428  * Called with task's alloc_lock held.
429  */
430 
431 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
432 			enum mpol_rebind_step step)
433 {
434 	mpol_rebind_policy(tsk->mempolicy, new, step);
435 }
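
/*
 * Sketch of how a writer drives the two-step rebind described above,
 * roughly what the cpuset code does when a task's mems_allowed changes:
 * first grow the allowed set so readers always see at least one node,
 * then shrink it to the final set.
 *
 *	task_lock(tsk);
 *	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 *	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 *	tsk->mems_allowed = *newmems;
 *	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
 *	task_unlock(tsk);
 */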
436 
437 /*
438  * Rebind each vma in mm to new nodemask.
439  *
440  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
441  */
442 
443 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
444 {
445 	struct vm_area_struct *vma;
446 
447 	down_write(&mm->mmap_sem);
448 	for (vma = mm->mmap; vma; vma = vma->vm_next)
449 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
450 	up_write(&mm->mmap_sem);
451 }
452 
453 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
454 	[MPOL_DEFAULT] = {
455 		.rebind = mpol_rebind_default,
456 	},
457 	[MPOL_INTERLEAVE] = {
458 		.create = mpol_new_interleave,
459 		.rebind = mpol_rebind_nodemask,
460 	},
461 	[MPOL_PREFERRED] = {
462 		.create = mpol_new_preferred,
463 		.rebind = mpol_rebind_preferred,
464 	},
465 	[MPOL_BIND] = {
466 		.create = mpol_new_bind,
467 		.rebind = mpol_rebind_nodemask,
468 	},
469 };
470 
471 static void migrate_page_add(struct page *page, struct list_head *pagelist,
472 				unsigned long flags);
473 
474 struct queue_pages {
475 	struct list_head *pagelist;
476 	unsigned long flags;
477 	nodemask_t *nmask;
478 	struct vm_area_struct *prev;
479 };
480 
481 /*
482  * Scan through the pages, checking whether they satisfy the given
483  * conditions, and move them to the pagelist if they do.
484  */
485 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
486 			unsigned long end, struct mm_walk *walk)
487 {
488 	struct vm_area_struct *vma = walk->vma;
489 	struct page *page;
490 	struct queue_pages *qp = walk->private;
491 	unsigned long flags = qp->flags;
492 	int nid;
493 	pte_t *pte;
494 	spinlock_t *ptl;
495 
496 	split_huge_page_pmd(vma, addr, pmd);
497 	if (pmd_trans_unstable(pmd))
498 		return 0;
499 
500 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
501 	for (; addr != end; pte++, addr += PAGE_SIZE) {
502 		if (!pte_present(*pte))
503 			continue;
504 		page = vm_normal_page(vma, addr, *pte);
505 		if (!page)
506 			continue;
507 		/*
508 		 * vm_normal_page() filters out zero pages, but there might
509 		 * still be PageReserved pages to skip, perhaps in a VDSO.
510 		 */
511 		if (PageReserved(page))
512 			continue;
513 		nid = page_to_nid(page);
514 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
515 			continue;
516 
517 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
518 			migrate_page_add(page, qp->pagelist, flags);
519 	}
520 	pte_unmap_unlock(pte - 1, ptl);
521 	cond_resched();
522 	return 0;
523 }
524 
525 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
526 			       unsigned long addr, unsigned long end,
527 			       struct mm_walk *walk)
528 {
529 #ifdef CONFIG_HUGETLB_PAGE
530 	struct queue_pages *qp = walk->private;
531 	unsigned long flags = qp->flags;
532 	int nid;
533 	struct page *page;
534 	spinlock_t *ptl;
535 	pte_t entry;
536 
537 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
538 	entry = huge_ptep_get(pte);
539 	if (!pte_present(entry))
540 		goto unlock;
541 	page = pte_page(entry);
542 	nid = page_to_nid(page);
543 	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
544 		goto unlock;
545 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
546 	if (flags & (MPOL_MF_MOVE_ALL) ||
547 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
548 		isolate_huge_page(page, qp->pagelist);
549 unlock:
550 	spin_unlock(ptl);
551 #else
552 	BUG();
553 #endif
554 	return 0;
555 }
556 
557 #ifdef CONFIG_NUMA_BALANCING
558 /*
559  * This is used to mark a range of virtual addresses to be inaccessible.
560  * These are later cleared by a NUMA hinting fault. Depending on these
561  * faults, pages may be migrated for better NUMA placement.
562  *
563  * This is assuming that NUMA faults are handled using PROT_NONE. If
564  * an architecture makes a different choice, it will need further
565  * changes to the core.
566  */
567 unsigned long change_prot_numa(struct vm_area_struct *vma,
568 			unsigned long addr, unsigned long end)
569 {
570 	int nr_updated;
571 
572 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
573 	if (nr_updated)
574 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
575 
576 	return nr_updated;
577 }
578 #else
579 static unsigned long change_prot_numa(struct vm_area_struct *vma,
580 			unsigned long addr, unsigned long end)
581 {
582 	return 0;
583 }
584 #endif /* CONFIG_NUMA_BALANCING */
585 
586 static int queue_pages_test_walk(unsigned long start, unsigned long end,
587 				struct mm_walk *walk)
588 {
589 	struct vm_area_struct *vma = walk->vma;
590 	struct queue_pages *qp = walk->private;
591 	unsigned long endvma = vma->vm_end;
592 	unsigned long flags = qp->flags;
593 
594 	if (vma->vm_flags & VM_PFNMAP)
595 		return 1;
596 
597 	if (endvma > end)
598 		endvma = end;
599 	if (vma->vm_start > start)
600 		start = vma->vm_start;
601 
602 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
603 		if (!vma->vm_next && vma->vm_end < end)
604 			return -EFAULT;
605 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
606 			return -EFAULT;
607 	}
608 
609 	qp->prev = vma;
610 
614 	if (flags & MPOL_MF_LAZY) {
615 		/* Similar to task_numa_work, skip inaccessible VMAs */
616 		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
617 			change_prot_numa(vma, start, endvma);
618 		return 1;
619 	}
620 
621 	if ((flags & MPOL_MF_STRICT) ||
622 	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
623 	     vma_migratable(vma)))
624 		/* queue pages from current vma */
625 		return 0;
626 	return 1;
627 }
628 
629 /*
630  * Walk through page tables and collect pages to be migrated.
631  *
632  * If pages found in a given range are on a set of nodes (determined by
633  * @nodes and @flags), they are isolated and queued to the pagelist that
634  * is passed in via @pagelist.
635  */
636 static int
637 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
638 		nodemask_t *nodes, unsigned long flags,
639 		struct list_head *pagelist)
640 {
641 	struct queue_pages qp = {
642 		.pagelist = pagelist,
643 		.flags = flags,
644 		.nmask = nodes,
645 		.prev = NULL,
646 	};
647 	struct mm_walk queue_pages_walk = {
648 		.hugetlb_entry = queue_pages_hugetlb,
649 		.pmd_entry = queue_pages_pte_range,
650 		.test_walk = queue_pages_test_walk,
651 		.mm = mm,
652 		.private = &qp,
653 	};
654 
655 	return walk_page_range(start, end, &queue_pages_walk);
656 }
657 
658 /*
659  * Apply policy to a single VMA
660  * This must be called with the mmap_sem held for writing.
661  */
662 static int vma_replace_policy(struct vm_area_struct *vma,
663 						struct mempolicy *pol)
664 {
665 	int err;
666 	struct mempolicy *old;
667 	struct mempolicy *new;
668 
669 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
670 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
671 		 vma->vm_ops, vma->vm_file,
672 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
673 
674 	new = mpol_dup(pol);
675 	if (IS_ERR(new))
676 		return PTR_ERR(new);
677 
678 	if (vma->vm_ops && vma->vm_ops->set_policy) {
679 		err = vma->vm_ops->set_policy(vma, new);
680 		if (err)
681 			goto err_out;
682 	}
683 
684 	old = vma->vm_policy;
685 	vma->vm_policy = new; /* protected by mmap_sem */
686 	mpol_put(old);
687 
688 	return 0;
689  err_out:
690 	mpol_put(new);
691 	return err;
692 }
693 
694 /* Step 2: apply policy to a range and do splits. */
695 static int mbind_range(struct mm_struct *mm, unsigned long start,
696 		       unsigned long end, struct mempolicy *new_pol)
697 {
698 	struct vm_area_struct *next;
699 	struct vm_area_struct *prev;
700 	struct vm_area_struct *vma;
701 	int err = 0;
702 	pgoff_t pgoff;
703 	unsigned long vmstart;
704 	unsigned long vmend;
705 
706 	vma = find_vma(mm, start);
707 	if (!vma || vma->vm_start > start)
708 		return -EFAULT;
709 
710 	prev = vma->vm_prev;
711 	if (start > vma->vm_start)
712 		prev = vma;
713 
714 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
715 		next = vma->vm_next;
716 		vmstart = max(start, vma->vm_start);
717 		vmend   = min(end, vma->vm_end);
718 
719 		if (mpol_equal(vma_policy(vma), new_pol))
720 			continue;
721 
722 		pgoff = vma->vm_pgoff +
723 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
724 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
725 				  vma->anon_vma, vma->vm_file, pgoff,
726 				  new_pol);
727 		if (prev) {
728 			vma = prev;
729 			next = vma->vm_next;
730 			if (mpol_equal(vma_policy(vma), new_pol))
731 				continue;
732 			/* vma_merge() joined vma && vma->next, case 8 */
733 			goto replace;
734 		}
735 		if (vma->vm_start != vmstart) {
736 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
737 			if (err)
738 				goto out;
739 		}
740 		if (vma->vm_end != vmend) {
741 			err = split_vma(vma->vm_mm, vma, vmend, 0);
742 			if (err)
743 				goto out;
744 		}
745  replace:
746 		err = vma_replace_policy(vma, new_pol);
747 		if (err)
748 			goto out;
749 	}
750 
751  out:
752 	return err;
753 }
754 
755 /* Set the process memory policy */
756 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
757 			     nodemask_t *nodes)
758 {
759 	struct mempolicy *new, *old;
760 	NODEMASK_SCRATCH(scratch);
761 	int ret;
762 
763 	if (!scratch)
764 		return -ENOMEM;
765 
766 	new = mpol_new(mode, flags, nodes);
767 	if (IS_ERR(new)) {
768 		ret = PTR_ERR(new);
769 		goto out;
770 	}
771 
772 	task_lock(current);
773 	ret = mpol_set_nodemask(new, nodes, scratch);
774 	if (ret) {
775 		task_unlock(current);
776 		mpol_put(new);
777 		goto out;
778 	}
779 	old = current->mempolicy;
780 	current->mempolicy = new;
781 	if (new && new->mode == MPOL_INTERLEAVE &&
782 	    nodes_weight(new->v.nodes))
783 		current->il_next = first_node(new->v.nodes);
784 	task_unlock(current);
785 	mpol_put(old);
786 	ret = 0;
787 out:
788 	NODEMASK_SCRATCH_FREE(scratch);
789 	return ret;
790 }
791 
792 /*
793  * Return nodemask for policy for get_mempolicy() query
794  *
795  * Called with task's alloc_lock held
796  */
797 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
798 {
799 	nodes_clear(*nodes);
800 	if (p == &default_policy)
801 		return;
802 
803 	switch (p->mode) {
804 	case MPOL_BIND:
805 		/* Fall through */
806 	case MPOL_INTERLEAVE:
807 		*nodes = p->v.nodes;
808 		break;
809 	case MPOL_PREFERRED:
810 		if (!(p->flags & MPOL_F_LOCAL))
811 			node_set(p->v.preferred_node, *nodes);
812 		/* else return empty node mask for local allocation */
813 		break;
814 	default:
815 		BUG();
816 	}
817 }
818 
819 static int lookup_node(struct mm_struct *mm, unsigned long addr)
820 {
821 	struct page *p;
822 	int err;
823 
824 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
825 	if (err >= 0) {
826 		err = page_to_nid(p);
827 		put_page(p);
828 	}
829 	return err;
830 }
831 
832 /* Retrieve NUMA policy */
833 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
834 			     unsigned long addr, unsigned long flags)
835 {
836 	int err;
837 	struct mm_struct *mm = current->mm;
838 	struct vm_area_struct *vma = NULL;
839 	struct mempolicy *pol = current->mempolicy;
840 
841 	if (flags &
842 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
843 		return -EINVAL;
844 
845 	if (flags & MPOL_F_MEMS_ALLOWED) {
846 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
847 			return -EINVAL;
848 		*policy = 0;	/* just so it's initialized */
849 		task_lock(current);
850 		*nmask  = cpuset_current_mems_allowed;
851 		task_unlock(current);
852 		return 0;
853 	}
854 
855 	if (flags & MPOL_F_ADDR) {
856 		/*
857 		 * Do NOT fall back to task policy if the
858 		 * vma/shared policy at addr is NULL.  We
859 		 * want to return MPOL_DEFAULT in this case.
860 		 */
861 		down_read(&mm->mmap_sem);
862 		vma = find_vma_intersection(mm, addr, addr+1);
863 		if (!vma) {
864 			up_read(&mm->mmap_sem);
865 			return -EFAULT;
866 		}
867 		if (vma->vm_ops && vma->vm_ops->get_policy)
868 			pol = vma->vm_ops->get_policy(vma, addr);
869 		else
870 			pol = vma->vm_policy;
871 	} else if (addr)
872 		return -EINVAL;
873 
874 	if (!pol)
875 		pol = &default_policy;	/* indicates default behavior */
876 
877 	if (flags & MPOL_F_NODE) {
878 		if (flags & MPOL_F_ADDR) {
879 			err = lookup_node(mm, addr);
880 			if (err < 0)
881 				goto out;
882 			*policy = err;
883 		} else if (pol == current->mempolicy &&
884 				pol->mode == MPOL_INTERLEAVE) {
885 			*policy = current->il_next;
886 		} else {
887 			err = -EINVAL;
888 			goto out;
889 		}
890 	} else {
891 		*policy = pol == &default_policy ? MPOL_DEFAULT :
892 						pol->mode;
893 		/*
894 		 * Internal mempolicy flags must be masked off before exposing
895 		 * the policy to userspace.
896 		 */
897 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
898 	}
899 
900 	if (vma) {
901 		up_read(&current->mm->mmap_sem);
902 		vma = NULL;
903 	}
904 
905 	err = 0;
906 	if (nmask) {
907 		if (mpol_store_user_nodemask(pol)) {
908 			*nmask = pol->w.user_nodemask;
909 		} else {
910 			task_lock(current);
911 			get_policy_nodemask(pol, nmask);
912 			task_unlock(current);
913 		}
914 	}
915 
916  out:
917 	mpol_cond_put(pol);
918 	if (vma)
919 		up_read(&current->mm->mmap_sem);
920 	return err;
921 }
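
/*
 * Illustrative user-space sketch (libnuma's <numaif.h> wrapper, with a
 * hypothetical mapped address addr): ask which node currently backs an
 * address and which policy mode applies there.
 *
 *	#include <numaif.h>
 *
 *	int node, mode;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR);
 */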
922 
923 #ifdef CONFIG_MIGRATION
924 /*
925  * page migration
926  */
927 static void migrate_page_add(struct page *page, struct list_head *pagelist,
928 				unsigned long flags)
929 {
930 	/*
931 	 * Avoid migrating a page that is shared with others.
932 	 */
933 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
934 		if (!isolate_lru_page(page)) {
935 			list_add_tail(&page->lru, pagelist);
936 			inc_zone_page_state(page, NR_ISOLATED_ANON +
937 					    page_is_file_cache(page));
938 		}
939 	}
940 }
941 
942 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
943 {
944 	if (PageHuge(page))
945 		return alloc_huge_page_node(page_hstate(compound_head(page)),
946 					node);
947 	else
948 		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
949 }
950 
951 /*
952  * Migrate pages from one node to a target node.
953  * Returns error or the number of pages not migrated.
954  */
955 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
956 			   int flags)
957 {
958 	nodemask_t nmask;
959 	LIST_HEAD(pagelist);
960 	int err = 0;
961 
962 	nodes_clear(nmask);
963 	node_set(source, nmask);
964 
965 	/*
966 	 * This does not "check" the range but isolates all pages that
967 	 * need migration.  Between passing in the full user address
968 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
969 	 */
970 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
971 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
972 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
973 
974 	if (!list_empty(&pagelist)) {
975 		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
976 					MIGRATE_SYNC, MR_SYSCALL);
977 		if (err)
978 			putback_movable_pages(&pagelist);
979 	}
980 
981 	return err;
982 }
983 
984 /*
985  * Move pages between the two nodesets so as to preserve the physical
986  * layout as much as possible.
987  *
988  * Returns the number of pages that could not be moved.
989  */
990 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
991 		     const nodemask_t *to, int flags)
992 {
993 	int busy = 0;
994 	int err;
995 	nodemask_t tmp;
996 
997 	err = migrate_prep();
998 	if (err)
999 		return err;
1000 
1001 	down_read(&mm->mmap_sem);
1002 
1003 	/*
1004 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1005 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1006 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1007 	 * The pair of nodemasks 'to' and 'from' define the map.
1008 	 *
1009 	 * If no pair of bits is found that way, fallback to picking some
1010 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1011 	 * 'source' and 'dest' bits are the same, this represents a node
1012 	 * that will be migrating to itself, so no pages need move.
1013 	 *
1014 	 * If no bits are left in 'tmp', or if all remaining bits left
1015 	 * in 'tmp' correspond to the same bit in 'to', return false
1016 	 * (nothing left to migrate).
1017 	 *
1018 	 * This lets us pick a pair of nodes to migrate between, such that
1019 	 * if possible the dest node is not already occupied by some other
1020 	 * source node, minimizing the risk of overloading the memory on a
1021 	 * node that would happen if we migrated incoming memory to a node
1022 	 * before migrating the outgoing memory sourced from that same node.
1023 	 *
1024 	 * A single scan of tmp is sufficient.  As we go, we remember the
1025 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1026 	 * that not only moved, but what's better, moved to an empty slot
1027 	 * (d is not set in tmp), then we break out then, with that pair.
1028 	 * Otherwise when we finish scanning from_tmp, we at least have the
1029 	 * most recent <s, d> pair that moved.  If we get all the way through
1030 	 * the scan of tmp without finding any node that moved, much less
1031 	 * moved to an empty node, then there is nothing left worth migrating.
1032 	 */
1033 
1034 	tmp = *from;
1035 	while (!nodes_empty(tmp)) {
1036 		int s,d;
1037 		int source = NUMA_NO_NODE;
1038 		int dest = 0;
1039 
1040 		for_each_node_mask(s, tmp) {
1041 
1042 			/*
1043 			 * do_migrate_pages() tries to maintain the relative
1044 			 * node relationship of the pages established between
1045 			 * threads and memory areas.
1046 			 *
1047 			 * However if the number of source nodes is not equal to
1048 			 * the number of destination nodes we can not preserve
1049 			 * this node relative relationship.  In that case, skip
1050 			 * copying memory from a node that is in the destination
1051 			 * mask.
1052 			 *
1053 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1054 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1055 			 */
1056 
1057 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1058 						(node_isset(s, *to)))
1059 				continue;
1060 
1061 			d = node_remap(s, *from, *to);
1062 			if (s == d)
1063 				continue;
1064 
1065 			source = s;	/* Node moved. Memorize */
1066 			dest = d;
1067 
1068 			/* dest not in remaining from nodes? */
1069 			if (!node_isset(dest, tmp))
1070 				break;
1071 		}
1072 		if (source == NUMA_NO_NODE)
1073 			break;
1074 
1075 		node_clear(source, tmp);
1076 		err = migrate_to_node(mm, source, dest, flags);
1077 		if (err > 0)
1078 			busy += err;
1079 		if (err < 0)
1080 			break;
1081 	}
1082 	up_read(&mm->mmap_sem);
1083 	if (err < 0)
1084 		return err;
1085 	return busy;
1086 
1087 }
1088 
1089 /*
1090  * Allocate a new page for page migration based on vma policy.
1091  * Start by assuming the page is mapped by the same vma as contains @start.
1092  * Search forward from there, if not.  N.B., this assumes that the
1093  * list of pages handed to migrate_pages()--which is how we get here--
1094  * is in virtual address order.
1095  */
1096 static struct page *new_page(struct page *page, unsigned long start, int **x)
1097 {
1098 	struct vm_area_struct *vma;
1099 	unsigned long uninitialized_var(address);
1100 
1101 	vma = find_vma(current->mm, start);
1102 	while (vma) {
1103 		address = page_address_in_vma(page, vma);
1104 		if (address != -EFAULT)
1105 			break;
1106 		vma = vma->vm_next;
1107 	}
1108 
1109 	if (PageHuge(page)) {
1110 		BUG_ON(!vma);
1111 		return alloc_huge_page_noerr(vma, address, 1);
1112 	}
1113 	/*
1114 	 * if !vma, alloc_page_vma() will use task or system default policy
1115 	 */
1116 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1117 }
1118 #else
1119 
1120 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1121 				unsigned long flags)
1122 {
1123 }
1124 
1125 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1126 		     const nodemask_t *to, int flags)
1127 {
1128 	return -ENOSYS;
1129 }
1130 
1131 static struct page *new_page(struct page *page, unsigned long start, int **x)
1132 {
1133 	return NULL;
1134 }
1135 #endif
1136 
1137 static long do_mbind(unsigned long start, unsigned long len,
1138 		     unsigned short mode, unsigned short mode_flags,
1139 		     nodemask_t *nmask, unsigned long flags)
1140 {
1141 	struct mm_struct *mm = current->mm;
1142 	struct mempolicy *new;
1143 	unsigned long end;
1144 	int err;
1145 	LIST_HEAD(pagelist);
1146 
1147 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1148 		return -EINVAL;
1149 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1150 		return -EPERM;
1151 
1152 	if (start & ~PAGE_MASK)
1153 		return -EINVAL;
1154 
1155 	if (mode == MPOL_DEFAULT)
1156 		flags &= ~MPOL_MF_STRICT;
1157 
1158 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1159 	end = start + len;
1160 
1161 	if (end < start)
1162 		return -EINVAL;
1163 	if (end == start)
1164 		return 0;
1165 
1166 	new = mpol_new(mode, mode_flags, nmask);
1167 	if (IS_ERR(new))
1168 		return PTR_ERR(new);
1169 
1170 	if (flags & MPOL_MF_LAZY)
1171 		new->flags |= MPOL_F_MOF;
1172 
1173 	/*
1174 	 * If we are using the default policy then operation
1175 	 * on discontinuous address spaces is okay after all
1176 	 */
1177 	if (!new)
1178 		flags |= MPOL_MF_DISCONTIG_OK;
1179 
1180 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1181 		 start, start + len, mode, mode_flags,
1182 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1183 
1184 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1185 
1186 		err = migrate_prep();
1187 		if (err)
1188 			goto mpol_out;
1189 	}
1190 	{
1191 		NODEMASK_SCRATCH(scratch);
1192 		if (scratch) {
1193 			down_write(&mm->mmap_sem);
1194 			task_lock(current);
1195 			err = mpol_set_nodemask(new, nmask, scratch);
1196 			task_unlock(current);
1197 			if (err)
1198 				up_write(&mm->mmap_sem);
1199 		} else
1200 			err = -ENOMEM;
1201 		NODEMASK_SCRATCH_FREE(scratch);
1202 	}
1203 	if (err)
1204 		goto mpol_out;
1205 
1206 	err = queue_pages_range(mm, start, end, nmask,
1207 			  flags | MPOL_MF_INVERT, &pagelist);
1208 	if (!err)
1209 		err = mbind_range(mm, start, end, new);
1210 
1211 	if (!err) {
1212 		int nr_failed = 0;
1213 
1214 		if (!list_empty(&pagelist)) {
1215 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1216 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1217 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1218 			if (nr_failed)
1219 				putback_movable_pages(&pagelist);
1220 		}
1221 
1222 		if (nr_failed && (flags & MPOL_MF_STRICT))
1223 			err = -EIO;
1224 	} else
1225 		putback_movable_pages(&pagelist);
1226 
1227 	up_write(&mm->mmap_sem);
1228  mpol_out:
1229 	mpol_put(new);
1230 	return err;
1231 }
1232 
1233 /*
1234  * User space interface with variable sized bitmaps for nodelists.
1235  */
1236 
1237 /* Copy a node mask from user space. */
1238 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1239 		     unsigned long maxnode)
1240 {
1241 	unsigned long k;
1242 	unsigned long nlongs;
1243 	unsigned long endmask;
1244 
1245 	--maxnode;
1246 	nodes_clear(*nodes);
1247 	if (maxnode == 0 || !nmask)
1248 		return 0;
1249 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1250 		return -EINVAL;
1251 
1252 	nlongs = BITS_TO_LONGS(maxnode);
1253 	if ((maxnode % BITS_PER_LONG) == 0)
1254 		endmask = ~0UL;
1255 	else
1256 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1257 
1258 	/* When the user specified more nodes than supported, just check
1259 	   that the unsupported part is all zero. */
1260 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1261 		if (nlongs > PAGE_SIZE/sizeof(long))
1262 			return -EINVAL;
1263 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1264 			unsigned long t;
1265 			if (get_user(t, nmask + k))
1266 				return -EFAULT;
1267 			if (k == nlongs - 1) {
1268 				if (t & endmask)
1269 					return -EINVAL;
1270 			} else if (t)
1271 				return -EINVAL;
1272 		}
1273 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1274 		endmask = ~0UL;
1275 	}
1276 
1277 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1278 		return -EFAULT;
1279 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1280 	return 0;
1281 }
1282 
1283 /* Copy a kernel node mask to user space */
1284 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1285 			      nodemask_t *nodes)
1286 {
1287 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1288 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1289 
1290 	if (copy > nbytes) {
1291 		if (copy > PAGE_SIZE)
1292 			return -EINVAL;
1293 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1294 			return -EFAULT;
1295 		copy = nbytes;
1296 	}
1297 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1298 }
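
/*
 * Sketch of the user-space side of this ABI (hypothetical values): the
 * nodemask is a plain array of unsigned longs and @maxnode is a count
 * of bits, so a raw call naming nodes 0-2 might look like:
 *
 *	#include <numaif.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long mask[1] = { 0x7 };	   bits 0-2 set
 *	syscall(SYS_set_mempolicy, MPOL_BIND, mask, 8 * sizeof(mask));
 *
 * get_nodes() above copies BITS_TO_LONGS(maxnode - 1) longs and rejects
 * any set bit beyond MAX_NUMNODES.
 */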
1299 
1300 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1301 		unsigned long, mode, const unsigned long __user *, nmask,
1302 		unsigned long, maxnode, unsigned, flags)
1303 {
1304 	nodemask_t nodes;
1305 	int err;
1306 	unsigned short mode_flags;
1307 
1308 	mode_flags = mode & MPOL_MODE_FLAGS;
1309 	mode &= ~MPOL_MODE_FLAGS;
1310 	if (mode >= MPOL_MAX)
1311 		return -EINVAL;
1312 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1313 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1314 		return -EINVAL;
1315 	err = get_nodes(&nodes, nmask, maxnode);
1316 	if (err)
1317 		return err;
1318 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1319 }
1320 
1321 /* Set the process memory policy */
1322 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1323 		unsigned long, maxnode)
1324 {
1325 	int err;
1326 	nodemask_t nodes;
1327 	unsigned short flags;
1328 
1329 	flags = mode & MPOL_MODE_FLAGS;
1330 	mode &= ~MPOL_MODE_FLAGS;
1331 	if ((unsigned int)mode >= MPOL_MAX)
1332 		return -EINVAL;
1333 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1334 		return -EINVAL;
1335 	err = get_nodes(&nodes, nmask, maxnode);
1336 	if (err)
1337 		return err;
1338 	return do_set_mempolicy(mode, flags, &nodes);
1339 }
1340 
1341 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1342 		const unsigned long __user *, old_nodes,
1343 		const unsigned long __user *, new_nodes)
1344 {
1345 	const struct cred *cred = current_cred(), *tcred;
1346 	struct mm_struct *mm = NULL;
1347 	struct task_struct *task;
1348 	nodemask_t task_nodes;
1349 	int err;
1350 	nodemask_t *old;
1351 	nodemask_t *new;
1352 	NODEMASK_SCRATCH(scratch);
1353 
1354 	if (!scratch)
1355 		return -ENOMEM;
1356 
1357 	old = &scratch->mask1;
1358 	new = &scratch->mask2;
1359 
1360 	err = get_nodes(old, old_nodes, maxnode);
1361 	if (err)
1362 		goto out;
1363 
1364 	err = get_nodes(new, new_nodes, maxnode);
1365 	if (err)
1366 		goto out;
1367 
1368 	/* Find the mm_struct */
1369 	rcu_read_lock();
1370 	task = pid ? find_task_by_vpid(pid) : current;
1371 	if (!task) {
1372 		rcu_read_unlock();
1373 		err = -ESRCH;
1374 		goto out;
1375 	}
1376 	get_task_struct(task);
1377 
1378 	err = -EINVAL;
1379 
1380 	/*
1381 	 * Check if this process has the right to modify the specified
1382 	 * process. The right exists if the process has administrative
1383 	 * capabilities, superuser privileges or the same
1384 	 * userid as the target process.
1385 	 */
1386 	tcred = __task_cred(task);
1387 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1388 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1389 	    !capable(CAP_SYS_NICE)) {
1390 		rcu_read_unlock();
1391 		err = -EPERM;
1392 		goto out_put;
1393 	}
1394 	rcu_read_unlock();
1395 
1396 	task_nodes = cpuset_mems_allowed(task);
1397 	/* Is the user allowed to access the target nodes? */
1398 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1399 		err = -EPERM;
1400 		goto out_put;
1401 	}
1402 
1403 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
1404 		err = -EINVAL;
1405 		goto out_put;
1406 	}
1407 
1408 	err = security_task_movememory(task);
1409 	if (err)
1410 		goto out_put;
1411 
1412 	mm = get_task_mm(task);
1413 	put_task_struct(task);
1414 
1415 	if (!mm) {
1416 		err = -EINVAL;
1417 		goto out;
1418 	}
1419 
1420 	err = do_migrate_pages(mm, old, new,
1421 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1422 
1423 	mmput(mm);
1424 out:
1425 	NODEMASK_SCRATCH_FREE(scratch);
1426 
1427 	return err;
1428 
1429 out_put:
1430 	put_task_struct(task);
1431 	goto out;
1432 
1433 }
1434 
1435 
1436 /* Retrieve NUMA policy */
1437 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1438 		unsigned long __user *, nmask, unsigned long, maxnode,
1439 		unsigned long, addr, unsigned long, flags)
1440 {
1441 	int err;
1442 	int uninitialized_var(pval);
1443 	nodemask_t nodes;
1444 
1445 	if (nmask != NULL && maxnode < MAX_NUMNODES)
1446 		return -EINVAL;
1447 
1448 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1449 
1450 	if (err)
1451 		return err;
1452 
1453 	if (policy && put_user(pval, policy))
1454 		return -EFAULT;
1455 
1456 	if (nmask)
1457 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1458 
1459 	return err;
1460 }
1461 
1462 #ifdef CONFIG_COMPAT
1463 
1464 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1465 		       compat_ulong_t __user *, nmask,
1466 		       compat_ulong_t, maxnode,
1467 		       compat_ulong_t, addr, compat_ulong_t, flags)
1468 {
1469 	long err;
1470 	unsigned long __user *nm = NULL;
1471 	unsigned long nr_bits, alloc_size;
1472 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1473 
1474 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1475 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1476 
1477 	if (nmask)
1478 		nm = compat_alloc_user_space(alloc_size);
1479 
1480 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1481 
1482 	if (!err && nmask) {
1483 		unsigned long copy_size;
1484 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1485 		err = copy_from_user(bm, nm, copy_size);
1486 		/* ensure entire bitmap is zeroed */
1487 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1488 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1489 	}
1490 
1491 	return err;
1492 }
1493 
1494 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1495 		       compat_ulong_t, maxnode)
1496 {
1497 	long err = 0;
1498 	unsigned long __user *nm = NULL;
1499 	unsigned long nr_bits, alloc_size;
1500 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1501 
1502 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1503 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1504 
1505 	if (nmask) {
1506 		err = compat_get_bitmap(bm, nmask, nr_bits);
1507 		nm = compat_alloc_user_space(alloc_size);
1508 		err |= copy_to_user(nm, bm, alloc_size);
1509 	}
1510 
1511 	if (err)
1512 		return -EFAULT;
1513 
1514 	return sys_set_mempolicy(mode, nm, nr_bits+1);
1515 }
1516 
1517 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1518 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1519 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1520 {
1521 	long err = 0;
1522 	unsigned long __user *nm = NULL;
1523 	unsigned long nr_bits, alloc_size;
1524 	nodemask_t bm;
1525 
1526 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1527 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1528 
1529 	if (nmask) {
1530 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1531 		nm = compat_alloc_user_space(alloc_size);
1532 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1533 	}
1534 
1535 	if (err)
1536 		return -EFAULT;
1537 
1538 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1539 }
1540 
1541 #endif
1542 
1543 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1544 						unsigned long addr)
1545 {
1546 	struct mempolicy *pol = NULL;
1547 
1548 	if (vma) {
1549 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1550 			pol = vma->vm_ops->get_policy(vma, addr);
1551 		} else if (vma->vm_policy) {
1552 			pol = vma->vm_policy;
1553 
1554 			/*
1555 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1556 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1557 			 * count on these policies which will be dropped by
1558 			 * mpol_cond_put() later
1559 			 */
1560 			if (mpol_needs_cond_ref(pol))
1561 				mpol_get(pol);
1562 		}
1563 	}
1564 
1565 	return pol;
1566 }
1567 
1568 /*
1569  * get_vma_policy(@vma, @addr)
1570  * @vma: virtual memory area whose policy is sought
1571  * @addr: address in @vma for shared policy lookup
1572  *
1573  * Returns effective policy for a VMA at specified address.
1574  * Falls back to current->mempolicy or system default policy, as necessary.
1575  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1576  * count--added by the get_policy() vm_op, as appropriate--to protect against
1577  * freeing by another task.  It is the caller's responsibility to free the
1578  * extra reference for shared policies.
1579  */
1580 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1581 						unsigned long addr)
1582 {
1583 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1584 
1585 	if (!pol)
1586 		pol = get_task_policy(current);
1587 
1588 	return pol;
1589 }
1590 
1591 bool vma_policy_mof(struct vm_area_struct *vma)
1592 {
1593 	struct mempolicy *pol;
1594 
1595 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1596 		bool ret = false;
1597 
1598 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1599 		if (pol && (pol->flags & MPOL_F_MOF))
1600 			ret = true;
1601 		mpol_cond_put(pol);
1602 
1603 		return ret;
1604 	}
1605 
1606 	pol = vma->vm_policy;
1607 	if (!pol)
1608 		pol = get_task_policy(current);
1609 
1610 	return pol->flags & MPOL_F_MOF;
1611 }
1612 
1613 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1614 {
1615 	enum zone_type dynamic_policy_zone = policy_zone;
1616 
1617 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1618 
1619 	/*
1620 	 * If policy->v.nodes has movable memory only, we apply the policy
1621 	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
1622 	 *
1623 	 * policy->v.nodes is intersected with node_states[N_MEMORY], so if
1624 	 * the following test fails, it implies that policy->v.nodes has
1625 	 * movable memory only.
1626 	 */
1627 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1628 		dynamic_policy_zone = ZONE_MOVABLE;
1629 
1630 	return zone >= dynamic_policy_zone;
1631 }
1632 
1633 /*
1634  * Return a nodemask representing a mempolicy for filtering nodes for
1635  * page allocation
1636  */
1637 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1638 {
1639 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1640 	if (unlikely(policy->mode == MPOL_BIND) &&
1641 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1642 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1643 		return &policy->v.nodes;
1644 
1645 	return NULL;
1646 }
1647 
1648 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1649 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1650 	int nd)
1651 {
1652 	switch (policy->mode) {
1653 	case MPOL_PREFERRED:
1654 		if (!(policy->flags & MPOL_F_LOCAL))
1655 			nd = policy->v.preferred_node;
1656 		break;
1657 	case MPOL_BIND:
1658 		/*
1659 		 * Normally, MPOL_BIND allocations are node-local within the
1660 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1661 		 * current node isn't part of the mask, we use the zonelist for
1662 		 * the first node in the mask instead.
1663 		 */
1664 		if (unlikely(gfp & __GFP_THISNODE) &&
1665 				unlikely(!node_isset(nd, policy->v.nodes)))
1666 			nd = first_node(policy->v.nodes);
1667 		break;
1668 	default:
1669 		BUG();
1670 	}
1671 	return node_zonelist(nd, gfp);
1672 }
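
/*
 * These two helpers are normally used together; a sketch of the pattern
 * used by the allocation paths further below (e.g. alloc_pages_vma()):
 *
 *	nmask = policy_nodemask(gfp, pol);
 *	zl = policy_zonelist(gfp, pol, numa_node_id());
 *	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
 *
 * MPOL_BIND contributes the filtering nodemask, MPOL_PREFERRED selects
 * the starting zonelist, and MPOL_INTERLEAVE is handled separately via
 * alloc_page_interleave().
 */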
1673 
1674 /* Do dynamic interleaving for a process */
1675 static unsigned interleave_nodes(struct mempolicy *policy)
1676 {
1677 	unsigned nid, next;
1678 	struct task_struct *me = current;
1679 
1680 	nid = me->il_next;
1681 	next = next_node(nid, policy->v.nodes);
1682 	if (next >= MAX_NUMNODES)
1683 		next = first_node(policy->v.nodes);
1684 	if (next < MAX_NUMNODES)
1685 		me->il_next = next;
1686 	return nid;
1687 }
1688 
1689 /*
1690  * Depending on the memory policy provide a node from which to allocate the
1691  * next slab entry.
1692  */
1693 unsigned int mempolicy_slab_node(void)
1694 {
1695 	struct mempolicy *policy;
1696 	int node = numa_mem_id();
1697 
1698 	if (in_interrupt())
1699 		return node;
1700 
1701 	policy = current->mempolicy;
1702 	if (!policy || policy->flags & MPOL_F_LOCAL)
1703 		return node;
1704 
1705 	switch (policy->mode) {
1706 	case MPOL_PREFERRED:
1707 		/*
1708 		 * handled MPOL_F_LOCAL above
1709 		 */
1710 		return policy->v.preferred_node;
1711 
1712 	case MPOL_INTERLEAVE:
1713 		return interleave_nodes(policy);
1714 
1715 	case MPOL_BIND: {
1716 		/*
1717 		 * Follow bind policy behavior and start allocation at the
1718 		 * first node.
1719 		 */
1720 		struct zonelist *zonelist;
1721 		struct zone *zone;
1722 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1723 		zonelist = &NODE_DATA(node)->node_zonelists[0];
1724 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1725 							&policy->v.nodes,
1726 							&zone);
1727 		return zone ? zone->node : node;
1728 	}
1729 
1730 	default:
1731 		BUG();
1732 	}
1733 }
1734 
1735 /* Do static interleaving for a VMA with known offset. */
1736 static unsigned offset_il_node(struct mempolicy *pol,
1737 		struct vm_area_struct *vma, unsigned long off)
1738 {
1739 	unsigned nnodes = nodes_weight(pol->v.nodes);
1740 	unsigned target;
1741 	int c;
1742 	int nid = NUMA_NO_NODE;
1743 
1744 	if (!nnodes)
1745 		return numa_node_id();
1746 	target = (unsigned int)off % nnodes;
1747 	c = 0;
1748 	do {
1749 		nid = next_node(nid, pol->v.nodes);
1750 		c++;
1751 	} while (c <= target);
1752 	return nid;
1753 }
1754 
1755 /* Determine a node number for interleave */
1756 static inline unsigned interleave_nid(struct mempolicy *pol,
1757 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1758 {
1759 	if (vma) {
1760 		unsigned long off;
1761 
1762 		/*
1763 		 * for small pages, there is no difference between
1764 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1765 		 * for huge pages, since vm_pgoff is in units of small
1766 		 * pages, we need to shift off the always 0 bits to get
1767 		 * a useful offset.
1768 		 */
1769 		BUG_ON(shift < PAGE_SHIFT);
1770 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1771 		off += (addr - vma->vm_start) >> shift;
1772 		return offset_il_node(pol, vma, off);
1773 	} else
1774 		return interleave_nodes(pol);
1775 }
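
/*
 * Worked example (hypothetical numbers): with pol->v.nodes = {0,2,5}
 * (nnodes = 3), vm_pgoff = 0 and a fault one 2MB huge page past
 * vm_start, interleave_nid() computes off = 1, so offset_il_node()
 * picks target = 1 % 3 = 1 and walks to the second allowed node,
 * node 2.  A given offset always maps to the same node, which is what
 * makes this interleave "static".
 */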
1776 
1777 /*
1778  * Return the bit number of a random bit set in the nodemask.
1779  * (returns NUMA_NO_NODE if nodemask is empty)
1780  */
1781 int node_random(const nodemask_t *maskp)
1782 {
1783 	int w, bit = NUMA_NO_NODE;
1784 
1785 	w = nodes_weight(*maskp);
1786 	if (w)
1787 		bit = bitmap_ord_to_pos(maskp->bits,
1788 			get_random_int() % w, MAX_NUMNODES);
1789 	return bit;
1790 }
1791 
1792 #ifdef CONFIG_HUGETLBFS
1793 /*
1794  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1795  * @vma: virtual memory area whose policy is sought
1796  * @addr: address in @vma for shared policy lookup and interleave policy
1797  * @gfp_flags: for requested zone
1798  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1799  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1800  *
1801  * Returns a zonelist suitable for a huge page allocation and a pointer
1802  * to the struct mempolicy for conditional unref after allocation.
1803  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1804  * @nodemask for filtering the zonelist.
1805  *
1806  * Must be protected by read_mems_allowed_begin()
1807  */
1808 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1809 				gfp_t gfp_flags, struct mempolicy **mpol,
1810 				nodemask_t **nodemask)
1811 {
1812 	struct zonelist *zl;
1813 
1814 	*mpol = get_vma_policy(vma, addr);
1815 	*nodemask = NULL;	/* assume !MPOL_BIND */
1816 
1817 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1818 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1819 				huge_page_shift(hstate_vma(vma))), gfp_flags);
1820 	} else {
1821 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1822 		if ((*mpol)->mode == MPOL_BIND)
1823 			*nodemask = &(*mpol)->v.nodes;
1824 	}
1825 	return zl;
1826 }
1827 
1828 /*
1829  * init_nodemask_of_mempolicy
1830  *
1831  * If the current task's mempolicy is "default" [NULL], return 'false'
1832  * to indicate default policy.  Otherwise, extract the policy nodemask
1833  * for 'bind' or 'interleave' policy into the argument nodemask, or
1834  * initialize the argument nodemask to contain the single node for
1835  * 'preferred' or 'local' policy and return 'true' to indicate presence
1836  * of non-default mempolicy.
1837  *
1838  * We don't bother with reference counting the mempolicy [mpol_get/put]
1839  * because the current task is examining its own mempolicy and a task's
1840  * mempolicy is only ever changed by the task itself.
1841  *
1842  * N.B., it is the caller's responsibility to free a returned nodemask.
1843  */
1844 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1845 {
1846 	struct mempolicy *mempolicy;
1847 	int nid;
1848 
1849 	if (!(mask && current->mempolicy))
1850 		return false;
1851 
1852 	task_lock(current);
1853 	mempolicy = current->mempolicy;
1854 	switch (mempolicy->mode) {
1855 	case MPOL_PREFERRED:
1856 		if (mempolicy->flags & MPOL_F_LOCAL)
1857 			nid = numa_node_id();
1858 		else
1859 			nid = mempolicy->v.preferred_node;
1860 		init_nodemask_of_node(mask, nid);
1861 		break;
1862 
1863 	case MPOL_BIND:
1864 		/* Fall through */
1865 	case MPOL_INTERLEAVE:
1866 		*mask = mempolicy->v.nodes;
1867 		break;
1868 
1869 	default:
1870 		BUG();
1871 	}
1872 	task_unlock(current);
1873 
1874 	return true;
1875 }
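/*
 * Usage sketch (illustrative, modelled on the hugetlb sysctl handlers);
 * NODEMASK_ALLOC()/NODEMASK_FREE() are the helpers from <linux/nodemask.h>:
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 *
 *	if (nodes_allowed && !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_MEMORY];
 *	}
 *	... use *nodes_allowed ...
 *	if (nodes_allowed != &node_states[N_MEMORY])
 *		NODEMASK_FREE(nodes_allowed);
 */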
1876 #endif
1877 
1878 /*
1879  * mempolicy_nodemask_intersects
1880  *
1881  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1882  * policy.  Otherwise, check for intersection between mask and the policy
1883  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1884  * policy, always return true since it may allocate elsewhere on fallback.
1885  *
1886  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1887  */
1888 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1889 					const nodemask_t *mask)
1890 {
1891 	struct mempolicy *mempolicy;
1892 	bool ret = true;
1893 
1894 	if (!mask)
1895 		return ret;
1896 	task_lock(tsk);
1897 	mempolicy = tsk->mempolicy;
1898 	if (!mempolicy)
1899 		goto out;
1900 
1901 	switch (mempolicy->mode) {
1902 	case MPOL_PREFERRED:
1903 		/*
1904 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1905 		 * allocate from; the task may fall back to other nodes when OOM.
1906 		 * Thus, it's possible for tsk to have allocated memory from
1907 		 * nodes in mask.
1908 		 */
1909 		break;
1910 	case MPOL_BIND:
1911 	case MPOL_INTERLEAVE:
1912 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1913 		break;
1914 	default:
1915 		BUG();
1916 	}
1917 out:
1918 	task_unlock(tsk);
1919 	return ret;
1920 }
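/*
 * Illustrative consumer sketch (the OOM killer does something similar when
 * constrained by a nodemask); the loop below is hypothetical glue, not code
 * from this file:
 *
 *	for_each_process(p) {
 *		if (!mempolicy_nodemask_intersects(p, nodemask))
 *			continue;
 *		... consider p as a victim, it may hold memory on these nodes ...
 *	}
 */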
1921 
1922 /* Allocate a page in interleaved policy.
1923    Own path because it needs to do special accounting. */
1924 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1925 					unsigned nid)
1926 {
1927 	struct zonelist *zl;
1928 	struct page *page;
1929 
1930 	zl = node_zonelist(nid, gfp);
1931 	page = __alloc_pages(gfp, order, zl);
1932 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1933 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1934 	return page;
1935 }
1936 
1937 /**
1938  * 	alloc_pages_vma	- Allocate a page for a VMA.
1939  *
1940  * 	@gfp:
1941  *      %GFP_USER    user allocation.
1942  *      %GFP_KERNEL  kernel allocations,
1943  *      %GFP_HIGHMEM highmem/user allocations,
1944  *      %GFP_FS      allocation should not call back into a file system.
1945  *      %GFP_ATOMIC  don't sleep.
1946  *
1947  *	@order: Order of the GFP allocation.
1948  * 	@vma:  Pointer to VMA or NULL if not available.
1949  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1950  *	@node: Which node to prefer for allocation (modulo policy).
1951  *	@hugepage: for hugepages try only the preferred node if possible
1952  *
1953  * 	This function allocates a page from the kernel page pool and applies
1954  *	a NUMA policy associated with the VMA or the current process.
1955  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1956  *	mm_struct of the VMA to prevent it from going away. Should be used for
1957  *	all allocations for pages that will be mapped into user space. Returns
1958  *	NULL when no page can be allocated.
1959  */
1960 struct page *
1961 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1962 		unsigned long addr, int node, bool hugepage)
1963 {
1964 	struct mempolicy *pol;
1965 	struct page *page;
1966 	unsigned int cpuset_mems_cookie;
1967 	struct zonelist *zl;
1968 	nodemask_t *nmask;
1969 
1970 retry_cpuset:
1971 	pol = get_vma_policy(vma, addr);
1972 	cpuset_mems_cookie = read_mems_allowed_begin();
1973 
1974 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage &&
1975 					pol->mode != MPOL_INTERLEAVE)) {
1976 		/*
1977 		 * For hugepage allocation and non-interleave policy which
1978 		 * allows the current node, we only try to allocate from the
1979 		 * current node and don't fall back to other nodes, as the
1980 		 * cost of remote accesses would likely offset THP benefits.
1981 		 *
1982 		 * If the policy is interleave, or does not allow the current
1983 		 * node in its nodemask, we allocate the standard way.
1984 		 */
1985 		nmask = policy_nodemask(gfp, pol);
1986 		if (!nmask || node_isset(node, *nmask)) {
1987 			mpol_cond_put(pol);
1988 			page = alloc_pages_exact_node(node, gfp, order);
1989 			goto out;
1990 		}
1991 	}
1992 
1993 	if (pol->mode == MPOL_INTERLEAVE) {
1994 		unsigned nid;
1995 
1996 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1997 		mpol_cond_put(pol);
1998 		page = alloc_page_interleave(gfp, order, nid);
1999 		goto out;
2000 	}
2001 
2002 	nmask = policy_nodemask(gfp, pol);
2003 	zl = policy_zonelist(gfp, pol, node);
2004 	mpol_cond_put(pol);
2005 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2006 out:
2007 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2008 		goto retry_cpuset;
2009 	return page;
2010 }
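/*
 * Typical call site sketch (illustrative): most users reach this through
 * wrappers such as alloc_page_vma() in <linux/gfp.h>, which for an anonymous
 * fault boils down to roughly the following:
 *
 *	down_read(&mm->mmap_sem);
 *	...
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */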
2011 
2012 /**
2013  * 	alloc_pages_current - Allocate pages.
2014  *
2015  *	@gfp:
2016  *		%GFP_USER   user allocation,
2017  *      	%GFP_KERNEL kernel allocation,
2018  *      	%GFP_HIGHMEM highmem allocation,
2019  *      	%GFP_FS     don't call back into a file system.
2020  *      	%GFP_ATOMIC don't sleep.
2021  *	@order: Order of the allocation (2^order pages). 0 is a single page.
2022  *
2023  *	Allocate a page from the kernel page pool.  When not in
2024  *	interrupt context, apply the current process' NUMA policy.
2025  *	Returns NULL when no page can be allocated.
2026  *
2027  *	Don't call cpuset_update_task_memory_state() unless
2028  *	1) it's ok to take cpuset_sem (can WAIT), and
2029  *	2) allocating for current task (not interrupt).
2030  */
2031 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2032 {
2033 	struct mempolicy *pol = &default_policy;
2034 	struct page *page;
2035 	unsigned int cpuset_mems_cookie;
2036 
2037 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2038 		pol = get_task_policy(current);
2039 
2040 retry_cpuset:
2041 	cpuset_mems_cookie = read_mems_allowed_begin();
2042 
2043 	/*
2044 	 * No reference counting needed for current->mempolicy
2045 	 * nor system default_policy
2046 	 */
2047 	if (pol->mode == MPOL_INTERLEAVE)
2048 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2049 	else
2050 		page = __alloc_pages_nodemask(gfp, order,
2051 				policy_zonelist(gfp, pol, numa_node_id()),
2052 				policy_nodemask(gfp, pol));
2053 
2054 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2055 		goto retry_cpuset;
2056 
2057 	return page;
2058 }
2059 EXPORT_SYMBOL(alloc_pages_current);
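/*
 * Illustrative note: on CONFIG_NUMA kernels the generic alloc_pages() and
 * alloc_page() helpers in <linux/gfp.h> resolve to this function, e.g.
 *
 *	page = alloc_pages(GFP_KERNEL, 0);	one page, using the process policy
 */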
2060 
2061 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2062 {
2063 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2064 
2065 	if (IS_ERR(pol))
2066 		return PTR_ERR(pol);
2067 	dst->vm_policy = pol;
2068 	return 0;
2069 }
2070 
2071 /*
2072  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2073  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2074  * with the mems_allowed returned by cpuset_mems_allowed().  This
2075  * keeps mempolicies cpuset relative after its cpuset moves.  See
2076  * further kernel/cpuset.c update_nodemask().
2077  *
2078  * current's mempolicy may be rebound by another task (the task that changes
2079  * the cpuset's mems), so we needn't do rebind work for the current task.
2080  */
2081 
2082 /* Slow path of a mempolicy duplicate */
2083 struct mempolicy *__mpol_dup(struct mempolicy *old)
2084 {
2085 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2086 
2087 	if (!new)
2088 		return ERR_PTR(-ENOMEM);
2089 
2090 	/* task's mempolicy is protected by alloc_lock */
2091 	if (old == current->mempolicy) {
2092 		task_lock(current);
2093 		*new = *old;
2094 		task_unlock(current);
2095 	} else
2096 		*new = *old;
2097 
2098 	if (current_cpuset_is_being_rebound()) {
2099 		nodemask_t mems = cpuset_mems_allowed(current);
2100 		if (new->flags & MPOL_F_REBINDING)
2101 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2102 		else
2103 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2104 	}
2105 	atomic_set(&new->refcnt, 1);
2106 	return new;
2107 }
2108 
2109 /* Slow path of a mempolicy comparison */
2110 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2111 {
2112 	if (!a || !b)
2113 		return false;
2114 	if (a->mode != b->mode)
2115 		return false;
2116 	if (a->flags != b->flags)
2117 		return false;
2118 	if (mpol_store_user_nodemask(a))
2119 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2120 			return false;
2121 
2122 	switch (a->mode) {
2123 	case MPOL_BIND:
2124 		/* Fall through */
2125 	case MPOL_INTERLEAVE:
2126 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2127 	case MPOL_PREFERRED:
2128 		return a->v.preferred_node == b->v.preferred_node;
2129 	default:
2130 		BUG();
2131 		return false;
2132 	}
2133 }
2134 
2135 /*
2136  * Shared memory backing store policy support.
2137  *
2138  * Remember policies even when nobody has shared memory mapped.
2139  * The policies are kept in Red-Black tree linked from the inode.
2140  * They are protected by the sp->lock spinlock, which should be held
2141  * for any accesses to the tree.
2142  */
2143 
2144 /* lookup first element intersecting start-end */
2145 /* Caller holds sp->lock */
2146 static struct sp_node *
2147 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2148 {
2149 	struct rb_node *n = sp->root.rb_node;
2150 
2151 	while (n) {
2152 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2153 
2154 		if (start >= p->end)
2155 			n = n->rb_right;
2156 		else if (end <= p->start)
2157 			n = n->rb_left;
2158 		else
2159 			break;
2160 	}
2161 	if (!n)
2162 		return NULL;
2163 	for (;;) {
2164 		struct sp_node *w = NULL;
2165 		struct rb_node *prev = rb_prev(n);
2166 		if (!prev)
2167 			break;
2168 		w = rb_entry(prev, struct sp_node, nd);
2169 		if (w->end <= start)
2170 			break;
2171 		n = prev;
2172 	}
2173 	return rb_entry(n, struct sp_node, nd);
2174 }
2175 
2176 /* Insert a new shared policy into the list. */
2177 /* Caller holds sp->lock */
2178 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2179 {
2180 	struct rb_node **p = &sp->root.rb_node;
2181 	struct rb_node *parent = NULL;
2182 	struct sp_node *nd;
2183 
2184 	while (*p) {
2185 		parent = *p;
2186 		nd = rb_entry(parent, struct sp_node, nd);
2187 		if (new->start < nd->start)
2188 			p = &(*p)->rb_left;
2189 		else if (new->end > nd->end)
2190 			p = &(*p)->rb_right;
2191 		else
2192 			BUG();
2193 	}
2194 	rb_link_node(&new->nd, parent, p);
2195 	rb_insert_color(&new->nd, &sp->root);
2196 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2197 		 new->policy ? new->policy->mode : 0);
2198 }
2199 
2200 /* Find shared policy intersecting idx */
2201 struct mempolicy *
2202 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2203 {
2204 	struct mempolicy *pol = NULL;
2205 	struct sp_node *sn;
2206 
2207 	if (!sp->root.rb_node)
2208 		return NULL;
2209 	spin_lock(&sp->lock);
2210 	sn = sp_lookup(sp, idx, idx+1);
2211 	if (sn) {
2212 		mpol_get(sn->policy);
2213 		pol = sn->policy;
2214 	}
2215 	spin_unlock(&sp->lock);
2216 	return pol;
2217 }
2218 
2219 static void sp_free(struct sp_node *n)
2220 {
2221 	mpol_put(n->policy);
2222 	kmem_cache_free(sn_cache, n);
2223 }
2224 
2225 /**
2226  * mpol_misplaced - check whether current page node is valid in policy
2227  *
2228  * @page: page to be checked
2229  * @vma: vm area where page mapped
2230  * @addr: virtual address where page mapped
2231  *
2232  * Lookup current policy node id for vma,addr and "compare to" page's
2233  * node id.
2234  *
2235  * Returns:
2236  *	-1	- not misplaced, page is in the right node
2237  *	node	- node id where the page should be
2238  *
2239  * Policy determination "mimics" alloc_page_vma().
2240  * Called from fault path where we know the vma and faulting address.
2241  */
2242 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2243 {
2244 	struct mempolicy *pol;
2245 	struct zone *zone;
2246 	int curnid = page_to_nid(page);
2247 	unsigned long pgoff;
2248 	int thiscpu = raw_smp_processor_id();
2249 	int thisnid = cpu_to_node(thiscpu);
2250 	int polnid = -1;
2251 	int ret = -1;
2252 
2253 	BUG_ON(!vma);
2254 
2255 	pol = get_vma_policy(vma, addr);
2256 	if (!(pol->flags & MPOL_F_MOF))
2257 		goto out;
2258 
2259 	switch (pol->mode) {
2260 	case MPOL_INTERLEAVE:
2261 		BUG_ON(addr >= vma->vm_end);
2262 		BUG_ON(addr < vma->vm_start);
2263 
2264 		pgoff = vma->vm_pgoff;
2265 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2266 		polnid = offset_il_node(pol, vma, pgoff);
2267 		break;
2268 
2269 	case MPOL_PREFERRED:
2270 		if (pol->flags & MPOL_F_LOCAL)
2271 			polnid = numa_node_id();
2272 		else
2273 			polnid = pol->v.preferred_node;
2274 		break;
2275 
2276 	case MPOL_BIND:
2277 		/*
2278 		 * MPOL_BIND allows binding to multiple nodes.
2279 		 * use current page if in policy nodemask,
2280 		 * else select nearest allowed node, if any.
2281 		 * If no allowed nodes, use current [!misplaced].
2282 		 */
2283 		if (node_isset(curnid, pol->v.nodes))
2284 			goto out;
2285 		(void)first_zones_zonelist(
2286 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2287 				gfp_zone(GFP_HIGHUSER),
2288 				&pol->v.nodes, &zone);
2289 		polnid = zone->node;
2290 		break;
2291 
2292 	default:
2293 		BUG();
2294 	}
2295 
2296 	/* Migrate the page towards the node whose CPU is referencing it */
2297 	if (pol->flags & MPOL_F_MORON) {
2298 		polnid = thisnid;
2299 
2300 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2301 			goto out;
2302 	}
2303 
2304 	if (curnid != polnid)
2305 		ret = polnid;
2306 out:
2307 	mpol_cond_put(pol);
2308 
2309 	return ret;
2310 }
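/*
 * Illustrative sketch of the NUMA hinting fault usage (cf. do_numa_page());
 * migrate_misplaced_page() lives in mm/migrate.c, the rest is hypothetical
 * glue around this function:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid == -1) {
 *		put_page(page);			already on an allowed node
 *	} else if (migrate_misplaced_page(page, vma, target_nid)) {
 *		page_nid = target_nid;		account the successful move
 *	}
 */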
2311 
2312 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2313 {
2314 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2315 	rb_erase(&n->nd, &sp->root);
2316 	sp_free(n);
2317 }
2318 
2319 static void sp_node_init(struct sp_node *node, unsigned long start,
2320 			unsigned long end, struct mempolicy *pol)
2321 {
2322 	node->start = start;
2323 	node->end = end;
2324 	node->policy = pol;
2325 }
2326 
2327 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2328 				struct mempolicy *pol)
2329 {
2330 	struct sp_node *n;
2331 	struct mempolicy *newpol;
2332 
2333 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2334 	if (!n)
2335 		return NULL;
2336 
2337 	newpol = mpol_dup(pol);
2338 	if (IS_ERR(newpol)) {
2339 		kmem_cache_free(sn_cache, n);
2340 		return NULL;
2341 	}
2342 	newpol->flags |= MPOL_F_SHARED;
2343 	sp_node_init(n, start, end, newpol);
2344 
2345 	return n;
2346 }
2347 
2348 /* Replace a policy range. */
2349 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2350 				 unsigned long end, struct sp_node *new)
2351 {
2352 	struct sp_node *n;
2353 	struct sp_node *n_new = NULL;
2354 	struct mempolicy *mpol_new = NULL;
2355 	int ret = 0;
2356 
2357 restart:
2358 	spin_lock(&sp->lock);
2359 	n = sp_lookup(sp, start, end);
2360 	/* Take care of old policies in the same range. */
2361 	while (n && n->start < end) {
2362 		struct rb_node *next = rb_next(&n->nd);
2363 		if (n->start >= start) {
2364 			if (n->end <= end)
2365 				sp_delete(sp, n);
2366 			else
2367 				n->start = end;
2368 		} else {
2369 			/* Old policy spanning whole new range. */
2370 			if (n->end > end) {
2371 				if (!n_new)
2372 					goto alloc_new;
2373 
2374 				*mpol_new = *n->policy;
2375 				atomic_set(&mpol_new->refcnt, 1);
2376 				sp_node_init(n_new, end, n->end, mpol_new);
2377 				n->end = start;
2378 				sp_insert(sp, n_new);
2379 				n_new = NULL;
2380 				mpol_new = NULL;
2381 				break;
2382 			} else
2383 				n->end = start;
2384 		}
2385 		if (!next)
2386 			break;
2387 		n = rb_entry(next, struct sp_node, nd);
2388 	}
2389 	if (new)
2390 		sp_insert(sp, new);
2391 	spin_unlock(&sp->lock);
2392 	ret = 0;
2393 
2394 err_out:
2395 	if (mpol_new)
2396 		mpol_put(mpol_new);
2397 	if (n_new)
2398 		kmem_cache_free(sn_cache, n_new);
2399 
2400 	return ret;
2401 
2402 alloc_new:
2403 	spin_unlock(&sp->lock);
2404 	ret = -ENOMEM;
2405 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2406 	if (!n_new)
2407 		goto err_out;
2408 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2409 	if (!mpol_new)
2410 		goto err_out;
2411 	goto restart;
2412 }
2413 
2414 /**
2415  * mpol_shared_policy_init - initialize shared policy for inode
2416  * @sp: pointer to inode shared policy
2417  * @mpol:  struct mempolicy to install
2418  *
2419  * Install non-NULL @mpol in inode's shared policy rb-tree.
2420  * On entry, the current task has a reference on a non-NULL @mpol.
2421  * This must be released on exit.
2422  * This is called at get_inode() time, so we can use GFP_KERNEL.
2423  */
2424 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2425 {
2426 	int ret;
2427 
2428 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2429 	spin_lock_init(&sp->lock);
2430 
2431 	if (mpol) {
2432 		struct vm_area_struct pvma;
2433 		struct mempolicy *new;
2434 		NODEMASK_SCRATCH(scratch);
2435 
2436 		if (!scratch)
2437 			goto put_mpol;
2438 		/* contextualize the tmpfs mount point mempolicy */
2439 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2440 		if (IS_ERR(new))
2441 			goto free_scratch; /* no valid nodemask intersection */
2442 
2443 		task_lock(current);
2444 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2445 		task_unlock(current);
2446 		if (ret)
2447 			goto put_new;
2448 
2449 		/* Create pseudo-vma that contains just the policy */
2450 		memset(&pvma, 0, sizeof(struct vm_area_struct));
2451 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2452 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2453 
2454 put_new:
2455 		mpol_put(new);			/* drop initial ref */
2456 free_scratch:
2457 		NODEMASK_SCRATCH_FREE(scratch);
2458 put_mpol:
2459 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2460 	}
2461 }
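/*
 * Illustrative caller sketch: tmpfs initializes the per-inode policy from
 * the superblock's mount-time mempolicy roughly like this (the names on the
 * shmem side are approximations):
 *
 *	mpol_shared_policy_init(&info->policy, sb_mpol);
 *
 * where sb_mpol carries a reference that this function consumes, or is NULL
 * for the default policy.
 */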
2462 
2463 int mpol_set_shared_policy(struct shared_policy *info,
2464 			struct vm_area_struct *vma, struct mempolicy *npol)
2465 {
2466 	int err;
2467 	struct sp_node *new = NULL;
2468 	unsigned long sz = vma_pages(vma);
2469 
2470 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2471 		 vma->vm_pgoff,
2472 		 sz, npol ? npol->mode : -1,
2473 		 npol ? npol->flags : -1,
2474 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2475 
2476 	if (npol) {
2477 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2478 		if (!new)
2479 			return -ENOMEM;
2480 	}
2481 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2482 	if (err && new)
2483 		sp_free(new);
2484 	return err;
2485 }
2486 
2487 /* Free a backing policy store on inode delete. */
2488 void mpol_free_shared_policy(struct shared_policy *p)
2489 {
2490 	struct sp_node *n;
2491 	struct rb_node *next;
2492 
2493 	if (!p->root.rb_node)
2494 		return;
2495 	spin_lock(&p->lock);
2496 	next = rb_first(&p->root);
2497 	while (next) {
2498 		n = rb_entry(next, struct sp_node, nd);
2499 		next = rb_next(&n->nd);
2500 		sp_delete(p, n);
2501 	}
2502 	spin_unlock(&p->lock);
2503 }
2504 
2505 #ifdef CONFIG_NUMA_BALANCING
2506 static int __initdata numabalancing_override;
2507 
2508 static void __init check_numabalancing_enable(void)
2509 {
2510 	bool numabalancing_default = false;
2511 
2512 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2513 		numabalancing_default = true;
2514 
2515 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2516 	if (numabalancing_override)
2517 		set_numabalancing_state(numabalancing_override == 1);
2518 
2519 	if (nr_node_ids > 1 && !numabalancing_override) {
2520 		pr_info("%s automatic NUMA balancing. "
2521 			"Configure with numa_balancing= or the "
2522 			"kernel.numa_balancing sysctl\n",
2523 			numabalancing_default ? "Enabling" : "Disabling");
2524 		set_numabalancing_state(numabalancing_default);
2525 	}
2526 }
2527 
2528 static int __init setup_numabalancing(char *str)
2529 {
2530 	int ret = 0;
2531 	if (!str)
2532 		goto out;
2533 
2534 	if (!strcmp(str, "enable")) {
2535 		numabalancing_override = 1;
2536 		ret = 1;
2537 	} else if (!strcmp(str, "disable")) {
2538 		numabalancing_override = -1;
2539 		ret = 1;
2540 	}
2541 out:
2542 	if (!ret)
2543 		pr_warn("Unable to parse numa_balancing=\n");
2544 
2545 	return ret;
2546 }
2547 __setup("numa_balancing=", setup_numabalancing);
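/*
 * Example (illustrative): booting with "numa_balancing=disable" on the
 * kernel command line forces automatic NUMA balancing off regardless of
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED; "numa_balancing=enable" forces it
 * on.  The state can also be flipped later through the
 * kernel.numa_balancing sysctl mentioned in check_numabalancing_enable().
 */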
2548 #else
2549 static inline void __init check_numabalancing_enable(void)
2550 {
2551 }
2552 #endif /* CONFIG_NUMA_BALANCING */
2553 
2554 /* assumes fs == KERNEL_DS */
2555 void __init numa_policy_init(void)
2556 {
2557 	nodemask_t interleave_nodes;
2558 	unsigned long largest = 0;
2559 	int nid, prefer = 0;
2560 
2561 	policy_cache = kmem_cache_create("numa_policy",
2562 					 sizeof(struct mempolicy),
2563 					 0, SLAB_PANIC, NULL);
2564 
2565 	sn_cache = kmem_cache_create("shared_policy_node",
2566 				     sizeof(struct sp_node),
2567 				     0, SLAB_PANIC, NULL);
2568 
2569 	for_each_node(nid) {
2570 		preferred_node_policy[nid] = (struct mempolicy) {
2571 			.refcnt = ATOMIC_INIT(1),
2572 			.mode = MPOL_PREFERRED,
2573 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2574 			.v = { .preferred_node = nid, },
2575 		};
2576 	}
2577 
2578 	/*
2579 	 * Set interleaving policy for system init. Interleaving is only
2580 	 * enabled across suitably sized nodes (default is >= 16MB); if they
2581 	 * are all smaller, fall back to the largest node.
2582 	 */
2583 	nodes_clear(interleave_nodes);
2584 	for_each_node_state(nid, N_MEMORY) {
2585 		unsigned long total_pages = node_present_pages(nid);
2586 
2587 		/* Preserve the largest node */
2588 		if (largest < total_pages) {
2589 			largest = total_pages;
2590 			prefer = nid;
2591 		}
2592 
2593 		/* Interleave this node? */
2594 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2595 			node_set(nid, interleave_nodes);
2596 	}
2597 
2598 	/* All too small, use the largest */
2599 	if (unlikely(nodes_empty(interleave_nodes)))
2600 		node_set(prefer, interleave_nodes);
2601 
2602 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2603 		pr_err("%s: interleaving failed\n", __func__);
2604 
2605 	check_numabalancing_enable();
2606 }
2607 
2608 /* Reset policy of current process to default */
2609 void numa_default_policy(void)
2610 {
2611 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2612 }
2613 
2614 /*
2615  * Parse and format mempolicy from/to strings
2616  */
2617 
2618 /*
2619  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2620  */
2621 static const char * const policy_modes[] =
2622 {
2623 	[MPOL_DEFAULT]    = "default",
2624 	[MPOL_PREFERRED]  = "prefer",
2625 	[MPOL_BIND]       = "bind",
2626 	[MPOL_INTERLEAVE] = "interleave",
2627 	[MPOL_LOCAL]      = "local",
2628 };
2629 
2630 
2631 #ifdef CONFIG_TMPFS
2632 /**
2633  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2634  * @str:  string containing mempolicy to parse
2635  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2636  *
2637  * Format of input:
2638  *	<mode>[=<flags>][:<nodelist>]
2639  *
2640  * On success, returns 0, else 1
2641  */
2642 int mpol_parse_str(char *str, struct mempolicy **mpol)
2643 {
2644 	struct mempolicy *new = NULL;
2645 	unsigned short mode;
2646 	unsigned short mode_flags;
2647 	nodemask_t nodes;
2648 	char *nodelist = strchr(str, ':');
2649 	char *flags = strchr(str, '=');
2650 	int err = 1;
2651 
2652 	if (nodelist) {
2653 		/* NUL-terminate mode or flags string */
2654 		*nodelist++ = '\0';
2655 		if (nodelist_parse(nodelist, nodes))
2656 			goto out;
2657 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2658 			goto out;
2659 	} else
2660 		nodes_clear(nodes);
2661 
2662 	if (flags)
2663 		*flags++ = '\0';	/* terminate mode string */
2664 
2665 	for (mode = 0; mode < MPOL_MAX; mode++) {
2666 		if (!strcmp(str, policy_modes[mode])) {
2667 			break;
2668 		}
2669 	}
2670 	if (mode >= MPOL_MAX)
2671 		goto out;
2672 
2673 	switch (mode) {
2674 	case MPOL_PREFERRED:
2675 		/*
2676 		 * Insist on a nodelist of one node only
2677 		 */
2678 		if (nodelist) {
2679 			char *rest = nodelist;
2680 			while (isdigit(*rest))
2681 				rest++;
2682 			if (*rest)
2683 				goto out;
2684 		}
2685 		break;
2686 	case MPOL_INTERLEAVE:
2687 		/*
2688 		 * Default to online nodes with memory if no nodelist
2689 		 */
2690 		if (!nodelist)
2691 			nodes = node_states[N_MEMORY];
2692 		break;
2693 	case MPOL_LOCAL:
2694 		/*
2695 		 * Don't allow a nodelist;  mpol_new() checks flags
2696 		 */
2697 		if (nodelist)
2698 			goto out;
2699 		mode = MPOL_PREFERRED;
2700 		break;
2701 	case MPOL_DEFAULT:
2702 		/*
2703 		 * Insist on an empty nodelist
2704 		 */
2705 		if (!nodelist)
2706 			err = 0;
2707 		goto out;
2708 	case MPOL_BIND:
2709 		/*
2710 		 * Insist on a nodelist
2711 		 */
2712 		if (!nodelist)
2713 			goto out;
2714 	}
2715 
2716 	mode_flags = 0;
2717 	if (flags) {
2718 		/*
2719 		 * Currently, we only support two mutually exclusive
2720 		 * mode flags.
2721 		 */
2722 		if (!strcmp(flags, "static"))
2723 			mode_flags |= MPOL_F_STATIC_NODES;
2724 		else if (!strcmp(flags, "relative"))
2725 			mode_flags |= MPOL_F_RELATIVE_NODES;
2726 		else
2727 			goto out;
2728 	}
2729 
2730 	new = mpol_new(mode, mode_flags, &nodes);
2731 	if (IS_ERR(new))
2732 		goto out;
2733 
2734 	/*
2735 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2736 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2737 	 */
2738 	if (mode != MPOL_PREFERRED)
2739 		new->v.nodes = nodes;
2740 	else if (nodelist)
2741 		new->v.preferred_node = first_node(nodes);
2742 	else
2743 		new->flags |= MPOL_F_LOCAL;
2744 
2745 	/*
2746 	 * Save nodes for contextualization: this will be used to "clone"
2747 	 * the mempolicy in a specific context [cpuset] at a later time.
2748 	 */
2749 	new->w.user_nodemask = nodes;
2750 
2751 	err = 0;
2752 
2753 out:
2754 	/* Restore string for error message */
2755 	if (nodelist)
2756 		*--nodelist = ':';
2757 	if (flags)
2758 		*--flags = '=';
2759 	if (!err)
2760 		*mpol = new;
2761 	return err;
2762 }
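/*
 * Example strings accepted here (illustrative), as seen in tmpfs "mpol="
 * mount options:
 *
 *	"interleave:0-3"	interleave over nodes 0,1,2,3
 *	"prefer=static:1"	prefer node 1, MPOL_F_STATIC_NODES
 *	"bind:0,2"		bind to nodes 0 and 2
 *	"local"			MPOL_PREFERRED + MPOL_F_LOCAL
 *	"default"		no policy; returns 0 with *mpol left NULL
 */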
2763 #endif /* CONFIG_TMPFS */
2764 
2765 /**
2766  * mpol_to_str - format a mempolicy structure for printing
2767  * @buffer:  to contain formatted mempolicy string
2768  * @maxlen:  length of @buffer
2769  * @pol:  pointer to mempolicy to be formatted
2770  *
2771  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2772  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2773  * longest flag, "relative", and to display at least a few node ids.
2774  */
2775 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2776 {
2777 	char *p = buffer;
2778 	nodemask_t nodes = NODE_MASK_NONE;
2779 	unsigned short mode = MPOL_DEFAULT;
2780 	unsigned short flags = 0;
2781 
2782 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2783 		mode = pol->mode;
2784 		flags = pol->flags;
2785 	}
2786 
2787 	switch (mode) {
2788 	case MPOL_DEFAULT:
2789 		break;
2790 	case MPOL_PREFERRED:
2791 		if (flags & MPOL_F_LOCAL)
2792 			mode = MPOL_LOCAL;
2793 		else
2794 			node_set(pol->v.preferred_node, nodes);
2795 		break;
2796 	case MPOL_BIND:
2797 	case MPOL_INTERLEAVE:
2798 		nodes = pol->v.nodes;
2799 		break;
2800 	default:
2801 		WARN_ON_ONCE(1);
2802 		snprintf(p, maxlen, "unknown");
2803 		return;
2804 	}
2805 
2806 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2807 
2808 	if (flags & MPOL_MODE_FLAGS) {
2809 		p += snprintf(p, buffer + maxlen - p, "=");
2810 
2811 		/*
2812 		 * Currently, the only defined flags are mutually exclusive
2813 		 */
2814 		if (flags & MPOL_F_STATIC_NODES)
2815 			p += snprintf(p, buffer + maxlen - p, "static");
2816 		else if (flags & MPOL_F_RELATIVE_NODES)
2817 			p += snprintf(p, buffer + maxlen - p, "relative");
2818 	}
2819 
2820 	if (!nodes_empty(nodes))
2821 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2822 			       nodemask_pr_args(&nodes));
2823 }
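/*
 * Usage sketch (illustrative): callers such as show_numa_map() format a
 * policy into a small on-stack buffer:
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	seq_printf(m, "%s", buf);
 *
 * producing strings like "interleave:0-3", "prefer=relative:1", "bind:0,2",
 * "local" or "default".
 */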
2824