xref: /openbmc/linux/mm/mempolicy.c (revision 7490ca1e)
1 /*
2  * Simple NUMA memory policy for the Linux kernel.
3  *
4  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6  * Subject to the GNU Public License, version 2.
7  *
8  * NUMA policy allows the user to give hints about which node(s) memory
9  * should be allocated on.
10  *
11  * Support four policies per VMA and per process:
12  *
13  * The VMA policy has priority over the process policy for a page fault.
14  *
15  * interleave     Allocate memory interleaved over a set of nodes,
16  *                with normal fallback if it fails.
17  *                For VMA based allocations this interleaves based on the
18  *                offset into the backing object or offset into the mapping
19  *                for anonymous memory. For process policy a per-process
20  *                counter is used.
21  *
22  * bind           Only allocate memory on a specific set of nodes,
23  *                no fallback.
24  *                FIXME: memory is allocated starting with the first node
25  *                and proceeding to the last. It would be better if bind
26  *                truly restricted the allocation to the memory nodes instead.
27  *
28  * preferred      Try a specific node first before normal fallback.
29  *                As a special case node -1 here means do the allocation
30  *                on the local node. This is normally identical to default,
31  *                but useful to set in a VMA when you have a non default
32  *                process policy.
33  *
34  * default        Allocate on the local node first, or when on a VMA
35  *                use the process policy. This is what Linux always did
36  *		  in a NUMA aware kernel and still does by, ahem, default.
37  *
38  * The process policy is applied for most non-interrupt memory allocations
39  * in that process' context. Interrupts ignore the policies and always
40  * try to allocate on the local node. The VMA policy is only applied for memory
41  * allocations for a VMA in the VM.
42  *
43  * Currently there are a few corner cases in swapping where the policy
44  * is not applied, but the majority should be handled. When process policy
45  * is used it is not remembered over swap outs/swap ins.
46  *
47  * Only the highest zone in the zone hierarchy gets policied. Allocations
48  * requesting a lower zone just use default policy. This implies that
49  * on systems with highmem, kernel lowmem allocations don't get policied.
50  * Same with GFP_DMA allocations.
51  *
52  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53  * all users and remembered even when nobody has memory mapped.
54  */
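/*
 * Illustrative userspace sketch (not part of this file): how these policies
 * are typically requested via the set_mempolicy(2) and mbind(2) syscalls.
 * This assumes libnuma's <numaif.h> declarations and an existing page-aligned
 * mapping at addr of length len; it is purely an example of the interface.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0   = 1UL << 0;
 *
 *	// Interleave this process' future allocations over nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// Restrict one existing mapping to node 0 only, with no fallback.
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */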
55 
56 /* Notebook:
57    fix mmap readahead to honour policy and enable policy for any page cache
58    object
59    statistics for bigpages
60    global policy for page cache? currently it uses process policy. Requires
61    first item above.
62    handle mremap for shared memory (currently ignored for the policy)
63    grows down?
64    make bind policy root only? It can trigger oom much faster and the
65    kernel is not always graceful about that.
66 */
67 
68 #include <linux/mempolicy.h>
69 #include <linux/mm.h>
70 #include <linux/highmem.h>
71 #include <linux/hugetlb.h>
72 #include <linux/kernel.h>
73 #include <linux/sched.h>
74 #include <linux/nodemask.h>
75 #include <linux/cpuset.h>
76 #include <linux/slab.h>
77 #include <linux/string.h>
78 #include <linux/export.h>
79 #include <linux/nsproxy.h>
80 #include <linux/interrupt.h>
81 #include <linux/init.h>
82 #include <linux/compat.h>
83 #include <linux/swap.h>
84 #include <linux/seq_file.h>
85 #include <linux/proc_fs.h>
86 #include <linux/migrate.h>
87 #include <linux/ksm.h>
88 #include <linux/rmap.h>
89 #include <linux/security.h>
90 #include <linux/syscalls.h>
91 #include <linux/ctype.h>
92 #include <linux/mm_inline.h>
93 
94 #include <asm/tlbflush.h>
95 #include <asm/uaccess.h>
96 #include <linux/random.h>
97 
98 #include "internal.h"
99 
100 /* Internal flags */
101 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous VMAs */
102 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
103 
104 static struct kmem_cache *policy_cache;
105 static struct kmem_cache *sn_cache;
106 
107 /* Highest zone. A specific allocation for a zone below that is not
108    policied. */
109 enum zone_type policy_zone = 0;
110 
111 /*
112  * run-time system-wide default policy => local allocation
113  */
114 static struct mempolicy default_policy = {
115 	.refcnt = ATOMIC_INIT(1), /* never free it */
116 	.mode = MPOL_PREFERRED,
117 	.flags = MPOL_F_LOCAL,
118 };
119 
120 static const struct mempolicy_operations {
121 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
122 	/*
123 	 * If the read-side task has no lock to protect task->mempolicy, the
124 	 * write-side task will rebind task->mempolicy in two steps. The first
125 	 * step sets all the newly allowed nodes, and the second step clears
126 	 * all the disallowed nodes. This avoids a window in which no node is
127 	 * left to allocate a page from.
128 	 * If we have a lock to protect task->mempolicy on the read side, we
129 	 * rebind directly.
130 	 *
131 	 * step:
132 	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
133 	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
134 	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
135 	 */
136 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
137 			enum mpol_rebind_step step);
138 } mpol_ops[MPOL_MAX];
139 
140 /* Check that the nodemask contains at least one populated zone */
141 static int is_valid_nodemask(const nodemask_t *nodemask)
142 {
143 	int nd, k;
144 
145 	for_each_node_mask(nd, *nodemask) {
146 		struct zone *z;
147 
148 		for (k = 0; k <= policy_zone; k++) {
149 			z = &NODE_DATA(nd)->node_zones[k];
150 			if (z->present_pages > 0)
151 				return 1;
152 		}
153 	}
154 
155 	return 0;
156 }
157 
158 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
159 {
160 	return pol->flags & MPOL_MODE_FLAGS;
161 }
162 
163 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
164 				   const nodemask_t *rel)
165 {
166 	nodemask_t tmp;
167 	nodes_fold(tmp, *orig, nodes_weight(*rel));
168 	nodes_onto(*ret, tmp, *rel);
169 }
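/*
 * Worked example for mpol_relative_nodemask() above (illustrative): with a
 * user-relative mask orig = {0,2} and rel = cpuset mems {4,5,6} (weight 3),
 * nodes_fold() wraps orig into the first 3 bit positions, still {0,2}, and
 * nodes_onto() maps those ordinal positions onto the set bits of rel,
 * yielding {4,6}.  A bit beyond the weight wraps: orig = {3} folds to {0}
 * and maps to {4}.
 */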
170 
171 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
172 {
173 	if (nodes_empty(*nodes))
174 		return -EINVAL;
175 	pol->v.nodes = *nodes;
176 	return 0;
177 }
178 
179 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
180 {
181 	if (!nodes)
182 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
183 	else if (nodes_empty(*nodes))
184 		return -EINVAL;			/*  no allowed nodes */
185 	else
186 		pol->v.preferred_node = first_node(*nodes);
187 	return 0;
188 }
189 
190 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
191 {
192 	if (!is_valid_nodemask(nodes))
193 		return -EINVAL;
194 	pol->v.nodes = *nodes;
195 	return 0;
196 }
197 
198 /*
199  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
200  * any, for the new policy.  mpol_new() has already validated the nodes
201  * parameter with respect to the policy mode and flags.  But we need to
202  * handle an empty nodemask with MPOL_PREFERRED here.
203  *
204  * Must be called holding task's alloc_lock to protect task's mems_allowed
205  * and mempolicy.  May also be called holding mmap_sem for write.
 *
 * (A worked example follows the function below.)
206  */
207 static int mpol_set_nodemask(struct mempolicy *pol,
208 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
209 {
210 	int ret;
211 
212 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
213 	if (pol == NULL)
214 		return 0;
215 	/* Check N_HIGH_MEMORY */
216 	nodes_and(nsc->mask1,
217 		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
218 
219 	VM_BUG_ON(!nodes);
220 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
221 		nodes = NULL;	/* explicit local allocation */
222 	else {
223 		if (pol->flags & MPOL_F_RELATIVE_NODES)
224 			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
225 		else
226 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
227 
228 		if (mpol_store_user_nodemask(pol))
229 			pol->w.user_nodemask = *nodes;
230 		else
231 			pol->w.cpuset_mems_allowed =
232 						cpuset_current_mems_allowed;
233 	}
234 
235 	if (nodes)
236 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
237 	else
238 		ret = mpol_ops[pol->mode].create(pol, NULL);
239 	return ret;
240 }
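/*
 * Worked example for mpol_set_nodemask() above (illustrative): a request for
 * nodes {1,2,4} with no mode flags, while the cpuset allows {0,1,2}, is
 * intersected down to {1,2} before ->create() runs, and the current cpuset
 * mems are cached in w.cpuset_mems_allowed.  If MPOL_F_STATIC_NODES or
 * MPOL_F_RELATIVE_NODES was given, the original user mask {1,2,4} is instead
 * remembered in w.user_nodemask for later rebinds.
 */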
241 
242 /*
243  * This function just creates a new policy, does some checks and simple
244  * initialization. You must invoke mpol_set_nodemask() to set nodes.
 * (A sketch of the typical call sequence follows the function below.)
245  */
246 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
247 				  nodemask_t *nodes)
248 {
249 	struct mempolicy *policy;
250 
251 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
252 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
253 
254 	if (mode == MPOL_DEFAULT) {
255 		if (nodes && !nodes_empty(*nodes))
256 			return ERR_PTR(-EINVAL);
257 		return NULL;	/* simply delete any existing policy */
258 	}
259 	VM_BUG_ON(!nodes);
260 
261 	/*
262 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
263 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
264 	 * All other modes require a valid pointer to a non-empty nodemask.
265 	 */
266 	if (mode == MPOL_PREFERRED) {
267 		if (nodes_empty(*nodes)) {
268 			if (((flags & MPOL_F_STATIC_NODES) ||
269 			     (flags & MPOL_F_RELATIVE_NODES)))
270 				return ERR_PTR(-EINVAL);
271 		}
272 	} else if (nodes_empty(*nodes))
273 		return ERR_PTR(-EINVAL);
274 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 	if (!policy)
276 		return ERR_PTR(-ENOMEM);
277 	atomic_set(&policy->refcnt, 1);
278 	policy->mode = mode;
279 	policy->flags = flags;
280 
281 	return policy;
282 }
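/*
 * Typical call sequence (a condensed sketch of the existing callers such as
 * do_set_mempolicy() and do_mbind() below, not a new API): create the policy,
 * then contextualize it against the current cpuset under task_lock():
 *
 *	NODEMASK_SCRATCH(scratch);
 *	new = mpol_new(mode, flags, nodes);
 *	...
 *	task_lock(current);
 *	err = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	...
 *	NODEMASK_SCRATCH_FREE(scratch);
 */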
283 
284 /* Slow path of a mpol destructor. */
285 void __mpol_put(struct mempolicy *p)
286 {
287 	if (!atomic_dec_and_test(&p->refcnt))
288 		return;
289 	kmem_cache_free(policy_cache, p);
290 }
291 
292 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
293 				enum mpol_rebind_step step)
294 {
295 }
296 
297 /*
298  * step:
299  * 	MPOL_REBIND_ONCE  - do the rebind work at once
300  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
301  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
302  */
303 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
304 				 enum mpol_rebind_step step)
305 {
306 	nodemask_t tmp;
307 
308 	if (pol->flags & MPOL_F_STATIC_NODES)
309 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
310 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
311 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
312 	else {
313 		/*
314 		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
315 		 * result
316 		 */
317 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
318 			nodes_remap(tmp, pol->v.nodes,
319 					pol->w.cpuset_mems_allowed, *nodes);
320 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
321 		} else if (step == MPOL_REBIND_STEP2) {
322 			tmp = pol->w.cpuset_mems_allowed;
323 			pol->w.cpuset_mems_allowed = *nodes;
324 		} else
325 			BUG();
326 	}
327 
328 	if (nodes_empty(tmp))
329 		tmp = *nodes;
330 
331 	if (step == MPOL_REBIND_STEP1)
332 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
333 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
334 		pol->v.nodes = tmp;
335 	else
336 		BUG();
337 
338 	if (!node_isset(current->il_next, tmp)) {
339 		current->il_next = next_node(current->il_next, tmp);
340 		if (current->il_next >= MAX_NUMNODES)
341 			current->il_next = first_node(tmp);
342 		if (current->il_next >= MAX_NUMNODES)
343 			current->il_next = numa_node_id();
344 	}
345 }
346 
347 static void mpol_rebind_preferred(struct mempolicy *pol,
348 				  const nodemask_t *nodes,
349 				  enum mpol_rebind_step step)
350 {
351 	nodemask_t tmp;
352 
353 	if (pol->flags & MPOL_F_STATIC_NODES) {
354 		int node = first_node(pol->w.user_nodemask);
355 
356 		if (node_isset(node, *nodes)) {
357 			pol->v.preferred_node = node;
358 			pol->flags &= ~MPOL_F_LOCAL;
359 		} else
360 			pol->flags |= MPOL_F_LOCAL;
361 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
362 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
363 		pol->v.preferred_node = first_node(tmp);
364 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
365 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
366 						   pol->w.cpuset_mems_allowed,
367 						   *nodes);
368 		pol->w.cpuset_mems_allowed = *nodes;
369 	}
370 }
371 
372 /*
373  * mpol_rebind_policy - Migrate a policy to a different set of nodes
374  *
375  * If the read-side task has no lock to protect task->mempolicy, the
376  * write-side task will rebind task->mempolicy in two steps. The first
377  * step sets all the newly allowed nodes, and the second step clears
378  * all the disallowed nodes. This avoids a window in which no node is
379  * left to allocate a page from.
380  * If we have a lock to protect task->mempolicy on the read side, we
381  * rebind directly.
382  *
383  * step:
384  * 	MPOL_REBIND_ONCE  - do the rebind work at once
385  * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
386  * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 *
 * (A worked example follows this function below.)
387  */
388 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
389 				enum mpol_rebind_step step)
390 {
391 	if (!pol)
392 		return;
393 	if (!mpol_store_user_nodemask(pol) && step == 0 &&
394 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
395 		return;
396 
397 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
398 		return;
399 
400 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
401 		BUG();
402 
403 	if (step == MPOL_REBIND_STEP1)
404 		pol->flags |= MPOL_F_REBINDING;
405 	else if (step == MPOL_REBIND_STEP2)
406 		pol->flags &= ~MPOL_F_REBINDING;
407 	else if (step >= MPOL_REBIND_NSTEP)
408 		BUG();
409 
410 	mpol_ops[pol->mode].rebind(pol, newmask, step);
411 }
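/*
 * Worked example of the two-step rebind (illustrative): a policy with
 * v.nodes = {0,1} and neither MPOL_F_STATIC_NODES nor MPOL_F_RELATIVE_NODES,
 * whose cpuset's mems change from {0,1} to {2,3}.  MPOL_REBIND_STEP1 remaps
 * the old nodes to {2,3} and ORs them in, leaving v.nodes = {0,1,2,3} so
 * concurrent allocations always find a node; MPOL_REBIND_STEP2 then drops the
 * disallowed nodes, leaving v.nodes = {2,3}.  MPOL_REBIND_ONCE does both in
 * one go for callers that hold a lock.
 */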
412 
413 /*
414  * Wrapper for mpol_rebind_policy() that just requires task
415  * pointer, and updates task mempolicy.
416  *
417  * Called with task's alloc_lock held.
418  */
419 
420 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
421 			enum mpol_rebind_step step)
422 {
423 	mpol_rebind_policy(tsk->mempolicy, new, step);
424 }
425 
426 /*
427  * Rebind each vma in mm to new nodemask.
428  *
429  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
430  */
431 
432 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
433 {
434 	struct vm_area_struct *vma;
435 
436 	down_write(&mm->mmap_sem);
437 	for (vma = mm->mmap; vma; vma = vma->vm_next)
438 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
439 	up_write(&mm->mmap_sem);
440 }
441 
442 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
443 	[MPOL_DEFAULT] = {
444 		.rebind = mpol_rebind_default,
445 	},
446 	[MPOL_INTERLEAVE] = {
447 		.create = mpol_new_interleave,
448 		.rebind = mpol_rebind_nodemask,
449 	},
450 	[MPOL_PREFERRED] = {
451 		.create = mpol_new_preferred,
452 		.rebind = mpol_rebind_preferred,
453 	},
454 	[MPOL_BIND] = {
455 		.create = mpol_new_bind,
456 		.rebind = mpol_rebind_nodemask,
457 	},
458 };
459 
460 static void migrate_page_add(struct page *page, struct list_head *pagelist,
461 				unsigned long flags);
462 
463 /* Scan through the page range, checking whether pages satisfy the given conditions. */
464 static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
465 		unsigned long addr, unsigned long end,
466 		const nodemask_t *nodes, unsigned long flags,
467 		void *private)
468 {
469 	pte_t *orig_pte;
470 	pte_t *pte;
471 	spinlock_t *ptl;
472 
473 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
474 	do {
475 		struct page *page;
476 		int nid;
477 
478 		if (!pte_present(*pte))
479 			continue;
480 		page = vm_normal_page(vma, addr, *pte);
481 		if (!page)
482 			continue;
483 		/*
484 		 * vm_normal_page() filters out zero pages, but there might
485 		 * still be PageReserved pages to skip, perhaps in a VDSO.
486 		 * And we cannot move PageKsm pages sensibly or safely yet.
487 		 */
488 		if (PageReserved(page) || PageKsm(page))
489 			continue;
490 		nid = page_to_nid(page);
491 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
492 			continue;
493 
494 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
495 			migrate_page_add(page, private, flags);
496 		else
497 			break;
498 	} while (pte++, addr += PAGE_SIZE, addr != end);
499 	pte_unmap_unlock(orig_pte, ptl);
500 	return addr != end;
501 }
502 
503 static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
504 		unsigned long addr, unsigned long end,
505 		const nodemask_t *nodes, unsigned long flags,
506 		void *private)
507 {
508 	pmd_t *pmd;
509 	unsigned long next;
510 
511 	pmd = pmd_offset(pud, addr);
512 	do {
513 		next = pmd_addr_end(addr, end);
514 		split_huge_page_pmd(vma->vm_mm, pmd);
515 		if (pmd_none_or_clear_bad(pmd))
516 			continue;
517 		if (check_pte_range(vma, pmd, addr, next, nodes,
518 				    flags, private))
519 			return -EIO;
520 	} while (pmd++, addr = next, addr != end);
521 	return 0;
522 }
523 
524 static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
525 		unsigned long addr, unsigned long end,
526 		const nodemask_t *nodes, unsigned long flags,
527 		void *private)
528 {
529 	pud_t *pud;
530 	unsigned long next;
531 
532 	pud = pud_offset(pgd, addr);
533 	do {
534 		next = pud_addr_end(addr, end);
535 		if (pud_none_or_clear_bad(pud))
536 			continue;
537 		if (check_pmd_range(vma, pud, addr, next, nodes,
538 				    flags, private))
539 			return -EIO;
540 	} while (pud++, addr = next, addr != end);
541 	return 0;
542 }
543 
544 static inline int check_pgd_range(struct vm_area_struct *vma,
545 		unsigned long addr, unsigned long end,
546 		const nodemask_t *nodes, unsigned long flags,
547 		void *private)
548 {
549 	pgd_t *pgd;
550 	unsigned long next;
551 
552 	pgd = pgd_offset(vma->vm_mm, addr);
553 	do {
554 		next = pgd_addr_end(addr, end);
555 		if (pgd_none_or_clear_bad(pgd))
556 			continue;
557 		if (check_pud_range(vma, pgd, addr, next, nodes,
558 				    flags, private))
559 			return -EIO;
560 	} while (pgd++, addr = next, addr != end);
561 	return 0;
562 }
563 
564 /*
565  * Check if all pages in a range are on a set of nodes.
566  * If pagelist != NULL then isolate pages from the LRU and
567  * put them on the pagelist.
568  */
569 static struct vm_area_struct *
570 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
571 		const nodemask_t *nodes, unsigned long flags, void *private)
572 {
573 	int err;
574 	struct vm_area_struct *first, *vma, *prev;
575 
576 
577 	first = find_vma(mm, start);
578 	if (!first)
579 		return ERR_PTR(-EFAULT);
580 	prev = NULL;
581 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
582 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
583 			if (!vma->vm_next && vma->vm_end < end)
584 				return ERR_PTR(-EFAULT);
585 			if (prev && prev->vm_end < vma->vm_start)
586 				return ERR_PTR(-EFAULT);
587 		}
588 		if (!is_vm_hugetlb_page(vma) &&
589 		    ((flags & MPOL_MF_STRICT) ||
590 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
591 				vma_migratable(vma)))) {
592 			unsigned long endvma = vma->vm_end;
593 
594 			if (endvma > end)
595 				endvma = end;
596 			if (vma->vm_start > start)
597 				start = vma->vm_start;
598 			err = check_pgd_range(vma, start, endvma, nodes,
599 						flags, private);
600 			if (err) {
601 				first = ERR_PTR(err);
602 				break;
603 			}
604 		}
605 		prev = vma;
606 	}
607 	return first;
608 }
609 
610 /* Apply policy to a single VMA */
611 static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
612 {
613 	int err = 0;
614 	struct mempolicy *old = vma->vm_policy;
615 
616 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
617 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
618 		 vma->vm_ops, vma->vm_file,
619 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
620 
621 	if (vma->vm_ops && vma->vm_ops->set_policy)
622 		err = vma->vm_ops->set_policy(vma, new);
623 	if (!err) {
624 		mpol_get(new);
625 		vma->vm_policy = new;
626 		mpol_put(old);
627 	}
628 	return err;
629 }
630 
631 /* Step 2: apply policy to a range and do splits. */
632 static int mbind_range(struct mm_struct *mm, unsigned long start,
633 		       unsigned long end, struct mempolicy *new_pol)
634 {
635 	struct vm_area_struct *next;
636 	struct vm_area_struct *prev;
637 	struct vm_area_struct *vma;
638 	int err = 0;
639 	pgoff_t pgoff;
640 	unsigned long vmstart;
641 	unsigned long vmend;
642 
643 	vma = find_vma_prev(mm, start, &prev);
644 	if (!vma || vma->vm_start > start)
645 		return -EFAULT;
646 
647 	if (start > vma->vm_start)
648 		prev = vma;
649 
650 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
651 		next = vma->vm_next;
652 		vmstart = max(start, vma->vm_start);
653 		vmend   = min(end, vma->vm_end);
654 
655 		if (mpol_equal(vma_policy(vma), new_pol))
656 			continue;
657 
658 		pgoff = vma->vm_pgoff +
659 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
660 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
661 				  vma->anon_vma, vma->vm_file, pgoff,
662 				  new_pol);
663 		if (prev) {
664 			vma = prev;
665 			next = vma->vm_next;
666 			continue;
667 		}
668 		if (vma->vm_start != vmstart) {
669 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
670 			if (err)
671 				goto out;
672 		}
673 		if (vma->vm_end != vmend) {
674 			err = split_vma(vma->vm_mm, vma, vmend, 0);
675 			if (err)
676 				goto out;
677 		}
678 		err = policy_vma(vma, new_pol);
679 		if (err)
680 			goto out;
681 	}
682 
683  out:
684 	return err;
685 }
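/*
 * Illustrative example for mbind_range() above: mbind()ing [0x3000, 0x6000)
 * inside a VMA spanning [0x1000, 0x9000) first tries vma_merge() with the new
 * policy; failing that, it splits at 0x3000 and again at 0x6000, so only the
 * middle VMA receives the new policy via policy_vma().
 */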
686 
687 /*
688  * Update task->flags PF_MEMPOLICY bit: set iff non-default
689  * mempolicy.  Allows more rapid checking of this (combined perhaps
690  * with other PF_* flag bits) on memory allocation hot code paths.
691  *
692  * If called from outside this file, the task 'p' should -only- be
693  * a newly forked child not yet visible on the task list, because
694  * manipulating the task flags of a visible task is not safe.
695  *
696  * The above limitation is why this routine has the funny name
697  * mpol_fix_fork_child_flag().
698  *
699  * It is also safe to call this with a task pointer of current,
700  * which the static wrapper mpol_set_task_struct_flag() does,
701  * for use within this file.
702  */
703 
704 void mpol_fix_fork_child_flag(struct task_struct *p)
705 {
706 	if (p->mempolicy)
707 		p->flags |= PF_MEMPOLICY;
708 	else
709 		p->flags &= ~PF_MEMPOLICY;
710 }
711 
712 static void mpol_set_task_struct_flag(void)
713 {
714 	mpol_fix_fork_child_flag(current);
715 }
716 
717 /* Set the process memory policy */
718 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
719 			     nodemask_t *nodes)
720 {
721 	struct mempolicy *new, *old;
722 	struct mm_struct *mm = current->mm;
723 	NODEMASK_SCRATCH(scratch);
724 	int ret;
725 
726 	if (!scratch)
727 		return -ENOMEM;
728 
729 	new = mpol_new(mode, flags, nodes);
730 	if (IS_ERR(new)) {
731 		ret = PTR_ERR(new);
732 		goto out;
733 	}
734 	/*
735 	 * prevent changing our mempolicy while show_numa_maps()
736 	 * is using it.
737 	 * Note:  do_set_mempolicy() can be called at init time
738 	 * with no 'mm'.
739 	 */
740 	if (mm)
741 		down_write(&mm->mmap_sem);
742 	task_lock(current);
743 	ret = mpol_set_nodemask(new, nodes, scratch);
744 	if (ret) {
745 		task_unlock(current);
746 		if (mm)
747 			up_write(&mm->mmap_sem);
748 		mpol_put(new);
749 		goto out;
750 	}
751 	old = current->mempolicy;
752 	current->mempolicy = new;
753 	mpol_set_task_struct_flag();
754 	if (new && new->mode == MPOL_INTERLEAVE &&
755 	    nodes_weight(new->v.nodes))
756 		current->il_next = first_node(new->v.nodes);
757 	task_unlock(current);
758 	if (mm)
759 		up_write(&mm->mmap_sem);
760 
761 	mpol_put(old);
762 	ret = 0;
763 out:
764 	NODEMASK_SCRATCH_FREE(scratch);
765 	return ret;
766 }
767 
768 /*
769  * Return nodemask for policy for get_mempolicy() query
770  *
771  * Called with task's alloc_lock held
772  */
773 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
774 {
775 	nodes_clear(*nodes);
776 	if (p == &default_policy)
777 		return;
778 
779 	switch (p->mode) {
780 	case MPOL_BIND:
781 		/* Fall through */
782 	case MPOL_INTERLEAVE:
783 		*nodes = p->v.nodes;
784 		break;
785 	case MPOL_PREFERRED:
786 		if (!(p->flags & MPOL_F_LOCAL))
787 			node_set(p->v.preferred_node, *nodes);
788 		/* else return empty node mask for local allocation */
789 		break;
790 	default:
791 		BUG();
792 	}
793 }
794 
795 static int lookup_node(struct mm_struct *mm, unsigned long addr)
796 {
797 	struct page *p;
798 	int err;
799 
800 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
801 	if (err >= 0) {
802 		err = page_to_nid(p);
803 		put_page(p);
804 	}
805 	return err;
806 }
807 
808 /* Retrieve NUMA policy */
809 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
810 			     unsigned long addr, unsigned long flags)
811 {
812 	int err;
813 	struct mm_struct *mm = current->mm;
814 	struct vm_area_struct *vma = NULL;
815 	struct mempolicy *pol = current->mempolicy;
816 
817 	if (flags &
818 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
819 		return -EINVAL;
820 
821 	if (flags & MPOL_F_MEMS_ALLOWED) {
822 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
823 			return -EINVAL;
824 		*policy = 0;	/* just so it's initialized */
825 		task_lock(current);
826 		*nmask  = cpuset_current_mems_allowed;
827 		task_unlock(current);
828 		return 0;
829 	}
830 
831 	if (flags & MPOL_F_ADDR) {
832 		/*
833 		 * Do NOT fall back to task policy if the
834 		 * vma/shared policy at addr is NULL.  We
835 		 * want to return MPOL_DEFAULT in this case.
836 		 */
837 		down_read(&mm->mmap_sem);
838 		vma = find_vma_intersection(mm, addr, addr+1);
839 		if (!vma) {
840 			up_read(&mm->mmap_sem);
841 			return -EFAULT;
842 		}
843 		if (vma->vm_ops && vma->vm_ops->get_policy)
844 			pol = vma->vm_ops->get_policy(vma, addr);
845 		else
846 			pol = vma->vm_policy;
847 	} else if (addr)
848 		return -EINVAL;
849 
850 	if (!pol)
851 		pol = &default_policy;	/* indicates default behavior */
852 
853 	if (flags & MPOL_F_NODE) {
854 		if (flags & MPOL_F_ADDR) {
855 			err = lookup_node(mm, addr);
856 			if (err < 0)
857 				goto out;
858 			*policy = err;
859 		} else if (pol == current->mempolicy &&
860 				pol->mode == MPOL_INTERLEAVE) {
861 			*policy = current->il_next;
862 		} else {
863 			err = -EINVAL;
864 			goto out;
865 		}
866 	} else {
867 		*policy = pol == &default_policy ? MPOL_DEFAULT :
868 						pol->mode;
869 		/*
870 		 * Internal mempolicy flags must be masked off before exposing
871 		 * the policy to userspace.
872 		 */
873 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
874 	}
875 
876 	if (vma) {
877 		up_read(&current->mm->mmap_sem);
878 		vma = NULL;
879 	}
880 
881 	err = 0;
882 	if (nmask) {
883 		if (mpol_store_user_nodemask(pol)) {
884 			*nmask = pol->w.user_nodemask;
885 		} else {
886 			task_lock(current);
887 			get_policy_nodemask(pol, nmask);
888 			task_unlock(current);
889 		}
890 	}
891 
892  out:
893 	mpol_cond_put(pol);
894 	if (vma)
895 		up_read(&current->mm->mmap_sem);
896 	return err;
897 }
898 
899 #ifdef CONFIG_MIGRATION
900 /*
901  * page migration
902  */
903 static void migrate_page_add(struct page *page, struct list_head *pagelist,
904 				unsigned long flags)
905 {
906 	/*
907 	 * Avoid migrating a page that is shared with others.
908 	 */
909 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
910 		if (!isolate_lru_page(page)) {
911 			list_add_tail(&page->lru, pagelist);
912 			inc_zone_page_state(page, NR_ISOLATED_ANON +
913 					    page_is_file_cache(page));
914 		}
915 	}
916 }
917 
918 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
919 {
920 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
921 }
922 
923 /*
924  * Migrate pages from one node to a target node.
925  * Returns error or the number of pages not migrated.
926  */
927 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
928 			   int flags)
929 {
930 	nodemask_t nmask;
931 	LIST_HEAD(pagelist);
932 	int err = 0;
933 	struct vm_area_struct *vma;
934 
935 	nodes_clear(nmask);
936 	node_set(source, nmask);
937 
938 	vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
939 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
940 	if (IS_ERR(vma))
941 		return PTR_ERR(vma);
942 
943 	if (!list_empty(&pagelist)) {
944 		err = migrate_pages(&pagelist, new_node_page, dest,
945 							false, MIGRATE_SYNC);
946 		if (err)
947 			putback_lru_pages(&pagelist);
948 	}
949 
950 	return err;
951 }
952 
953 /*
954  * Move pages between the two nodesets so as to preserve the physical
955  * layout as much as possible.
956  *
957  * Returns the number of pages that could not be moved.
958  */
959 int do_migrate_pages(struct mm_struct *mm,
960 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
961 {
962 	int busy = 0;
963 	int err;
964 	nodemask_t tmp;
965 
966 	err = migrate_prep();
967 	if (err)
968 		return err;
969 
970 	down_read(&mm->mmap_sem);
971 
972 	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
973 	if (err)
974 		goto out;
975 
976 	/*
977 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
978 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
979 	 * bit in 'tmp', and return that <source, dest> pair for migration.
980 	 * The pair of nodemasks 'to' and 'from' define the map.
981 	 *
982 	 * If no pair of bits is found that way, fall back to picking some
983 	 * pair of 'source' and 'dest' bits that are not the same.  If the
984 	 * 'source' and 'dest' bits are the same, this represents a node
985 	 * that will be migrating to itself, so no pages need move.
986 	 *
987 	 * If no bits are left in 'tmp', or if all remaining bits left
988 	 * in 'tmp' correspond to the same bit in 'to', return false
989 	 * (nothing left to migrate).
990 	 *
991 	 * This lets us pick a pair of nodes to migrate between, such that
992 	 * if possible the dest node is not already occupied by some other
993 	 * source node, minimizing the risk of overloading the memory on a
994 	 * node that would happen if we migrated incoming memory to a node
995 	 * before migrating outgoing memory from that same node.
996 	 *
997 	 * A single scan of tmp is sufficient.  As we go, we remember the
998 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
999 	 * that not only moved, but what's better, moved to an empty slot
1000 	 * (d is not set in tmp), then we break out with that pair.
1001 	 * Otherwise when we finish scanning tmp, we at least have the
1002 	 * most recent <s, d> pair that moved.  If we get all the way through
1003 	 * the scan of tmp without finding any node that moved, much less
1004 	 * moved to an empty node, then there is nothing left worth migrating.
1005 	 */
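	/*
	 * Illustrative example: from = {0,1}, to = {1,2}.  Scanning tmp = {0,1}:
	 * s=0 maps to d=1 (still in tmp, remember it), then s=1 maps to d=2
	 * (an empty slot, break).  Migrate 1 -> 2 first, clear 1 from tmp,
	 * then on the next pass migrate 0 -> 1.
	 */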
1006 
1007 	tmp = *from_nodes;
1008 	while (!nodes_empty(tmp)) {
1009 		int s,d;
1010 		int source = -1;
1011 		int dest = 0;
1012 
1013 		for_each_node_mask(s, tmp) {
1014 			d = node_remap(s, *from_nodes, *to_nodes);
1015 			if (s == d)
1016 				continue;
1017 
1018 			source = s;	/* Node moved. Memorize */
1019 			dest = d;
1020 
1021 			/* dest not in remaining from nodes? */
1022 			if (!node_isset(dest, tmp))
1023 				break;
1024 		}
1025 		if (source == -1)
1026 			break;
1027 
1028 		node_clear(source, tmp);
1029 		err = migrate_to_node(mm, source, dest, flags);
1030 		if (err > 0)
1031 			busy += err;
1032 		if (err < 0)
1033 			break;
1034 	}
1035 out:
1036 	up_read(&mm->mmap_sem);
1037 	if (err < 0)
1038 		return err;
1039 	return busy;
1040 
1041 }
1042 
1043 /*
1044  * Allocate a new page for page migration based on vma policy.
1045  * Start assuming that page is mapped by vma pointed to by @private.
1046  * Search forward from there, if not.  N.B., this assumes that the
1047  * list of pages handed to migrate_pages()--which is how we get here--
1048  * is in virtual address order.
1049  */
1050 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1051 {
1052 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
1053 	unsigned long uninitialized_var(address);
1054 
1055 	while (vma) {
1056 		address = page_address_in_vma(page, vma);
1057 		if (address != -EFAULT)
1058 			break;
1059 		vma = vma->vm_next;
1060 	}
1061 
1062 	/*
1063 	 * if !vma, alloc_page_vma() will use task or system default policy
1064 	 */
1065 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1066 }
1067 #else
1068 
1069 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1070 				unsigned long flags)
1071 {
1072 }
1073 
1074 int do_migrate_pages(struct mm_struct *mm,
1075 	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1076 {
1077 	return -ENOSYS;
1078 }
1079 
1080 static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1081 {
1082 	return NULL;
1083 }
1084 #endif
1085 
1086 static long do_mbind(unsigned long start, unsigned long len,
1087 		     unsigned short mode, unsigned short mode_flags,
1088 		     nodemask_t *nmask, unsigned long flags)
1089 {
1090 	struct vm_area_struct *vma;
1091 	struct mm_struct *mm = current->mm;
1092 	struct mempolicy *new;
1093 	unsigned long end;
1094 	int err;
1095 	LIST_HEAD(pagelist);
1096 
1097 	if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1098 				     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1099 		return -EINVAL;
1100 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1101 		return -EPERM;
1102 
1103 	if (start & ~PAGE_MASK)
1104 		return -EINVAL;
1105 
1106 	if (mode == MPOL_DEFAULT)
1107 		flags &= ~MPOL_MF_STRICT;
1108 
1109 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1110 	end = start + len;
1111 
1112 	if (end < start)
1113 		return -EINVAL;
1114 	if (end == start)
1115 		return 0;
1116 
1117 	new = mpol_new(mode, mode_flags, nmask);
1118 	if (IS_ERR(new))
1119 		return PTR_ERR(new);
1120 
1121 	/*
1122 	 * If we are using the default policy then operation
1123 	 * on discontinuous address spaces is okay after all
1124 	 */
1125 	if (!new)
1126 		flags |= MPOL_MF_DISCONTIG_OK;
1127 
1128 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1129 		 start, start + len, mode, mode_flags,
1130 		 nmask ? nodes_addr(*nmask)[0] : -1);
1131 
1132 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1133 
1134 		err = migrate_prep();
1135 		if (err)
1136 			goto mpol_out;
1137 	}
1138 	{
1139 		NODEMASK_SCRATCH(scratch);
1140 		if (scratch) {
1141 			down_write(&mm->mmap_sem);
1142 			task_lock(current);
1143 			err = mpol_set_nodemask(new, nmask, scratch);
1144 			task_unlock(current);
1145 			if (err)
1146 				up_write(&mm->mmap_sem);
1147 		} else
1148 			err = -ENOMEM;
1149 		NODEMASK_SCRATCH_FREE(scratch);
1150 	}
1151 	if (err)
1152 		goto mpol_out;
1153 
1154 	vma = check_range(mm, start, end, nmask,
1155 			  flags | MPOL_MF_INVERT, &pagelist);
1156 
1157 	err = PTR_ERR(vma);
1158 	if (!IS_ERR(vma)) {
1159 		int nr_failed = 0;
1160 
1161 		err = mbind_range(mm, start, end, new);
1162 
1163 		if (!list_empty(&pagelist)) {
1164 			nr_failed = migrate_pages(&pagelist, new_vma_page,
1165 						(unsigned long)vma,
1166 						false, true);
1167 			if (nr_failed)
1168 				putback_lru_pages(&pagelist);
1169 		}
1170 
1171 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1172 			err = -EIO;
1173 	} else
1174 		putback_lru_pages(&pagelist);
1175 
1176 	up_write(&mm->mmap_sem);
1177  mpol_out:
1178 	mpol_put(new);
1179 	return err;
1180 }
1181 
1182 /*
1183  * User space interface with variable sized bitmaps for nodelists.
1184  */
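/*
 * Illustrative userspace view (a sketch, not kernel code): the nodemask is an
 * array of unsigned longs plus a bit count, so callers size it to the nodes
 * they care about, e.g. for hypothetical nodes 1 and 3:
 *
 *	unsigned long mask[1] = { (1UL << 1) | (1UL << 3) };
 *	set_mempolicy(MPOL_BIND, mask, sizeof(mask) * 8);
 *
 * get_nodes() below validates and copies such a mask in; copy_nodes_to_user()
 * copies one back out, and the get_mempolicy() syscall insists on room for at
 * least MAX_NUMNODES bits when a mask is requested.
 */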
1185 
1186 /* Copy a node mask from user space. */
1187 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1188 		     unsigned long maxnode)
1189 {
1190 	unsigned long k;
1191 	unsigned long nlongs;
1192 	unsigned long endmask;
1193 
1194 	--maxnode;
1195 	nodes_clear(*nodes);
1196 	if (maxnode == 0 || !nmask)
1197 		return 0;
1198 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1199 		return -EINVAL;
1200 
1201 	nlongs = BITS_TO_LONGS(maxnode);
1202 	if ((maxnode % BITS_PER_LONG) == 0)
1203 		endmask = ~0UL;
1204 	else
1205 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1206 
1207 	/* When the user specified more nodes than supported just check
1208 	   if the unsupported part is all zero. */
1209 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1210 		if (nlongs > PAGE_SIZE/sizeof(long))
1211 			return -EINVAL;
1212 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1213 			unsigned long t;
1214 			if (get_user(t, nmask + k))
1215 				return -EFAULT;
1216 			if (k == nlongs - 1) {
1217 				if (t & endmask)
1218 					return -EINVAL;
1219 			} else if (t)
1220 				return -EINVAL;
1221 		}
1222 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1223 		endmask = ~0UL;
1224 	}
1225 
1226 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1227 		return -EFAULT;
1228 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1229 	return 0;
1230 }
1231 
1232 /* Copy a kernel node mask to user space */
1233 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1234 			      nodemask_t *nodes)
1235 {
1236 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1237 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1238 
1239 	if (copy > nbytes) {
1240 		if (copy > PAGE_SIZE)
1241 			return -EINVAL;
1242 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1243 			return -EFAULT;
1244 		copy = nbytes;
1245 	}
1246 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1247 }
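/*
 * Example of the sizing in copy_nodes_to_user() above (illustrative): for
 * maxnode = 1024 the user copy is ALIGN(1023, 64) / 8 = 128 bytes; if the
 * kernel only supports MAX_NUMNODES = 64, nbytes = 8, so bytes 8..127 of the
 * user buffer are cleared and only the first 8 bytes are copied from the
 * kernel nodemask.
 */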
1248 
1249 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1250 		unsigned long, mode, unsigned long __user *, nmask,
1251 		unsigned long, maxnode, unsigned, flags)
1252 {
1253 	nodemask_t nodes;
1254 	int err;
1255 	unsigned short mode_flags;
1256 
1257 	mode_flags = mode & MPOL_MODE_FLAGS;
1258 	mode &= ~MPOL_MODE_FLAGS;
1259 	if (mode >= MPOL_MAX)
1260 		return -EINVAL;
1261 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1262 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1263 		return -EINVAL;
1264 	err = get_nodes(&nodes, nmask, maxnode);
1265 	if (err)
1266 		return err;
1267 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1268 }
1269 
1270 /* Set the process memory policy */
1271 SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1272 		unsigned long, maxnode)
1273 {
1274 	int err;
1275 	nodemask_t nodes;
1276 	unsigned short flags;
1277 
1278 	flags = mode & MPOL_MODE_FLAGS;
1279 	mode &= ~MPOL_MODE_FLAGS;
1280 	if ((unsigned int)mode >= MPOL_MAX)
1281 		return -EINVAL;
1282 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1283 		return -EINVAL;
1284 	err = get_nodes(&nodes, nmask, maxnode);
1285 	if (err)
1286 		return err;
1287 	return do_set_mempolicy(mode, flags, &nodes);
1288 }
1289 
1290 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1291 		const unsigned long __user *, old_nodes,
1292 		const unsigned long __user *, new_nodes)
1293 {
1294 	const struct cred *cred = current_cred(), *tcred;
1295 	struct mm_struct *mm = NULL;
1296 	struct task_struct *task;
1297 	nodemask_t task_nodes;
1298 	int err;
1299 	nodemask_t *old;
1300 	nodemask_t *new;
1301 	NODEMASK_SCRATCH(scratch);
1302 
1303 	if (!scratch)
1304 		return -ENOMEM;
1305 
1306 	old = &scratch->mask1;
1307 	new = &scratch->mask2;
1308 
1309 	err = get_nodes(old, old_nodes, maxnode);
1310 	if (err)
1311 		goto out;
1312 
1313 	err = get_nodes(new, new_nodes, maxnode);
1314 	if (err)
1315 		goto out;
1316 
1317 	/* Find the mm_struct */
1318 	rcu_read_lock();
1319 	task = pid ? find_task_by_vpid(pid) : current;
1320 	if (!task) {
1321 		rcu_read_unlock();
1322 		err = -ESRCH;
1323 		goto out;
1324 	}
1325 	mm = get_task_mm(task);
1326 	rcu_read_unlock();
1327 
1328 	err = -EINVAL;
1329 	if (!mm)
1330 		goto out;
1331 
1332 	/*
1333 	 * Check if this process has the right to modify the specified
1334 	 * process. The right exists if the process has administrative
1335 	 * capabilities, superuser privileges or the same
1336 	 * userid as the target process.
1337 	 */
1338 	rcu_read_lock();
1339 	tcred = __task_cred(task);
1340 	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1341 	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
1342 	    !capable(CAP_SYS_NICE)) {
1343 		rcu_read_unlock();
1344 		err = -EPERM;
1345 		goto out;
1346 	}
1347 	rcu_read_unlock();
1348 
1349 	task_nodes = cpuset_mems_allowed(task);
1350 	/* Is the user allowed to access the target nodes? */
1351 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1352 		err = -EPERM;
1353 		goto out;
1354 	}
1355 
1356 	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1357 		err = -EINVAL;
1358 		goto out;
1359 	}
1360 
1361 	err = security_task_movememory(task);
1362 	if (err)
1363 		goto out;
1364 
1365 	err = do_migrate_pages(mm, old, new,
1366 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1367 out:
1368 	if (mm)
1369 		mmput(mm);
1370 	NODEMASK_SCRATCH_FREE(scratch);
1371 
1372 	return err;
1373 }
1374 
1375 
1376 /* Retrieve NUMA policy */
1377 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1378 		unsigned long __user *, nmask, unsigned long, maxnode,
1379 		unsigned long, addr, unsigned long, flags)
1380 {
1381 	int err;
1382 	int uninitialized_var(pval);
1383 	nodemask_t nodes;
1384 
1385 	if (nmask != NULL && maxnode < MAX_NUMNODES)
1386 		return -EINVAL;
1387 
1388 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1389 
1390 	if (err)
1391 		return err;
1392 
1393 	if (policy && put_user(pval, policy))
1394 		return -EFAULT;
1395 
1396 	if (nmask)
1397 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1398 
1399 	return err;
1400 }
1401 
1402 #ifdef CONFIG_COMPAT
1403 
1404 asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1405 				     compat_ulong_t __user *nmask,
1406 				     compat_ulong_t maxnode,
1407 				     compat_ulong_t addr, compat_ulong_t flags)
1408 {
1409 	long err;
1410 	unsigned long __user *nm = NULL;
1411 	unsigned long nr_bits, alloc_size;
1412 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1413 
1414 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1415 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1416 
1417 	if (nmask)
1418 		nm = compat_alloc_user_space(alloc_size);
1419 
1420 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1421 
1422 	if (!err && nmask) {
1423 		unsigned long copy_size;
1424 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1425 		err = copy_from_user(bm, nm, copy_size);
1426 		/* ensure entire bitmap is zeroed */
1427 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1428 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1429 	}
1430 
1431 	return err;
1432 }
1433 
1434 asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1435 				     compat_ulong_t maxnode)
1436 {
1437 	long err = 0;
1438 	unsigned long __user *nm = NULL;
1439 	unsigned long nr_bits, alloc_size;
1440 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1441 
1442 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1443 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1444 
1445 	if (nmask) {
1446 		err = compat_get_bitmap(bm, nmask, nr_bits);
1447 		nm = compat_alloc_user_space(alloc_size);
1448 		err |= copy_to_user(nm, bm, alloc_size);
1449 	}
1450 
1451 	if (err)
1452 		return -EFAULT;
1453 
1454 	return sys_set_mempolicy(mode, nm, nr_bits+1);
1455 }
1456 
1457 asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1458 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
1459 			     compat_ulong_t maxnode, compat_ulong_t flags)
1460 {
1461 	long err = 0;
1462 	unsigned long __user *nm = NULL;
1463 	unsigned long nr_bits, alloc_size;
1464 	nodemask_t bm;
1465 
1466 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1467 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1468 
1469 	if (nmask) {
1470 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1471 		nm = compat_alloc_user_space(alloc_size);
1472 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1473 	}
1474 
1475 	if (err)
1476 		return -EFAULT;
1477 
1478 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1479 }
1480 
1481 #endif
1482 
1483 /*
1484  * get_vma_policy(@task, @vma, @addr)
1485  * @task - task for fallback if vma policy == default
1486  * @vma   - virtual memory area whose policy is sought
1487  * @addr  - address in @vma for shared policy lookup
1488  *
1489  * Returns effective policy for a VMA at specified address.
1490  * Falls back to @task or system default policy, as necessary.
1491  * Current or other task's task mempolicy and non-shared vma policies
1492  * are protected by the task's mmap_sem, which must be held for read by
1493  * the caller.
1494  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1495  * count--added by the get_policy() vm_op, as appropriate--to protect against
1496  * freeing by another task.  It is the caller's responsibility to free the
1497  * extra reference for shared policies.
1498  */
1499 struct mempolicy *get_vma_policy(struct task_struct *task,
1500 		struct vm_area_struct *vma, unsigned long addr)
1501 {
1502 	struct mempolicy *pol = task->mempolicy;
1503 
1504 	if (vma) {
1505 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1506 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1507 									addr);
1508 			if (vpol)
1509 				pol = vpol;
1510 		} else if (vma->vm_policy)
1511 			pol = vma->vm_policy;
1512 	}
1513 	if (!pol)
1514 		pol = &default_policy;
1515 	return pol;
1516 }
1517 
1518 /*
1519  * Return a nodemask representing a mempolicy for filtering nodes for
1520  * page allocation
1521  */
1522 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1523 {
1524 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1525 	if (unlikely(policy->mode == MPOL_BIND) &&
1526 			gfp_zone(gfp) >= policy_zone &&
1527 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1528 		return &policy->v.nodes;
1529 
1530 	return NULL;
1531 }
1532 
1533 /* Return a zonelist, indicated by gfp, for the node selected by a mempolicy */
1534 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1535 	int nd)
1536 {
1537 	switch (policy->mode) {
1538 	case MPOL_PREFERRED:
1539 		if (!(policy->flags & MPOL_F_LOCAL))
1540 			nd = policy->v.preferred_node;
1541 		break;
1542 	case MPOL_BIND:
1543 		/*
1544 		 * Normally, MPOL_BIND allocations are node-local within the
1545 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1546 		 * current node isn't part of the mask, we use the zonelist for
1547 		 * the first node in the mask instead.
1548 		 */
1549 		if (unlikely(gfp & __GFP_THISNODE) &&
1550 				unlikely(!node_isset(nd, policy->v.nodes)))
1551 			nd = first_node(policy->v.nodes);
1552 		break;
1553 	default:
1554 		BUG();
1555 	}
1556 	return node_zonelist(nd, gfp);
1557 }
1558 
1559 /* Do dynamic interleaving for a process */
1560 static unsigned interleave_nodes(struct mempolicy *policy)
1561 {
1562 	unsigned nid, next;
1563 	struct task_struct *me = current;
1564 
1565 	nid = me->il_next;
1566 	next = next_node(nid, policy->v.nodes);
1567 	if (next >= MAX_NUMNODES)
1568 		next = first_node(policy->v.nodes);
1569 	if (next < MAX_NUMNODES)
1570 		me->il_next = next;
1571 	return nid;
1572 }
1573 
1574 /*
1575  * Depending on the memory policy, provide a node from which to allocate the
1576  * next slab entry.
1577  * @policy must be protected from freeing by the caller.  If @policy is
1578  * the current task's mempolicy, this protection is implicit, as only the
1579  * task can change its policy.  The system default policy requires no
1580  * such protection.
1581  */
1582 unsigned slab_node(struct mempolicy *policy)
1583 {
1584 	if (!policy || policy->flags & MPOL_F_LOCAL)
1585 		return numa_node_id();
1586 
1587 	switch (policy->mode) {
1588 	case MPOL_PREFERRED:
1589 		/*
1590 		 * handled MPOL_F_LOCAL above
1591 		 */
1592 		return policy->v.preferred_node;
1593 
1594 	case MPOL_INTERLEAVE:
1595 		return interleave_nodes(policy);
1596 
1597 	case MPOL_BIND: {
1598 		/*
1599 		 * Follow bind policy behavior and start allocation at the
1600 		 * first node.
1601 		 */
1602 		struct zonelist *zonelist;
1603 		struct zone *zone;
1604 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1605 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1606 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
1607 							&policy->v.nodes,
1608 							&zone);
1609 		return zone ? zone->node : numa_node_id();
1610 	}
1611 
1612 	default:
1613 		BUG();
1614 	}
1615 }
1616 
1617 /* Do static interleaving for a VMA with known offset. */
1618 static unsigned offset_il_node(struct mempolicy *pol,
1619 		struct vm_area_struct *vma, unsigned long off)
1620 {
1621 	unsigned nnodes = nodes_weight(pol->v.nodes);
1622 	unsigned target;
1623 	int c;
1624 	int nid = -1;
1625 
1626 	if (!nnodes)
1627 		return numa_node_id();
1628 	target = (unsigned int)off % nnodes;
1629 	c = 0;
1630 	do {
1631 		nid = next_node(nid, pol->v.nodes);
1632 		c++;
1633 	} while (c <= target);
1634 	return nid;
1635 }
1636 
1637 /* Determine a node number for interleave */
1638 static inline unsigned interleave_nid(struct mempolicy *pol,
1639 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1640 {
1641 	if (vma) {
1642 		unsigned long off;
1643 
1644 		/*
1645 		 * for small pages, there is no difference between
1646 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
1647 		 * for huge pages, since vm_pgoff is in units of small
1648 		 * pages, we need to shift off the always 0 bits to get
1649 		 * a useful offset.
1650 		 */
1651 		BUG_ON(shift < PAGE_SHIFT);
1652 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1653 		off += (addr - vma->vm_start) >> shift;
1654 		return offset_il_node(pol, vma, off);
1655 	} else
1656 		return interleave_nodes(pol);
1657 }
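/*
 * Illustrative example for offset_il_node()/interleave_nid() above: with an
 * interleave mask of {1,3,5} and a computed offset of 7, offset_il_node()
 * takes 7 % 3 = 1 and walks to the second node in the mask, so the page comes
 * from node 3.  Because the node is a pure function of the offset, the same
 * offset into the backing object always maps to the same node across faults.
 */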
1658 
1659 /*
1660  * Return the bit number of a random bit set in the nodemask.
1661  * (returns -1 if nodemask is empty)
1662  */
1663 int node_random(const nodemask_t *maskp)
1664 {
1665 	int w, bit = -1;
1666 
1667 	w = nodes_weight(*maskp);
1668 	if (w)
1669 		bit = bitmap_ord_to_pos(maskp->bits,
1670 			get_random_int() % w, MAX_NUMNODES);
1671 	return bit;
1672 }
1673 
1674 #ifdef CONFIG_HUGETLBFS
1675 /*
1676  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1677  * @vma = virtual memory area whose policy is sought
1678  * @addr = address in @vma for shared policy lookup and interleave policy
1679  * @gfp_flags = for requested zone
1680  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1681  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1682  *
1683  * Returns a zonelist suitable for a huge page allocation and a pointer
1684  * to the struct mempolicy for conditional unref after allocation.
1685  * If the effective policy is 'bind', returns a pointer to the mempolicy's
1686  * @nodemask for filtering the zonelist.
1687  *
1688  * Must be protected by get_mems_allowed()
1689  */
1690 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1691 				gfp_t gfp_flags, struct mempolicy **mpol,
1692 				nodemask_t **nodemask)
1693 {
1694 	struct zonelist *zl;
1695 
1696 	*mpol = get_vma_policy(current, vma, addr);
1697 	*nodemask = NULL;	/* assume !MPOL_BIND */
1698 
1699 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1700 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1701 				huge_page_shift(hstate_vma(vma))), gfp_flags);
1702 	} else {
1703 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1704 		if ((*mpol)->mode == MPOL_BIND)
1705 			*nodemask = &(*mpol)->v.nodes;
1706 	}
1707 	return zl;
1708 }
1709 
1710 /*
1711  * init_nodemask_of_mempolicy
1712  *
1713  * If the current task's mempolicy is "default" [NULL], return 'false'
1714  * to indicate default policy.  Otherwise, extract the policy nodemask
1715  * for 'bind' or 'interleave' policy into the argument nodemask, or
1716  * initialize the argument nodemask to contain the single node for
1717  * 'preferred' or 'local' policy and return 'true' to indicate presence
1718  * of non-default mempolicy.
1719  *
1720  * We don't bother with reference counting the mempolicy [mpol_get/put]
1721  * because the current task is examining its own mempolicy and a task's
1722  * mempolicy is only ever changed by the task itself.
1723  *
1724  * N.B., it is the caller's responsibility to free a returned nodemask.
1725  */
1726 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1727 {
1728 	struct mempolicy *mempolicy;
1729 	int nid;
1730 
1731 	if (!(mask && current->mempolicy))
1732 		return false;
1733 
1734 	task_lock(current);
1735 	mempolicy = current->mempolicy;
1736 	switch (mempolicy->mode) {
1737 	case MPOL_PREFERRED:
1738 		if (mempolicy->flags & MPOL_F_LOCAL)
1739 			nid = numa_node_id();
1740 		else
1741 			nid = mempolicy->v.preferred_node;
1742 		init_nodemask_of_node(mask, nid);
1743 		break;
1744 
1745 	case MPOL_BIND:
1746 		/* Fall through */
1747 	case MPOL_INTERLEAVE:
1748 		*mask =  mempolicy->v.nodes;
1749 		break;
1750 
1751 	default:
1752 		BUG();
1753 	}
1754 	task_unlock(current);
1755 
1756 	return true;
1757 }
1758 #endif
1759 
1760 /*
1761  * mempolicy_nodemask_intersects
1762  *
1763  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1764  * policy.  Otherwise, check for intersection between mask and the policy
1765  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1766  * policy, always return true since it may allocate elsewhere on fallback.
1767  *
1768  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1769  */
1770 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1771 					const nodemask_t *mask)
1772 {
1773 	struct mempolicy *mempolicy;
1774 	bool ret = true;
1775 
1776 	if (!mask)
1777 		return ret;
1778 	task_lock(tsk);
1779 	mempolicy = tsk->mempolicy;
1780 	if (!mempolicy)
1781 		goto out;
1782 
1783 	switch (mempolicy->mode) {
1784 	case MPOL_PREFERRED:
1785 		/*
1786 		 * MPOL_PREFERRED and MPOL_F_LOCAL only specify preferred nodes to
1787 		 * allocate from; the task may fall back to other nodes when OOM.
1788 		 * Thus, it's possible for tsk to have allocated memory from
1789 		 * nodes in mask.
1790 		 */
1791 		break;
1792 	case MPOL_BIND:
1793 	case MPOL_INTERLEAVE:
1794 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
1795 		break;
1796 	default:
1797 		BUG();
1798 	}
1799 out:
1800 	task_unlock(tsk);
1801 	return ret;
1802 }
1803 
1804 /* Allocate a page under the interleave policy.
1805    Own path because it needs to do special accounting. */
1806 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1807 					unsigned nid)
1808 {
1809 	struct zonelist *zl;
1810 	struct page *page;
1811 
1812 	zl = node_zonelist(nid, gfp);
1813 	page = __alloc_pages(gfp, order, zl);
1814 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1815 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1816 	return page;
1817 }
1818 
1819 /**
1820  * 	alloc_pages_vma	- Allocate a page for a VMA.
1821  *
1822  * 	@gfp:
1823  *      %GFP_USER    user allocation.
1824  *      %GFP_KERNEL  kernel allocations,
1825  *      %GFP_HIGHMEM highmem/user allocations,
1826  *      %GFP_FS      allocation should not call back into a file system.
1827  *      %GFP_ATOMIC  don't sleep.
1828  *
1829  *	@order: Order of the GFP allocation.
1830  * 	@vma:  Pointer to VMA or NULL if not available.
1831  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
 *	@node: Which node to prefer for allocation (modulo policy).
1832  *
1833  * 	This function allocates a page from the kernel page pool and applies
1834  *	a NUMA policy associated with the VMA or the current process.
1835  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
1836  *	mm_struct of the VMA to prevent it from going away. Should be used for
1837  *	all allocations for pages that will be mapped into
1838  * 	user space. Returns NULL when no page can be allocated.
1839  *
1840  *	Should be called with the mmap_sem of the vma held.
1841  */
1842 struct page *
1843 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1844 		unsigned long addr, int node)
1845 {
1846 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
1847 	struct zonelist *zl;
1848 	struct page *page;
1849 
1850 	get_mems_allowed();
1851 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1852 		unsigned nid;
1853 
1854 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1855 		mpol_cond_put(pol);
1856 		page = alloc_page_interleave(gfp, order, nid);
1857 		put_mems_allowed();
1858 		return page;
1859 	}
1860 	zl = policy_zonelist(gfp, pol, node);
1861 	if (unlikely(mpol_needs_cond_ref(pol))) {
1862 		/*
1863 		 * slow path: ref counted shared policy
1864 		 */
1865 		struct page *page = __alloc_pages_nodemask(gfp, order,
1866 						zl, policy_nodemask(gfp, pol));
1867 		__mpol_put(pol);
1868 		put_mems_allowed();
1869 		return page;
1870 	}
1871 	/*
1872 	 * fast path:  default or task policy
1873 	 */
1874 	page = __alloc_pages_nodemask(gfp, order, zl,
1875 				      policy_nodemask(gfp, pol));
1876 	put_mems_allowed();
1877 	return page;
1878 }
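/*
 * Example (illustrative sketch): a typical fault-path style caller.  The
 * caller holds down_read(&mm->mmap_sem) so that @vma cannot go away under
 * the allocation.  alloc_page_vma() is assumed to be the usual order-0
 * convenience wrapper around alloc_pages_vma().
 *
 *	down_read(&mm->mmap_sem);
 *	...
 *	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 *	if (!page)
 *		goto oom;
 *	...
 *	up_read(&mm->mmap_sem);
 */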
1879 
1880 /**
1881  * 	alloc_pages_current - Allocate pages.
1882  *
1883  *	@gfp:
1884  *		%GFP_USER   user allocation,
1885  *      	%GFP_KERNEL kernel allocation,
1886  *      	%GFP_HIGHMEM highmem allocation,
1887  *      	%GFP_FS     don't call back into a file system.
1888  *      	%GFP_ATOMIC don't sleep.
1889  *	@order: Order of the allocation (log2 of the size in pages). 0 is a single page.
1890  *
1891  *	Allocate a page from the kernel page pool.  When not in
1892  *	interrupt context, apply the current process' NUMA policy.
1893  *	Returns NULL when no page can be allocated.
1894  *
1895  *	Don't call cpuset_update_task_memory_state() unless
1896  *	1) it's ok to take cpuset_sem (can WAIT), and
1897  *	2) allocating for current task (not interrupt).
1898  */
1899 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1900 {
1901 	struct mempolicy *pol = current->mempolicy;
1902 	struct page *page;
1903 
1904 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1905 		pol = &default_policy;
1906 
1907 	get_mems_allowed();
1908 	/*
1909 	 * No reference counting needed for current->mempolicy
1910 	 * nor system default_policy
1911 	 */
1912 	if (pol->mode == MPOL_INTERLEAVE)
1913 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1914 	else
1915 		page = __alloc_pages_nodemask(gfp, order,
1916 				policy_zonelist(gfp, pol, numa_node_id()),
1917 				policy_nodemask(gfp, pol));
1918 	put_mems_allowed();
1919 	return page;
1920 }
1921 EXPORT_SYMBOL(alloc_pages_current);
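/*
 * Example (illustrative): with CONFIG_NUMA the generic alloc_pages() and
 * alloc_page() helpers resolve to alloc_pages_current(), so an ordinary
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 * is interleaved, bound or preferred according to current->mempolicy,
 * unless the caller passed __GFP_THISNODE or is in interrupt context, in
 * which case the system default policy is used instead.
 */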
1922 
1923 /*
1924  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1925  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
1926  * with the mems_allowed returned by cpuset_mems_allowed().  This
1927  * keeps mempolicies cpuset relative after its cpuset moves.  See
1928  * further kernel/cpuset.c update_nodemask().
1929  *
1930  * current's mempolicy may be rebound by the other task (the task that changes
1931  * the cpuset's mems), so we needn't do rebind work for the current task.
1932  */
1933 
1934 /* Slow path of a mempolicy duplicate */
1935 struct mempolicy *__mpol_dup(struct mempolicy *old)
1936 {
1937 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1938 
1939 	if (!new)
1940 		return ERR_PTR(-ENOMEM);
1941 
1942 	/* task's mempolicy is protected by alloc_lock */
1943 	if (old == current->mempolicy) {
1944 		task_lock(current);
1945 		*new = *old;
1946 		task_unlock(current);
1947 	} else
1948 		*new = *old;
1949 
1950 	rcu_read_lock();
1951 	if (current_cpuset_is_being_rebound()) {
1952 		nodemask_t mems = cpuset_mems_allowed(current);
1953 		if (new->flags & MPOL_F_REBINDING)
1954 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1955 		else
1956 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1957 	}
1958 	rcu_read_unlock();
1959 	atomic_set(&new->refcnt, 1);
1960 	return new;
1961 }
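/*
 * Example (illustrative sketch): callers normally go through the
 * NULL-checking mpol_dup() wrapper (assumed to live in <linux/mempolicy.h>),
 * e.g. the fork path giving the child its own reference-counted copy:
 *
 *	p->mempolicy = mpol_dup(p->mempolicy);
 *	if (IS_ERR(p->mempolicy))
 *		return PTR_ERR(p->mempolicy);
 */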
1962 
1963 /*
1964  * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1965  * eliminate the MPOL_F_* flags that require conditional ref and
1966  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
1967  * after return.  Use the returned value.
1968  *
1969  * Allows use of a mempolicy for, e.g., multiple allocations with a single
1970  * policy lookup, even if the policy needs/has extra ref on lookup.
1971  * shmem_readahead needs this.
1972  */
1973 struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1974 						struct mempolicy *frompol)
1975 {
1976 	if (!mpol_needs_cond_ref(frompol))
1977 		return frompol;
1978 
1979 	*tompol = *frompol;
1980 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
1981 	__mpol_put(frompol);
1982 	return tompol;
1983 }
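/*
 * Example (illustrative sketch): the intended calling pattern, via a
 * mpol_cond_copy() style wrapper, for making several allocations from a
 * single shared-policy lookup.  'info' and 'index' stand in for a shmem
 * inode's shared policy and page index:
 *
 *	struct mempolicy mpol, *spol;
 *
 *	spol = mpol_cond_copy(&mpol,
 *			mpol_shared_policy_lookup(&info->policy, index));
 *	// use spol for the allocations; any conditional ref was already
 *	// dropped by the copy, so no mpol_put() is needed afterwards
 */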
1984 
1985 /* Slow path of a mempolicy comparison */
1986 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1987 {
1988 	if (!a || !b)
1989 		return false;
1990 	if (a->mode != b->mode)
1991 		return false;
1992 	if (a->flags != b->flags)
1993 		return false;
1994 	if (mpol_store_user_nodemask(a))
1995 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1996 			return false;
1997 
1998 	switch (a->mode) {
1999 	case MPOL_BIND:
2000 		/* Fall through */
2001 	case MPOL_INTERLEAVE:
2002 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2003 	case MPOL_PREFERRED:
2004 		return a->v.preferred_node == b->v.preferred_node;
2005 	default:
2006 		BUG();
2007 		return false;
2008 	}
2009 }
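/*
 * Example (illustrative sketch): mpol_equal() (assumed to be the inline
 * wrapper that short-circuits on pointer equality before calling
 * __mpol_equal()) is what VMA merging uses to decide whether two adjacent
 * mappings carry compatible policies:
 *
 *	if (mpol_equal(vma_policy(prev), policy))
 *		// the mempolicies do not prevent merging prev with the
 *		// new mapping
 */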
2010 
2011 /*
2012  * Shared memory backing store policy support.
2013  *
2014  * Remember policies even when nobody has shared memory mapped.
2015  * The policies are kept in Red-Black tree linked from the inode.
2016  * They are protected by the sp->lock spinlock, which should be held
2017  * for any accesses to the tree.
2018  */
2019 
2020 /* lookup first element intersecting start-end */
2021 /* Caller holds sp->lock */
2022 static struct sp_node *
2023 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2024 {
2025 	struct rb_node *n = sp->root.rb_node;
2026 
2027 	while (n) {
2028 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2029 
2030 		if (start >= p->end)
2031 			n = n->rb_right;
2032 		else if (end <= p->start)
2033 			n = n->rb_left;
2034 		else
2035 			break;
2036 	}
2037 	if (!n)
2038 		return NULL;
2039 	for (;;) {
2040 		struct sp_node *w = NULL;
2041 		struct rb_node *prev = rb_prev(n);
2042 		if (!prev)
2043 			break;
2044 		w = rb_entry(prev, struct sp_node, nd);
2045 		if (w->end <= start)
2046 			break;
2047 		n = prev;
2048 	}
2049 	return rb_entry(n, struct sp_node, nd);
2050 }
2051 
2052 /* Insert a new shared policy into the list. */
2053 /* Caller holds sp->lock */
2054 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2055 {
2056 	struct rb_node **p = &sp->root.rb_node;
2057 	struct rb_node *parent = NULL;
2058 	struct sp_node *nd;
2059 
2060 	while (*p) {
2061 		parent = *p;
2062 		nd = rb_entry(parent, struct sp_node, nd);
2063 		if (new->start < nd->start)
2064 			p = &(*p)->rb_left;
2065 		else if (new->end > nd->end)
2066 			p = &(*p)->rb_right;
2067 		else
2068 			BUG();
2069 	}
2070 	rb_link_node(&new->nd, parent, p);
2071 	rb_insert_color(&new->nd, &sp->root);
2072 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2073 		 new->policy ? new->policy->mode : 0);
2074 }
2075 
2076 /* Find shared policy intersecting idx */
2077 struct mempolicy *
2078 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2079 {
2080 	struct mempolicy *pol = NULL;
2081 	struct sp_node *sn;
2082 
2083 	if (!sp->root.rb_node)
2084 		return NULL;
2085 	spin_lock(&sp->lock);
2086 	sn = sp_lookup(sp, idx, idx+1);
2087 	if (sn) {
2088 		mpol_get(sn->policy);
2089 		pol = sn->policy;
2090 	}
2091 	spin_unlock(&sp->lock);
2092 	return pol;
2093 }
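/*
 * Example (illustrative sketch): how a shared memory filesystem such as
 * tmpfs might consult the per-inode tree when allocating a page for file
 * index 'index' ('info' is a placeholder for the inode's private data):
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, index);
 *	// NULL means "no shared policy, use the default/process policy";
 *	// otherwise pol carries a conditional ref (MPOL_F_SHARED) which the
 *	// caller must drop, e.g. via mpol_cond_put() or __mpol_cond_copy().
 */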
2094 
2095 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2096 {
2097 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2098 	rb_erase(&n->nd, &sp->root);
2099 	mpol_put(n->policy);
2100 	kmem_cache_free(sn_cache, n);
2101 }
2102 
2103 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2104 				struct mempolicy *pol)
2105 {
2106 	struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2107 
2108 	if (!n)
2109 		return NULL;
2110 	n->start = start;
2111 	n->end = end;
2112 	mpol_get(pol);
2113 	pol->flags |= MPOL_F_SHARED;	/* for unref */
2114 	n->policy = pol;
2115 	return n;
2116 }
2117 
2118 /* Replace a policy range. */
2119 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2120 				 unsigned long end, struct sp_node *new)
2121 {
2122 	struct sp_node *n, *new2 = NULL;
2123 
2124 restart:
2125 	spin_lock(&sp->lock);
2126 	n = sp_lookup(sp, start, end);
2127 	/* Take care of old policies in the same range. */
2128 	while (n && n->start < end) {
2129 		struct rb_node *next = rb_next(&n->nd);
2130 		if (n->start >= start) {
2131 			if (n->end <= end)
2132 				sp_delete(sp, n);
2133 			else
2134 				n->start = end;
2135 		} else {
2136 			/* Old policy spanning whole new range. */
2137 			if (n->end > end) {
2138 				if (!new2) {
2139 					spin_unlock(&sp->lock);
2140 					new2 = sp_alloc(end, n->end, n->policy);
2141 					if (!new2)
2142 						return -ENOMEM;
2143 					goto restart;
2144 				}
2145 				n->end = start;
2146 				sp_insert(sp, new2);
2147 				new2 = NULL;
2148 				break;
2149 			} else
2150 				n->end = start;
2151 		}
2152 		if (!next)
2153 			break;
2154 		n = rb_entry(next, struct sp_node, nd);
2155 	}
2156 	if (new)
2157 		sp_insert(sp, new);
2158 	spin_unlock(&sp->lock);
2159 	if (new2) {
2160 		mpol_put(new2->policy);
2161 		kmem_cache_free(sn_cache, new2);
2162 	}
2163 	return 0;
2164 }
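/*
 * Worked example (illustrative): replacing range [2,6) in a tree holding a
 * single node that spans [0,8).  The old node is split around the new one:
 *
 *	before:   [0----------------8) old
 *	replace:        [2----6) new
 *	after:    [0-2) old  [2----6) new  [6--8) new2 (copy of old policy)
 *
 * new2 has to be allocated outside sp->lock (sp_alloc() uses GFP_KERNEL),
 * hence the unlock/restart dance when the split is first discovered.
 */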
2165 
2166 /**
2167  * mpol_shared_policy_init - initialize shared policy for inode
2168  * @sp: pointer to inode shared policy
2169  * @mpol:  struct mempolicy to install
2170  *
2171  * Install non-NULL @mpol in inode's shared policy rb-tree.
2172  * On entry, the current task has a reference on a non-NULL @mpol.
2173  * This must be released on exit.
2174  * This is called at get_inode() time, so we can use GFP_KERNEL.
2175  */
2176 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2177 {
2178 	int ret;
2179 
2180 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2181 	spin_lock_init(&sp->lock);
2182 
2183 	if (mpol) {
2184 		struct vm_area_struct pvma;
2185 		struct mempolicy *new;
2186 		NODEMASK_SCRATCH(scratch);
2187 
2188 		if (!scratch)
2189 			goto put_mpol;
2190 		/* contextualize the tmpfs mount point mempolicy */
2191 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2192 		if (IS_ERR(new))
2193 			goto free_scratch; /* no valid nodemask intersection */
2194 
2195 		task_lock(current);
2196 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2197 		task_unlock(current);
2198 		if (ret)
2199 			goto put_new;
2200 
2201 		/* Create pseudo-vma that contains just the policy */
2202 		memset(&pvma, 0, sizeof(struct vm_area_struct));
2203 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2204 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2205 
2206 put_new:
2207 		mpol_put(new);			/* drop initial ref */
2208 free_scratch:
2209 		NODEMASK_SCRATCH_FREE(scratch);
2210 put_mpol:
2211 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2212 	}
2213 }
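/*
 * Example (illustrative sketch): tmpfs is the main user of this hook.  When
 * a new inode is created it hands over the mount's mpol= policy; the
 * reference is consumed here whether or not installation succeeds:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * 'info', 'sbinfo' and shmem_get_sbmpol() are shmem-side names, shown here
 * only for illustration.
 */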
2214 
2215 int mpol_set_shared_policy(struct shared_policy *info,
2216 			struct vm_area_struct *vma, struct mempolicy *npol)
2217 {
2218 	int err;
2219 	struct sp_node *new = NULL;
2220 	unsigned long sz = vma_pages(vma);
2221 
2222 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2223 		 vma->vm_pgoff,
2224 		 sz, npol ? npol->mode : -1,
2225 		 npol ? npol->flags : -1,
2226 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2227 
2228 	if (npol) {
2229 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2230 		if (!new)
2231 			return -ENOMEM;
2232 	}
2233 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2234 	if (err && new)
2235 		kmem_cache_free(sn_cache, new);
2236 	return err;
2237 }
2238 
2239 /* Free a backing policy store on inode delete. */
2240 void mpol_free_shared_policy(struct shared_policy *p)
2241 {
2242 	struct sp_node *n;
2243 	struct rb_node *next;
2244 
2245 	if (!p->root.rb_node)
2246 		return;
2247 	spin_lock(&p->lock);
2248 	next = rb_first(&p->root);
2249 	while (next) {
2250 		n = rb_entry(next, struct sp_node, nd);
2251 		next = rb_next(&n->nd);
2252 		rb_erase(&n->nd, &p->root);
2253 		mpol_put(n->policy);
2254 		kmem_cache_free(sn_cache, n);
2255 	}
2256 	spin_unlock(&p->lock);
2257 }
2258 
2259 /* assumes fs == KERNEL_DS */
2260 void __init numa_policy_init(void)
2261 {
2262 	nodemask_t interleave_nodes;
2263 	unsigned long largest = 0;
2264 	int nid, prefer = 0;
2265 
2266 	policy_cache = kmem_cache_create("numa_policy",
2267 					 sizeof(struct mempolicy),
2268 					 0, SLAB_PANIC, NULL);
2269 
2270 	sn_cache = kmem_cache_create("shared_policy_node",
2271 				     sizeof(struct sp_node),
2272 				     0, SLAB_PANIC, NULL);
2273 
2274 	/*
2275 	 * Set interleaving policy for system init. Interleaving is only
2276 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2277 	 * back to the largest node if they're all smaller.
2278 	 */
2279 	nodes_clear(interleave_nodes);
2280 	for_each_node_state(nid, N_HIGH_MEMORY) {
2281 		unsigned long total_pages = node_present_pages(nid);
2282 
2283 		/* Preserve the largest node */
2284 		if (largest < total_pages) {
2285 			largest = total_pages;
2286 			prefer = nid;
2287 		}
2288 
2289 		/* Interleave this node? */
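		/* e.g. with 4 KiB pages, 16MB corresponds to 4096 present pages */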
2290 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2291 			node_set(nid, interleave_nodes);
2292 	}
2293 
2294 	/* All too small, use the largest */
2295 	if (unlikely(nodes_empty(interleave_nodes)))
2296 		node_set(prefer, interleave_nodes);
2297 
2298 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2299 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
2300 }
2301 
2302 /* Reset policy of current process to default */
2303 void numa_default_policy(void)
2304 {
2305 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2306 }
2307 
2308 /*
2309  * Parse and format mempolicy from/to strings
2310  */
2311 
2312 /*
2313  * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2314  * Used only for mpol_parse_str() and mpol_to_str()
2315  */
2316 #define MPOL_LOCAL MPOL_MAX
2317 static const char * const policy_modes[] =
2318 {
2319 	[MPOL_DEFAULT]    = "default",
2320 	[MPOL_PREFERRED]  = "prefer",
2321 	[MPOL_BIND]       = "bind",
2322 	[MPOL_INTERLEAVE] = "interleave",
2323 	[MPOL_LOCAL]      = "local"
2324 };
2325 
2326 
2327 #ifdef CONFIG_TMPFS
2328 /**
2329  * mpol_parse_str - parse string to mempolicy
2330  * @str:  string containing mempolicy to parse
2331  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2332  * @no_context:  flag whether to "contextualize" the mempolicy
2333  *
2334  * Format of input:
2335  *	<mode>[=<flags>][:<nodelist>]
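 *
 *	e.g. (illustrative): "bind:0-3", "interleave=static:0,2",
 *	"prefer:1", "local", "default"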
2336  *
2337  * if @no_context is true, save the input nodemask in w.user_nodemask in
2338  * the returned mempolicy.  This will be used to "clone" the mempolicy in
2339  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2340  * mount option.  Note that if 'static' or 'relative' mode flags were
2341  * specified, the input nodemask will already have been saved.  Saving
2342  * it again is redundant, but safe.
2343  *
2344  * On success, returns 0, else 1
2345  */
2346 int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2347 {
2348 	struct mempolicy *new = NULL;
2349 	unsigned short mode;
2350 	unsigned short uninitialized_var(mode_flags);
2351 	nodemask_t nodes;
2352 	char *nodelist = strchr(str, ':');
2353 	char *flags = strchr(str, '=');
2354 	int err = 1;
2355 
2356 	if (nodelist) {
2357 		/* NUL-terminate mode or flags string */
2358 		*nodelist++ = '\0';
2359 		if (nodelist_parse(nodelist, nodes))
2360 			goto out;
2361 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2362 			goto out;
2363 	} else
2364 		nodes_clear(nodes);
2365 
2366 	if (flags)
2367 		*flags++ = '\0';	/* terminate mode string */
2368 
2369 	for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2370 		if (!strcmp(str, policy_modes[mode])) {
2371 			break;
2372 		}
2373 	}
2374 	if (mode > MPOL_LOCAL)
2375 		goto out;
2376 
2377 	switch (mode) {
2378 	case MPOL_PREFERRED:
2379 		/*
2380 		 * Insist on a nodelist of one node only
2381 		 */
2382 		if (nodelist) {
2383 			char *rest = nodelist;
2384 			while (isdigit(*rest))
2385 				rest++;
2386 			if (*rest)
2387 				goto out;
2388 		}
2389 		break;
2390 	case MPOL_INTERLEAVE:
2391 		/*
2392 		 * Default to online nodes with memory if no nodelist
2393 		 */
2394 		if (!nodelist)
2395 			nodes = node_states[N_HIGH_MEMORY];
2396 		break;
2397 	case MPOL_LOCAL:
2398 		/*
2399 		 * Don't allow a nodelist;  mpol_new() checks flags
2400 		 */
2401 		if (nodelist)
2402 			goto out;
2403 		mode = MPOL_PREFERRED;
2404 		break;
2405 	case MPOL_DEFAULT:
2406 		/*
2407 		 * Insist on an empty nodelist
2408 		 */
2409 		if (!nodelist)
2410 			err = 0;
2411 		goto out;
2412 	case MPOL_BIND:
2413 		/*
2414 		 * Insist on a nodelist
2415 		 */
2416 		if (!nodelist)
2417 			goto out;
2418 	}
2419 
2420 	mode_flags = 0;
2421 	if (flags) {
2422 		/*
2423 		 * Currently, we only support two mutually exclusive
2424 		 * mode flags.
2425 		 */
2426 		if (!strcmp(flags, "static"))
2427 			mode_flags |= MPOL_F_STATIC_NODES;
2428 		else if (!strcmp(flags, "relative"))
2429 			mode_flags |= MPOL_F_RELATIVE_NODES;
2430 		else
2431 			goto out;
2432 	}
2433 
2434 	new = mpol_new(mode, mode_flags, &nodes);
2435 	if (IS_ERR(new))
2436 		goto out;
2437 
2438 	if (no_context) {
2439 		/* save for contextualization */
2440 		new->w.user_nodemask = nodes;
2441 	} else {
2442 		int ret;
2443 		NODEMASK_SCRATCH(scratch);
2444 		if (scratch) {
2445 			task_lock(current);
2446 			ret = mpol_set_nodemask(new, &nodes, scratch);
2447 			task_unlock(current);
2448 		} else
2449 			ret = -ENOMEM;
2450 		NODEMASK_SCRATCH_FREE(scratch);
2451 		if (ret) {
2452 			mpol_put(new);
2453 			goto out;
2454 		}
2455 	}
2456 	err = 0;
2457 
2458 out:
2459 	/* Restore string for error message */
2460 	if (nodelist)
2461 		*--nodelist = ':';
2462 	if (flags)
2463 		*--flags = '=';
2464 	if (!err)
2465 		*mpol = new;
2466 	return err;
2467 }
2468 #endif /* CONFIG_TMPFS */
2469 
2470 /**
2471  * mpol_to_str - format a mempolicy structure for printing
2472  * @buffer:  to contain formatted mempolicy string
2473  * @maxlen:  length of @buffer
2474  * @pol:  pointer to mempolicy to be formatted
2475  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2476  *
2477  * Convert a mempolicy into a string.
2478  * Returns the number of characters in buffer (if positive)
2479  * or an error (negative)
2480  */
2481 int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2482 {
2483 	char *p = buffer;
2484 	int l;
2485 	nodemask_t nodes;
2486 	unsigned short mode;
2487 	unsigned short flags = pol ? pol->flags : 0;
2488 
2489 	/*
2490 	 * Sanity check:  room for longest mode, flag and some nodes
2491 	 */
2492 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2493 
2494 	if (!pol || pol == &default_policy)
2495 		mode = MPOL_DEFAULT;
2496 	else
2497 		mode = pol->mode;
2498 
2499 	switch (mode) {
2500 	case MPOL_DEFAULT:
2501 		nodes_clear(nodes);
2502 		break;
2503 
2504 	case MPOL_PREFERRED:
2505 		nodes_clear(nodes);
2506 		if (flags & MPOL_F_LOCAL)
2507 			mode = MPOL_LOCAL;	/* pseudo-policy */
2508 		else
2509 			node_set(pol->v.preferred_node, nodes);
2510 		break;
2511 
2512 	case MPOL_BIND:
2513 		/* Fall through */
2514 	case MPOL_INTERLEAVE:
2515 		if (no_context)
2516 			nodes = pol->w.user_nodemask;
2517 		else
2518 			nodes = pol->v.nodes;
2519 		break;
2520 
2521 	default:
2522 		BUG();
2523 	}
2524 
2525 	l = strlen(policy_modes[mode]);
2526 	if (buffer + maxlen < p + l + 1)
2527 		return -ENOSPC;
2528 
2529 	strcpy(p, policy_modes[mode]);
2530 	p += l;
2531 
2532 	if (flags & MPOL_MODE_FLAGS) {
2533 		if (buffer + maxlen < p + 2)
2534 			return -ENOSPC;
2535 		*p++ = '=';
2536 
2537 		/*
2538 		 * Currently, the only defined flags are mutually exclusive
2539 		 */
2540 		if (flags & MPOL_F_STATIC_NODES)
2541 			p += snprintf(p, buffer + maxlen - p, "static");
2542 		else if (flags & MPOL_F_RELATIVE_NODES)
2543 			p += snprintf(p, buffer + maxlen - p, "relative");
2544 	}
2545 
2546 	if (!nodes_empty(nodes)) {
2547 		if (buffer + maxlen < p + 2)
2548 			return -ENOSPC;
2549 		*p++ = ':';
2550 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2551 	}
2552 	return p - buffer;
2553 }
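/*
 * Example (illustrative): typical mpol_to_str() output --
 *
 *	MPOL_INTERLEAVE over nodes 0-3			"interleave:0-3"
 *	MPOL_BIND with MPOL_F_STATIC_NODES on 1,3	"bind=static:1,3"
 *	MPOL_PREFERRED with MPOL_F_LOCAL		"local"
 *	NULL / system default policy			"default"
 *
 * i.e. the same <mode>[=<flags>][:<nodelist>] grammar that
 * mpol_parse_str() accepts above.
 */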
2554