xref: /openbmc/linux/mm/mempolicy.c (revision 5606e3877ad8baea42f3a71ebde0a03622bbb551)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy an process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
291da177e4SLinus Torvalds  *                As a special case node -1 here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem kernel lowmem allocation don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always grateful with that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
681da177e4SLinus Torvalds #include <linux/mempolicy.h>
691da177e4SLinus Torvalds #include <linux/mm.h>
701da177e4SLinus Torvalds #include <linux/highmem.h>
711da177e4SLinus Torvalds #include <linux/hugetlb.h>
721da177e4SLinus Torvalds #include <linux/kernel.h>
731da177e4SLinus Torvalds #include <linux/sched.h>
741da177e4SLinus Torvalds #include <linux/nodemask.h>
751da177e4SLinus Torvalds #include <linux/cpuset.h>
761da177e4SLinus Torvalds #include <linux/slab.h>
771da177e4SLinus Torvalds #include <linux/string.h>
78b95f1b31SPaul Gortmaker #include <linux/export.h>
79b488893aSPavel Emelyanov #include <linux/nsproxy.h>
801da177e4SLinus Torvalds #include <linux/interrupt.h>
811da177e4SLinus Torvalds #include <linux/init.h>
821da177e4SLinus Torvalds #include <linux/compat.h>
83dc9aa5b9SChristoph Lameter #include <linux/swap.h>
841a75a6c8SChristoph Lameter #include <linux/seq_file.h>
851a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
86b20a3503SChristoph Lameter #include <linux/migrate.h>
8762b61f61SHugh Dickins #include <linux/ksm.h>
8895a402c3SChristoph Lameter #include <linux/rmap.h>
8986c3a764SDavid Quigley #include <linux/security.h>
90dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
91095f1fc4SLee Schermerhorn #include <linux/ctype.h>
926d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
93b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
94dc9aa5b9SChristoph Lameter 
951da177e4SLinus Torvalds #include <asm/tlbflush.h>
961da177e4SLinus Torvalds #include <asm/uaccess.h>
97778d3b0fSMichal Hocko #include <linux/random.h>
981da177e4SLinus Torvalds 
9962695a84SNick Piggin #include "internal.h"
10062695a84SNick Piggin 
10138e35860SChristoph Lameter /* Internal flags */
102dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
10338e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
104dc9aa5b9SChristoph Lameter 
105fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
106fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1071da177e4SLinus Torvalds 
1081da177e4SLinus Torvalds /* Highest zone. An specific allocation for a zone below that is not
1091da177e4SLinus Torvalds    policied. */
1106267276fSChristoph Lameter enum zone_type policy_zone = 0;
1111da177e4SLinus Torvalds 
112bea904d5SLee Schermerhorn /*
113bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
114bea904d5SLee Schermerhorn  */
115e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1161da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
117bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
118fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1191da177e4SLinus Torvalds };
1201da177e4SLinus Torvalds 
121*5606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
122*5606e387SMel Gorman 
123*5606e387SMel Gorman static struct mempolicy *get_task_policy(struct task_struct *p)
124*5606e387SMel Gorman {
125*5606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
126*5606e387SMel Gorman 	int node;
127*5606e387SMel Gorman 
128*5606e387SMel Gorman 	if (!pol) {
129*5606e387SMel Gorman 		node = numa_node_id();
130*5606e387SMel Gorman 		if (node != -1)
131*5606e387SMel Gorman 			pol = &preferred_node_policy[node];
132*5606e387SMel Gorman 
133*5606e387SMel Gorman 		/* preferred_node_policy is not initialised early in boot */
134*5606e387SMel Gorman 		if (!pol->mode)
135*5606e387SMel Gorman 			pol = NULL;
136*5606e387SMel Gorman 	}
137*5606e387SMel Gorman 
138*5606e387SMel Gorman 	return pol;
139*5606e387SMel Gorman }
140*5606e387SMel Gorman 
14137012946SDavid Rientjes static const struct mempolicy_operations {
14237012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
143708c1bbcSMiao Xie 	/*
144708c1bbcSMiao Xie 	 * If read-side task has no lock to protect task->mempolicy, write-side
145708c1bbcSMiao Xie 	 * task will rebind the task->mempolicy by two step. The first step is
146708c1bbcSMiao Xie 	 * setting all the newly nodes, and the second step is cleaning all the
147708c1bbcSMiao Xie 	 * disallowed nodes. In this way, we can avoid finding no node to alloc
148708c1bbcSMiao Xie 	 * page.
149708c1bbcSMiao Xie 	 * If we have a lock to protect task->mempolicy in read-side, we do
150708c1bbcSMiao Xie 	 * rebind directly.
151708c1bbcSMiao Xie 	 *
152708c1bbcSMiao Xie 	 * step:
153708c1bbcSMiao Xie 	 * 	MPOL_REBIND_ONCE - do rebind work at once
154708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP1 - set all the newly nodes
155708c1bbcSMiao Xie 	 * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
156708c1bbcSMiao Xie 	 */
157708c1bbcSMiao Xie 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
158708c1bbcSMiao Xie 			enum mpol_rebind_step step);
15937012946SDavid Rientjes } mpol_ops[MPOL_MAX];
16037012946SDavid Rientjes 
16119770b32SMel Gorman /* Check that the nodemask contains at least one populated zone */
16237012946SDavid Rientjes static int is_valid_nodemask(const nodemask_t *nodemask)
1631da177e4SLinus Torvalds {
16419770b32SMel Gorman 	int nd, k;
1651da177e4SLinus Torvalds 
16619770b32SMel Gorman 	for_each_node_mask(nd, *nodemask) {
16719770b32SMel Gorman 		struct zone *z;
16819770b32SMel Gorman 
16919770b32SMel Gorman 		for (k = 0; k <= policy_zone; k++) {
17019770b32SMel Gorman 			z = &NODE_DATA(nd)->node_zones[k];
171dd942ae3SAndi Kleen 			if (z->present_pages > 0)
17219770b32SMel Gorman 				return 1;
173dd942ae3SAndi Kleen 		}
174dd942ae3SAndi Kleen 	}
17519770b32SMel Gorman 
17619770b32SMel Gorman 	return 0;
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
179f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
180f5b087b5SDavid Rientjes {
1816d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1824c50bc01SDavid Rientjes }
1834c50bc01SDavid Rientjes 
1844c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1854c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1864c50bc01SDavid Rientjes {
1874c50bc01SDavid Rientjes 	nodemask_t tmp;
1884c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1894c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
190f5b087b5SDavid Rientjes }
191f5b087b5SDavid Rientjes 
19237012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
19337012946SDavid Rientjes {
19437012946SDavid Rientjes 	if (nodes_empty(*nodes))
19537012946SDavid Rientjes 		return -EINVAL;
19637012946SDavid Rientjes 	pol->v.nodes = *nodes;
19737012946SDavid Rientjes 	return 0;
19837012946SDavid Rientjes }
19937012946SDavid Rientjes 
20037012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20137012946SDavid Rientjes {
20237012946SDavid Rientjes 	if (!nodes)
203fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
20437012946SDavid Rientjes 	else if (nodes_empty(*nodes))
20537012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
20637012946SDavid Rientjes 	else
20737012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
20837012946SDavid Rientjes 	return 0;
20937012946SDavid Rientjes }
21037012946SDavid Rientjes 
21137012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
21237012946SDavid Rientjes {
21337012946SDavid Rientjes 	if (!is_valid_nodemask(nodes))
21437012946SDavid Rientjes 		return -EINVAL;
21537012946SDavid Rientjes 	pol->v.nodes = *nodes;
21637012946SDavid Rientjes 	return 0;
21737012946SDavid Rientjes }
21837012946SDavid Rientjes 
21958568d2aSMiao Xie /*
22058568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
22158568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
22258568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
22358568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
22458568d2aSMiao Xie  *
22558568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
22658568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_semaphore for write.
22758568d2aSMiao Xie  */
2284bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2294bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
23058568d2aSMiao Xie {
23158568d2aSMiao Xie 	int ret;
23258568d2aSMiao Xie 
23358568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
23458568d2aSMiao Xie 	if (pol == NULL)
23558568d2aSMiao Xie 		return 0;
2364bfc4495SKAMEZAWA Hiroyuki 	/* Check N_HIGH_MEMORY */
2374bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
2384bfc4495SKAMEZAWA Hiroyuki 		  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
23958568d2aSMiao Xie 
24058568d2aSMiao Xie 	VM_BUG_ON(!nodes);
24158568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
24258568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
24358568d2aSMiao Xie 	else {
24458568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2454bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
24658568d2aSMiao Xie 		else
2474bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2484bfc4495SKAMEZAWA Hiroyuki 
24958568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
25058568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
25158568d2aSMiao Xie 		else
25258568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
25358568d2aSMiao Xie 						cpuset_current_mems_allowed;
25458568d2aSMiao Xie 	}
25558568d2aSMiao Xie 
2564bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2574bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2584bfc4495SKAMEZAWA Hiroyuki 	else
2594bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
26058568d2aSMiao Xie 	return ret;
26158568d2aSMiao Xie }
26258568d2aSMiao Xie 
26358568d2aSMiao Xie /*
26458568d2aSMiao Xie  * This function just creates a new policy, does some check and simple
26558568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
26658568d2aSMiao Xie  */
267028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
268028fec41SDavid Rientjes 				  nodemask_t *nodes)
2691da177e4SLinus Torvalds {
2701da177e4SLinus Torvalds 	struct mempolicy *policy;
2711da177e4SLinus Torvalds 
272028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
273028fec41SDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
274140d5a49SPaul Mundt 
275a720094dSMel Gorman 	if (mode == MPOL_DEFAULT) {
2763e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27737012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
278d3a71033SLee Schermerhorn 		return NULL;
27937012946SDavid Rientjes 	}
2803e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2813e1f0645SDavid Rientjes 
2823e1f0645SDavid Rientjes 	/*
2833e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2843e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2853e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2863e1f0645SDavid Rientjes 	 */
2873e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2883e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2893e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2903e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2913e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2923e1f0645SDavid Rientjes 		}
293479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
294479e2802SPeter Zijlstra 		if (!nodes_empty(*nodes))
295479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
296479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2973e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2983e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2991da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
3001da177e4SLinus Torvalds 	if (!policy)
3011da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
3021da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30345c4745aSLee Schermerhorn 	policy->mode = mode;
30437012946SDavid Rientjes 	policy->flags = flags;
3053e1f0645SDavid Rientjes 
30637012946SDavid Rientjes 	return policy;
30737012946SDavid Rientjes }
30837012946SDavid Rientjes 
30952cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
31052cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
31152cd3b07SLee Schermerhorn {
31252cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31352cd3b07SLee Schermerhorn 		return;
31452cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31552cd3b07SLee Schermerhorn }
31652cd3b07SLee Schermerhorn 
317708c1bbcSMiao Xie static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
318708c1bbcSMiao Xie 				enum mpol_rebind_step step)
31937012946SDavid Rientjes {
32037012946SDavid Rientjes }
32137012946SDavid Rientjes 
322708c1bbcSMiao Xie /*
323708c1bbcSMiao Xie  * step:
324708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do rebind work at once
325708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly nodes
326708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
327708c1bbcSMiao Xie  */
328708c1bbcSMiao Xie static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
329708c1bbcSMiao Xie 				 enum mpol_rebind_step step)
3301d0d2680SDavid Rientjes {
3311d0d2680SDavid Rientjes 	nodemask_t tmp;
3321d0d2680SDavid Rientjes 
33337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
33437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
33537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
33637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3371d0d2680SDavid Rientjes 	else {
338708c1bbcSMiao Xie 		/*
339708c1bbcSMiao Xie 		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
340708c1bbcSMiao Xie 		 * result
341708c1bbcSMiao Xie 		 */
342708c1bbcSMiao Xie 		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
343708c1bbcSMiao Xie 			nodes_remap(tmp, pol->v.nodes,
344708c1bbcSMiao Xie 					pol->w.cpuset_mems_allowed, *nodes);
345708c1bbcSMiao Xie 			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
346708c1bbcSMiao Xie 		} else if (step == MPOL_REBIND_STEP2) {
347708c1bbcSMiao Xie 			tmp = pol->w.cpuset_mems_allowed;
34837012946SDavid Rientjes 			pol->w.cpuset_mems_allowed = *nodes;
349708c1bbcSMiao Xie 		} else
350708c1bbcSMiao Xie 			BUG();
3511d0d2680SDavid Rientjes 	}
35237012946SDavid Rientjes 
353708c1bbcSMiao Xie 	if (nodes_empty(tmp))
354708c1bbcSMiao Xie 		tmp = *nodes;
355708c1bbcSMiao Xie 
356708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
357708c1bbcSMiao Xie 		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
358708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
3591d0d2680SDavid Rientjes 		pol->v.nodes = tmp;
360708c1bbcSMiao Xie 	else
361708c1bbcSMiao Xie 		BUG();
362708c1bbcSMiao Xie 
3631d0d2680SDavid Rientjes 	if (!node_isset(current->il_next, tmp)) {
3641d0d2680SDavid Rientjes 		current->il_next = next_node(current->il_next, tmp);
3651d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
3661d0d2680SDavid Rientjes 			current->il_next = first_node(tmp);
3671d0d2680SDavid Rientjes 		if (current->il_next >= MAX_NUMNODES)
3681d0d2680SDavid Rientjes 			current->il_next = numa_node_id();
3691d0d2680SDavid Rientjes 	}
37037012946SDavid Rientjes }
37137012946SDavid Rientjes 
37237012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
373708c1bbcSMiao Xie 				  const nodemask_t *nodes,
374708c1bbcSMiao Xie 				  enum mpol_rebind_step step)
37537012946SDavid Rientjes {
37637012946SDavid Rientjes 	nodemask_t tmp;
37737012946SDavid Rientjes 
37837012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3791d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3801d0d2680SDavid Rientjes 
381fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3821d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
383fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
384fc36b8d3SLee Schermerhorn 		} else
385fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
38637012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
38737012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3881d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
389fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3901d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
39137012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
39237012946SDavid Rientjes 						   *nodes);
39337012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3941d0d2680SDavid Rientjes 	}
3951d0d2680SDavid Rientjes }
39637012946SDavid Rientjes 
397708c1bbcSMiao Xie /*
398708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
399708c1bbcSMiao Xie  *
400708c1bbcSMiao Xie  * If read-side task has no lock to protect task->mempolicy, write-side
401708c1bbcSMiao Xie  * task will rebind the task->mempolicy by two step. The first step is
402708c1bbcSMiao Xie  * setting all the newly nodes, and the second step is cleaning all the
403708c1bbcSMiao Xie  * disallowed nodes. In this way, we can avoid finding no node to alloc
404708c1bbcSMiao Xie  * page.
405708c1bbcSMiao Xie  * If we have a lock to protect task->mempolicy in read-side, we do
406708c1bbcSMiao Xie  * rebind directly.
407708c1bbcSMiao Xie  *
408708c1bbcSMiao Xie  * step:
409708c1bbcSMiao Xie  * 	MPOL_REBIND_ONCE  - do rebind work at once
410708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP1 - set all the newly nodes
411708c1bbcSMiao Xie  * 	MPOL_REBIND_STEP2 - clean all the disallowed nodes
412708c1bbcSMiao Xie  */
413708c1bbcSMiao Xie static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
414708c1bbcSMiao Xie 				enum mpol_rebind_step step)
41537012946SDavid Rientjes {
41637012946SDavid Rientjes 	if (!pol)
41737012946SDavid Rientjes 		return;
41889c522c7SWang Sheng-Hui 	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
41937012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
42037012946SDavid Rientjes 		return;
421708c1bbcSMiao Xie 
422708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
423708c1bbcSMiao Xie 		return;
424708c1bbcSMiao Xie 
425708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
426708c1bbcSMiao Xie 		BUG();
427708c1bbcSMiao Xie 
428708c1bbcSMiao Xie 	if (step == MPOL_REBIND_STEP1)
429708c1bbcSMiao Xie 		pol->flags |= MPOL_F_REBINDING;
430708c1bbcSMiao Xie 	else if (step == MPOL_REBIND_STEP2)
431708c1bbcSMiao Xie 		pol->flags &= ~MPOL_F_REBINDING;
432708c1bbcSMiao Xie 	else if (step >= MPOL_REBIND_NSTEP)
433708c1bbcSMiao Xie 		BUG();
434708c1bbcSMiao Xie 
435708c1bbcSMiao Xie 	mpol_ops[pol->mode].rebind(pol, newmask, step);
4361d0d2680SDavid Rientjes }
4371d0d2680SDavid Rientjes 
4381d0d2680SDavid Rientjes /*
4391d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
4401d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
44158568d2aSMiao Xie  *
44258568d2aSMiao Xie  * Called with task's alloc_lock held.
4431d0d2680SDavid Rientjes  */
4441d0d2680SDavid Rientjes 
445708c1bbcSMiao Xie void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
446708c1bbcSMiao Xie 			enum mpol_rebind_step step)
4471d0d2680SDavid Rientjes {
448708c1bbcSMiao Xie 	mpol_rebind_policy(tsk->mempolicy, new, step);
4491d0d2680SDavid Rientjes }
4501d0d2680SDavid Rientjes 
4511d0d2680SDavid Rientjes /*
4521d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
4531d0d2680SDavid Rientjes  *
4541d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
4551d0d2680SDavid Rientjes  */
4561d0d2680SDavid Rientjes 
4571d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
4581d0d2680SDavid Rientjes {
4591d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
4601d0d2680SDavid Rientjes 
4611d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
4621d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
463708c1bbcSMiao Xie 		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
4641d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
4651d0d2680SDavid Rientjes }
4661d0d2680SDavid Rientjes 
46737012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
46837012946SDavid Rientjes 	[MPOL_DEFAULT] = {
46937012946SDavid Rientjes 		.rebind = mpol_rebind_default,
47037012946SDavid Rientjes 	},
47137012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
47237012946SDavid Rientjes 		.create = mpol_new_interleave,
47337012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
47437012946SDavid Rientjes 	},
47537012946SDavid Rientjes 	[MPOL_PREFERRED] = {
47637012946SDavid Rientjes 		.create = mpol_new_preferred,
47737012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
47837012946SDavid Rientjes 	},
47937012946SDavid Rientjes 	[MPOL_BIND] = {
48037012946SDavid Rientjes 		.create = mpol_new_bind,
48137012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
48237012946SDavid Rientjes 	},
48337012946SDavid Rientjes };
48437012946SDavid Rientjes 
485fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
486fc301289SChristoph Lameter 				unsigned long flags);
4871a75a6c8SChristoph Lameter 
48838e35860SChristoph Lameter /* Scan through pages checking if pages follow certain conditions. */
489b5810039SNick Piggin static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
490dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
491dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
49238e35860SChristoph Lameter 		void *private)
4931da177e4SLinus Torvalds {
49491612e0dSHugh Dickins 	pte_t *orig_pte;
49591612e0dSHugh Dickins 	pte_t *pte;
496705e87c0SHugh Dickins 	spinlock_t *ptl;
497941150a3SHugh Dickins 
498705e87c0SHugh Dickins 	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
49991612e0dSHugh Dickins 	do {
5006aab341eSLinus Torvalds 		struct page *page;
50125ba77c1SAndy Whitcroft 		int nid;
50291612e0dSHugh Dickins 
50391612e0dSHugh Dickins 		if (!pte_present(*pte))
50491612e0dSHugh Dickins 			continue;
5056aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5066aab341eSLinus Torvalds 		if (!page)
50791612e0dSHugh Dickins 			continue;
508053837fcSNick Piggin 		/*
50962b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
51062b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
51162b61f61SHugh Dickins 		 * And we cannot move PageKsm pages sensibly or safely yet.
512053837fcSNick Piggin 		 */
51362b61f61SHugh Dickins 		if (PageReserved(page) || PageKsm(page))
514f4598c8bSChristoph Lameter 			continue;
5156aab341eSLinus Torvalds 		nid = page_to_nid(page);
51638e35860SChristoph Lameter 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
51738e35860SChristoph Lameter 			continue;
51838e35860SChristoph Lameter 
519b1f72d18SStephen Wilson 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
520fc301289SChristoph Lameter 			migrate_page_add(page, private, flags);
521dc9aa5b9SChristoph Lameter 		else
5221da177e4SLinus Torvalds 			break;
52391612e0dSHugh Dickins 	} while (pte++, addr += PAGE_SIZE, addr != end);
524705e87c0SHugh Dickins 	pte_unmap_unlock(orig_pte, ptl);
52591612e0dSHugh Dickins 	return addr != end;
52691612e0dSHugh Dickins }
52791612e0dSHugh Dickins 
528b5810039SNick Piggin static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
529dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
530dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
53138e35860SChristoph Lameter 		void *private)
53291612e0dSHugh Dickins {
53391612e0dSHugh Dickins 	pmd_t *pmd;
53491612e0dSHugh Dickins 	unsigned long next;
53591612e0dSHugh Dickins 
53691612e0dSHugh Dickins 	pmd = pmd_offset(pud, addr);
53791612e0dSHugh Dickins 	do {
53891612e0dSHugh Dickins 		next = pmd_addr_end(addr, end);
539bae9c19bSAndrea Arcangeli 		split_huge_page_pmd(vma->vm_mm, pmd);
5401a5a9906SAndrea Arcangeli 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
54191612e0dSHugh Dickins 			continue;
542dc9aa5b9SChristoph Lameter 		if (check_pte_range(vma, pmd, addr, next, nodes,
54338e35860SChristoph Lameter 				    flags, private))
54491612e0dSHugh Dickins 			return -EIO;
54591612e0dSHugh Dickins 	} while (pmd++, addr = next, addr != end);
54691612e0dSHugh Dickins 	return 0;
54791612e0dSHugh Dickins }
54891612e0dSHugh Dickins 
549b5810039SNick Piggin static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
550dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
551dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
55238e35860SChristoph Lameter 		void *private)
55391612e0dSHugh Dickins {
55491612e0dSHugh Dickins 	pud_t *pud;
55591612e0dSHugh Dickins 	unsigned long next;
55691612e0dSHugh Dickins 
55791612e0dSHugh Dickins 	pud = pud_offset(pgd, addr);
55891612e0dSHugh Dickins 	do {
55991612e0dSHugh Dickins 		next = pud_addr_end(addr, end);
56091612e0dSHugh Dickins 		if (pud_none_or_clear_bad(pud))
56191612e0dSHugh Dickins 			continue;
562dc9aa5b9SChristoph Lameter 		if (check_pmd_range(vma, pud, addr, next, nodes,
56338e35860SChristoph Lameter 				    flags, private))
56491612e0dSHugh Dickins 			return -EIO;
56591612e0dSHugh Dickins 	} while (pud++, addr = next, addr != end);
56691612e0dSHugh Dickins 	return 0;
56791612e0dSHugh Dickins }
56891612e0dSHugh Dickins 
569b5810039SNick Piggin static inline int check_pgd_range(struct vm_area_struct *vma,
570dc9aa5b9SChristoph Lameter 		unsigned long addr, unsigned long end,
571dc9aa5b9SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags,
57238e35860SChristoph Lameter 		void *private)
57391612e0dSHugh Dickins {
57491612e0dSHugh Dickins 	pgd_t *pgd;
57591612e0dSHugh Dickins 	unsigned long next;
57691612e0dSHugh Dickins 
577b5810039SNick Piggin 	pgd = pgd_offset(vma->vm_mm, addr);
57891612e0dSHugh Dickins 	do {
57991612e0dSHugh Dickins 		next = pgd_addr_end(addr, end);
58091612e0dSHugh Dickins 		if (pgd_none_or_clear_bad(pgd))
58191612e0dSHugh Dickins 			continue;
582dc9aa5b9SChristoph Lameter 		if (check_pud_range(vma, pgd, addr, next, nodes,
58338e35860SChristoph Lameter 				    flags, private))
58491612e0dSHugh Dickins 			return -EIO;
58591612e0dSHugh Dickins 	} while (pgd++, addr = next, addr != end);
58691612e0dSHugh Dickins 	return 0;
5871da177e4SLinus Torvalds }
5881da177e4SLinus Torvalds 
589b24f53a0SLee Schermerhorn #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
590b24f53a0SLee Schermerhorn /*
5914b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
5924b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5934b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5944b10e7d5SMel Gorman  *
5954b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5964b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5974b10e7d5SMel Gorman  * changes to the core.
598b24f53a0SLee Schermerhorn  */
5994b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6004b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
601b24f53a0SLee Schermerhorn {
6024b10e7d5SMel Gorman 	int nr_updated;
6034b10e7d5SMel Gorman 	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);
604b24f53a0SLee Schermerhorn 
6054b10e7d5SMel Gorman 	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
60603c5a6e1SMel Gorman 	if (nr_updated)
60703c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
608b24f53a0SLee Schermerhorn 
6094b10e7d5SMel Gorman 	return nr_updated;
610b24f53a0SLee Schermerhorn }
611b24f53a0SLee Schermerhorn #else
612b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
613b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
614b24f53a0SLee Schermerhorn {
615b24f53a0SLee Schermerhorn 	return 0;
616b24f53a0SLee Schermerhorn }
617b24f53a0SLee Schermerhorn #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
618b24f53a0SLee Schermerhorn 
619dc9aa5b9SChristoph Lameter /*
620dc9aa5b9SChristoph Lameter  * Check if all pages in a range are on a set of nodes.
621dc9aa5b9SChristoph Lameter  * If pagelist != NULL then isolate pages from the LRU and
622dc9aa5b9SChristoph Lameter  * put them on the pagelist.
623dc9aa5b9SChristoph Lameter  */
6241da177e4SLinus Torvalds static struct vm_area_struct *
6251da177e4SLinus Torvalds check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
62638e35860SChristoph Lameter 		const nodemask_t *nodes, unsigned long flags, void *private)
6271da177e4SLinus Torvalds {
6281da177e4SLinus Torvalds 	int err;
6291da177e4SLinus Torvalds 	struct vm_area_struct *first, *vma, *prev;
6301da177e4SLinus Torvalds 
631053837fcSNick Piggin 
6321da177e4SLinus Torvalds 	first = find_vma(mm, start);
6331da177e4SLinus Torvalds 	if (!first)
6341da177e4SLinus Torvalds 		return ERR_PTR(-EFAULT);
6351da177e4SLinus Torvalds 	prev = NULL;
6361da177e4SLinus Torvalds 	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
6375b952b3cSAndi Kleen 		unsigned long endvma = vma->vm_end;
638dc9aa5b9SChristoph Lameter 
6395b952b3cSAndi Kleen 		if (endvma > end)
6405b952b3cSAndi Kleen 			endvma = end;
6415b952b3cSAndi Kleen 		if (vma->vm_start > start)
6425b952b3cSAndi Kleen 			start = vma->vm_start;
643b24f53a0SLee Schermerhorn 
644b24f53a0SLee Schermerhorn 		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
645b24f53a0SLee Schermerhorn 			if (!vma->vm_next && vma->vm_end < end)
646b24f53a0SLee Schermerhorn 				return ERR_PTR(-EFAULT);
647b24f53a0SLee Schermerhorn 			if (prev && prev->vm_end < vma->vm_start)
648b24f53a0SLee Schermerhorn 				return ERR_PTR(-EFAULT);
649b24f53a0SLee Schermerhorn 		}
650b24f53a0SLee Schermerhorn 
651b24f53a0SLee Schermerhorn 		if (is_vm_hugetlb_page(vma))
652b24f53a0SLee Schermerhorn 			goto next;
653b24f53a0SLee Schermerhorn 
654b24f53a0SLee Schermerhorn 		if (flags & MPOL_MF_LAZY) {
655b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
656b24f53a0SLee Schermerhorn 			goto next;
657b24f53a0SLee Schermerhorn 		}
658b24f53a0SLee Schermerhorn 
659b24f53a0SLee Schermerhorn 		if ((flags & MPOL_MF_STRICT) ||
660b24f53a0SLee Schermerhorn 		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
661b24f53a0SLee Schermerhorn 		      vma_migratable(vma))) {
662b24f53a0SLee Schermerhorn 
663dc9aa5b9SChristoph Lameter 			err = check_pgd_range(vma, start, endvma, nodes,
66438e35860SChristoph Lameter 						flags, private);
6651da177e4SLinus Torvalds 			if (err) {
6661da177e4SLinus Torvalds 				first = ERR_PTR(err);
6671da177e4SLinus Torvalds 				break;
6681da177e4SLinus Torvalds 			}
6691da177e4SLinus Torvalds 		}
670b24f53a0SLee Schermerhorn next:
6711da177e4SLinus Torvalds 		prev = vma;
6721da177e4SLinus Torvalds 	}
6731da177e4SLinus Torvalds 	return first;
6741da177e4SLinus Torvalds }
6751da177e4SLinus Torvalds 
676869833f2SKOSAKI Motohiro /*
677869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
678869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
679869833f2SKOSAKI Motohiro  */
680869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
681869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6828d34694cSKOSAKI Motohiro {
683869833f2SKOSAKI Motohiro 	int err;
684869833f2SKOSAKI Motohiro 	struct mempolicy *old;
685869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6868d34694cSKOSAKI Motohiro 
6878d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6888d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
6898d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
6908d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
6918d34694cSKOSAKI Motohiro 
692869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
693869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
694869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
695869833f2SKOSAKI Motohiro 
696869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
6978d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
698869833f2SKOSAKI Motohiro 		if (err)
699869833f2SKOSAKI Motohiro 			goto err_out;
7008d34694cSKOSAKI Motohiro 	}
701869833f2SKOSAKI Motohiro 
702869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
703869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
704869833f2SKOSAKI Motohiro 	mpol_put(old);
705869833f2SKOSAKI Motohiro 
706869833f2SKOSAKI Motohiro 	return 0;
707869833f2SKOSAKI Motohiro  err_out:
708869833f2SKOSAKI Motohiro 	mpol_put(new);
7098d34694cSKOSAKI Motohiro 	return err;
7108d34694cSKOSAKI Motohiro }
7118d34694cSKOSAKI Motohiro 
7121da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7139d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7149d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7151da177e4SLinus Torvalds {
7161da177e4SLinus Torvalds 	struct vm_area_struct *next;
7179d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7189d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7199d8cebd4SKOSAKI Motohiro 	int err = 0;
720e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7219d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7229d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7231da177e4SLinus Torvalds 
724097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7259d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7269d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7279d8cebd4SKOSAKI Motohiro 
728097d5910SLinus Torvalds 	prev = vma->vm_prev;
729e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
730e26a5114SKOSAKI Motohiro 		prev = vma;
731e26a5114SKOSAKI Motohiro 
7329d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7331da177e4SLinus Torvalds 		next = vma->vm_next;
7349d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7359d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7369d8cebd4SKOSAKI Motohiro 
737e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
738e26a5114SKOSAKI Motohiro 			continue;
739e26a5114SKOSAKI Motohiro 
740e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
741e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7429d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
743e26a5114SKOSAKI Motohiro 				  vma->anon_vma, vma->vm_file, pgoff,
7448aacc9f5SCaspar Zhang 				  new_pol);
7459d8cebd4SKOSAKI Motohiro 		if (prev) {
7469d8cebd4SKOSAKI Motohiro 			vma = prev;
7479d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7489d8cebd4SKOSAKI Motohiro 			continue;
7491da177e4SLinus Torvalds 		}
7509d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7519d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7529d8cebd4SKOSAKI Motohiro 			if (err)
7539d8cebd4SKOSAKI Motohiro 				goto out;
7549d8cebd4SKOSAKI Motohiro 		}
7559d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7569d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7579d8cebd4SKOSAKI Motohiro 			if (err)
7589d8cebd4SKOSAKI Motohiro 				goto out;
7599d8cebd4SKOSAKI Motohiro 		}
760869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7619d8cebd4SKOSAKI Motohiro 		if (err)
7629d8cebd4SKOSAKI Motohiro 			goto out;
7639d8cebd4SKOSAKI Motohiro 	}
7649d8cebd4SKOSAKI Motohiro 
7659d8cebd4SKOSAKI Motohiro  out:
7661da177e4SLinus Torvalds 	return err;
7671da177e4SLinus Torvalds }
7681da177e4SLinus Torvalds 
769c61afb18SPaul Jackson /*
770c61afb18SPaul Jackson  * Update task->flags PF_MEMPOLICY bit: set iff non-default
771c61afb18SPaul Jackson  * mempolicy.  Allows more rapid checking of this (combined perhaps
772c61afb18SPaul Jackson  * with other PF_* flag bits) on memory allocation hot code paths.
773c61afb18SPaul Jackson  *
774c61afb18SPaul Jackson  * If called from outside this file, the task 'p' should -only- be
775c61afb18SPaul Jackson  * a newly forked child not yet visible on the task list, because
776c61afb18SPaul Jackson  * manipulating the task flags of a visible task is not safe.
777c61afb18SPaul Jackson  *
778c61afb18SPaul Jackson  * The above limitation is why this routine has the funny name
779c61afb18SPaul Jackson  * mpol_fix_fork_child_flag().
780c61afb18SPaul Jackson  *
781c61afb18SPaul Jackson  * It is also safe to call this with a task pointer of current,
782c61afb18SPaul Jackson  * which the static wrapper mpol_set_task_struct_flag() does,
783c61afb18SPaul Jackson  * for use within this file.
784c61afb18SPaul Jackson  */
785c61afb18SPaul Jackson 
786c61afb18SPaul Jackson void mpol_fix_fork_child_flag(struct task_struct *p)
787c61afb18SPaul Jackson {
788c61afb18SPaul Jackson 	if (p->mempolicy)
789c61afb18SPaul Jackson 		p->flags |= PF_MEMPOLICY;
790c61afb18SPaul Jackson 	else
791c61afb18SPaul Jackson 		p->flags &= ~PF_MEMPOLICY;
792c61afb18SPaul Jackson }
793c61afb18SPaul Jackson 
794c61afb18SPaul Jackson static void mpol_set_task_struct_flag(void)
795c61afb18SPaul Jackson {
796c61afb18SPaul Jackson 	mpol_fix_fork_child_flag(current);
797c61afb18SPaul Jackson }
798c61afb18SPaul Jackson 
7991da177e4SLinus Torvalds /* Set the process memory policy */
800028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
801028fec41SDavid Rientjes 			     nodemask_t *nodes)
8021da177e4SLinus Torvalds {
80358568d2aSMiao Xie 	struct mempolicy *new, *old;
804f4e53d91SLee Schermerhorn 	struct mm_struct *mm = current->mm;
8054bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
80658568d2aSMiao Xie 	int ret;
8071da177e4SLinus Torvalds 
8084bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8094bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
810f4e53d91SLee Schermerhorn 
8114bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8124bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8134bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8144bfc4495SKAMEZAWA Hiroyuki 		goto out;
8154bfc4495SKAMEZAWA Hiroyuki 	}
816f4e53d91SLee Schermerhorn 	/*
817f4e53d91SLee Schermerhorn 	 * prevent changing our mempolicy while show_numa_maps()
818f4e53d91SLee Schermerhorn 	 * is using it.
819f4e53d91SLee Schermerhorn 	 * Note:  do_set_mempolicy() can be called at init time
820f4e53d91SLee Schermerhorn 	 * with no 'mm'.
821f4e53d91SLee Schermerhorn 	 */
822f4e53d91SLee Schermerhorn 	if (mm)
823f4e53d91SLee Schermerhorn 		down_write(&mm->mmap_sem);
82458568d2aSMiao Xie 	task_lock(current);
8254bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
82658568d2aSMiao Xie 	if (ret) {
82758568d2aSMiao Xie 		task_unlock(current);
82858568d2aSMiao Xie 		if (mm)
82958568d2aSMiao Xie 			up_write(&mm->mmap_sem);
83058568d2aSMiao Xie 		mpol_put(new);
8314bfc4495SKAMEZAWA Hiroyuki 		goto out;
83258568d2aSMiao Xie 	}
83358568d2aSMiao Xie 	old = current->mempolicy;
8341da177e4SLinus Torvalds 	current->mempolicy = new;
835c61afb18SPaul Jackson 	mpol_set_task_struct_flag();
83645c4745aSLee Schermerhorn 	if (new && new->mode == MPOL_INTERLEAVE &&
837f5b087b5SDavid Rientjes 	    nodes_weight(new->v.nodes))
838dfcd3c0dSAndi Kleen 		current->il_next = first_node(new->v.nodes);
83958568d2aSMiao Xie 	task_unlock(current);
840f4e53d91SLee Schermerhorn 	if (mm)
841f4e53d91SLee Schermerhorn 		up_write(&mm->mmap_sem);
842f4e53d91SLee Schermerhorn 
84358568d2aSMiao Xie 	mpol_put(old);
8444bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8454bfc4495SKAMEZAWA Hiroyuki out:
8464bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8474bfc4495SKAMEZAWA Hiroyuki 	return ret;
8481da177e4SLinus Torvalds }
8491da177e4SLinus Torvalds 
850bea904d5SLee Schermerhorn /*
851bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
85258568d2aSMiao Xie  *
85358568d2aSMiao Xie  * Called with task's alloc_lock held
854bea904d5SLee Schermerhorn  */
855bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8561da177e4SLinus Torvalds {
857dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
858bea904d5SLee Schermerhorn 	if (p == &default_policy)
859bea904d5SLee Schermerhorn 		return;
860bea904d5SLee Schermerhorn 
86145c4745aSLee Schermerhorn 	switch (p->mode) {
86219770b32SMel Gorman 	case MPOL_BIND:
86319770b32SMel Gorman 		/* Fall through */
8641da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
865dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8661da177e4SLinus Torvalds 		break;
8671da177e4SLinus Torvalds 	case MPOL_PREFERRED:
868fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
869dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
87053f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8711da177e4SLinus Torvalds 		break;
8721da177e4SLinus Torvalds 	default:
8731da177e4SLinus Torvalds 		BUG();
8741da177e4SLinus Torvalds 	}
8751da177e4SLinus Torvalds }
8761da177e4SLinus Torvalds 
8771da177e4SLinus Torvalds static int lookup_node(struct mm_struct *mm, unsigned long addr)
8781da177e4SLinus Torvalds {
8791da177e4SLinus Torvalds 	struct page *p;
8801da177e4SLinus Torvalds 	int err;
8811da177e4SLinus Torvalds 
8821da177e4SLinus Torvalds 	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
8831da177e4SLinus Torvalds 	if (err >= 0) {
8841da177e4SLinus Torvalds 		err = page_to_nid(p);
8851da177e4SLinus Torvalds 		put_page(p);
8861da177e4SLinus Torvalds 	}
8871da177e4SLinus Torvalds 	return err;
8881da177e4SLinus Torvalds }
8891da177e4SLinus Torvalds 
8901da177e4SLinus Torvalds /* Retrieve NUMA policy */
891dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8921da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8931da177e4SLinus Torvalds {
8948bccd85fSChristoph Lameter 	int err;
8951da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8961da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8971da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
8981da177e4SLinus Torvalds 
899754af6f5SLee Schermerhorn 	if (flags &
900754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9011da177e4SLinus Torvalds 		return -EINVAL;
902754af6f5SLee Schermerhorn 
903754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
904754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
905754af6f5SLee Schermerhorn 			return -EINVAL;
906754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
90758568d2aSMiao Xie 		task_lock(current);
908754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
90958568d2aSMiao Xie 		task_unlock(current);
910754af6f5SLee Schermerhorn 		return 0;
911754af6f5SLee Schermerhorn 	}
912754af6f5SLee Schermerhorn 
9131da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
914bea904d5SLee Schermerhorn 		/*
915bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
916bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
917bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
918bea904d5SLee Schermerhorn 		 */
9191da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
9201da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
9211da177e4SLinus Torvalds 		if (!vma) {
9221da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
9231da177e4SLinus Torvalds 			return -EFAULT;
9241da177e4SLinus Torvalds 		}
9251da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9261da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9271da177e4SLinus Torvalds 		else
9281da177e4SLinus Torvalds 			pol = vma->vm_policy;
9291da177e4SLinus Torvalds 	} else if (addr)
9301da177e4SLinus Torvalds 		return -EINVAL;
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds 	if (!pol)
933bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9341da177e4SLinus Torvalds 
9351da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9361da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9371da177e4SLinus Torvalds 			err = lookup_node(mm, addr);
9381da177e4SLinus Torvalds 			if (err < 0)
9391da177e4SLinus Torvalds 				goto out;
9408bccd85fSChristoph Lameter 			*policy = err;
9411da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
94245c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
9438bccd85fSChristoph Lameter 			*policy = current->il_next;
9441da177e4SLinus Torvalds 		} else {
9451da177e4SLinus Torvalds 			err = -EINVAL;
9461da177e4SLinus Torvalds 			goto out;
9471da177e4SLinus Torvalds 		}
948bea904d5SLee Schermerhorn 	} else {
949bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
950bea904d5SLee Schermerhorn 						pol->mode;
951d79df630SDavid Rientjes 		/*
952d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
953d79df630SDavid Rientjes 		 * the policy to userspace.
954d79df630SDavid Rientjes 		 */
955d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
956bea904d5SLee Schermerhorn 	}
9571da177e4SLinus Torvalds 
9581da177e4SLinus Torvalds 	if (vma) {
9591da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9601da177e4SLinus Torvalds 		vma = NULL;
9611da177e4SLinus Torvalds 	}
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 	err = 0;
96458568d2aSMiao Xie 	if (nmask) {
965c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
966c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
967c6b6ef8bSLee Schermerhorn 		} else {
96858568d2aSMiao Xie 			task_lock(current);
969bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
97058568d2aSMiao Xie 			task_unlock(current);
97158568d2aSMiao Xie 		}
972c6b6ef8bSLee Schermerhorn 	}
9731da177e4SLinus Torvalds 
9741da177e4SLinus Torvalds  out:
97552cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9761da177e4SLinus Torvalds 	if (vma)
9771da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9781da177e4SLinus Torvalds 	return err;
9791da177e4SLinus Torvalds }
9801da177e4SLinus Torvalds 
981b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9828bccd85fSChristoph Lameter /*
9836ce3c4c0SChristoph Lameter  * page migration
9846ce3c4c0SChristoph Lameter  */
985fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
986fc301289SChristoph Lameter 				unsigned long flags)
9876ce3c4c0SChristoph Lameter {
9886ce3c4c0SChristoph Lameter 	/*
989fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9906ce3c4c0SChristoph Lameter 	 */
99162695a84SNick Piggin 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
99262695a84SNick Piggin 		if (!isolate_lru_page(page)) {
99362695a84SNick Piggin 			list_add_tail(&page->lru, pagelist);
9946d9c285aSKOSAKI Motohiro 			inc_zone_page_state(page, NR_ISOLATED_ANON +
9956d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
99662695a84SNick Piggin 		}
99762695a84SNick Piggin 	}
9986ce3c4c0SChristoph Lameter }
9996ce3c4c0SChristoph Lameter 
1000742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
100195a402c3SChristoph Lameter {
10026484eb3eSMel Gorman 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
100395a402c3SChristoph Lameter }
100495a402c3SChristoph Lameter 
10056ce3c4c0SChristoph Lameter /*
10067e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10077e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10087e2ab150SChristoph Lameter  */
1009dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1010dbcb0f19SAdrian Bunk 			   int flags)
10117e2ab150SChristoph Lameter {
10127e2ab150SChristoph Lameter 	nodemask_t nmask;
10137e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10147e2ab150SChristoph Lameter 	int err = 0;
10157e2ab150SChristoph Lameter 
10167e2ab150SChristoph Lameter 	nodes_clear(nmask);
10177e2ab150SChristoph Lameter 	node_set(source, nmask);
10187e2ab150SChristoph Lameter 
101908270807SMinchan Kim 	/*
102008270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
102108270807SMinchan Kim 	 * need migration.  Between passing in the full user address
102208270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
102308270807SMinchan Kim 	 */
102408270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
102508270807SMinchan Kim 	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10267e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10277e2ab150SChristoph Lameter 
1028cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
10297f0f2496SMel Gorman 		err = migrate_pages(&pagelist, new_node_page, dest,
10307b2a2d4aSMel Gorman 							false, MIGRATE_SYNC,
10317b2a2d4aSMel Gorman 							MR_SYSCALL);
1032cf608ac1SMinchan Kim 		if (err)
1033cf608ac1SMinchan Kim 			putback_lru_pages(&pagelist);
1034cf608ac1SMinchan Kim 	}
103595a402c3SChristoph Lameter 
10367e2ab150SChristoph Lameter 	return err;
10377e2ab150SChristoph Lameter }
10387e2ab150SChristoph Lameter 
10397e2ab150SChristoph Lameter /*
10407e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10417e2ab150SChristoph Lameter  * layout as much as possible.
104239743889SChristoph Lameter  *
104339743889SChristoph Lameter  * Returns the number of page that could not be moved.
104439743889SChristoph Lameter  */
10450ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10460ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
104739743889SChristoph Lameter {
10487e2ab150SChristoph Lameter 	int busy = 0;
10490aedadf9SChristoph Lameter 	int err;
10507e2ab150SChristoph Lameter 	nodemask_t tmp;
105139743889SChristoph Lameter 
10520aedadf9SChristoph Lameter 	err = migrate_prep();
10530aedadf9SChristoph Lameter 	if (err)
10540aedadf9SChristoph Lameter 		return err;
10550aedadf9SChristoph Lameter 
105639743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1057d4984711SChristoph Lameter 
10580ce72d4fSAndrew Morton 	err = migrate_vmas(mm, from, to, flags);
10597b2259b3SChristoph Lameter 	if (err)
10607b2259b3SChristoph Lameter 		goto out;
10617b2259b3SChristoph Lameter 
10627e2ab150SChristoph Lameter 	/*
10637e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10647e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10657e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10667e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10677e2ab150SChristoph Lameter 	 *
10687e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10697e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10707e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10717e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10727e2ab150SChristoph Lameter 	 *
10737e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10747e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10757e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10767e2ab150SChristoph Lameter 	 *
10777e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10787e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10797e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10807e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10817e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10827e2ab150SChristoph Lameter 	 *
10837e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10847e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10857e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10867e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1087ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning from_tmp, we at least have the
10887e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10897e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10907e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10917e2ab150SChristoph Lameter 	 */
10927e2ab150SChristoph Lameter 
10930ce72d4fSAndrew Morton 	tmp = *from;
10947e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10957e2ab150SChristoph Lameter 		int s,d;
10967e2ab150SChristoph Lameter 		int source = -1;
10977e2ab150SChristoph Lameter 		int dest = 0;
10987e2ab150SChristoph Lameter 
10997e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11004a5b18ccSLarry Woodman 
11014a5b18ccSLarry Woodman 			/*
11024a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11034a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11044a5b18ccSLarry Woodman 			 * threads and memory areas.
11054a5b18ccSLarry Woodman                          *
11064a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11074a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11084a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11094a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11104a5b18ccSLarry Woodman 			 * mask.
11114a5b18ccSLarry Woodman 			 *
11124a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11134a5b18ccSLarry Woodman 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
11144a5b18ccSLarry Woodman 			 */
11154a5b18ccSLarry Woodman 
11160ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11170ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11184a5b18ccSLarry Woodman 				continue;
11194a5b18ccSLarry Woodman 
11200ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11217e2ab150SChristoph Lameter 			if (s == d)
11227e2ab150SChristoph Lameter 				continue;
11237e2ab150SChristoph Lameter 
11247e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11257e2ab150SChristoph Lameter 			dest = d;
11267e2ab150SChristoph Lameter 
11277e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11287e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11297e2ab150SChristoph Lameter 				break;
11307e2ab150SChristoph Lameter 		}
11317e2ab150SChristoph Lameter 		if (source == -1)
11327e2ab150SChristoph Lameter 			break;
11337e2ab150SChristoph Lameter 
11347e2ab150SChristoph Lameter 		node_clear(source, tmp);
11357e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11367e2ab150SChristoph Lameter 		if (err > 0)
11377e2ab150SChristoph Lameter 			busy += err;
11387e2ab150SChristoph Lameter 		if (err < 0)
11397e2ab150SChristoph Lameter 			break;
114039743889SChristoph Lameter 	}
11417b2259b3SChristoph Lameter out:
114239743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11437e2ab150SChristoph Lameter 	if (err < 0)
11447e2ab150SChristoph Lameter 		return err;
11457e2ab150SChristoph Lameter 	return busy;
1146b20a3503SChristoph Lameter 
114739743889SChristoph Lameter }
114839743889SChristoph Lameter 
11493ad33b24SLee Schermerhorn /*
11503ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
11513ad33b24SLee Schermerhorn  * Start assuming that page is mapped by vma pointed to by @private.
11523ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11533ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11543ad33b24SLee Schermerhorn  * is in virtual address order.
11553ad33b24SLee Schermerhorn  */
1156742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
115795a402c3SChristoph Lameter {
115895a402c3SChristoph Lameter 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
11593ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
116095a402c3SChristoph Lameter 
11613ad33b24SLee Schermerhorn 	while (vma) {
11623ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11633ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11643ad33b24SLee Schermerhorn 			break;
11653ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11663ad33b24SLee Schermerhorn 	}
11673ad33b24SLee Schermerhorn 
11683ad33b24SLee Schermerhorn 	/*
11693ad33b24SLee Schermerhorn 	 * if !vma, alloc_page_vma() will use task or system default policy
11703ad33b24SLee Schermerhorn 	 */
11713ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
117295a402c3SChristoph Lameter }
1173b20a3503SChristoph Lameter #else
1174b20a3503SChristoph Lameter 
1175b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1176b20a3503SChristoph Lameter 				unsigned long flags)
1177b20a3503SChristoph Lameter {
1178b20a3503SChristoph Lameter }
1179b20a3503SChristoph Lameter 
11800ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11810ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1182b20a3503SChristoph Lameter {
1183b20a3503SChristoph Lameter 	return -ENOSYS;
1184b20a3503SChristoph Lameter }
118595a402c3SChristoph Lameter 
118669939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
118795a402c3SChristoph Lameter {
118895a402c3SChristoph Lameter 	return NULL;
118995a402c3SChristoph Lameter }
1190b20a3503SChristoph Lameter #endif
1191b20a3503SChristoph Lameter 
1192dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1193028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1194028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11956ce3c4c0SChristoph Lameter {
11966ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
11976ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11986ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11996ce3c4c0SChristoph Lameter 	unsigned long end;
12006ce3c4c0SChristoph Lameter 	int err;
12016ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12026ce3c4c0SChristoph Lameter 
1203b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12046ce3c4c0SChristoph Lameter 		return -EINVAL;
120574c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12066ce3c4c0SChristoph Lameter 		return -EPERM;
12076ce3c4c0SChristoph Lameter 
12086ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12096ce3c4c0SChristoph Lameter 		return -EINVAL;
12106ce3c4c0SChristoph Lameter 
1211a720094dSMel Gorman 	if (mode == MPOL_DEFAULT)
12126ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12136ce3c4c0SChristoph Lameter 
12146ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12156ce3c4c0SChristoph Lameter 	end = start + len;
12166ce3c4c0SChristoph Lameter 
12176ce3c4c0SChristoph Lameter 	if (end < start)
12186ce3c4c0SChristoph Lameter 		return -EINVAL;
12196ce3c4c0SChristoph Lameter 	if (end == start)
12206ce3c4c0SChristoph Lameter 		return 0;
12216ce3c4c0SChristoph Lameter 
1222028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12236ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12246ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12256ce3c4c0SChristoph Lameter 
1226b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1227b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1228b24f53a0SLee Schermerhorn 
12296ce3c4c0SChristoph Lameter 	/*
12306ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12316ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12326ce3c4c0SChristoph Lameter 	 */
12336ce3c4c0SChristoph Lameter 	if (!new)
12346ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12356ce3c4c0SChristoph Lameter 
1236028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1237028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
1238028fec41SDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : -1);
12396ce3c4c0SChristoph Lameter 
12400aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12410aedadf9SChristoph Lameter 
12420aedadf9SChristoph Lameter 		err = migrate_prep();
12430aedadf9SChristoph Lameter 		if (err)
1244b05ca738SKOSAKI Motohiro 			goto mpol_out;
12450aedadf9SChristoph Lameter 	}
12464bfc4495SKAMEZAWA Hiroyuki 	{
12474bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12484bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12496ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
125058568d2aSMiao Xie 			task_lock(current);
12514bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
125258568d2aSMiao Xie 			task_unlock(current);
12534bfc4495SKAMEZAWA Hiroyuki 			if (err)
125458568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12554bfc4495SKAMEZAWA Hiroyuki 		} else
12564bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12574bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12584bfc4495SKAMEZAWA Hiroyuki 	}
1259b05ca738SKOSAKI Motohiro 	if (err)
1260b05ca738SKOSAKI Motohiro 		goto mpol_out;
1261b05ca738SKOSAKI Motohiro 
12626ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
12636ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
12646ce3c4c0SChristoph Lameter 
1265b24f53a0SLee Schermerhorn 	err = PTR_ERR(vma);	/* maybe ... */
1266a720094dSMel Gorman 	if (!IS_ERR(vma))
12679d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12687e2ab150SChristoph Lameter 
1269b24f53a0SLee Schermerhorn 	if (!err) {
1270b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1271b24f53a0SLee Schermerhorn 
1272cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1273b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
127495a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
12757f0f2496SMel Gorman 						(unsigned long)vma,
12767b2a2d4aSMel Gorman 						false, MIGRATE_SYNC,
12777b2a2d4aSMel Gorman 						MR_MEMPOLICY_MBIND);
1278cf608ac1SMinchan Kim 			if (nr_failed)
1279cf608ac1SMinchan Kim 				putback_lru_pages(&pagelist);
1280cf608ac1SMinchan Kim 		}
12816ce3c4c0SChristoph Lameter 
1282b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12836ce3c4c0SChristoph Lameter 			err = -EIO;
1284ab8a3e14SKOSAKI Motohiro 	} else
1285ab8a3e14SKOSAKI Motohiro 		putback_lru_pages(&pagelist);
1286b20a3503SChristoph Lameter 
12876ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1288b05ca738SKOSAKI Motohiro  mpol_out:
1289f0be3d32SLee Schermerhorn 	mpol_put(new);
12906ce3c4c0SChristoph Lameter 	return err;
12916ce3c4c0SChristoph Lameter }
12926ce3c4c0SChristoph Lameter 
129339743889SChristoph Lameter /*
12948bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12958bccd85fSChristoph Lameter  */
12968bccd85fSChristoph Lameter 
12978bccd85fSChristoph Lameter /* Copy a node mask from user space. */
129839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12998bccd85fSChristoph Lameter 		     unsigned long maxnode)
13008bccd85fSChristoph Lameter {
13018bccd85fSChristoph Lameter 	unsigned long k;
13028bccd85fSChristoph Lameter 	unsigned long nlongs;
13038bccd85fSChristoph Lameter 	unsigned long endmask;
13048bccd85fSChristoph Lameter 
13058bccd85fSChristoph Lameter 	--maxnode;
13068bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13078bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13088bccd85fSChristoph Lameter 		return 0;
1309a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1310636f13c1SChris Wright 		return -EINVAL;
13118bccd85fSChristoph Lameter 
13128bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13138bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13148bccd85fSChristoph Lameter 		endmask = ~0UL;
13158bccd85fSChristoph Lameter 	else
13168bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13178bccd85fSChristoph Lameter 
13188bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported just check
13198bccd85fSChristoph Lameter 	   if the non supported part is all zero. */
13208bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13218bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
13228bccd85fSChristoph Lameter 			return -EINVAL;
13238bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13248bccd85fSChristoph Lameter 			unsigned long t;
13258bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13268bccd85fSChristoph Lameter 				return -EFAULT;
13278bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13288bccd85fSChristoph Lameter 				if (t & endmask)
13298bccd85fSChristoph Lameter 					return -EINVAL;
13308bccd85fSChristoph Lameter 			} else if (t)
13318bccd85fSChristoph Lameter 				return -EINVAL;
13328bccd85fSChristoph Lameter 		}
13338bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13348bccd85fSChristoph Lameter 		endmask = ~0UL;
13358bccd85fSChristoph Lameter 	}
13368bccd85fSChristoph Lameter 
13378bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13388bccd85fSChristoph Lameter 		return -EFAULT;
13398bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13408bccd85fSChristoph Lameter 	return 0;
13418bccd85fSChristoph Lameter }
13428bccd85fSChristoph Lameter 
13438bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13448bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13458bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13468bccd85fSChristoph Lameter {
13478bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13488bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13498bccd85fSChristoph Lameter 
13508bccd85fSChristoph Lameter 	if (copy > nbytes) {
13518bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13528bccd85fSChristoph Lameter 			return -EINVAL;
13538bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13548bccd85fSChristoph Lameter 			return -EFAULT;
13558bccd85fSChristoph Lameter 		copy = nbytes;
13568bccd85fSChristoph Lameter 	}
13578bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13588bccd85fSChristoph Lameter }
13598bccd85fSChristoph Lameter 
1360938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1361938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1362938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13638bccd85fSChristoph Lameter {
13648bccd85fSChristoph Lameter 	nodemask_t nodes;
13658bccd85fSChristoph Lameter 	int err;
1366028fec41SDavid Rientjes 	unsigned short mode_flags;
13678bccd85fSChristoph Lameter 
1368028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1369028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1370a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1371a3b51e01SDavid Rientjes 		return -EINVAL;
13724c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13734c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13744c50bc01SDavid Rientjes 		return -EINVAL;
13758bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13768bccd85fSChristoph Lameter 	if (err)
13778bccd85fSChristoph Lameter 		return err;
1378028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13798bccd85fSChristoph Lameter }
13808bccd85fSChristoph Lameter 
13818bccd85fSChristoph Lameter /* Set the process memory policy */
1382938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1383938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13848bccd85fSChristoph Lameter {
13858bccd85fSChristoph Lameter 	int err;
13868bccd85fSChristoph Lameter 	nodemask_t nodes;
1387028fec41SDavid Rientjes 	unsigned short flags;
13888bccd85fSChristoph Lameter 
1389028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1390028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1391028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13928bccd85fSChristoph Lameter 		return -EINVAL;
13934c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13944c50bc01SDavid Rientjes 		return -EINVAL;
13958bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13968bccd85fSChristoph Lameter 	if (err)
13978bccd85fSChristoph Lameter 		return err;
1398028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13998bccd85fSChristoph Lameter }
14008bccd85fSChristoph Lameter 
1401938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1402938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1403938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
140439743889SChristoph Lameter {
1405c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1406596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
140739743889SChristoph Lameter 	struct task_struct *task;
140839743889SChristoph Lameter 	nodemask_t task_nodes;
140939743889SChristoph Lameter 	int err;
1410596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1411596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1412596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
141339743889SChristoph Lameter 
1414596d7cfaSKOSAKI Motohiro 	if (!scratch)
1415596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
141639743889SChristoph Lameter 
1417596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1418596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1419596d7cfaSKOSAKI Motohiro 
1420596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
142139743889SChristoph Lameter 	if (err)
1422596d7cfaSKOSAKI Motohiro 		goto out;
1423596d7cfaSKOSAKI Motohiro 
1424596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1425596d7cfaSKOSAKI Motohiro 	if (err)
1426596d7cfaSKOSAKI Motohiro 		goto out;
142739743889SChristoph Lameter 
142839743889SChristoph Lameter 	/* Find the mm_struct */
142955cfaa3cSZeng Zhaoming 	rcu_read_lock();
1430228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
143139743889SChristoph Lameter 	if (!task) {
143255cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1433596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1434596d7cfaSKOSAKI Motohiro 		goto out;
143539743889SChristoph Lameter 	}
14363268c63eSChristoph Lameter 	get_task_struct(task);
143739743889SChristoph Lameter 
1438596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
143939743889SChristoph Lameter 
144039743889SChristoph Lameter 	/*
144139743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
144239743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14437f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
144439743889SChristoph Lameter 	 * userid as the target process.
144539743889SChristoph Lameter 	 */
1446c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1447b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1448b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
144974c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1450c69e8d9cSDavid Howells 		rcu_read_unlock();
145139743889SChristoph Lameter 		err = -EPERM;
14523268c63eSChristoph Lameter 		goto out_put;
145339743889SChristoph Lameter 	}
1454c69e8d9cSDavid Howells 	rcu_read_unlock();
145539743889SChristoph Lameter 
145639743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
145739743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1458596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
145939743889SChristoph Lameter 		err = -EPERM;
14603268c63eSChristoph Lameter 		goto out_put;
146139743889SChristoph Lameter 	}
146239743889SChristoph Lameter 
1463596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
14643b42d28bSChristoph Lameter 		err = -EINVAL;
14653268c63eSChristoph Lameter 		goto out_put;
14663b42d28bSChristoph Lameter 	}
14673b42d28bSChristoph Lameter 
146886c3a764SDavid Quigley 	err = security_task_movememory(task);
146986c3a764SDavid Quigley 	if (err)
14703268c63eSChristoph Lameter 		goto out_put;
147186c3a764SDavid Quigley 
14723268c63eSChristoph Lameter 	mm = get_task_mm(task);
14733268c63eSChristoph Lameter 	put_task_struct(task);
1474f2a9ef88SSasha Levin 
1475f2a9ef88SSasha Levin 	if (!mm) {
1476f2a9ef88SSasha Levin 		err = -EINVAL;
1477f2a9ef88SSasha Levin 		goto out;
1478f2a9ef88SSasha Levin 	}
1479f2a9ef88SSasha Levin 
1480596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
148174c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14823268c63eSChristoph Lameter 
148339743889SChristoph Lameter 	mmput(mm);
14843268c63eSChristoph Lameter out:
1485596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1486596d7cfaSKOSAKI Motohiro 
148739743889SChristoph Lameter 	return err;
14883268c63eSChristoph Lameter 
14893268c63eSChristoph Lameter out_put:
14903268c63eSChristoph Lameter 	put_task_struct(task);
14913268c63eSChristoph Lameter 	goto out;
14923268c63eSChristoph Lameter 
149339743889SChristoph Lameter }
149439743889SChristoph Lameter 
149539743889SChristoph Lameter 
14968bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1497938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1498938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1499938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
15008bccd85fSChristoph Lameter {
1501dbcb0f19SAdrian Bunk 	int err;
1502dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15038bccd85fSChristoph Lameter 	nodemask_t nodes;
15048bccd85fSChristoph Lameter 
15058bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
15068bccd85fSChristoph Lameter 		return -EINVAL;
15078bccd85fSChristoph Lameter 
15088bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15098bccd85fSChristoph Lameter 
15108bccd85fSChristoph Lameter 	if (err)
15118bccd85fSChristoph Lameter 		return err;
15128bccd85fSChristoph Lameter 
15138bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15148bccd85fSChristoph Lameter 		return -EFAULT;
15158bccd85fSChristoph Lameter 
15168bccd85fSChristoph Lameter 	if (nmask)
15178bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15188bccd85fSChristoph Lameter 
15198bccd85fSChristoph Lameter 	return err;
15208bccd85fSChristoph Lameter }
15218bccd85fSChristoph Lameter 
15221da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15231da177e4SLinus Torvalds 
15241da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
15251da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
15261da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
15271da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	long err;
15301da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15311da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15321da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15351da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15361da177e4SLinus Torvalds 
15371da177e4SLinus Torvalds 	if (nmask)
15381da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15391da177e4SLinus Torvalds 
15401da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds 	if (!err && nmask) {
15432bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15442bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15452bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15461da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15471da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15481da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15491da177e4SLinus Torvalds 	}
15501da177e4SLinus Torvalds 
15511da177e4SLinus Torvalds 	return err;
15521da177e4SLinus Torvalds }
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
15551da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
15561da177e4SLinus Torvalds {
15571da177e4SLinus Torvalds 	long err = 0;
15581da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15591da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15601da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15631da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds 	if (nmask) {
15661da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15671da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15681da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15691da177e4SLinus Torvalds 	}
15701da177e4SLinus Torvalds 
15711da177e4SLinus Torvalds 	if (err)
15721da177e4SLinus Torvalds 		return -EFAULT;
15731da177e4SLinus Torvalds 
15741da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15751da177e4SLinus Torvalds }
15761da177e4SLinus Torvalds 
15771da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
15781da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
15791da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
15801da177e4SLinus Torvalds {
15811da177e4SLinus Torvalds 	long err = 0;
15821da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15831da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1584dfcd3c0dSAndi Kleen 	nodemask_t bm;
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15871da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	if (nmask) {
1590dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15911da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1592dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15931da177e4SLinus Torvalds 	}
15941da177e4SLinus Torvalds 
15951da177e4SLinus Torvalds 	if (err)
15961da177e4SLinus Torvalds 		return -EFAULT;
15971da177e4SLinus Torvalds 
15981da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15991da177e4SLinus Torvalds }
16001da177e4SLinus Torvalds 
16011da177e4SLinus Torvalds #endif
16021da177e4SLinus Torvalds 
1603480eccf9SLee Schermerhorn /*
1604480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1605480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1606480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1607480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1608480eccf9SLee Schermerhorn  *
1609480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1610480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
161132f8516aSDavid Rientjes  * Current or other task's task mempolicy and non-shared vma policies must be
161232f8516aSDavid Rientjes  * protected by task_lock(task) by the caller.
161352cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
161452cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
161552cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
161652cd3b07SLee Schermerhorn  * extra reference for shared policies.
1617480eccf9SLee Schermerhorn  */
1618d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task,
161948fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
16201da177e4SLinus Torvalds {
1621*5606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(task);
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	if (vma) {
1624480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1625ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1626ae4d8c16SLee Schermerhorn 									addr);
1627ae4d8c16SLee Schermerhorn 			if (vpol)
1628ae4d8c16SLee Schermerhorn 				pol = vpol;
162900442ad0SMel Gorman 		} else if (vma->vm_policy) {
16301da177e4SLinus Torvalds 			pol = vma->vm_policy;
163100442ad0SMel Gorman 
163200442ad0SMel Gorman 			/*
163300442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
163400442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
163500442ad0SMel Gorman 			 * count on these policies which will be dropped by
163600442ad0SMel Gorman 			 * mpol_cond_put() later
163700442ad0SMel Gorman 			 */
163800442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
163900442ad0SMel Gorman 				mpol_get(pol);
164000442ad0SMel Gorman 		}
16411da177e4SLinus Torvalds 	}
16421da177e4SLinus Torvalds 	if (!pol)
16431da177e4SLinus Torvalds 		pol = &default_policy;
16441da177e4SLinus Torvalds 	return pol;
16451da177e4SLinus Torvalds }
16461da177e4SLinus Torvalds 
164752cd3b07SLee Schermerhorn /*
164852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
164952cd3b07SLee Schermerhorn  * page allocation
165052cd3b07SLee Schermerhorn  */
165152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
165219770b32SMel Gorman {
165319770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
165445c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
165519770b32SMel Gorman 			gfp_zone(gfp) >= policy_zone &&
165619770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
165719770b32SMel Gorman 		return &policy->v.nodes;
165819770b32SMel Gorman 
165919770b32SMel Gorman 	return NULL;
166019770b32SMel Gorman }
166119770b32SMel Gorman 
166252cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
16632f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
16642f5f9486SAndi Kleen 	int nd)
16651da177e4SLinus Torvalds {
166645c4745aSLee Schermerhorn 	switch (policy->mode) {
16671da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1668fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
16691da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
16701da177e4SLinus Torvalds 		break;
16711da177e4SLinus Torvalds 	case MPOL_BIND:
167219770b32SMel Gorman 		/*
167352cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
167452cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
16756eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
167652cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
167719770b32SMel Gorman 		 */
167819770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
167919770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
168019770b32SMel Gorman 			nd = first_node(policy->v.nodes);
168119770b32SMel Gorman 		break;
16821da177e4SLinus Torvalds 	default:
16831da177e4SLinus Torvalds 		BUG();
16841da177e4SLinus Torvalds 	}
16850e88460dSMel Gorman 	return node_zonelist(nd, gfp);
16861da177e4SLinus Torvalds }
16871da177e4SLinus Torvalds 
16881da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
16891da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
16901da177e4SLinus Torvalds {
16911da177e4SLinus Torvalds 	unsigned nid, next;
16921da177e4SLinus Torvalds 	struct task_struct *me = current;
16931da177e4SLinus Torvalds 
16941da177e4SLinus Torvalds 	nid = me->il_next;
1695dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
16961da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1697dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1698f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
16991da177e4SLinus Torvalds 		me->il_next = next;
17001da177e4SLinus Torvalds 	return nid;
17011da177e4SLinus Torvalds }
17021da177e4SLinus Torvalds 
1703dc85da15SChristoph Lameter /*
1704dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1705dc85da15SChristoph Lameter  * next slab entry.
170652cd3b07SLee Schermerhorn  * @policy must be protected by freeing by the caller.  If @policy is
170752cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
170852cd3b07SLee Schermerhorn  * task can change it's policy.  The system default policy requires no
170952cd3b07SLee Schermerhorn  * such protection.
1710dc85da15SChristoph Lameter  */
1711e7b691b0SAndi Kleen unsigned slab_node(void)
1712dc85da15SChristoph Lameter {
1713e7b691b0SAndi Kleen 	struct mempolicy *policy;
1714e7b691b0SAndi Kleen 
1715e7b691b0SAndi Kleen 	if (in_interrupt())
1716e7b691b0SAndi Kleen 		return numa_node_id();
1717e7b691b0SAndi Kleen 
1718e7b691b0SAndi Kleen 	policy = current->mempolicy;
1719fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1720bea904d5SLee Schermerhorn 		return numa_node_id();
1721765c4507SChristoph Lameter 
1722bea904d5SLee Schermerhorn 	switch (policy->mode) {
1723bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1724fc36b8d3SLee Schermerhorn 		/*
1725fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1726fc36b8d3SLee Schermerhorn 		 */
1727bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1728bea904d5SLee Schermerhorn 
1729dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1730dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1731dc85da15SChristoph Lameter 
1732dd1a239fSMel Gorman 	case MPOL_BIND: {
1733dc85da15SChristoph Lameter 		/*
1734dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1735dc85da15SChristoph Lameter 		 * first node.
1736dc85da15SChristoph Lameter 		 */
173719770b32SMel Gorman 		struct zonelist *zonelist;
173819770b32SMel Gorman 		struct zone *zone;
173919770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
174019770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
174119770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
174219770b32SMel Gorman 							&policy->v.nodes,
174319770b32SMel Gorman 							&zone);
1744800416f7SEric Dumazet 		return zone ? zone->node : numa_node_id();
1745dd1a239fSMel Gorman 	}
1746dc85da15SChristoph Lameter 
1747dc85da15SChristoph Lameter 	default:
1748bea904d5SLee Schermerhorn 		BUG();
1749dc85da15SChristoph Lameter 	}
1750dc85da15SChristoph Lameter }
1751dc85da15SChristoph Lameter 
17521da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
17531da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
17541da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
17551da177e4SLinus Torvalds {
1756dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1757f5b087b5SDavid Rientjes 	unsigned target;
17581da177e4SLinus Torvalds 	int c;
17591da177e4SLinus Torvalds 	int nid = -1;
17601da177e4SLinus Torvalds 
1761f5b087b5SDavid Rientjes 	if (!nnodes)
1762f5b087b5SDavid Rientjes 		return numa_node_id();
1763f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
17641da177e4SLinus Torvalds 	c = 0;
17651da177e4SLinus Torvalds 	do {
1766dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17671da177e4SLinus Torvalds 		c++;
17681da177e4SLinus Torvalds 	} while (c <= target);
17691da177e4SLinus Torvalds 	return nid;
17701da177e4SLinus Torvalds }
17711da177e4SLinus Torvalds 
17725da7ca86SChristoph Lameter /* Determine a node number for interleave */
17735da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17745da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17755da7ca86SChristoph Lameter {
17765da7ca86SChristoph Lameter 	if (vma) {
17775da7ca86SChristoph Lameter 		unsigned long off;
17785da7ca86SChristoph Lameter 
17793b98b087SNishanth Aravamudan 		/*
17803b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17813b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17823b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17833b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17843b98b087SNishanth Aravamudan 		 * a useful offset.
17853b98b087SNishanth Aravamudan 		 */
17863b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17873b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17885da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
17895da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
17905da7ca86SChristoph Lameter 	} else
17915da7ca86SChristoph Lameter 		return interleave_nodes(pol);
17925da7ca86SChristoph Lameter }
17935da7ca86SChristoph Lameter 
1794778d3b0fSMichal Hocko /*
1795778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1796778d3b0fSMichal Hocko  * (returns -1 if nodemask is empty)
1797778d3b0fSMichal Hocko  */
1798778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1799778d3b0fSMichal Hocko {
1800778d3b0fSMichal Hocko 	int w, bit = -1;
1801778d3b0fSMichal Hocko 
1802778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1803778d3b0fSMichal Hocko 	if (w)
1804778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1805778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1806778d3b0fSMichal Hocko 	return bit;
1807778d3b0fSMichal Hocko }
1808778d3b0fSMichal Hocko 
180900ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1810480eccf9SLee Schermerhorn /*
1811480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1812480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1813480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1814480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
181519770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
181619770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1817480eccf9SLee Schermerhorn  *
181852cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
181952cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
182052cd3b07SLee Schermerhorn  * If the effective policy is 'BIND, returns a pointer to the mempolicy's
182152cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1822c0ff7453SMiao Xie  *
1823c0ff7453SMiao Xie  * Must be protected by get_mems_allowed()
1824480eccf9SLee Schermerhorn  */
1825396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
182619770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
182719770b32SMel Gorman 				nodemask_t **nodemask)
18285da7ca86SChristoph Lameter {
1829480eccf9SLee Schermerhorn 	struct zonelist *zl;
18305da7ca86SChristoph Lameter 
183152cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
183219770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18335da7ca86SChristoph Lameter 
183452cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
183552cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1836a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
183752cd3b07SLee Schermerhorn 	} else {
18382f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
183952cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
184052cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1841480eccf9SLee Schermerhorn 	}
1842480eccf9SLee Schermerhorn 	return zl;
18435da7ca86SChristoph Lameter }
184406808b08SLee Schermerhorn 
184506808b08SLee Schermerhorn /*
184606808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
184706808b08SLee Schermerhorn  *
184806808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
184906808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
185006808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
185106808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
185206808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
185306808b08SLee Schermerhorn  * of non-default mempolicy.
185406808b08SLee Schermerhorn  *
185506808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
185606808b08SLee Schermerhorn  * because the current task is examining it's own mempolicy and a task's
185706808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
185806808b08SLee Schermerhorn  *
185906808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
186006808b08SLee Schermerhorn  */
186106808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
186206808b08SLee Schermerhorn {
186306808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
186406808b08SLee Schermerhorn 	int nid;
186506808b08SLee Schermerhorn 
186606808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
186706808b08SLee Schermerhorn 		return false;
186806808b08SLee Schermerhorn 
1869c0ff7453SMiao Xie 	task_lock(current);
187006808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
187106808b08SLee Schermerhorn 	switch (mempolicy->mode) {
187206808b08SLee Schermerhorn 	case MPOL_PREFERRED:
187306808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
187406808b08SLee Schermerhorn 			nid = numa_node_id();
187506808b08SLee Schermerhorn 		else
187606808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
187706808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
187806808b08SLee Schermerhorn 		break;
187906808b08SLee Schermerhorn 
188006808b08SLee Schermerhorn 	case MPOL_BIND:
188106808b08SLee Schermerhorn 		/* Fall through */
188206808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
188306808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
188406808b08SLee Schermerhorn 		break;
188506808b08SLee Schermerhorn 
188606808b08SLee Schermerhorn 	default:
188706808b08SLee Schermerhorn 		BUG();
188806808b08SLee Schermerhorn 	}
1889c0ff7453SMiao Xie 	task_unlock(current);
189006808b08SLee Schermerhorn 
189106808b08SLee Schermerhorn 	return true;
189206808b08SLee Schermerhorn }
189300ac59adSChen, Kenneth W #endif
18945da7ca86SChristoph Lameter 
18956f48d0ebSDavid Rientjes /*
18966f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18976f48d0ebSDavid Rientjes  *
18986f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18996f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19006f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'perferred' or 'local'
19016f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19026f48d0ebSDavid Rientjes  *
19036f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19046f48d0ebSDavid Rientjes  */
19056f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19066f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19076f48d0ebSDavid Rientjes {
19086f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19096f48d0ebSDavid Rientjes 	bool ret = true;
19106f48d0ebSDavid Rientjes 
19116f48d0ebSDavid Rientjes 	if (!mask)
19126f48d0ebSDavid Rientjes 		return ret;
19136f48d0ebSDavid Rientjes 	task_lock(tsk);
19146f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19156f48d0ebSDavid Rientjes 	if (!mempolicy)
19166f48d0ebSDavid Rientjes 		goto out;
19176f48d0ebSDavid Rientjes 
19186f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19196f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19206f48d0ebSDavid Rientjes 		/*
19216f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19226f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
19236f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19246f48d0ebSDavid Rientjes 		 * nodes in mask.
19256f48d0ebSDavid Rientjes 		 */
19266f48d0ebSDavid Rientjes 		break;
19276f48d0ebSDavid Rientjes 	case MPOL_BIND:
19286f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19296f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19306f48d0ebSDavid Rientjes 		break;
19316f48d0ebSDavid Rientjes 	default:
19326f48d0ebSDavid Rientjes 		BUG();
19336f48d0ebSDavid Rientjes 	}
19346f48d0ebSDavid Rientjes out:
19356f48d0ebSDavid Rientjes 	task_unlock(tsk);
19366f48d0ebSDavid Rientjes 	return ret;
19376f48d0ebSDavid Rientjes }
19386f48d0ebSDavid Rientjes 
19391da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19401da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1941662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1942662f3a0bSAndi Kleen 					unsigned nid)
19431da177e4SLinus Torvalds {
19441da177e4SLinus Torvalds 	struct zonelist *zl;
19451da177e4SLinus Torvalds 	struct page *page;
19461da177e4SLinus Torvalds 
19470e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19481da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1949dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1950ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19511da177e4SLinus Torvalds 	return page;
19521da177e4SLinus Torvalds }
19531da177e4SLinus Torvalds 
19541da177e4SLinus Torvalds /**
19550bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19561da177e4SLinus Torvalds  *
19571da177e4SLinus Torvalds  * 	@gfp:
19581da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19591da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19601da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19611da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19621da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19631da177e4SLinus Torvalds  *
19640bbbc0b3SAndrea Arcangeli  *	@order:Order of the GFP allocation.
19651da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19661da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
19671da177e4SLinus Torvalds  *
19681da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19691da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19701da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19711da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
19721da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
19731da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
19741da177e4SLinus Torvalds  *
19751da177e4SLinus Torvalds  *	Should be called with the mm_sem of the vma hold.
19761da177e4SLinus Torvalds  */
19771da177e4SLinus Torvalds struct page *
19780bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
19792f5f9486SAndi Kleen 		unsigned long addr, int node)
19801da177e4SLinus Torvalds {
1981cc9a6c87SMel Gorman 	struct mempolicy *pol;
1982480eccf9SLee Schermerhorn 	struct zonelist *zl;
1983c0ff7453SMiao Xie 	struct page *page;
1984cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
19851da177e4SLinus Torvalds 
1986cc9a6c87SMel Gorman retry_cpuset:
1987cc9a6c87SMel Gorman 	pol = get_vma_policy(current, vma, addr);
1988cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
1989cc9a6c87SMel Gorman 
199045c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
19911da177e4SLinus Torvalds 		unsigned nid;
19925da7ca86SChristoph Lameter 
19938eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
199452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
19950bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
1996cc9a6c87SMel Gorman 		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1997cc9a6c87SMel Gorman 			goto retry_cpuset;
1998cc9a6c87SMel Gorman 
1999c0ff7453SMiao Xie 		return page;
20001da177e4SLinus Torvalds 	}
20012f5f9486SAndi Kleen 	zl = policy_zonelist(gfp, pol, node);
200252cd3b07SLee Schermerhorn 	if (unlikely(mpol_needs_cond_ref(pol))) {
2003480eccf9SLee Schermerhorn 		/*
200452cd3b07SLee Schermerhorn 		 * slow path: ref counted shared policy
2005480eccf9SLee Schermerhorn 		 */
20060bbbc0b3SAndrea Arcangeli 		struct page *page =  __alloc_pages_nodemask(gfp, order,
200752cd3b07SLee Schermerhorn 						zl, policy_nodemask(gfp, pol));
2008f0be3d32SLee Schermerhorn 		__mpol_put(pol);
2009cc9a6c87SMel Gorman 		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2010cc9a6c87SMel Gorman 			goto retry_cpuset;
2011480eccf9SLee Schermerhorn 		return page;
2012480eccf9SLee Schermerhorn 	}
2013480eccf9SLee Schermerhorn 	/*
2014480eccf9SLee Schermerhorn 	 * fast path:  default or task policy
2015480eccf9SLee Schermerhorn 	 */
20160bbbc0b3SAndrea Arcangeli 	page = __alloc_pages_nodemask(gfp, order, zl,
20170bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
2018cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2019cc9a6c87SMel Gorman 		goto retry_cpuset;
2020c0ff7453SMiao Xie 	return page;
20211da177e4SLinus Torvalds }
20221da177e4SLinus Torvalds 
20231da177e4SLinus Torvalds /**
20241da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20251da177e4SLinus Torvalds  *
20261da177e4SLinus Torvalds  *	@gfp:
20271da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20281da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20291da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20301da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20311da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20321da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20331da177e4SLinus Torvalds  *
20341da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20351da177e4SLinus Torvalds  *	interrupt context and apply the current process NUMA policy.
20361da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20371da177e4SLinus Torvalds  *
2038cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20391da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20401da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20411da177e4SLinus Torvalds  */
2042dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20431da177e4SLinus Torvalds {
2044*5606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(current);
2045c0ff7453SMiao Xie 	struct page *page;
2046cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20471da177e4SLinus Torvalds 
20489b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
20491da177e4SLinus Torvalds 		pol = &default_policy;
205052cd3b07SLee Schermerhorn 
2051cc9a6c87SMel Gorman retry_cpuset:
2052cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2053cc9a6c87SMel Gorman 
205452cd3b07SLee Schermerhorn 	/*
205552cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
205652cd3b07SLee Schermerhorn 	 * nor system default_policy
205752cd3b07SLee Schermerhorn 	 */
205845c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2059c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2060c0ff7453SMiao Xie 	else
2061c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20625c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20635c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2064cc9a6c87SMel Gorman 
2065cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2066cc9a6c87SMel Gorman 		goto retry_cpuset;
2067cc9a6c87SMel Gorman 
2068c0ff7453SMiao Xie 	return page;
20691da177e4SLinus Torvalds }
20701da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20711da177e4SLinus Torvalds 
20724225399aSPaul Jackson /*
2073846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20744225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
20754225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20764225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20774225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2078708c1bbcSMiao Xie  *
2079708c1bbcSMiao Xie  * current's mempolicy may be rebinded by the other task(the task that changes
2080708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for current task.
20814225399aSPaul Jackson  */
20824225399aSPaul Jackson 
2083846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2084846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
20851da177e4SLinus Torvalds {
20861da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
20871da177e4SLinus Torvalds 
20881da177e4SLinus Torvalds 	if (!new)
20891da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2090708c1bbcSMiao Xie 
2091708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2092708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2093708c1bbcSMiao Xie 		task_lock(current);
2094708c1bbcSMiao Xie 		*new = *old;
2095708c1bbcSMiao Xie 		task_unlock(current);
2096708c1bbcSMiao Xie 	} else
2097708c1bbcSMiao Xie 		*new = *old;
2098708c1bbcSMiao Xie 
209999ee4ca7SPaul E. McKenney 	rcu_read_lock();
21004225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21014225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2102708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2103708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2104708c1bbcSMiao Xie 		else
2105708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21064225399aSPaul Jackson 	}
210799ee4ca7SPaul E. McKenney 	rcu_read_unlock();
21081da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21091da177e4SLinus Torvalds 	return new;
21101da177e4SLinus Torvalds }
21111da177e4SLinus Torvalds 
211252cd3b07SLee Schermerhorn /*
211352cd3b07SLee Schermerhorn  * If *frompol needs [has] an extra ref, copy *frompol to *tompol ,
211452cd3b07SLee Schermerhorn  * eliminate the * MPOL_F_* flags that require conditional ref and
211552cd3b07SLee Schermerhorn  * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
211652cd3b07SLee Schermerhorn  * after return.  Use the returned value.
211752cd3b07SLee Schermerhorn  *
211852cd3b07SLee Schermerhorn  * Allows use of a mempolicy for, e.g., multiple allocations with a single
211952cd3b07SLee Schermerhorn  * policy lookup, even if the policy needs/has extra ref on lookup.
212052cd3b07SLee Schermerhorn  * shmem_readahead needs this.
212152cd3b07SLee Schermerhorn  */
212252cd3b07SLee Schermerhorn struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
212352cd3b07SLee Schermerhorn 						struct mempolicy *frompol)
212452cd3b07SLee Schermerhorn {
212552cd3b07SLee Schermerhorn 	if (!mpol_needs_cond_ref(frompol))
212652cd3b07SLee Schermerhorn 		return frompol;
212752cd3b07SLee Schermerhorn 
212852cd3b07SLee Schermerhorn 	*tompol = *frompol;
212952cd3b07SLee Schermerhorn 	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
213052cd3b07SLee Schermerhorn 	__mpol_put(frompol);
213152cd3b07SLee Schermerhorn 	return tompol;
213252cd3b07SLee Schermerhorn }
213352cd3b07SLee Schermerhorn 
21341da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2135fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21361da177e4SLinus Torvalds {
21371da177e4SLinus Torvalds 	if (!a || !b)
2138fcfb4dccSKOSAKI Motohiro 		return false;
213945c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2140fcfb4dccSKOSAKI Motohiro 		return false;
214119800502SBob Liu 	if (a->flags != b->flags)
2142fcfb4dccSKOSAKI Motohiro 		return false;
214319800502SBob Liu 	if (mpol_store_user_nodemask(a))
214419800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2145fcfb4dccSKOSAKI Motohiro 			return false;
214619800502SBob Liu 
214745c4745aSLee Schermerhorn 	switch (a->mode) {
214819770b32SMel Gorman 	case MPOL_BIND:
214919770b32SMel Gorman 		/* Fall through */
21501da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2151fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21521da177e4SLinus Torvalds 	case MPOL_PREFERRED:
215375719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21541da177e4SLinus Torvalds 	default:
21551da177e4SLinus Torvalds 		BUG();
2156fcfb4dccSKOSAKI Motohiro 		return false;
21571da177e4SLinus Torvalds 	}
21581da177e4SLinus Torvalds }
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds /*
21611da177e4SLinus Torvalds  * Shared memory backing store policy support.
21621da177e4SLinus Torvalds  *
21631da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21641da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21651da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
21661da177e4SLinus Torvalds  * for any accesses to the tree.
21671da177e4SLinus Torvalds  */
21681da177e4SLinus Torvalds 
21691da177e4SLinus Torvalds /* lookup first element intersecting start-end */
2170b22d127aSMel Gorman /* Caller holds sp->mutex */
21711da177e4SLinus Torvalds static struct sp_node *
21721da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21731da177e4SLinus Torvalds {
21741da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21751da177e4SLinus Torvalds 
21761da177e4SLinus Torvalds 	while (n) {
21771da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21781da177e4SLinus Torvalds 
21791da177e4SLinus Torvalds 		if (start >= p->end)
21801da177e4SLinus Torvalds 			n = n->rb_right;
21811da177e4SLinus Torvalds 		else if (end <= p->start)
21821da177e4SLinus Torvalds 			n = n->rb_left;
21831da177e4SLinus Torvalds 		else
21841da177e4SLinus Torvalds 			break;
21851da177e4SLinus Torvalds 	}
21861da177e4SLinus Torvalds 	if (!n)
21871da177e4SLinus Torvalds 		return NULL;
21881da177e4SLinus Torvalds 	for (;;) {
21891da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21901da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21911da177e4SLinus Torvalds 		if (!prev)
21921da177e4SLinus Torvalds 			break;
21931da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21941da177e4SLinus Torvalds 		if (w->end <= start)
21951da177e4SLinus Torvalds 			break;
21961da177e4SLinus Torvalds 		n = prev;
21971da177e4SLinus Torvalds 	}
21981da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21991da177e4SLinus Torvalds }
22001da177e4SLinus Torvalds 
22011da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
22021da177e4SLinus Torvalds /* Caller holds sp->lock */
22031da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22041da177e4SLinus Torvalds {
22051da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22061da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22071da177e4SLinus Torvalds 	struct sp_node *nd;
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds 	while (*p) {
22101da177e4SLinus Torvalds 		parent = *p;
22111da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22121da177e4SLinus Torvalds 		if (new->start < nd->start)
22131da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22141da177e4SLinus Torvalds 		else if (new->end > nd->end)
22151da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22161da177e4SLinus Torvalds 		else
22171da177e4SLinus Torvalds 			BUG();
22181da177e4SLinus Torvalds 	}
22191da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22201da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2221140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
222245c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22231da177e4SLinus Torvalds }
22241da177e4SLinus Torvalds 
22251da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22261da177e4SLinus Torvalds struct mempolicy *
22271da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22281da177e4SLinus Torvalds {
22291da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22301da177e4SLinus Torvalds 	struct sp_node *sn;
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22331da177e4SLinus Torvalds 		return NULL;
2234b22d127aSMel Gorman 	mutex_lock(&sp->mutex);
22351da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22361da177e4SLinus Torvalds 	if (sn) {
22371da177e4SLinus Torvalds 		mpol_get(sn->policy);
22381da177e4SLinus Torvalds 		pol = sn->policy;
22391da177e4SLinus Torvalds 	}
2240b22d127aSMel Gorman 	mutex_unlock(&sp->mutex);
22411da177e4SLinus Torvalds 	return pol;
22421da177e4SLinus Torvalds }
22431da177e4SLinus Torvalds 
224463f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
224563f74ca2SKOSAKI Motohiro {
224663f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
224763f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
224863f74ca2SKOSAKI Motohiro }
224963f74ca2SKOSAKI Motohiro 
2250771fb4d8SLee Schermerhorn /**
2251771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2252771fb4d8SLee Schermerhorn  *
2253771fb4d8SLee Schermerhorn  * @page   - page to be checked
2254771fb4d8SLee Schermerhorn  * @vma    - vm area where page mapped
2255771fb4d8SLee Schermerhorn  * @addr   - virtual address where page mapped
2256771fb4d8SLee Schermerhorn  *
2257771fb4d8SLee Schermerhorn  * Lookup current policy node id for vma,addr and "compare to" page's
2258771fb4d8SLee Schermerhorn  * node id.
2259771fb4d8SLee Schermerhorn  *
2260771fb4d8SLee Schermerhorn  * Returns:
2261771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2262771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2263771fb4d8SLee Schermerhorn  *
2264771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2265771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2266771fb4d8SLee Schermerhorn  */
2267771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2268771fb4d8SLee Schermerhorn {
2269771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2270771fb4d8SLee Schermerhorn 	struct zone *zone;
2271771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2272771fb4d8SLee Schermerhorn 	unsigned long pgoff;
2273771fb4d8SLee Schermerhorn 	int polnid = -1;
2274771fb4d8SLee Schermerhorn 	int ret = -1;
2275771fb4d8SLee Schermerhorn 
2276771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2277771fb4d8SLee Schermerhorn 
2278771fb4d8SLee Schermerhorn 	pol = get_vma_policy(current, vma, addr);
2279771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2280771fb4d8SLee Schermerhorn 		goto out;
2281771fb4d8SLee Schermerhorn 
2282771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2283771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2284771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2285771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2286771fb4d8SLee Schermerhorn 
2287771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2288771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2289771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2290771fb4d8SLee Schermerhorn 		break;
2291771fb4d8SLee Schermerhorn 
2292771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2293771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2294771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2295771fb4d8SLee Schermerhorn 		else
2296771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2297771fb4d8SLee Schermerhorn 		break;
2298771fb4d8SLee Schermerhorn 
2299771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2300771fb4d8SLee Schermerhorn 		/*
2301771fb4d8SLee Schermerhorn 		 * allows binding to multiple nodes.
2302771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2303771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2304771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2305771fb4d8SLee Schermerhorn 		 */
2306771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2307771fb4d8SLee Schermerhorn 			goto out;
2308771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2309771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2310771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2311771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2312771fb4d8SLee Schermerhorn 		polnid = zone->node;
2313771fb4d8SLee Schermerhorn 		break;
2314771fb4d8SLee Schermerhorn 
2315771fb4d8SLee Schermerhorn 	default:
2316771fb4d8SLee Schermerhorn 		BUG();
2317771fb4d8SLee Schermerhorn 	}
2318*5606e387SMel Gorman 
2319*5606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2320*5606e387SMel Gorman 	if (pol->flags & MPOL_F_MORON)
2321*5606e387SMel Gorman 		polnid = numa_node_id();
2322*5606e387SMel Gorman 
2323771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2324771fb4d8SLee Schermerhorn 		ret = polnid;
2325771fb4d8SLee Schermerhorn out:
2326771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2327771fb4d8SLee Schermerhorn 
2328771fb4d8SLee Schermerhorn 	return ret;
2329771fb4d8SLee Schermerhorn }
2330771fb4d8SLee Schermerhorn 
23311da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23321da177e4SLinus Torvalds {
2333140d5a49SPaul Mundt 	pr_debug("deleting %lx-l%lx\n", n->start, n->end);
23341da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
233563f74ca2SKOSAKI Motohiro 	sp_free(n);
23361da177e4SLinus Torvalds }
23371da177e4SLinus Torvalds 
2338dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2339dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23401da177e4SLinus Torvalds {
2341869833f2SKOSAKI Motohiro 	struct sp_node *n;
2342869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23431da177e4SLinus Torvalds 
2344869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23451da177e4SLinus Torvalds 	if (!n)
23461da177e4SLinus Torvalds 		return NULL;
2347869833f2SKOSAKI Motohiro 
2348869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2349869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2350869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2351869833f2SKOSAKI Motohiro 		return NULL;
2352869833f2SKOSAKI Motohiro 	}
2353869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
2354869833f2SKOSAKI Motohiro 
23551da177e4SLinus Torvalds 	n->start = start;
23561da177e4SLinus Torvalds 	n->end = end;
2357869833f2SKOSAKI Motohiro 	n->policy = newpol;
2358869833f2SKOSAKI Motohiro 
23591da177e4SLinus Torvalds 	return n;
23601da177e4SLinus Torvalds }
23611da177e4SLinus Torvalds 
23621da177e4SLinus Torvalds /* Replace a policy range. */
23631da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23641da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23651da177e4SLinus Torvalds {
2366b22d127aSMel Gorman 	struct sp_node *n;
2367b22d127aSMel Gorman 	int ret = 0;
23681da177e4SLinus Torvalds 
2369b22d127aSMel Gorman 	mutex_lock(&sp->mutex);
23701da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23711da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23721da177e4SLinus Torvalds 	while (n && n->start < end) {
23731da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23741da177e4SLinus Torvalds 		if (n->start >= start) {
23751da177e4SLinus Torvalds 			if (n->end <= end)
23761da177e4SLinus Torvalds 				sp_delete(sp, n);
23771da177e4SLinus Torvalds 			else
23781da177e4SLinus Torvalds 				n->start = end;
23791da177e4SLinus Torvalds 		} else {
23801da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
23811da177e4SLinus Torvalds 			if (n->end > end) {
2382b22d127aSMel Gorman 				struct sp_node *new2;
23831da177e4SLinus Torvalds 				new2 = sp_alloc(end, n->end, n->policy);
2384b22d127aSMel Gorman 				if (!new2) {
2385b22d127aSMel Gorman 					ret = -ENOMEM;
2386b22d127aSMel Gorman 					goto out;
23871da177e4SLinus Torvalds 				}
23881da177e4SLinus Torvalds 				n->end = start;
23891da177e4SLinus Torvalds 				sp_insert(sp, new2);
23901da177e4SLinus Torvalds 				break;
23911da177e4SLinus Torvalds 			} else
23921da177e4SLinus Torvalds 				n->end = start;
23931da177e4SLinus Torvalds 		}
23941da177e4SLinus Torvalds 		if (!next)
23951da177e4SLinus Torvalds 			break;
23961da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
23971da177e4SLinus Torvalds 	}
23981da177e4SLinus Torvalds 	if (new)
23991da177e4SLinus Torvalds 		sp_insert(sp, new);
2400b22d127aSMel Gorman out:
2401b22d127aSMel Gorman 	mutex_unlock(&sp->mutex);
2402b22d127aSMel Gorman 	return ret;
24031da177e4SLinus Torvalds }
24041da177e4SLinus Torvalds 
240571fe804bSLee Schermerhorn /**
240671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
240771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
240871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
240971fe804bSLee Schermerhorn  *
241071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
241171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
241271fe804bSLee Schermerhorn  * This must be released on exit.
24134bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() calls and we can use GFP_KERNEL.
241471fe804bSLee Schermerhorn  */
241571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24167339ff83SRobin Holt {
241758568d2aSMiao Xie 	int ret;
241858568d2aSMiao Xie 
241971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2420b22d127aSMel Gorman 	mutex_init(&sp->mutex);
24217339ff83SRobin Holt 
242271fe804bSLee Schermerhorn 	if (mpol) {
24237339ff83SRobin Holt 		struct vm_area_struct pvma;
242471fe804bSLee Schermerhorn 		struct mempolicy *new;
24254bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24267339ff83SRobin Holt 
24274bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24285c0c1654SLee Schermerhorn 			goto put_mpol;
242971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
243071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
243115d77835SLee Schermerhorn 		if (IS_ERR(new))
24320cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
243358568d2aSMiao Xie 
243458568d2aSMiao Xie 		task_lock(current);
24354bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
243658568d2aSMiao Xie 		task_unlock(current);
243715d77835SLee Schermerhorn 		if (ret)
24385c0c1654SLee Schermerhorn 			goto put_new;
243971fe804bSLee Schermerhorn 
244071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24417339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
244271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
244371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
244415d77835SLee Schermerhorn 
24455c0c1654SLee Schermerhorn put_new:
244671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24470cae3457SDan Carpenter free_scratch:
24484bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24495c0c1654SLee Schermerhorn put_mpol:
24505c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24517339ff83SRobin Holt 	}
24527339ff83SRobin Holt }
24537339ff83SRobin Holt 
24541da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24551da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24561da177e4SLinus Torvalds {
24571da177e4SLinus Torvalds 	int err;
24581da177e4SLinus Torvalds 	struct sp_node *new = NULL;
24591da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
24601da177e4SLinus Torvalds 
2461028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
24621da177e4SLinus Torvalds 		 vma->vm_pgoff,
246345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2464028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2465dfcd3c0dSAndi Kleen 		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
24661da177e4SLinus Torvalds 
24671da177e4SLinus Torvalds 	if (npol) {
24681da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
24691da177e4SLinus Torvalds 		if (!new)
24701da177e4SLinus Torvalds 			return -ENOMEM;
24711da177e4SLinus Torvalds 	}
24721da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
24731da177e4SLinus Torvalds 	if (err && new)
247463f74ca2SKOSAKI Motohiro 		sp_free(new);
24751da177e4SLinus Torvalds 	return err;
24761da177e4SLinus Torvalds }
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
24791da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
24801da177e4SLinus Torvalds {
24811da177e4SLinus Torvalds 	struct sp_node *n;
24821da177e4SLinus Torvalds 	struct rb_node *next;
24831da177e4SLinus Torvalds 
24841da177e4SLinus Torvalds 	if (!p->root.rb_node)
24851da177e4SLinus Torvalds 		return;
2486b22d127aSMel Gorman 	mutex_lock(&p->mutex);
24871da177e4SLinus Torvalds 	next = rb_first(&p->root);
24881da177e4SLinus Torvalds 	while (next) {
24891da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24901da177e4SLinus Torvalds 		next = rb_next(&n->nd);
249163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
24921da177e4SLinus Torvalds 	}
2493b22d127aSMel Gorman 	mutex_unlock(&p->mutex);
24941da177e4SLinus Torvalds }
24951da177e4SLinus Torvalds 
24961da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
24971da177e4SLinus Torvalds void __init numa_policy_init(void)
24981da177e4SLinus Torvalds {
2499b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2500b71636e2SPaul Mundt 	unsigned long largest = 0;
2501b71636e2SPaul Mundt 	int nid, prefer = 0;
2502b71636e2SPaul Mundt 
25031da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25041da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
250520c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25061da177e4SLinus Torvalds 
25071da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25081da177e4SLinus Torvalds 				     sizeof(struct sp_node),
250920c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
25101da177e4SLinus Torvalds 
2511*5606e387SMel Gorman 	for_each_node(nid) {
2512*5606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
2513*5606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
2514*5606e387SMel Gorman 			.mode = MPOL_PREFERRED,
2515*5606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2516*5606e387SMel Gorman 			.v = { .preferred_node = nid, },
2517*5606e387SMel Gorman 		};
2518*5606e387SMel Gorman 	}
2519*5606e387SMel Gorman 
2520b71636e2SPaul Mundt 	/*
2521b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2522b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2523b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2524b71636e2SPaul Mundt 	 */
2525b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
252656bbd65dSChristoph Lameter 	for_each_node_state(nid, N_HIGH_MEMORY) {
2527b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
25281da177e4SLinus Torvalds 
2529b71636e2SPaul Mundt 		/* Preserve the largest node */
2530b71636e2SPaul Mundt 		if (largest < total_pages) {
2531b71636e2SPaul Mundt 			largest = total_pages;
2532b71636e2SPaul Mundt 			prefer = nid;
2533b71636e2SPaul Mundt 		}
2534b71636e2SPaul Mundt 
2535b71636e2SPaul Mundt 		/* Interleave this node? */
2536b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2537b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2538b71636e2SPaul Mundt 	}
2539b71636e2SPaul Mundt 
2540b71636e2SPaul Mundt 	/* All too small, use the largest */
2541b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2542b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2543b71636e2SPaul Mundt 
2544028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
25451da177e4SLinus Torvalds 		printk("numa_policy_init: interleaving failed\n");
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
25488bccd85fSChristoph Lameter /* Reset policy of current process to default */
25491da177e4SLinus Torvalds void numa_default_policy(void)
25501da177e4SLinus Torvalds {
2551028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
25521da177e4SLinus Torvalds }
255368860ec1SPaul Jackson 
25544225399aSPaul Jackson /*
2555095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2556095f1fc4SLee Schermerhorn  */
2557095f1fc4SLee Schermerhorn 
2558095f1fc4SLee Schermerhorn /*
2559fc36b8d3SLee Schermerhorn  * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
25603f226aa1SLee Schermerhorn  * Used only for mpol_parse_str() and mpol_to_str()
25611a75a6c8SChristoph Lameter  */
2562345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2563345ace9cSLee Schermerhorn {
2564345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2565345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2566345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2567345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2568d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2569345ace9cSLee Schermerhorn };
25701a75a6c8SChristoph Lameter 
2571095f1fc4SLee Schermerhorn 
2572095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2573095f1fc4SLee Schermerhorn /**
2574095f1fc4SLee Schermerhorn  * mpol_parse_str - parse string to mempolicy
2575095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
257671fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
257771fe804bSLee Schermerhorn  * @no_context:  flag whether to "contextualize" the mempolicy
2578095f1fc4SLee Schermerhorn  *
2579095f1fc4SLee Schermerhorn  * Format of input:
2580095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2581095f1fc4SLee Schermerhorn  *
258271fe804bSLee Schermerhorn  * if @no_context is true, save the input nodemask in w.user_nodemask in
258371fe804bSLee Schermerhorn  * the returned mempolicy.  This will be used to "clone" the mempolicy in
258471fe804bSLee Schermerhorn  * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
258571fe804bSLee Schermerhorn  * mount option.  Note that if 'static' or 'relative' mode flags were
258671fe804bSLee Schermerhorn  * specified, the input nodemask will already have been saved.  Saving
258771fe804bSLee Schermerhorn  * it again is redundant, but safe.
258871fe804bSLee Schermerhorn  *
258971fe804bSLee Schermerhorn  * On success, returns 0, else 1
2590095f1fc4SLee Schermerhorn  */
259171fe804bSLee Schermerhorn int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2592095f1fc4SLee Schermerhorn {
259371fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2594b4652e84SLee Schermerhorn 	unsigned short mode;
259571fe804bSLee Schermerhorn 	unsigned short uninitialized_var(mode_flags);
259671fe804bSLee Schermerhorn 	nodemask_t nodes;
2597095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2598095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2599095f1fc4SLee Schermerhorn 	int err = 1;
2600095f1fc4SLee Schermerhorn 
2601095f1fc4SLee Schermerhorn 	if (nodelist) {
2602095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2603095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
260471fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2605095f1fc4SLee Schermerhorn 			goto out;
260671fe804bSLee Schermerhorn 		if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2607095f1fc4SLee Schermerhorn 			goto out;
260871fe804bSLee Schermerhorn 	} else
260971fe804bSLee Schermerhorn 		nodes_clear(nodes);
261071fe804bSLee Schermerhorn 
2611095f1fc4SLee Schermerhorn 	if (flags)
2612095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2613095f1fc4SLee Schermerhorn 
2614479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2615345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2616095f1fc4SLee Schermerhorn 			break;
2617095f1fc4SLee Schermerhorn 		}
2618095f1fc4SLee Schermerhorn 	}
2619a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2620095f1fc4SLee Schermerhorn 		goto out;
2621095f1fc4SLee Schermerhorn 
262271fe804bSLee Schermerhorn 	switch (mode) {
2623095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
262471fe804bSLee Schermerhorn 		/*
262571fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
262671fe804bSLee Schermerhorn 		 */
2627095f1fc4SLee Schermerhorn 		if (nodelist) {
2628095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2629095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2630095f1fc4SLee Schermerhorn 				rest++;
2631926f2ae0SKOSAKI Motohiro 			if (*rest)
2632926f2ae0SKOSAKI Motohiro 				goto out;
2633095f1fc4SLee Schermerhorn 		}
2634095f1fc4SLee Schermerhorn 		break;
2635095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2636095f1fc4SLee Schermerhorn 		/*
2637095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2638095f1fc4SLee Schermerhorn 		 */
2639095f1fc4SLee Schermerhorn 		if (!nodelist)
264071fe804bSLee Schermerhorn 			nodes = node_states[N_HIGH_MEMORY];
26413f226aa1SLee Schermerhorn 		break;
264271fe804bSLee Schermerhorn 	case MPOL_LOCAL:
26433f226aa1SLee Schermerhorn 		/*
264471fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
26453f226aa1SLee Schermerhorn 		 */
264671fe804bSLee Schermerhorn 		if (nodelist)
26473f226aa1SLee Schermerhorn 			goto out;
264871fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
26493f226aa1SLee Schermerhorn 		break;
2650413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2651413b43deSRavikiran G Thirumalai 		/*
2652413b43deSRavikiran G Thirumalai 		 * Insist on a empty nodelist
2653413b43deSRavikiran G Thirumalai 		 */
2654413b43deSRavikiran G Thirumalai 		if (!nodelist)
2655413b43deSRavikiran G Thirumalai 			err = 0;
2656413b43deSRavikiran G Thirumalai 		goto out;
2657d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
265871fe804bSLee Schermerhorn 		/*
2659d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
266071fe804bSLee Schermerhorn 		 */
2661d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2662d69b2e63SKOSAKI Motohiro 			goto out;
2663095f1fc4SLee Schermerhorn 	}
2664095f1fc4SLee Schermerhorn 
266571fe804bSLee Schermerhorn 	mode_flags = 0;
2666095f1fc4SLee Schermerhorn 	if (flags) {
2667095f1fc4SLee Schermerhorn 		/*
2668095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2669095f1fc4SLee Schermerhorn 		 * mode flags.
2670095f1fc4SLee Schermerhorn 		 */
2671095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
267271fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2673095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
267471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2675095f1fc4SLee Schermerhorn 		else
2676926f2ae0SKOSAKI Motohiro 			goto out;
2677095f1fc4SLee Schermerhorn 	}
267871fe804bSLee Schermerhorn 
267971fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
268071fe804bSLee Schermerhorn 	if (IS_ERR(new))
2681926f2ae0SKOSAKI Motohiro 		goto out;
2682926f2ae0SKOSAKI Motohiro 
2683e17f74afSLee Schermerhorn 	if (no_context) {
2684e17f74afSLee Schermerhorn 		/* save for contextualization */
2685e17f74afSLee Schermerhorn 		new->w.user_nodemask = nodes;
2686e17f74afSLee Schermerhorn 	} else {
268758568d2aSMiao Xie 		int ret;
26884bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
26894bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
269058568d2aSMiao Xie 			task_lock(current);
26914bfc4495SKAMEZAWA Hiroyuki 			ret = mpol_set_nodemask(new, &nodes, scratch);
269258568d2aSMiao Xie 			task_unlock(current);
26934bfc4495SKAMEZAWA Hiroyuki 		} else
26944bfc4495SKAMEZAWA Hiroyuki 			ret = -ENOMEM;
26954bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
26964bfc4495SKAMEZAWA Hiroyuki 		if (ret) {
26974bfc4495SKAMEZAWA Hiroyuki 			mpol_put(new);
2698926f2ae0SKOSAKI Motohiro 			goto out;
2699926f2ae0SKOSAKI Motohiro 		}
2700926f2ae0SKOSAKI Motohiro 	}
2701926f2ae0SKOSAKI Motohiro 	err = 0;
270271fe804bSLee Schermerhorn 
2703095f1fc4SLee Schermerhorn out:
2704095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2705095f1fc4SLee Schermerhorn 	if (nodelist)
2706095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2707095f1fc4SLee Schermerhorn 	if (flags)
2708095f1fc4SLee Schermerhorn 		*--flags = '=';
270971fe804bSLee Schermerhorn 	if (!err)
271071fe804bSLee Schermerhorn 		*mpol = new;
2711095f1fc4SLee Schermerhorn 	return err;
2712095f1fc4SLee Schermerhorn }
2713095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2714095f1fc4SLee Schermerhorn 
271571fe804bSLee Schermerhorn /**
271671fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
271771fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
271871fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
271971fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
272071fe804bSLee Schermerhorn  * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
272171fe804bSLee Schermerhorn  *
27221a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
27231a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
27241a75a6c8SChristoph Lameter  * or an error (negative)
27251a75a6c8SChristoph Lameter  */
272671fe804bSLee Schermerhorn int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
27271a75a6c8SChristoph Lameter {
27281a75a6c8SChristoph Lameter 	char *p = buffer;
27291a75a6c8SChristoph Lameter 	int l;
27301a75a6c8SChristoph Lameter 	nodemask_t nodes;
2731bea904d5SLee Schermerhorn 	unsigned short mode;
2732f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
27331a75a6c8SChristoph Lameter 
27342291990aSLee Schermerhorn 	/*
27352291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
27362291990aSLee Schermerhorn 	 */
27372291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
27382291990aSLee Schermerhorn 
2739bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2740bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2741bea904d5SLee Schermerhorn 	else
2742bea904d5SLee Schermerhorn 		mode = pol->mode;
2743bea904d5SLee Schermerhorn 
27441a75a6c8SChristoph Lameter 	switch (mode) {
27451a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
27461a75a6c8SChristoph Lameter 		nodes_clear(nodes);
27471a75a6c8SChristoph Lameter 		break;
27481a75a6c8SChristoph Lameter 
27491a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
27501a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2751fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
275253f2556bSLee Schermerhorn 			mode = MPOL_LOCAL;	/* pseudo-policy */
275353f2556bSLee Schermerhorn 		else
2754fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
27551a75a6c8SChristoph Lameter 		break;
27561a75a6c8SChristoph Lameter 
27571a75a6c8SChristoph Lameter 	case MPOL_BIND:
275819770b32SMel Gorman 		/* Fall through */
27591a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
276071fe804bSLee Schermerhorn 		if (no_context)
276171fe804bSLee Schermerhorn 			nodes = pol->w.user_nodemask;
276271fe804bSLee Schermerhorn 		else
27631a75a6c8SChristoph Lameter 			nodes = pol->v.nodes;
27641a75a6c8SChristoph Lameter 		break;
27651a75a6c8SChristoph Lameter 
27661a75a6c8SChristoph Lameter 	default:
276780de7c31SDave Jones 		return -EINVAL;
27681a75a6c8SChristoph Lameter 	}
27691a75a6c8SChristoph Lameter 
2770345ace9cSLee Schermerhorn 	l = strlen(policy_modes[mode]);
27711a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
27721a75a6c8SChristoph Lameter 		return -ENOSPC;
27731a75a6c8SChristoph Lameter 
2774345ace9cSLee Schermerhorn 	strcpy(p, policy_modes[mode]);
27751a75a6c8SChristoph Lameter 	p += l;
27761a75a6c8SChristoph Lameter 
2777fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2778f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2779f5b087b5SDavid Rientjes 			return -ENOSPC;
2780f5b087b5SDavid Rientjes 		*p++ = '=';
2781f5b087b5SDavid Rientjes 
27822291990aSLee Schermerhorn 		/*
27832291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
27842291990aSLee Schermerhorn 		 */
2785f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
27862291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
27872291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
27882291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2789f5b087b5SDavid Rientjes 	}
2790f5b087b5SDavid Rientjes 
27911a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
27921a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
27931a75a6c8SChristoph Lameter 			return -ENOSPC;
2794095f1fc4SLee Schermerhorn 		*p++ = ':';
27951a75a6c8SChristoph Lameter 	 	p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
27961a75a6c8SChristoph Lameter 	}
27971a75a6c8SChristoph Lameter 	return p - buffer;
27981a75a6c8SChristoph Lameter }
2799