xref: /openbmc/linux/mm/mempolicy.c (revision 1da6f0e1b316d0215989fe4d7c657edead1fdea7)
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the now-disallowed nodes. In this way we avoid ever
	 * being left with no node to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
	 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
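
/*
 * Worked example (editor's sketch, not in the original source): with
 * *orig = {0,2} and *rel = {4,5,6} (weight 3), nodes_fold() wraps orig
 * modulo 3 into tmp = {0,2}, and nodes_onto() then maps bit i of tmp to
 * the i-th set bit of *rel, so *ret = {4,6}.  This is how
 * MPOL_F_RELATIVE_NODES policies are remapped onto whatever nodes are
 * currently allowed.
 */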

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}
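
/*
 * Example (editor's sketch): with cpuset_current_mems_allowed = {0,1},
 * an MPOL_BIND request for nodes {2,3} leaves nsc->mask2 empty and the
 * create() hook fails with -EINVAL, whereas the same request with
 * MPOL_F_RELATIVE_NODES is folded onto the allowed set and succeeds
 * as {0,1}.
 */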

/*
 * This function just creates a new policy, does some sanity checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
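
/*
 * Typical two-phase construction (editor's sketch; mirrors
 * do_set_mempolicy() below):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		ret = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */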

/* Slow path of an mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

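/*
 * Example (editor's sketch): an MPOL_INTERLEAVE policy created with
 * MPOL_F_STATIC_NODES over {0,1} interleaves over just {1} once the
 * cpuset moves to {1,2}, and falls back to the whole new mask via the
 * nodes_empty() check above once the cpuset moves to {2,3}.
 */
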
static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the now-disallowed nodes. In this way we avoid ever being left with
 * no node to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates the task's mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};
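
/*
 * Note that MPOL_LOCAL needs no entry here: mpol_new() rewrites it to
 * MPOL_PREFERRED with an empty nodemask, which mpol_set_nodemask()
 * turns into MPOL_F_LOCAL (local allocation).
 */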

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages, checking if they match certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
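		/*
		 * MPOL_MF_INVERT flips the sense of the test below:
		 * without it, pages whose node is in *nodes are acted
		 * upon (as migrate_to_node() uses it); with it, pages
		 * on nodes outside the mask are.
		 */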
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (is_vm_hugetlb_page(vma))
			goto next;

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}
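
/*
 * On success check_range() returns the first vma in the range so the
 * caller can reuse it; on failure it returns an ERR_PTR().
 */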

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
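
/*
 * Example (editor's sketch): mbind()ing the middle [B,C) of a single
 * VMA [A,D) with a different policy splits it at B and again at C, then
 * applies the new policy to the middle piece via vma_replace_policy().
 * vma_merge() above re-coalesces neighbours whenever the resulting
 * policies turn out to be equal.
 */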

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
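
/*
 * Userspace reaches this via the set_mempolicy(2) syscall, e.g.
 * (editor's sketch, error handling omitted):
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 */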

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
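
/*
 * Example query (editor's sketch): from userspace,
 *
 *	int node;
 *
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * returns in 'node' the node that the page backing 'addr' currently
 * resides on, via the lookup_node() path above.
 */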
9741da177e4SLinus Torvalds 
975b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9768bccd85fSChristoph Lameter /*
9776ce3c4c0SChristoph Lameter  * page migration
9786ce3c4c0SChristoph Lameter  */
979fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
980fc301289SChristoph Lameter 				unsigned long flags)
9816ce3c4c0SChristoph Lameter {
9826ce3c4c0SChristoph Lameter 	/*
983fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9846ce3c4c0SChristoph Lameter 	 */
98562695a84SNick Piggin 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
98662695a84SNick Piggin 		if (!isolate_lru_page(page)) {
98762695a84SNick Piggin 			list_add_tail(&page->lru, pagelist);
9886d9c285aSKOSAKI Motohiro 			inc_zone_page_state(page, NR_ISOLATED_ANON +
9896d9c285aSKOSAKI Motohiro 					    page_is_file_cache(page));
99062695a84SNick Piggin 		}
99162695a84SNick Piggin 	}
9926ce3c4c0SChristoph Lameter }
9936ce3c4c0SChristoph Lameter 
994742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
99595a402c3SChristoph Lameter {
9966484eb3eSMel Gorman 	return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
99795a402c3SChristoph Lameter }

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_lru_pages(&pagelist);
	}

	return err;
}
10317e2ab150SChristoph Lameter 
10327e2ab150SChristoph Lameter /*
10337e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10347e2ab150SChristoph Lameter  * layout as much as possible.
103539743889SChristoph Lameter  *
103639743889SChristoph Lameter  * Returns the number of page that could not be moved.
103739743889SChristoph Lameter  */
10380ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10390ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
104039743889SChristoph Lameter {
10417e2ab150SChristoph Lameter 	int busy = 0;
10420aedadf9SChristoph Lameter 	int err;
10437e2ab150SChristoph Lameter 	nodemask_t tmp;
104439743889SChristoph Lameter 
10450aedadf9SChristoph Lameter 	err = migrate_prep();
10460aedadf9SChristoph Lameter 	if (err)
10470aedadf9SChristoph Lameter 		return err;
10480aedadf9SChristoph Lameter 
104939743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1050d4984711SChristoph Lameter 
10510ce72d4fSAndrew Morton 	err = migrate_vmas(mm, from, to, flags);
10527b2259b3SChristoph Lameter 	if (err)
10537b2259b3SChristoph Lameter 		goto out;
10547b2259b3SChristoph Lameter 
10557e2ab150SChristoph Lameter 	/*
10567e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10577e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10587e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10597e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10607e2ab150SChristoph Lameter 	 *
10617e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10627e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10637e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10647e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10657e2ab150SChristoph Lameter 	 *
10667e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10677e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10687e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10697e2ab150SChristoph Lameter 	 *
10707e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10717e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10727e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10737e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10747e2ab150SChristoph Lameter 	 * before migrating outgoing memory off that same node.
10757e2ab150SChristoph Lameter 	 *
10767e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10777e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10787e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10797e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1080ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
10817e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10827e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10837e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10847e2ab150SChristoph Lameter 	 */
10857e2ab150SChristoph Lameter 
10860ce72d4fSAndrew Morton 	tmp = *from;
10877e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10887e2ab150SChristoph Lameter 		int s, d;
10897e2ab150SChristoph Lameter 		int source = -1;
10907e2ab150SChristoph Lameter 		int dest = 0;
10917e2ab150SChristoph Lameter 
10927e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10934a5b18ccSLarry Woodman 
10944a5b18ccSLarry Woodman 			/*
10954a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10964a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10974a5b18ccSLarry Woodman 			 * threads and memory areas.
10984a5b18ccSLarry Woodman 			 *
10994a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11004a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11014a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
11024a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11034a5b18ccSLarry Woodman 			 * mask.
11044a5b18ccSLarry Woodman 			 *
11054a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11064a5b18ccSLarry Woodman 			 *          [0-7]  -> [3,4,5] moves only 0,1,2,6,7.
11074a5b18ccSLarry Woodman 			 */
11084a5b18ccSLarry Woodman 
11090ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11100ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11114a5b18ccSLarry Woodman 				continue;
11124a5b18ccSLarry Woodman 
11130ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11147e2ab150SChristoph Lameter 			if (s == d)
11157e2ab150SChristoph Lameter 				continue;
11167e2ab150SChristoph Lameter 
11177e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11187e2ab150SChristoph Lameter 			dest = d;
11197e2ab150SChristoph Lameter 
11207e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11217e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11227e2ab150SChristoph Lameter 				break;
11237e2ab150SChristoph Lameter 		}
11247e2ab150SChristoph Lameter 		if (source == -1)
11257e2ab150SChristoph Lameter 			break;
11267e2ab150SChristoph Lameter 
11277e2ab150SChristoph Lameter 		node_clear(source, tmp);
11287e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11297e2ab150SChristoph Lameter 		if (err > 0)
11307e2ab150SChristoph Lameter 			busy += err;
11317e2ab150SChristoph Lameter 		if (err < 0)
11327e2ab150SChristoph Lameter 			break;
113339743889SChristoph Lameter 	}
11347b2259b3SChristoph Lameter out:
113539743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11367e2ab150SChristoph Lameter 	if (err < 0)
11377e2ab150SChristoph Lameter 		return err;
11387e2ab150SChristoph Lameter 	return busy;
1139b20a3503SChristoph Lameter 
114039743889SChristoph Lameter }
114139743889SChristoph Lameter 
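/*
 * Illustrative sketch (not kernel code) of the pair-picking scan in
 * do_migrate_pages() above, reduced to plain bitmask arithmetic for a
 * machine with at most BITS_PER_LONG nodes.  remap() stands in for
 * node_remap() (map the n-th set bit of 'from' to the n-th set bit of
 * 'to') and move_all_pages() is a hypothetical helper; the
 * unequal-weight special case is omitted for brevity.
 *
 *	unsigned long tmp = from;
 *
 *	while (tmp) {
 *		int s, d, source = -1, dest = 0;
 *
 *		for (s = 0; s < BITS_PER_LONG; s++) {
 *			if (!(tmp & (1UL << s)))
 *				continue;
 *			d = remap(s, from, to);
 *			if (s == d)
 *				continue;
 *			source = s;
 *			dest = d;
 *			if (!(tmp & (1UL << dest)))
 *				break;	/* empty slot, best case */
 *		}
 *		if (source == -1)
 *			break;
 *		tmp &= ~(1UL << source);
 *		move_all_pages(source, dest);
 *	}
 */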
11423ad33b24SLee Schermerhorn /*
11433ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
11443ad33b24SLee Schermerhorn  * Start by assuming that the page is mapped by the vma pointed to by @private.
11453ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11463ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11473ad33b24SLee Schermerhorn  * is in virtual address order.
11483ad33b24SLee Schermerhorn  */
1149742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
115095a402c3SChristoph Lameter {
115195a402c3SChristoph Lameter 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
11523ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
115395a402c3SChristoph Lameter 
11543ad33b24SLee Schermerhorn 	while (vma) {
11553ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11563ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11573ad33b24SLee Schermerhorn 			break;
11583ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11593ad33b24SLee Schermerhorn 	}
11603ad33b24SLee Schermerhorn 
11613ad33b24SLee Schermerhorn 	/*
11623ad33b24SLee Schermerhorn 	 * if !vma, alloc_page_vma() will use task or system default policy
11633ad33b24SLee Schermerhorn 	 */
11643ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
116595a402c3SChristoph Lameter }
1166b20a3503SChristoph Lameter #else
1167b20a3503SChristoph Lameter 
1168b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1169b20a3503SChristoph Lameter 				unsigned long flags)
1170b20a3503SChristoph Lameter {
1171b20a3503SChristoph Lameter }
1172b20a3503SChristoph Lameter 
11730ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11740ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1175b20a3503SChristoph Lameter {
1176b20a3503SChristoph Lameter 	return -ENOSYS;
1177b20a3503SChristoph Lameter }
117895a402c3SChristoph Lameter 
117969939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
118095a402c3SChristoph Lameter {
118195a402c3SChristoph Lameter 	return NULL;
118295a402c3SChristoph Lameter }
1183b20a3503SChristoph Lameter #endif
1184b20a3503SChristoph Lameter 
1185dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1186028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1187028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11886ce3c4c0SChristoph Lameter {
11896ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
11906ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11916ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11926ce3c4c0SChristoph Lameter 	unsigned long end;
11936ce3c4c0SChristoph Lameter 	int err;
11946ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11956ce3c4c0SChristoph Lameter 
1196b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11976ce3c4c0SChristoph Lameter 		return -EINVAL;
119874c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11996ce3c4c0SChristoph Lameter 		return -EPERM;
12006ce3c4c0SChristoph Lameter 
12016ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12026ce3c4c0SChristoph Lameter 		return -EINVAL;
12036ce3c4c0SChristoph Lameter 
12046ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12056ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12066ce3c4c0SChristoph Lameter 
12076ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12086ce3c4c0SChristoph Lameter 	end = start + len;
12096ce3c4c0SChristoph Lameter 
12106ce3c4c0SChristoph Lameter 	if (end < start)
12116ce3c4c0SChristoph Lameter 		return -EINVAL;
12126ce3c4c0SChristoph Lameter 	if (end == start)
12136ce3c4c0SChristoph Lameter 		return 0;
12146ce3c4c0SChristoph Lameter 
1215028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12166ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12176ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12186ce3c4c0SChristoph Lameter 
1219b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1220b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1221b24f53a0SLee Schermerhorn 
12226ce3c4c0SChristoph Lameter 	/*
12236ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operations
12246ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces are okay after all
12256ce3c4c0SChristoph Lameter 	 */
12266ce3c4c0SChristoph Lameter 	if (!new)
12276ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12286ce3c4c0SChristoph Lameter 
1229028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1230028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
123100ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12326ce3c4c0SChristoph Lameter 
12330aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12340aedadf9SChristoph Lameter 
12350aedadf9SChristoph Lameter 		err = migrate_prep();
12360aedadf9SChristoph Lameter 		if (err)
1237b05ca738SKOSAKI Motohiro 			goto mpol_out;
12380aedadf9SChristoph Lameter 	}
12394bfc4495SKAMEZAWA Hiroyuki 	{
12404bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12414bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12426ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
124358568d2aSMiao Xie 			task_lock(current);
12444bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
124558568d2aSMiao Xie 			task_unlock(current);
12464bfc4495SKAMEZAWA Hiroyuki 			if (err)
124758568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12484bfc4495SKAMEZAWA Hiroyuki 		} else
12494bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12504bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12514bfc4495SKAMEZAWA Hiroyuki 	}
1252b05ca738SKOSAKI Motohiro 	if (err)
1253b05ca738SKOSAKI Motohiro 		goto mpol_out;
1254b05ca738SKOSAKI Motohiro 
12556ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
12566ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
12576ce3c4c0SChristoph Lameter 
1258b24f53a0SLee Schermerhorn 	err = PTR_ERR(vma);	/* maybe ... */
1259a720094dSMel Gorman 	if (!IS_ERR(vma))
12609d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12617e2ab150SChristoph Lameter 
1262b24f53a0SLee Schermerhorn 	if (!err) {
1263b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1264b24f53a0SLee Schermerhorn 
1265cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1266b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
126795a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
12687f0f2496SMel Gorman 					(unsigned long)vma,
12699c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1270cf608ac1SMinchan Kim 			if (nr_failed)
1271cf608ac1SMinchan Kim 				putback_lru_pages(&pagelist);
1272cf608ac1SMinchan Kim 		}
12736ce3c4c0SChristoph Lameter 
1274b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12756ce3c4c0SChristoph Lameter 			err = -EIO;
1276ab8a3e14SKOSAKI Motohiro 	} else
1277ab8a3e14SKOSAKI Motohiro 		putback_lru_pages(&pagelist);
1278b20a3503SChristoph Lameter 
12796ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1280b05ca738SKOSAKI Motohiro  mpol_out:
1281f0be3d32SLee Schermerhorn 	mpol_put(new);
12826ce3c4c0SChristoph Lameter 	return err;
12836ce3c4c0SChristoph Lameter }
12846ce3c4c0SChristoph Lameter 
128539743889SChristoph Lameter /*
12868bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12878bccd85fSChristoph Lameter  */
12888bccd85fSChristoph Lameter 
12898bccd85fSChristoph Lameter /* Copy a node mask from user space. */
129039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12918bccd85fSChristoph Lameter 		     unsigned long maxnode)
12928bccd85fSChristoph Lameter {
12938bccd85fSChristoph Lameter 	unsigned long k;
12948bccd85fSChristoph Lameter 	unsigned long nlongs;
12958bccd85fSChristoph Lameter 	unsigned long endmask;
12968bccd85fSChristoph Lameter 
12978bccd85fSChristoph Lameter 	--maxnode;
12988bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12998bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13008bccd85fSChristoph Lameter 		return 0;
1301a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1302636f13c1SChris Wright 		return -EINVAL;
13038bccd85fSChristoph Lameter 
13048bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13058bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13068bccd85fSChristoph Lameter 		endmask = ~0UL;
13078bccd85fSChristoph Lameter 	else
13088bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13098bccd85fSChristoph Lameter 
13108bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
13118bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
13128bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13138bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
13148bccd85fSChristoph Lameter 			return -EINVAL;
13158bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13168bccd85fSChristoph Lameter 			unsigned long t;
13178bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13188bccd85fSChristoph Lameter 				return -EFAULT;
13198bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13208bccd85fSChristoph Lameter 				if (t & endmask)
13218bccd85fSChristoph Lameter 					return -EINVAL;
13228bccd85fSChristoph Lameter 			} else if (t)
13238bccd85fSChristoph Lameter 				return -EINVAL;
13248bccd85fSChristoph Lameter 		}
13258bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13268bccd85fSChristoph Lameter 		endmask = ~0UL;
13278bccd85fSChristoph Lameter 	}
13288bccd85fSChristoph Lameter 
13298bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13308bccd85fSChristoph Lameter 		return -EFAULT;
13318bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13328bccd85fSChristoph Lameter 	return 0;
13338bccd85fSChristoph Lameter }
13348bccd85fSChristoph Lameter 
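/*
 * Worked example for get_nodes() above: a caller passing maxnode == 9
 * asks for bits 0-7 to be honoured.  After --maxnode there are 8 valid
 * bits, so nlongs == 1 and endmask == 0xff, and the final '&= endmask'
 * discards anything set above bit 7.  Only when the user hands in more
 * longs than BITS_TO_LONGS(MAX_NUMNODES) do stray high bits cause
 * -EINVAL instead of being silently masked off.
 */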
13358bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13368bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13378bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13388bccd85fSChristoph Lameter {
13398bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13408bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13418bccd85fSChristoph Lameter 
13428bccd85fSChristoph Lameter 	if (copy > nbytes) {
13438bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13448bccd85fSChristoph Lameter 			return -EINVAL;
13458bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13468bccd85fSChristoph Lameter 			return -EFAULT;
13478bccd85fSChristoph Lameter 		copy = nbytes;
13488bccd85fSChristoph Lameter 	}
13498bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13508bccd85fSChristoph Lameter }
13518bccd85fSChristoph Lameter 
1352938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1353938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1354938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13558bccd85fSChristoph Lameter {
13568bccd85fSChristoph Lameter 	nodemask_t nodes;
13578bccd85fSChristoph Lameter 	int err;
1358028fec41SDavid Rientjes 	unsigned short mode_flags;
13598bccd85fSChristoph Lameter 
1360028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1361028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1362a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1363a3b51e01SDavid Rientjes 		return -EINVAL;
13644c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13654c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13664c50bc01SDavid Rientjes 		return -EINVAL;
13678bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13688bccd85fSChristoph Lameter 	if (err)
13698bccd85fSChristoph Lameter 		return err;
1370028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13718bccd85fSChristoph Lameter }
13728bccd85fSChristoph Lameter 
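/*
 * Example (userspace, illustrative) of reaching sys_mbind() above
 * through the mbind(2) wrapper in <numaif.h>: bind a fresh anonymous
 * mapping to nodes 0 and 1 and migrate any pages already faulted in.
 * Error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 16 * 4096;
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	if (mbind(p, len, MPOL_BIND, &nodes, 8 * sizeof(nodes),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
 *		perror("mbind");
 */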
13738bccd85fSChristoph Lameter /* Set the process memory policy */
1374938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1375938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13768bccd85fSChristoph Lameter {
13778bccd85fSChristoph Lameter 	int err;
13788bccd85fSChristoph Lameter 	nodemask_t nodes;
1379028fec41SDavid Rientjes 	unsigned short flags;
13808bccd85fSChristoph Lameter 
1381028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1382028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1383028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13848bccd85fSChristoph Lameter 		return -EINVAL;
13854c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13864c50bc01SDavid Rientjes 		return -EINVAL;
13878bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13888bccd85fSChristoph Lameter 	if (err)
13898bccd85fSChristoph Lameter 		return err;
1390028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13918bccd85fSChristoph Lameter }
13928bccd85fSChristoph Lameter 
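/*
 * Example (userspace, illustrative) of sys_set_mempolicy() above:
 * interleave all future allocations of the calling task across nodes
 * 0-3.  The nodemask is a plain bit vector and maxnode counts bits.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodes = 0xf;	/* nodes 0,1,2,3 */
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *		perror("set_mempolicy");
 */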
1393938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1394938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1395938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
139639743889SChristoph Lameter {
1397c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1398596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
139939743889SChristoph Lameter 	struct task_struct *task;
140039743889SChristoph Lameter 	nodemask_t task_nodes;
140139743889SChristoph Lameter 	int err;
1402596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1403596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1404596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
140539743889SChristoph Lameter 
1406596d7cfaSKOSAKI Motohiro 	if (!scratch)
1407596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
140839743889SChristoph Lameter 
1409596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1410596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1411596d7cfaSKOSAKI Motohiro 
1412596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
141339743889SChristoph Lameter 	if (err)
1414596d7cfaSKOSAKI Motohiro 		goto out;
1415596d7cfaSKOSAKI Motohiro 
1416596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1417596d7cfaSKOSAKI Motohiro 	if (err)
1418596d7cfaSKOSAKI Motohiro 		goto out;
141939743889SChristoph Lameter 
142039743889SChristoph Lameter 	/* Find the mm_struct */
142155cfaa3cSZeng Zhaoming 	rcu_read_lock();
1422228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
142339743889SChristoph Lameter 	if (!task) {
142455cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1425596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1426596d7cfaSKOSAKI Motohiro 		goto out;
142739743889SChristoph Lameter 	}
14283268c63eSChristoph Lameter 	get_task_struct(task);
142939743889SChristoph Lameter 
1430596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
143139743889SChristoph Lameter 
143239743889SChristoph Lameter 	/*
143339743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
143439743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14357f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
143639743889SChristoph Lameter 	 * userid as the target process.
143739743889SChristoph Lameter 	 */
1438c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1439b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1440b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
144174c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1442c69e8d9cSDavid Howells 		rcu_read_unlock();
144339743889SChristoph Lameter 		err = -EPERM;
14443268c63eSChristoph Lameter 		goto out_put;
144539743889SChristoph Lameter 	}
1446c69e8d9cSDavid Howells 	rcu_read_unlock();
144739743889SChristoph Lameter 
144839743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
144939743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1450596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
145139743889SChristoph Lameter 		err = -EPERM;
14523268c63eSChristoph Lameter 		goto out_put;
145339743889SChristoph Lameter 	}
145439743889SChristoph Lameter 
145501f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14563b42d28bSChristoph Lameter 		err = -EINVAL;
14573268c63eSChristoph Lameter 		goto out_put;
14583b42d28bSChristoph Lameter 	}
14593b42d28bSChristoph Lameter 
146086c3a764SDavid Quigley 	err = security_task_movememory(task);
146186c3a764SDavid Quigley 	if (err)
14623268c63eSChristoph Lameter 		goto out_put;
146386c3a764SDavid Quigley 
14643268c63eSChristoph Lameter 	mm = get_task_mm(task);
14653268c63eSChristoph Lameter 	put_task_struct(task);
1466f2a9ef88SSasha Levin 
1467f2a9ef88SSasha Levin 	if (!mm) {
1468f2a9ef88SSasha Levin 		err = -EINVAL;
1469f2a9ef88SSasha Levin 		goto out;
1470f2a9ef88SSasha Levin 	}
1471f2a9ef88SSasha Levin 
1472596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
147374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14743268c63eSChristoph Lameter 
147539743889SChristoph Lameter 	mmput(mm);
14763268c63eSChristoph Lameter out:
1477596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1478596d7cfaSKOSAKI Motohiro 
147939743889SChristoph Lameter 	return err;
14803268c63eSChristoph Lameter 
14813268c63eSChristoph Lameter out_put:
14823268c63eSChristoph Lameter 	put_task_struct(task);
14833268c63eSChristoph Lameter 	goto out;
14843268c63eSChristoph Lameter 
148539743889SChristoph Lameter }
148639743889SChristoph Lameter 
148739743889SChristoph Lameter 
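/*
 * Example (userspace, illustrative) of sys_migrate_pages() above:
 * move a target task's pages from node 0 to node 2.  'pid' is assumed
 * given, and the caller needs the permissions checked in the syscall
 * (matching uid or CAP_SYS_NICE).  A positive return value is the
 * number of pages that could not be moved.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long old_mask = 1UL << 0;
 *	unsigned long new_mask = 1UL << 2;
 *	long left = migrate_pages(pid, 8 * sizeof(old_mask),
 *				  &old_mask, &new_mask);
 *
 *	if (left < 0)
 *		perror("migrate_pages");
 *	else if (left > 0)
 *		fprintf(stderr, "%ld pages not moved\n", left);
 */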
14888bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1489938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1490938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1491938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14928bccd85fSChristoph Lameter {
1493dbcb0f19SAdrian Bunk 	int err;
1494dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14958bccd85fSChristoph Lameter 	nodemask_t nodes;
14968bccd85fSChristoph Lameter 
14978bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14988bccd85fSChristoph Lameter 		return -EINVAL;
14998bccd85fSChristoph Lameter 
15008bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15018bccd85fSChristoph Lameter 
15028bccd85fSChristoph Lameter 	if (err)
15038bccd85fSChristoph Lameter 		return err;
15048bccd85fSChristoph Lameter 
15058bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15068bccd85fSChristoph Lameter 		return -EFAULT;
15078bccd85fSChristoph Lameter 
15088bccd85fSChristoph Lameter 	if (nmask)
15098bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15108bccd85fSChristoph Lameter 
15118bccd85fSChristoph Lameter 	return err;
15128bccd85fSChristoph Lameter }
15138bccd85fSChristoph Lameter 
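/*
 * Example (userspace, illustrative) of sys_get_mempolicy() above:
 * query the policy in force at address 'addr' (assumed given).  Note
 * the check at the top of the syscall: when a nodemask is requested,
 * maxnode must cover at least MAX_NUMNODES bits, so the buffer below
 * is sized for an assumed kernel limit of 1024 nodes.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int mode;
 *	unsigned long nodes[1024 / (8 * sizeof(unsigned long))] = { 0 };
 *
 *	if (get_mempolicy(&mode, nodes, 1024, addr, MPOL_F_ADDR) == 0)
 *		printf("mode %d, first mask word %#lx\n", mode, nodes[0]);
 */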
15141da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15151da177e4SLinus Torvalds 
15161da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
15171da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
15181da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
15191da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds 	long err;
15221da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15231da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15241da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15251da177e4SLinus Torvalds 
15261da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15271da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15281da177e4SLinus Torvalds 
15291da177e4SLinus Torvalds 	if (nmask)
15301da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15311da177e4SLinus Torvalds 
15321da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	if (!err && nmask) {
15352bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15362bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15372bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15381da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15391da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15401da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds 	return err;
15441da177e4SLinus Torvalds }
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
15471da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
15481da177e4SLinus Torvalds {
15491da177e4SLinus Torvalds 	long err = 0;
15501da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15511da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15521da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15531da177e4SLinus Torvalds 
15541da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15551da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15561da177e4SLinus Torvalds 
15571da177e4SLinus Torvalds 	if (nmask) {
15581da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15591da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15601da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15611da177e4SLinus Torvalds 	}
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	if (err)
15641da177e4SLinus Torvalds 		return -EFAULT;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15671da177e4SLinus Torvalds }
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
15701da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
15711da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
15721da177e4SLinus Torvalds {
15731da177e4SLinus Torvalds 	long err = 0;
15741da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15751da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1576dfcd3c0dSAndi Kleen 	nodemask_t bm;
15771da177e4SLinus Torvalds 
15781da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15791da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15801da177e4SLinus Torvalds 
15811da177e4SLinus Torvalds 	if (nmask) {
1582dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15831da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1584dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15851da177e4SLinus Torvalds 	}
15861da177e4SLinus Torvalds 
15871da177e4SLinus Torvalds 	if (err)
15881da177e4SLinus Torvalds 		return -EFAULT;
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15911da177e4SLinus Torvalds }
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds #endif
15941da177e4SLinus Torvalds 
1595480eccf9SLee Schermerhorn /*
1596480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1597480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1598480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1599480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1600480eccf9SLee Schermerhorn  *
1601480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1602480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
160332f8516aSDavid Rientjes  * Current or other task's task mempolicy and non-shared vma policies must be
160432f8516aSDavid Rientjes  * protected by task_lock(task) by the caller.
160552cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
160652cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
160752cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
160852cd3b07SLee Schermerhorn  * extra reference for shared policies.
1609480eccf9SLee Schermerhorn  */
1610d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task,
161148fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
16121da177e4SLinus Torvalds {
16135606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(task);
16141da177e4SLinus Torvalds 
16151da177e4SLinus Torvalds 	if (vma) {
1616480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1617ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1618ae4d8c16SLee Schermerhorn 									addr);
1619ae4d8c16SLee Schermerhorn 			if (vpol)
1620ae4d8c16SLee Schermerhorn 				pol = vpol;
162100442ad0SMel Gorman 		} else if (vma->vm_policy) {
16221da177e4SLinus Torvalds 			pol = vma->vm_policy;
162300442ad0SMel Gorman 
162400442ad0SMel Gorman 			/*
162500442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
162600442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
162700442ad0SMel Gorman 			 * count on these policies which will be dropped by
162800442ad0SMel Gorman 			 * mpol_cond_put() later
162900442ad0SMel Gorman 			 */
163000442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
163100442ad0SMel Gorman 				mpol_get(pol);
163200442ad0SMel Gorman 		}
16331da177e4SLinus Torvalds 	}
16341da177e4SLinus Torvalds 	if (!pol)
16351da177e4SLinus Torvalds 		pol = &default_policy;
16361da177e4SLinus Torvalds 	return pol;
16371da177e4SLinus Torvalds }
16381da177e4SLinus Torvalds 
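/*
 * Sketch of the intended call pattern for get_vma_policy() (see
 * alloc_pages_vma() below for a real caller): drop the conditional
 * reference that shared policies carry once the allocation is done.
 *
 *	pol = get_vma_policy(current, vma, addr);
 *	... allocate according to pol ...
 *	mpol_cond_put(pol);	/* no-op unless MPOL_F_SHARED */
 */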
1639d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1640d3eb1570SLai Jiangshan {
1641d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1642d3eb1570SLai Jiangshan 
1643d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1644d3eb1570SLai Jiangshan 
1645d3eb1570SLai Jiangshan 	/*
1646d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1647d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1648d3eb1570SLai Jiangshan 	 *
1649d3eb1570SLai Jiangshan 	 * policy->v.nodes intersects with node_states[N_MEMORY];
1650d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies that
1651d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1652d3eb1570SLai Jiangshan 	 */
1653d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1654d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1655d3eb1570SLai Jiangshan 
1656d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1657d3eb1570SLai Jiangshan }
1658d3eb1570SLai Jiangshan 
165952cd3b07SLee Schermerhorn /*
166052cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
166152cd3b07SLee Schermerhorn  * page allocation
166252cd3b07SLee Schermerhorn  */
166352cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
166419770b32SMel Gorman {
166519770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
166645c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1667d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
166819770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
166919770b32SMel Gorman 		return &policy->v.nodes;
167019770b32SMel Gorman 
167119770b32SMel Gorman 	return NULL;
167219770b32SMel Gorman }
167319770b32SMel Gorman 
167452cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
16752f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
16762f5f9486SAndi Kleen 	int nd)
16771da177e4SLinus Torvalds {
167845c4745aSLee Schermerhorn 	switch (policy->mode) {
16791da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1680fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
16811da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
16821da177e4SLinus Torvalds 		break;
16831da177e4SLinus Torvalds 	case MPOL_BIND:
168419770b32SMel Gorman 		/*
168552cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
168652cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
16876eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
168852cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
168919770b32SMel Gorman 		 */
169019770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
169119770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
169219770b32SMel Gorman 			nd = first_node(policy->v.nodes);
169319770b32SMel Gorman 		break;
16941da177e4SLinus Torvalds 	default:
16951da177e4SLinus Torvalds 		BUG();
16961da177e4SLinus Torvalds 	}
16970e88460dSMel Gorman 	return node_zonelist(nd, gfp);
16981da177e4SLinus Torvalds }
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17011da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17021da177e4SLinus Torvalds {
17031da177e4SLinus Torvalds 	unsigned nid, next;
17041da177e4SLinus Torvalds 	struct task_struct *me = current;
17051da177e4SLinus Torvalds 
17061da177e4SLinus Torvalds 	nid = me->il_next;
1707dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
17081da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1709dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1710f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17111da177e4SLinus Torvalds 		me->il_next = next;
17121da177e4SLinus Torvalds 	return nid;
17131da177e4SLinus Torvalds }
17141da177e4SLinus Torvalds 
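/*
 * Worked example for interleave_nodes(): with policy->v.nodes ==
 * {0,2,5} and il_next == 2, successive calls return 2, 5, 0, 2, ...
 * -- each call hands out il_next and advances it to the next set bit,
 * wrapping around via first_node().
 */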
1715dc85da15SChristoph Lameter /*
1716dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1717dc85da15SChristoph Lameter  * next slab entry.
171852cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
171952cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
172052cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
172152cd3b07SLee Schermerhorn  * such protection.
1722dc85da15SChristoph Lameter  */
1723e7b691b0SAndi Kleen unsigned slab_node(void)
1724dc85da15SChristoph Lameter {
1725e7b691b0SAndi Kleen 	struct mempolicy *policy;
1726e7b691b0SAndi Kleen 
1727e7b691b0SAndi Kleen 	if (in_interrupt())
1728e7b691b0SAndi Kleen 		return numa_node_id();
1729e7b691b0SAndi Kleen 
1730e7b691b0SAndi Kleen 	policy = current->mempolicy;
1731fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1732bea904d5SLee Schermerhorn 		return numa_node_id();
1733765c4507SChristoph Lameter 
1734bea904d5SLee Schermerhorn 	switch (policy->mode) {
1735bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1736fc36b8d3SLee Schermerhorn 		/*
1737fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1738fc36b8d3SLee Schermerhorn 		 */
1739bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1740bea904d5SLee Schermerhorn 
1741dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1742dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1743dc85da15SChristoph Lameter 
1744dd1a239fSMel Gorman 	case MPOL_BIND: {
1745dc85da15SChristoph Lameter 		/*
1746dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1747dc85da15SChristoph Lameter 		 * first node.
1748dc85da15SChristoph Lameter 		 */
174919770b32SMel Gorman 		struct zonelist *zonelist;
175019770b32SMel Gorman 		struct zone *zone;
175119770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
175219770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
175319770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
175419770b32SMel Gorman 							&policy->v.nodes,
175519770b32SMel Gorman 							&zone);
1756800416f7SEric Dumazet 		return zone ? zone->node : numa_node_id();
1757dd1a239fSMel Gorman 	}
1758dc85da15SChristoph Lameter 
1759dc85da15SChristoph Lameter 	default:
1760bea904d5SLee Schermerhorn 		BUG();
1761dc85da15SChristoph Lameter 	}
1762dc85da15SChristoph Lameter }
1763dc85da15SChristoph Lameter 
17641da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
17651da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
17661da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
17671da177e4SLinus Torvalds {
1768dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1769f5b087b5SDavid Rientjes 	unsigned target;
17701da177e4SLinus Torvalds 	int c;
17711da177e4SLinus Torvalds 	int nid = -1;
17721da177e4SLinus Torvalds 
1773f5b087b5SDavid Rientjes 	if (!nnodes)
1774f5b087b5SDavid Rientjes 		return numa_node_id();
1775f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
17761da177e4SLinus Torvalds 	c = 0;
17771da177e4SLinus Torvalds 	do {
1778dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17791da177e4SLinus Torvalds 		c++;
17801da177e4SLinus Torvalds 	} while (c <= target);
17811da177e4SLinus Torvalds 	return nid;
17821da177e4SLinus Torvalds }
17831da177e4SLinus Torvalds 
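/*
 * Worked example for offset_il_node(): with pol->v.nodes == {1,3,6}
 * (so nnodes == 3) and off == 7, target == 7 % 3 == 1 and the walk
 * stops at the second set bit, placing the page on node 3.
 */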
17845da7ca86SChristoph Lameter /* Determine a node number for interleave */
17855da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17865da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17875da7ca86SChristoph Lameter {
17885da7ca86SChristoph Lameter 	if (vma) {
17895da7ca86SChristoph Lameter 		unsigned long off;
17905da7ca86SChristoph Lameter 
17913b98b087SNishanth Aravamudan 		/*
17923b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17933b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17943b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17953b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17963b98b087SNishanth Aravamudan 		 * a useful offset.
17973b98b087SNishanth Aravamudan 		 */
17983b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17993b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18005da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
18015da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
18025da7ca86SChristoph Lameter 	} else
18035da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18045da7ca86SChristoph Lameter }
18055da7ca86SChristoph Lameter 
1806778d3b0fSMichal Hocko /*
1807778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1808778d3b0fSMichal Hocko  * (returns -1 if nodemask is empty)
1809778d3b0fSMichal Hocko  */
1810778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1811778d3b0fSMichal Hocko {
1812778d3b0fSMichal Hocko 	int w, bit = -1;
1813778d3b0fSMichal Hocko 
1814778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1815778d3b0fSMichal Hocko 	if (w)
1816778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1817778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1818778d3b0fSMichal Hocko 	return bit;
1819778d3b0fSMichal Hocko }
1820778d3b0fSMichal Hocko 
182100ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1822480eccf9SLee Schermerhorn /*
1823480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1824480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1825480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1826480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
182719770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
182819770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1829480eccf9SLee Schermerhorn  *
183052cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
183152cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
183252cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
183352cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1834c0ff7453SMiao Xie  *
1835c0ff7453SMiao Xie  * Must be protected by get_mems_allowed()
1836480eccf9SLee Schermerhorn  */
1837396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
183819770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
183919770b32SMel Gorman 				nodemask_t **nodemask)
18405da7ca86SChristoph Lameter {
1841480eccf9SLee Schermerhorn 	struct zonelist *zl;
18425da7ca86SChristoph Lameter 
184352cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
184419770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18455da7ca86SChristoph Lameter 
184652cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
184752cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1848a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
184952cd3b07SLee Schermerhorn 	} else {
18502f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
185152cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
185252cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1853480eccf9SLee Schermerhorn 	}
1854480eccf9SLee Schermerhorn 	return zl;
18555da7ca86SChristoph Lameter }
185606808b08SLee Schermerhorn 
185706808b08SLee Schermerhorn /*
185806808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
185906808b08SLee Schermerhorn  *
186006808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
186106808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
186206808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
186306808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
186406808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
186506808b08SLee Schermerhorn  * of non-default mempolicy.
186606808b08SLee Schermerhorn  *
186706808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
186806808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
186906808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
187006808b08SLee Schermerhorn  *
187106808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
187206808b08SLee Schermerhorn  */
187306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
187406808b08SLee Schermerhorn {
187506808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
187606808b08SLee Schermerhorn 	int nid;
187706808b08SLee Schermerhorn 
187806808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
187906808b08SLee Schermerhorn 		return false;
188006808b08SLee Schermerhorn 
1881c0ff7453SMiao Xie 	task_lock(current);
188206808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
188306808b08SLee Schermerhorn 	switch (mempolicy->mode) {
188406808b08SLee Schermerhorn 	case MPOL_PREFERRED:
188506808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
188606808b08SLee Schermerhorn 			nid = numa_node_id();
188706808b08SLee Schermerhorn 		else
188806808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
188906808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
189006808b08SLee Schermerhorn 		break;
189106808b08SLee Schermerhorn 
189206808b08SLee Schermerhorn 	case MPOL_BIND:
189306808b08SLee Schermerhorn 		/* Fall through */
189406808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
189506808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
189606808b08SLee Schermerhorn 		break;
189706808b08SLee Schermerhorn 
189806808b08SLee Schermerhorn 	default:
189906808b08SLee Schermerhorn 		BUG();
190006808b08SLee Schermerhorn 	}
1901c0ff7453SMiao Xie 	task_unlock(current);
190206808b08SLee Schermerhorn 
190306808b08SLee Schermerhorn 	return true;
190406808b08SLee Schermerhorn }
190500ac59adSChen, Kenneth W #endif
19065da7ca86SChristoph Lameter 
19076f48d0ebSDavid Rientjes /*
19086f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19096f48d0ebSDavid Rientjes  *
19106f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19116f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19126f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19136f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19146f48d0ebSDavid Rientjes  *
19156f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19166f48d0ebSDavid Rientjes  */
19176f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19186f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19196f48d0ebSDavid Rientjes {
19206f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19216f48d0ebSDavid Rientjes 	bool ret = true;
19226f48d0ebSDavid Rientjes 
19236f48d0ebSDavid Rientjes 	if (!mask)
19246f48d0ebSDavid Rientjes 		return ret;
19256f48d0ebSDavid Rientjes 	task_lock(tsk);
19266f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19276f48d0ebSDavid Rientjes 	if (!mempolicy)
19286f48d0ebSDavid Rientjes 		goto out;
19296f48d0ebSDavid Rientjes 
19306f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19316f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19326f48d0ebSDavid Rientjes 		/*
19336f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only name preferred nodes to
19346f48d0ebSDavid Rientjes 		 * allocate from; the task may fall back to other nodes when oom.
19356f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19366f48d0ebSDavid Rientjes 		 * nodes in mask.
19376f48d0ebSDavid Rientjes 		 */
19386f48d0ebSDavid Rientjes 		break;
19396f48d0ebSDavid Rientjes 	case MPOL_BIND:
19406f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19416f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19426f48d0ebSDavid Rientjes 		break;
19436f48d0ebSDavid Rientjes 	default:
19446f48d0ebSDavid Rientjes 		BUG();
19456f48d0ebSDavid Rientjes 	}
19466f48d0ebSDavid Rientjes out:
19476f48d0ebSDavid Rientjes 	task_unlock(tsk);
19486f48d0ebSDavid Rientjes 	return ret;
19496f48d0ebSDavid Rientjes }
19506f48d0ebSDavid Rientjes 
19511da177e4SLinus Torvalds /* Allocate a page under an interleave policy.
19521da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1953662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1954662f3a0bSAndi Kleen 					unsigned nid)
19551da177e4SLinus Torvalds {
19561da177e4SLinus Torvalds 	struct zonelist *zl;
19571da177e4SLinus Torvalds 	struct page *page;
19581da177e4SLinus Torvalds 
19590e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19601da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1961dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1962ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19631da177e4SLinus Torvalds 	return page;
19641da177e4SLinus Torvalds }
19651da177e4SLinus Torvalds 
19661da177e4SLinus Torvalds /**
19670bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19681da177e4SLinus Torvalds  *
19691da177e4SLinus Torvalds  * 	@gfp:
19701da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19711da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19721da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19731da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19741da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19751da177e4SLinus Torvalds  *
19760bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19771da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19781da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
19791da177e4SLinus Torvalds  *
19801da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19811da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19821da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19831da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
19841da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
19851da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
19861da177e4SLinus Torvalds  *
19881da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
19881da177e4SLinus Torvalds  */
19891da177e4SLinus Torvalds struct page *
19900bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
19912f5f9486SAndi Kleen 		unsigned long addr, int node)
19921da177e4SLinus Torvalds {
1993cc9a6c87SMel Gorman 	struct mempolicy *pol;
1994c0ff7453SMiao Xie 	struct page *page;
1995cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
19961da177e4SLinus Torvalds 
1997cc9a6c87SMel Gorman retry_cpuset:
1998cc9a6c87SMel Gorman 	pol = get_vma_policy(current, vma, addr);
1999cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2000cc9a6c87SMel Gorman 
200145c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
20021da177e4SLinus Torvalds 		unsigned nid;
20035da7ca86SChristoph Lameter 
20048eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
200552cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20060bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2007cc9a6c87SMel Gorman 		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2008cc9a6c87SMel Gorman 			goto retry_cpuset;
2009cc9a6c87SMel Gorman 
2010c0ff7453SMiao Xie 		return page;
20111da177e4SLinus Torvalds 	}
2012212a0a6fSDavid Rientjes 	page = __alloc_pages_nodemask(gfp, order,
2013212a0a6fSDavid Rientjes 				      policy_zonelist(gfp, pol, node),
20140bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
2015212a0a6fSDavid Rientjes 	if (unlikely(mpol_needs_cond_ref(pol)))
2016212a0a6fSDavid Rientjes 		__mpol_put(pol);
2017cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2018cc9a6c87SMel Gorman 		goto retry_cpuset;
2019c0ff7453SMiao Xie 	return page;
20201da177e4SLinus Torvalds }
20211da177e4SLinus Torvalds 
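/*
 * The retry_cpuset dance above is a read-side sequence-count pattern;
 * a minimal sketch using the same helpers:
 *
 *	retry:
 *		cookie = get_mems_allowed();
 *		page = allocate under the sampled mems_allowed;
 *		if (!put_mems_allowed(cookie) && !page)
 *			goto retry;
 *
 * i.e. retry only when the allocation failed and the cpuset's
 * mems_allowed changed underneath us in the meantime.
 */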
20221da177e4SLinus Torvalds /**
20231da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20241da177e4SLinus Torvalds  *
20251da177e4SLinus Torvalds  *	@gfp:
20261da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20271da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20281da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20291da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20301da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20311da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20321da177e4SLinus Torvalds  *
20331da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20341da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20351da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20361da177e4SLinus Torvalds  *
2037cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20381da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20391da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20401da177e4SLinus Torvalds  */
2041dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20421da177e4SLinus Torvalds {
20435606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(current);
2044c0ff7453SMiao Xie 	struct page *page;
2045cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20461da177e4SLinus Torvalds 
20479b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
20481da177e4SLinus Torvalds 		pol = &default_policy;
204952cd3b07SLee Schermerhorn 
2050cc9a6c87SMel Gorman retry_cpuset:
2051cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2052cc9a6c87SMel Gorman 
205352cd3b07SLee Schermerhorn 	/*
205452cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
205552cd3b07SLee Schermerhorn 	 * nor system default_policy
205652cd3b07SLee Schermerhorn 	 */
205745c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2058c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2059c0ff7453SMiao Xie 	else
2060c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20615c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20625c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2063cc9a6c87SMel Gorman 
2064cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2065cc9a6c87SMel Gorman 		goto retry_cpuset;
2066cc9a6c87SMel Gorman 
2067c0ff7453SMiao Xie 	return page;
20681da177e4SLinus Torvalds }
20691da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20701da177e4SLinus Torvalds 
2071ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2072ef0855d3SOleg Nesterov {
2073ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2074ef0855d3SOleg Nesterov 
2075ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2076ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2077ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2078ef0855d3SOleg Nesterov 	return 0;
2079ef0855d3SOleg Nesterov }
2080ef0855d3SOleg Nesterov 
20814225399aSPaul Jackson /*
2082846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20834225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
20844225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20854225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20864225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2087708c1bbcSMiao Xie  *
2088708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2089708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
20904225399aSPaul Jackson  */
20914225399aSPaul Jackson 
2092846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2093846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
20941da177e4SLinus Torvalds {
20951da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
20961da177e4SLinus Torvalds 
20971da177e4SLinus Torvalds 	if (!new)
20981da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2099708c1bbcSMiao Xie 
2100708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2101708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2102708c1bbcSMiao Xie 		task_lock(current);
2103708c1bbcSMiao Xie 		*new = *old;
2104708c1bbcSMiao Xie 		task_unlock(current);
2105708c1bbcSMiao Xie 	} else
2106708c1bbcSMiao Xie 		*new = *old;
2107708c1bbcSMiao Xie 
210899ee4ca7SPaul E. McKenney 	rcu_read_lock();
21094225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21104225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2111708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2112708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2113708c1bbcSMiao Xie 		else
2114708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21154225399aSPaul Jackson 	}
211699ee4ca7SPaul E. McKenney 	rcu_read_unlock();
21171da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21181da177e4SLinus Torvalds 	return new;
21191da177e4SLinus Torvalds }
21201da177e4SLinus Torvalds 
21211da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2122fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21231da177e4SLinus Torvalds {
21241da177e4SLinus Torvalds 	if (!a || !b)
2125fcfb4dccSKOSAKI Motohiro 		return false;
212645c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2127fcfb4dccSKOSAKI Motohiro 		return false;
212819800502SBob Liu 	if (a->flags != b->flags)
2129fcfb4dccSKOSAKI Motohiro 		return false;
213019800502SBob Liu 	if (mpol_store_user_nodemask(a))
213119800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2132fcfb4dccSKOSAKI Motohiro 			return false;
213319800502SBob Liu 
213445c4745aSLee Schermerhorn 	switch (a->mode) {
213519770b32SMel Gorman 	case MPOL_BIND:
213619770b32SMel Gorman 		/* Fall through */
21371da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2138fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21391da177e4SLinus Torvalds 	case MPOL_PREFERRED:
214075719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21411da177e4SLinus Torvalds 	default:
21421da177e4SLinus Torvalds 		BUG();
2143fcfb4dccSKOSAKI Motohiro 		return false;
21441da177e4SLinus Torvalds 	}
21451da177e4SLinus Torvalds }
21461da177e4SLinus Torvalds 
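/*
 * Worked example (illustrative): two MPOL_INTERLEAVE policies over
 * nodes 0-1 with identical flags and nodemasks compare equal; an
 * MPOL_BIND policy over nodes 0-1 and an MPOL_INTERLEAVE policy over
 * the same nodes do not, because the modes differ and are checked first.
 */
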
21471da177e4SLinus Torvalds /*
21481da177e4SLinus Torvalds  * Shared memory backing store policy support.
21491da177e4SLinus Torvalds  *
21501da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21511da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
21521da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
21531da177e4SLinus Torvalds  * for any accesses to the tree.
21541da177e4SLinus Torvalds  */
21551da177e4SLinus Torvalds 
21561da177e4SLinus Torvalds /* lookup first element intersecting start-end */
215742288fe3SMel Gorman /* Caller holds sp->lock */
21581da177e4SLinus Torvalds static struct sp_node *
21591da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21601da177e4SLinus Torvalds {
21611da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21621da177e4SLinus Torvalds 
21631da177e4SLinus Torvalds 	while (n) {
21641da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21651da177e4SLinus Torvalds 
21661da177e4SLinus Torvalds 		if (start >= p->end)
21671da177e4SLinus Torvalds 			n = n->rb_right;
21681da177e4SLinus Torvalds 		else if (end <= p->start)
21691da177e4SLinus Torvalds 			n = n->rb_left;
21701da177e4SLinus Torvalds 		else
21711da177e4SLinus Torvalds 			break;
21721da177e4SLinus Torvalds 	}
21731da177e4SLinus Torvalds 	if (!n)
21741da177e4SLinus Torvalds 		return NULL;
21751da177e4SLinus Torvalds 	for (;;) {
21761da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21771da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21781da177e4SLinus Torvalds 		if (!prev)
21791da177e4SLinus Torvalds 			break;
21801da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21811da177e4SLinus Torvalds 		if (w->end <= start)
21821da177e4SLinus Torvalds 			break;
21831da177e4SLinus Torvalds 		n = prev;
21841da177e4SLinus Torvalds 	}
21851da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21861da177e4SLinus Torvalds }
21871da177e4SLinus Torvalds 
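/*
 * Worked example (illustrative): with ranges [2,5) and [7,9) in the
 * tree, sp_lookup(sp, 4, 8) first descends to an intersecting node and
 * then walks rb_prev() until the predecessor no longer overlaps the
 * start, so it returns the [2,5) node -- the lowest intersecting
 * range -- rather than whichever overlapping node the descent hit.
 */
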
21881da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
21891da177e4SLinus Torvalds /* Caller holds sp->lock */
21901da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
21911da177e4SLinus Torvalds {
21921da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
21931da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
21941da177e4SLinus Torvalds 	struct sp_node *nd;
21951da177e4SLinus Torvalds 
21961da177e4SLinus Torvalds 	while (*p) {
21971da177e4SLinus Torvalds 		parent = *p;
21981da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
21991da177e4SLinus Torvalds 		if (new->start < nd->start)
22001da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22011da177e4SLinus Torvalds 		else if (new->end > nd->end)
22021da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22031da177e4SLinus Torvalds 		else
22041da177e4SLinus Torvalds 			BUG();
22051da177e4SLinus Torvalds 	}
22061da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22071da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2208140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
220945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22101da177e4SLinus Torvalds }
22111da177e4SLinus Torvalds 
22121da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22131da177e4SLinus Torvalds struct mempolicy *
22141da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22151da177e4SLinus Torvalds {
22161da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22171da177e4SLinus Torvalds 	struct sp_node *sn;
22181da177e4SLinus Torvalds 
22191da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22201da177e4SLinus Torvalds 		return NULL;
222142288fe3SMel Gorman 	spin_lock(&sp->lock);
22221da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22231da177e4SLinus Torvalds 	if (sn) {
22241da177e4SLinus Torvalds 		mpol_get(sn->policy);
22251da177e4SLinus Torvalds 		pol = sn->policy;
22261da177e4SLinus Torvalds 	}
222742288fe3SMel Gorman 	spin_unlock(&sp->lock);
22281da177e4SLinus Torvalds 	return pol;
22291da177e4SLinus Torvalds }
22301da177e4SLinus Torvalds 
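/*
 * Hedged usage sketch (illustrative, not in-tree): the policy returned
 * above carries a reference taken by mpol_get(), so the caller must
 * drop it.  "peek_shared_pol" is a made-up name.
 */
static void peek_shared_pol(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);

	if (pol) {
		/* ... inspect pol->mode, pol->v.nodes, ... */
		mpol_put(pol);	/* drop the lookup's reference */
	}
}
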
223163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
223263f74ca2SKOSAKI Motohiro {
223363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
223463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
223563f74ca2SKOSAKI Motohiro }
223663f74ca2SKOSAKI Motohiro 
2237771fb4d8SLee Schermerhorn /**
2238771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2239771fb4d8SLee Schermerhorn  *
2240771fb4d8SLee Schermerhorn  * @page: page to be checked
2241771fb4d8SLee Schermerhorn  * @vma: vm area where page mapped
2242771fb4d8SLee Schermerhorn  * @addr: virtual address where page mapped
2243771fb4d8SLee Schermerhorn  *
2244771fb4d8SLee Schermerhorn  * Lookup current policy node id for vma,addr and "compare to" page's
2245771fb4d8SLee Schermerhorn  * node id.
2246771fb4d8SLee Schermerhorn  *
2247771fb4d8SLee Schermerhorn  * Returns:
2248771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2249771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2250771fb4d8SLee Schermerhorn  *
2251771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2252771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2253771fb4d8SLee Schermerhorn  */
2254771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2255771fb4d8SLee Schermerhorn {
2256771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2257771fb4d8SLee Schermerhorn 	struct zone *zone;
2258771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2259771fb4d8SLee Schermerhorn 	unsigned long pgoff;
2260771fb4d8SLee Schermerhorn 	int polnid = -1;
2261771fb4d8SLee Schermerhorn 	int ret = -1;
2262771fb4d8SLee Schermerhorn 
2263771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2264771fb4d8SLee Schermerhorn 
2265771fb4d8SLee Schermerhorn 	pol = get_vma_policy(current, vma, addr);
2266771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2267771fb4d8SLee Schermerhorn 		goto out;
2268771fb4d8SLee Schermerhorn 
2269771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2270771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2271771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2272771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2273771fb4d8SLee Schermerhorn 
2274771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2275771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2276771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2277771fb4d8SLee Schermerhorn 		break;
2278771fb4d8SLee Schermerhorn 
2279771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2280771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2281771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2282771fb4d8SLee Schermerhorn 		else
2283771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2284771fb4d8SLee Schermerhorn 		break;
2285771fb4d8SLee Schermerhorn 
2286771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2287771fb4d8SLee Schermerhorn 		/*
2288771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2289771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy
2290771fb4d8SLee Schermerhorn 		 * nodemask, else select the nearest allowed node, if any.
2291771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2292771fb4d8SLee Schermerhorn 		 */
2293771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2294771fb4d8SLee Schermerhorn 			goto out;
2295771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2296771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2297771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2298771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2299771fb4d8SLee Schermerhorn 		polnid = zone->node;
2300771fb4d8SLee Schermerhorn 		break;
2301771fb4d8SLee Schermerhorn 
2302771fb4d8SLee Schermerhorn 	default:
2303771fb4d8SLee Schermerhorn 		BUG();
2304771fb4d8SLee Schermerhorn 	}
23055606e387SMel Gorman 
23065606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2307e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
2308e42c8ff2SMel Gorman 		int last_nid;
2309e42c8ff2SMel Gorman 
23105606e387SMel Gorman 		polnid = numa_node_id();
23115606e387SMel Gorman 
2312e42c8ff2SMel Gorman 		/*
2313e42c8ff2SMel Gorman 		 * Multi-stage node selection is used in conjunction
2314e42c8ff2SMel Gorman 		 * with a periodic migration fault to build a temporal
2315e42c8ff2SMel Gorman 		 * task<->page relation. By using a two-stage filter we
2316e42c8ff2SMel Gorman 		 * remove short/unlikely relations.
2317e42c8ff2SMel Gorman 		 *
2318e42c8ff2SMel Gorman 		 * Using P(p) ~ n_p / n_t as per frequentist
2319e42c8ff2SMel Gorman 		 * probability, we can equate a task's usage of a
2320e42c8ff2SMel Gorman 		 * particular page (n_p) per total usage of this
2321e42c8ff2SMel Gorman 		 * page (n_t) (in a given time-span) to a probability.
2322e42c8ff2SMel Gorman 		 *
2323e42c8ff2SMel Gorman 		 * Our periodic faults will sample this probability and
2324e42c8ff2SMel Gorman 		 * getting the same result twice in a row, given these
2325e42c8ff2SMel Gorman 		 * samples are fully independent, is then given by
2326e42c8ff2SMel Gorman 		 * P(n)^2, provided our sample period is sufficiently
2327e42c8ff2SMel Gorman 		 * short compared to the usage pattern.
2328e42c8ff2SMel Gorman 		 *
2329e42c8ff2SMel Gorman 		 * This quadratic squishes small probabilities, making
2330e42c8ff2SMel Gorman 		 * it less likely we act on an unlikely task<->page
2331e42c8ff2SMel Gorman 		 * relation.
2332e42c8ff2SMel Gorman 		 */
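		/*
		 * Worked example (illustrative): if this task performs
		 * p = 30% of the accesses to the page, two consecutive
		 * independent samples both attribute the page to it
		 * with probability p^2 = 0.09, so such a weak relation
		 * triggers a migration only ~9% of the time.
		 */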
233322b751c3SMel Gorman 		last_nid = page_nid_xchg_last(page, polnid);
2334e42c8ff2SMel Gorman 		if (last_nid != polnid)
2335e42c8ff2SMel Gorman 			goto out;
2336e42c8ff2SMel Gorman 	}
2337e42c8ff2SMel Gorman 
2338771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2339771fb4d8SLee Schermerhorn 		ret = polnid;
2340771fb4d8SLee Schermerhorn out:
2341771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2342771fb4d8SLee Schermerhorn 
2343771fb4d8SLee Schermerhorn 	return ret;
2344771fb4d8SLee Schermerhorn }
2345771fb4d8SLee Schermerhorn 
23461da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23471da177e4SLinus Torvalds {
2348140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23491da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
235063f74ca2SKOSAKI Motohiro 	sp_free(n);
23511da177e4SLinus Torvalds }
23521da177e4SLinus Torvalds 
235342288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
235442288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
235542288fe3SMel Gorman {
235642288fe3SMel Gorman 	node->start = start;
235742288fe3SMel Gorman 	node->end = end;
235842288fe3SMel Gorman 	node->policy = pol;
235942288fe3SMel Gorman }
236042288fe3SMel Gorman 
2361dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2362dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23631da177e4SLinus Torvalds {
2364869833f2SKOSAKI Motohiro 	struct sp_node *n;
2365869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23661da177e4SLinus Torvalds 
2367869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23681da177e4SLinus Torvalds 	if (!n)
23691da177e4SLinus Torvalds 		return NULL;
2370869833f2SKOSAKI Motohiro 
2371869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2372869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2373869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2374869833f2SKOSAKI Motohiro 		return NULL;
2375869833f2SKOSAKI Motohiro 	}
2376869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
237742288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2378869833f2SKOSAKI Motohiro 
23791da177e4SLinus Torvalds 	return n;
23801da177e4SLinus Torvalds }
23811da177e4SLinus Torvalds 
23821da177e4SLinus Torvalds /* Replace a policy range. */
23831da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23841da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23851da177e4SLinus Torvalds {
2386b22d127aSMel Gorman 	struct sp_node *n;
238742288fe3SMel Gorman 	struct sp_node *n_new = NULL;
238842288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2389b22d127aSMel Gorman 	int ret = 0;
23901da177e4SLinus Torvalds 
239142288fe3SMel Gorman restart:
239242288fe3SMel Gorman 	spin_lock(&sp->lock);
23931da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23941da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23951da177e4SLinus Torvalds 	while (n && n->start < end) {
23961da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23971da177e4SLinus Torvalds 		if (n->start >= start) {
23981da177e4SLinus Torvalds 			if (n->end <= end)
23991da177e4SLinus Torvalds 				sp_delete(sp, n);
24001da177e4SLinus Torvalds 			else
24011da177e4SLinus Torvalds 				n->start = end;
24021da177e4SLinus Torvalds 		} else {
24031da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24041da177e4SLinus Torvalds 			if (n->end > end) {
240542288fe3SMel Gorman 				if (!n_new)
240642288fe3SMel Gorman 					goto alloc_new;
240742288fe3SMel Gorman 
240842288fe3SMel Gorman 				*mpol_new = *n->policy;
240942288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24107880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24111da177e4SLinus Torvalds 				n->end = start;
24125ca39575SHillf Danton 				sp_insert(sp, n_new);
241342288fe3SMel Gorman 				n_new = NULL;
241442288fe3SMel Gorman 				mpol_new = NULL;
24151da177e4SLinus Torvalds 				break;
24161da177e4SLinus Torvalds 			} else
24171da177e4SLinus Torvalds 				n->end = start;
24181da177e4SLinus Torvalds 		}
24191da177e4SLinus Torvalds 		if (!next)
24201da177e4SLinus Torvalds 			break;
24211da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24221da177e4SLinus Torvalds 	}
24231da177e4SLinus Torvalds 	if (new)
24241da177e4SLinus Torvalds 		sp_insert(sp, new);
242542288fe3SMel Gorman 	spin_unlock(&sp->lock);
242642288fe3SMel Gorman 	ret = 0;
242742288fe3SMel Gorman 
242842288fe3SMel Gorman err_out:
242942288fe3SMel Gorman 	if (mpol_new)
243042288fe3SMel Gorman 		mpol_put(mpol_new);
243142288fe3SMel Gorman 	if (n_new)
243242288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
243342288fe3SMel Gorman 
2434b22d127aSMel Gorman 	return ret;
243542288fe3SMel Gorman 
243642288fe3SMel Gorman alloc_new:
243742288fe3SMel Gorman 	spin_unlock(&sp->lock);
243842288fe3SMel Gorman 	ret = -ENOMEM;
243942288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
244042288fe3SMel Gorman 	if (!n_new)
244142288fe3SMel Gorman 		goto err_out;
244242288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
244342288fe3SMel Gorman 	if (!mpol_new)
244442288fe3SMel Gorman 		goto err_out;
244542288fe3SMel Gorman 	goto restart;
24461da177e4SLinus Torvalds }
24471da177e4SLinus Torvalds 
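/*
 * Worked example (illustrative): if the tree holds a single node
 * [0,10) with policy A and we replace [3,6) with policy B, the old
 * node is trimmed to [0,3), a copy of A is allocated to cover [6,10)
 * via the alloc_new path, and B's node is inserted for [3,6).
 */
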
244871fe804bSLee Schermerhorn /**
244971fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
245071fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
245171fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
245271fe804bSLee Schermerhorn  *
245371fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
245471fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
245571fe804bSLee Schermerhorn  * This must be released on exit.
24564bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
245771fe804bSLee Schermerhorn  */
245871fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24597339ff83SRobin Holt {
246058568d2aSMiao Xie 	int ret;
246158568d2aSMiao Xie 
246271fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
246342288fe3SMel Gorman 	spin_lock_init(&sp->lock);
24647339ff83SRobin Holt 
246571fe804bSLee Schermerhorn 	if (mpol) {
24667339ff83SRobin Holt 		struct vm_area_struct pvma;
246771fe804bSLee Schermerhorn 		struct mempolicy *new;
24684bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24697339ff83SRobin Holt 
24704bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24715c0c1654SLee Schermerhorn 			goto put_mpol;
247271fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
247371fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
247415d77835SLee Schermerhorn 		if (IS_ERR(new))
24750cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
247658568d2aSMiao Xie 
247758568d2aSMiao Xie 		task_lock(current);
24784bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
247958568d2aSMiao Xie 		task_unlock(current);
248015d77835SLee Schermerhorn 		if (ret)
24815c0c1654SLee Schermerhorn 			goto put_new;
248271fe804bSLee Schermerhorn 
248371fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24847339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
248571fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
248671fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
248715d77835SLee Schermerhorn 
24885c0c1654SLee Schermerhorn put_new:
248971fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24900cae3457SDan Carpenter free_scratch:
24914bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24925c0c1654SLee Schermerhorn put_mpol:
24935c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24947339ff83SRobin Holt 	}
24957339ff83SRobin Holt }
24967339ff83SRobin Holt 
24971da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24981da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24991da177e4SLinus Torvalds {
25001da177e4SLinus Torvalds 	int err;
25011da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25021da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25031da177e4SLinus Torvalds 
2504028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25051da177e4SLinus Torvalds 		 vma->vm_pgoff,
250645c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2507028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
250800ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25091da177e4SLinus Torvalds 
25101da177e4SLinus Torvalds 	if (npol) {
25111da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25121da177e4SLinus Torvalds 		if (!new)
25131da177e4SLinus Torvalds 			return -ENOMEM;
25141da177e4SLinus Torvalds 	}
25151da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25161da177e4SLinus Torvalds 	if (err && new)
251763f74ca2SKOSAKI Motohiro 		sp_free(new);
25181da177e4SLinus Torvalds 	return err;
25191da177e4SLinus Torvalds }
25201da177e4SLinus Torvalds 
25211da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25221da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25231da177e4SLinus Torvalds {
25241da177e4SLinus Torvalds 	struct sp_node *n;
25251da177e4SLinus Torvalds 	struct rb_node *next;
25261da177e4SLinus Torvalds 
25271da177e4SLinus Torvalds 	if (!p->root.rb_node)
25281da177e4SLinus Torvalds 		return;
252942288fe3SMel Gorman 	spin_lock(&p->lock);
25301da177e4SLinus Torvalds 	next = rb_first(&p->root);
25311da177e4SLinus Torvalds 	while (next) {
25321da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25331da177e4SLinus Torvalds 		next = rb_next(&n->nd);
253463f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25351da177e4SLinus Torvalds 	}
253642288fe3SMel Gorman 	spin_unlock(&p->lock);
25371da177e4SLinus Torvalds }
25381da177e4SLinus Torvalds 
25391a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
25401a687c2eSMel Gorman static bool __initdata numabalancing_override;
25411a687c2eSMel Gorman 
25421a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25431a687c2eSMel Gorman {
25441a687c2eSMel Gorman 	bool numabalancing_default = false;
25451a687c2eSMel Gorman 
25461a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25471a687c2eSMel Gorman 		numabalancing_default = true;
25481a687c2eSMel Gorman 
25491a687c2eSMel Gorman 	if (nr_node_ids > 1 && !numabalancing_override) {
25501a687c2eSMel Gorman 		printk(KERN_INFO "Enabling automatic NUMA balancing. "
25511a687c2eSMel Gorman 			"Configure with numa_balancing= or sysctl\n");
25521a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25531a687c2eSMel Gorman 	}
25541a687c2eSMel Gorman }
25551a687c2eSMel Gorman 
25561a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25571a687c2eSMel Gorman {
25581a687c2eSMel Gorman 	int ret = 0;
25591a687c2eSMel Gorman 	if (!str)
25601a687c2eSMel Gorman 		goto out;
25611a687c2eSMel Gorman 	numabalancing_override = true;
25621a687c2eSMel Gorman 
25631a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
25641a687c2eSMel Gorman 		set_numabalancing_state(true);
25651a687c2eSMel Gorman 		ret = 1;
25661a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
25671a687c2eSMel Gorman 		set_numabalancing_state(false);
25681a687c2eSMel Gorman 		ret = 1;
25691a687c2eSMel Gorman 	}
25701a687c2eSMel Gorman out:
25711a687c2eSMel Gorman 	if (!ret)
25721a687c2eSMel Gorman 		printk(KERN_WARNING "Unable to parse numa_balancing=\n");
25731a687c2eSMel Gorman 
25741a687c2eSMel Gorman 	return ret;
25751a687c2eSMel Gorman }
25761a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
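/*
 * Example: booting with "numa_balancing=disable" on the kernel
 * command line turns balancing off even when
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y; "numa_balancing=enable"
 * forces it on and suppresses the informational message above.
 */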
25771a687c2eSMel Gorman #else
25781a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25791a687c2eSMel Gorman {
25801a687c2eSMel Gorman }
25811a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25821a687c2eSMel Gorman 
25831da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25841da177e4SLinus Torvalds void __init numa_policy_init(void)
25851da177e4SLinus Torvalds {
2586b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2587b71636e2SPaul Mundt 	unsigned long largest = 0;
2588b71636e2SPaul Mundt 	int nid, prefer = 0;
2589b71636e2SPaul Mundt 
25901da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25911da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
259220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25931da177e4SLinus Torvalds 
25941da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25951da177e4SLinus Torvalds 				     sizeof(struct sp_node),
259620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
25971da177e4SLinus Torvalds 
25985606e387SMel Gorman 	for_each_node(nid) {
25995606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26005606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26015606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26025606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26035606e387SMel Gorman 			.v = { .preferred_node = nid, },
26045606e387SMel Gorman 		};
26055606e387SMel Gorman 	}
26065606e387SMel Gorman 
2607b71636e2SPaul Mundt 	/*
2608b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2609b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2610b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2611b71636e2SPaul Mundt 	 */
2612b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
261301f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2614b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26151da177e4SLinus Torvalds 
2616b71636e2SPaul Mundt 		/* Preserve the largest node */
2617b71636e2SPaul Mundt 		if (largest < total_pages) {
2618b71636e2SPaul Mundt 			largest = total_pages;
2619b71636e2SPaul Mundt 			prefer = nid;
2620b71636e2SPaul Mundt 		}
2621b71636e2SPaul Mundt 
2622b71636e2SPaul Mundt 		/* Interleave this node? */
2623b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2624b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2625b71636e2SPaul Mundt 	}
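	/*
	 * Worked example: with 4 KiB pages, the 16 MB cut-off above is
	 * 16 MB / 4 KiB = 4096 present pages per node.
	 */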
2626b71636e2SPaul Mundt 
2627b71636e2SPaul Mundt 	/* All too small, use the largest */
2628b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2629b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2630b71636e2SPaul Mundt 
2631028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
26321da177e4SLinus Torvalds 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
26331a687c2eSMel Gorman 
26341a687c2eSMel Gorman 	check_numabalancing_enable();
26351da177e4SLinus Torvalds }
26361da177e4SLinus Torvalds 
26378bccd85fSChristoph Lameter /* Reset policy of current process to default */
26381da177e4SLinus Torvalds void numa_default_policy(void)
26391da177e4SLinus Torvalds {
2640028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26411da177e4SLinus Torvalds }
264268860ec1SPaul Jackson 
26434225399aSPaul Jackson /*
2644095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2645095f1fc4SLee Schermerhorn  */
2646095f1fc4SLee Schermerhorn 
2647095f1fc4SLee Schermerhorn /*
2648f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
26491a75a6c8SChristoph Lameter  */
2650345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2651345ace9cSLee Schermerhorn {
2652345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2653345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2654345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2655345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2656d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2657345ace9cSLee Schermerhorn };
26581a75a6c8SChristoph Lameter 
2659095f1fc4SLee Schermerhorn 
2660095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2661095f1fc4SLee Schermerhorn /**
2662f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2663095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
266471fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2665095f1fc4SLee Schermerhorn  *
2666095f1fc4SLee Schermerhorn  * Format of input:
2667095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2668095f1fc4SLee Schermerhorn  *
266971fe804bSLee Schermerhorn  * On success, returns 0, else 1
2670095f1fc4SLee Schermerhorn  */
2671a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2672095f1fc4SLee Schermerhorn {
267371fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2674b4652e84SLee Schermerhorn 	unsigned short mode;
2675f2a07f40SHugh Dickins 	unsigned short mode_flags;
267671fe804bSLee Schermerhorn 	nodemask_t nodes;
2677095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2678095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2679095f1fc4SLee Schermerhorn 	int err = 1;
2680095f1fc4SLee Schermerhorn 
2681095f1fc4SLee Schermerhorn 	if (nodelist) {
2682095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2683095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
268471fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2685095f1fc4SLee Schermerhorn 			goto out;
268601f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2687095f1fc4SLee Schermerhorn 			goto out;
268871fe804bSLee Schermerhorn 	} else
268971fe804bSLee Schermerhorn 		nodes_clear(nodes);
269071fe804bSLee Schermerhorn 
2691095f1fc4SLee Schermerhorn 	if (flags)
2692095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2693095f1fc4SLee Schermerhorn 
2694479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2695345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2696095f1fc4SLee Schermerhorn 			break;
2697095f1fc4SLee Schermerhorn 		}
2698095f1fc4SLee Schermerhorn 	}
2699a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2700095f1fc4SLee Schermerhorn 		goto out;
2701095f1fc4SLee Schermerhorn 
270271fe804bSLee Schermerhorn 	switch (mode) {
2703095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
270471fe804bSLee Schermerhorn 		/*
270571fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
270671fe804bSLee Schermerhorn 		 */
2707095f1fc4SLee Schermerhorn 		if (nodelist) {
2708095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2709095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2710095f1fc4SLee Schermerhorn 				rest++;
2711926f2ae0SKOSAKI Motohiro 			if (*rest)
2712926f2ae0SKOSAKI Motohiro 				goto out;
2713095f1fc4SLee Schermerhorn 		}
2714095f1fc4SLee Schermerhorn 		break;
2715095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2716095f1fc4SLee Schermerhorn 		/*
2717095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2718095f1fc4SLee Schermerhorn 		 */
2719095f1fc4SLee Schermerhorn 		if (!nodelist)
272001f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27213f226aa1SLee Schermerhorn 		break;
272271fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27233f226aa1SLee Schermerhorn 		/*
272471fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27253f226aa1SLee Schermerhorn 		 */
272671fe804bSLee Schermerhorn 		if (nodelist)
27273f226aa1SLee Schermerhorn 			goto out;
272871fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27293f226aa1SLee Schermerhorn 		break;
2730413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2731413b43deSRavikiran G Thirumalai 		/*
2732413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2733413b43deSRavikiran G Thirumalai 		 */
2734413b43deSRavikiran G Thirumalai 		if (!nodelist)
2735413b43deSRavikiran G Thirumalai 			err = 0;
2736413b43deSRavikiran G Thirumalai 		goto out;
2737d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
273871fe804bSLee Schermerhorn 		/*
2739d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
274071fe804bSLee Schermerhorn 		 */
2741d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2742d69b2e63SKOSAKI Motohiro 			goto out;
2743095f1fc4SLee Schermerhorn 	}
2744095f1fc4SLee Schermerhorn 
274571fe804bSLee Schermerhorn 	mode_flags = 0;
2746095f1fc4SLee Schermerhorn 	if (flags) {
2747095f1fc4SLee Schermerhorn 		/*
2748095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2749095f1fc4SLee Schermerhorn 		 * mode flags.
2750095f1fc4SLee Schermerhorn 		 */
2751095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
275271fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2753095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
275471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2755095f1fc4SLee Schermerhorn 		else
2756926f2ae0SKOSAKI Motohiro 			goto out;
2757095f1fc4SLee Schermerhorn 	}
275871fe804bSLee Schermerhorn 
275971fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
276071fe804bSLee Schermerhorn 	if (IS_ERR(new))
2761926f2ae0SKOSAKI Motohiro 		goto out;
2762926f2ae0SKOSAKI Motohiro 
2763f2a07f40SHugh Dickins 	/*
2764f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2765f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2766f2a07f40SHugh Dickins 	 */
2767f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2768f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2769f2a07f40SHugh Dickins 	else if (nodelist)
2770f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2771f2a07f40SHugh Dickins 	else
2772f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2773f2a07f40SHugh Dickins 
2774f2a07f40SHugh Dickins 	/*
2775f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2776f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2777f2a07f40SHugh Dickins 	 */
2778e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2779f2a07f40SHugh Dickins 
2780926f2ae0SKOSAKI Motohiro 	err = 0;
278171fe804bSLee Schermerhorn 
2782095f1fc4SLee Schermerhorn out:
2783095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2784095f1fc4SLee Schermerhorn 	if (nodelist)
2785095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2786095f1fc4SLee Schermerhorn 	if (flags)
2787095f1fc4SLee Schermerhorn 		*--flags = '=';
278871fe804bSLee Schermerhorn 	if (!err)
278971fe804bSLee Schermerhorn 		*mpol = new;
2790095f1fc4SLee Schermerhorn 	return err;
2791095f1fc4SLee Schermerhorn }
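
/*
 * Hedged usage sketch (illustrative, not in-tree): parsing a tmpfs
 * "mpol=" mount option.  Note the string is modified in place, so it
 * must be writable; "example_parse" is a made-up name and nodes 0-3
 * are assumed to have memory.
 */
static int example_parse(void)
{
	char str[] = "interleave:0-3";
	struct mempolicy *pol;

	if (mpol_parse_str(str, &pol))	/* 0 on success, 1 on error */
		return -EINVAL;
	/*
	 * ... install pol, e.g. via mpol_shared_policy_init(), which
	 * consumes the reference; here we just drop it:
	 */
	mpol_put(pol);
	return 0;
}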
2792095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2793095f1fc4SLee Schermerhorn 
279471fe804bSLee Schermerhorn /**
279571fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
279671fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
279771fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
279871fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
279971fe804bSLee Schermerhorn  *
28001a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
28011a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
28021a75a6c8SChristoph Lameter  * or an error (negative)
28031a75a6c8SChristoph Lameter  */
2804a7a88b23SHugh Dickins int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28051a75a6c8SChristoph Lameter {
28061a75a6c8SChristoph Lameter 	char *p = buffer;
28071a75a6c8SChristoph Lameter 	int l;
28081a75a6c8SChristoph Lameter 	nodemask_t nodes;
2809bea904d5SLee Schermerhorn 	unsigned short mode;
2810f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
28111a75a6c8SChristoph Lameter 
28122291990aSLee Schermerhorn 	/*
28132291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
28142291990aSLee Schermerhorn 	 */
28152291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
28162291990aSLee Schermerhorn 
2817bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2818bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2819bea904d5SLee Schermerhorn 	else
2820bea904d5SLee Schermerhorn 		mode = pol->mode;
2821bea904d5SLee Schermerhorn 
28221a75a6c8SChristoph Lameter 	switch (mode) {
28231a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28241a75a6c8SChristoph Lameter 		nodes_clear(nodes);
28251a75a6c8SChristoph Lameter 		break;
28261a75a6c8SChristoph Lameter 
28271a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
28281a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2829fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2830f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
283153f2556bSLee Schermerhorn 		else
2832fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28331a75a6c8SChristoph Lameter 		break;
28341a75a6c8SChristoph Lameter 
28351a75a6c8SChristoph Lameter 	case MPOL_BIND:
283619770b32SMel Gorman 		/* Fall through */
28371a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28381a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28391a75a6c8SChristoph Lameter 		break;
28401a75a6c8SChristoph Lameter 
28411a75a6c8SChristoph Lameter 	default:
284280de7c31SDave Jones 		return -EINVAL;
28431a75a6c8SChristoph Lameter 	}
28441a75a6c8SChristoph Lameter 
2845345ace9cSLee Schermerhorn 	l = strlen(policy_modes[mode]);
28461a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
28471a75a6c8SChristoph Lameter 		return -ENOSPC;
28481a75a6c8SChristoph Lameter 
2849345ace9cSLee Schermerhorn 	strcpy(p, policy_modes[mode]);
28501a75a6c8SChristoph Lameter 	p += l;
28511a75a6c8SChristoph Lameter 
2852fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2853f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2854f5b087b5SDavid Rientjes 			return -ENOSPC;
2855f5b087b5SDavid Rientjes 		*p++ = '=';
2856f5b087b5SDavid Rientjes 
28572291990aSLee Schermerhorn 		/*
28582291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28592291990aSLee Schermerhorn 		 */
2860f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28612291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28622291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28632291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2864f5b087b5SDavid Rientjes 	}
2865f5b087b5SDavid Rientjes 
28661a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
28671a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
28681a75a6c8SChristoph Lameter 			return -ENOSPC;
2869095f1fc4SLee Schermerhorn 		*p++ = ':';
28701a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
28711a75a6c8SChristoph Lameter 	}
28721a75a6c8SChristoph Lameter 	return p - buffer;
28731a75a6c8SChristoph Lameter }
2874
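
/*
 * Hedged usage sketch (illustrative, not in-tree): formatting a policy
 * the way /proc/<pid>/numa_maps does.  "show_pol" is a made-up name;
 * the buffer must satisfy the VM_BUG_ON() size check above.
 */
static void show_pol(struct mempolicy *pol)
{
	char buf[64];

	if (mpol_to_str(buf, sizeof(buf), pol) >= 0)
		pr_info("policy: %s\n", buf);
}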