/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind truly restricted
 *                the allocation to the memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
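
/*
 * Illustrative userspace sketch (not part of this file): a VMA policy is
 * installed with the mbind(2) wrapper from <numaif.h> (libnuma).  The
 * mapping size and node numbers below are made up for the example.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	// interleave the mapping's pages across nodes 0 and 1
 *	mbind(buf, 1 << 20, MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8, 0);
 */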

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the disallowed nodes. This way we avoid a window in
	 * which no node is available to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
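
/*
 * Illustrative worked example (not from the source): rebinding an
 * MPOL_BIND policy from mems_allowed {0,1} to {2,3} without a read-side
 * lock.  After MPOL_REBIND_STEP1 the policy covers {0,1,2,3}, so a
 * concurrent allocation always finds a usable node; after
 * MPOL_REBIND_STEP2 only {2,3} remain.
 */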

/* Check that the nodemask contains at least one node with memory */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
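
/*
 * Illustrative worked example (not from the source): with *orig = {0,2}
 * and *rel = {4,5,6}, nodes_fold() wraps orig modulo nodes_weight(*rel)
 * == 3, leaving tmp = {0,2}; nodes_onto() then maps bit n of tmp to the
 * n-th set bit of *rel, giving *ret = {4,6}.
 */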

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
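
/*
 * Sketch of the typical call sequence (error handling omitted; see
 * do_set_mempolicy() below for the real thing): allocate scratch
 * nodemasks, create the policy, then set its nodemask under task_lock().
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */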

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
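
/*
 * Illustrative worked example (not from the source): suppose
 * pol->w.user_nodemask = {0,1} and the cpuset's new mems are {2,3}.
 * With MPOL_F_STATIC_NODES the intersection is empty, so tmp falls back
 * to the full new set {2,3}.  With MPOL_F_RELATIVE_NODES the user mask
 * is remapped relative to the new set, also yielding {2,3} here.
 */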

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. This way we avoid a window in which no node is
 * available to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to the new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/*
 * Scan through pages, checking whether each page matches the given
 * conditions, and move it to the pagelist if it does.
 */
static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
		pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
				    void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;
	spinlock_t *ptl;

	ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
}

static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range match the set of nodes (as determined
 * by @nodes and @flags), they are isolated and queued on the pagelist
 * passed via @private.
 */
static struct vm_area_struct *
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = queue_pages_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
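
/*
 * Illustrative worked example (not from the source): mbind() over
 * [0x2000, 0x3000) of a single vma [0x1000, 0x4000) that cannot be
 * merged with its neighbours first split_vma()s at 0x2000 and 0x3000,
 * leaving three vmas, and vma_replace_policy() then installs new_pol
 * on the middle one only.
 */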

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}
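
/*
 * Illustrative userspace sketch (not part of this file): a process
 * policy is installed with the set_mempolicy(2) wrapper from <numaif.h>.
 * The node number is made up for the example.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = 1UL << 0;		// node 0 only
 *	// all further allocations of this task prefer node 0
 *	set_mempolicy(MPOL_PREFERRED, &nodes, sizeof(nodes) * 8);
 */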

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
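
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * node backing an address with the get_mempolicy(2) wrapper from
 * <numaif.h>; 'buf' is assumed to be a valid, faulted-in mapping.
 *
 *	#include <numaif.h>
 *
 *	int node;
 *	// MPOL_F_NODE|MPOL_F_ADDR: return the node of the page at 'buf'
 *	get_mempolicy(&node, NULL, 0, buf, MPOL_F_NODE | MPOL_F_ADDR);
 */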

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
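
/*
 * Illustrative userspace sketch (not part of this file): moving all of a
 * task's pages from node 0 to node 1 with the migrate_pages(2) wrapper
 * from <numaif.h>; do_migrate_pages() below is the kernel side of this.
 *
 *	#include <numaif.h>
 *	#include <unistd.h>
 *
 *	unsigned long from = 1UL << 0;	// node 0
 *	unsigned long to   = 1UL << 1;	// node 1
 *	migrate_pages(getpid(), sizeof(from) * 8, &from, &to);
 */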
10707e2ab150SChristoph Lameter 
10717e2ab150SChristoph Lameter /*
10727e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10737e2ab150SChristoph Lameter  * layout as much as possible.
107439743889SChristoph Lameter  *
107539743889SChristoph Lameter  * Returns the number of pages that could not be moved.
107639743889SChristoph Lameter  */
10770ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10780ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
107939743889SChristoph Lameter {
10807e2ab150SChristoph Lameter 	int busy = 0;
10810aedadf9SChristoph Lameter 	int err;
10827e2ab150SChristoph Lameter 	nodemask_t tmp;
108339743889SChristoph Lameter 
10840aedadf9SChristoph Lameter 	err = migrate_prep();
10850aedadf9SChristoph Lameter 	if (err)
10860aedadf9SChristoph Lameter 		return err;
10870aedadf9SChristoph Lameter 
108839743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1089d4984711SChristoph Lameter 
10900ce72d4fSAndrew Morton 	err = migrate_vmas(mm, from, to, flags);
10917b2259b3SChristoph Lameter 	if (err)
10927b2259b3SChristoph Lameter 		goto out;
10937b2259b3SChristoph Lameter 
10947e2ab150SChristoph Lameter 	/*
10957e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10967e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10977e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10987e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10997e2ab150SChristoph Lameter 	 *
11007e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11017e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11027e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11037e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11047e2ab150SChristoph Lameter 	 *
11057e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11067e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11077e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11087e2ab150SChristoph Lameter 	 *
11097e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11107e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11117e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11127e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11137e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11147e2ab150SChristoph Lameter 	 *
11157e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11167e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11177e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11187e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out with that pair.
1119ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
11207e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11217e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11227e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11237e2ab150SChristoph Lameter 	 */
11247e2ab150SChristoph Lameter 
11250ce72d4fSAndrew Morton 	tmp = *from;
11267e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11277e2ab150SChristoph Lameter 		int s, d;
1128b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11297e2ab150SChristoph Lameter 		int dest = 0;
11307e2ab150SChristoph Lameter 
11317e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11324a5b18ccSLarry Woodman 
11334a5b18ccSLarry Woodman 			/*
11344a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11354a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11364a5b18ccSLarry Woodman 			 * threads and memory areas.
11374a5b18ccSLarry Woodman 			 *
11384a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11394a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11404a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11414a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11424a5b18ccSLarry Woodman 			 * mask.
11434a5b18ccSLarry Woodman 			 *
11444a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11454a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11464a5b18ccSLarry Woodman 			 */
11474a5b18ccSLarry Woodman 
11480ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11490ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11504a5b18ccSLarry Woodman 				continue;
11514a5b18ccSLarry Woodman 
11520ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11537e2ab150SChristoph Lameter 			if (s == d)
11547e2ab150SChristoph Lameter 				continue;
11557e2ab150SChristoph Lameter 
11567e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11577e2ab150SChristoph Lameter 			dest = d;
11587e2ab150SChristoph Lameter 
11597e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11607e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11617e2ab150SChristoph Lameter 				break;
11627e2ab150SChristoph Lameter 		}
1163b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11647e2ab150SChristoph Lameter 			break;
11657e2ab150SChristoph Lameter 
11667e2ab150SChristoph Lameter 		node_clear(source, tmp);
11677e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11687e2ab150SChristoph Lameter 		if (err > 0)
11697e2ab150SChristoph Lameter 			busy += err;
11707e2ab150SChristoph Lameter 		if (err < 0)
11717e2ab150SChristoph Lameter 			break;
117239743889SChristoph Lameter 	}
11737b2259b3SChristoph Lameter out:
117439743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11757e2ab150SChristoph Lameter 	if (err < 0)
11767e2ab150SChristoph Lameter 		return err;
11777e2ab150SChristoph Lameter 	return busy;
1178b20a3503SChristoph Lameter 
117939743889SChristoph Lameter }
118039743889SChristoph Lameter 
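/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of the <source, dest> pair selection above, using a plain 64-bit
 * mask in place of nodemask_t.  remap() models node_remap() (i-th set bit
 * of 'from' maps to the i-th set bit of 'to', modulo its weight); the
 * weight-mismatch special case is omitted.  All names here are
 * hypothetical.
 */
#if 0
#include <stdio.h>

static int remap(int s, unsigned long from, unsigned long to)
{
	int i, ord = 0, w = __builtin_popcountl(to);

	for (i = 0; i < s; i++)			/* ordinal of s within 'from' */
		if (from & (1UL << i))
			ord++;
	ord %= w;
	for (i = 0; ; i++)			/* ord-th set bit of 'to' */
		if ((to & (1UL << i)) && ord-- == 0)
			return i;
}

int main(void)
{
	unsigned long from = 0x1cUL;		/* nodes {2,3,4} */
	unsigned long to = 0x38UL;		/* nodes {3,4,5} */
	unsigned long tmp = from;

	while (tmp) {
		int s, source = -1, dest = 0;

		for (s = 0; s < 64; s++) {
			int d;

			if (!(tmp & (1UL << s)))
				continue;
			d = remap(s, from, to);
			if (s == d)
				continue;
			source = s;		/* remember most recent pair */
			dest = d;
			if (!(tmp & (1UL << dest)))
				break;		/* dest is an empty slot */
		}
		if (source < 0)
			break;
		tmp &= ~(1UL << source);
		/* prints 4->5, then 3->4, then 2->3: empty slots first */
		printf("migrate node %d -> node %d\n", source, dest);
	}
	return 0;
}
#endif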
11813ad33b24SLee Schermerhorn /*
11823ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
11833ad33b24SLee Schermerhorn  * Start assuming that page is mapped by vma pointed to by @private.
11843ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11853ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11863ad33b24SLee Schermerhorn  * is in virtual address order.
11873ad33b24SLee Schermerhorn  */
1188742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
118995a402c3SChristoph Lameter {
119095a402c3SChristoph Lameter 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
11913ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
119295a402c3SChristoph Lameter 
11933ad33b24SLee Schermerhorn 	while (vma) {
11943ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11953ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11963ad33b24SLee Schermerhorn 			break;
11973ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11983ad33b24SLee Schermerhorn 	}
11993ad33b24SLee Schermerhorn 
120011c731e8SWanpeng Li 	if (PageHuge(page)) {
1201cc81717eSMichal Hocko 		BUG_ON(!vma);
120274060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
120311c731e8SWanpeng Li 	}
120411c731e8SWanpeng Li 	/*
120511c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
120611c731e8SWanpeng Li 	 */
12073ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
120895a402c3SChristoph Lameter }
1209b20a3503SChristoph Lameter #else
1210b20a3503SChristoph Lameter 
1211b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1212b20a3503SChristoph Lameter 				unsigned long flags)
1213b20a3503SChristoph Lameter {
1214b20a3503SChristoph Lameter }
1215b20a3503SChristoph Lameter 
12160ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12170ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1218b20a3503SChristoph Lameter {
1219b20a3503SChristoph Lameter 	return -ENOSYS;
1220b20a3503SChristoph Lameter }
122195a402c3SChristoph Lameter 
122269939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
122395a402c3SChristoph Lameter {
122495a402c3SChristoph Lameter 	return NULL;
122595a402c3SChristoph Lameter }
1226b20a3503SChristoph Lameter #endif
1227b20a3503SChristoph Lameter 
1228dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1229028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1230028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12316ce3c4c0SChristoph Lameter {
12326ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
12336ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12346ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12356ce3c4c0SChristoph Lameter 	unsigned long end;
12366ce3c4c0SChristoph Lameter 	int err;
12376ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12386ce3c4c0SChristoph Lameter 
1239b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12406ce3c4c0SChristoph Lameter 		return -EINVAL;
124174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12426ce3c4c0SChristoph Lameter 		return -EPERM;
12436ce3c4c0SChristoph Lameter 
12446ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12456ce3c4c0SChristoph Lameter 		return -EINVAL;
12466ce3c4c0SChristoph Lameter 
12476ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12486ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12496ce3c4c0SChristoph Lameter 
12506ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12516ce3c4c0SChristoph Lameter 	end = start + len;
12526ce3c4c0SChristoph Lameter 
12536ce3c4c0SChristoph Lameter 	if (end < start)
12546ce3c4c0SChristoph Lameter 		return -EINVAL;
12556ce3c4c0SChristoph Lameter 	if (end == start)
12566ce3c4c0SChristoph Lameter 		return 0;
12576ce3c4c0SChristoph Lameter 
1258028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12596ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12606ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12616ce3c4c0SChristoph Lameter 
1262b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1263b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1264b24f53a0SLee Schermerhorn 
12656ce3c4c0SChristoph Lameter 	/*
12666ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12676ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12686ce3c4c0SChristoph Lameter 	 */
12696ce3c4c0SChristoph Lameter 	if (!new)
12706ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12716ce3c4c0SChristoph Lameter 
1272028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1273028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
127400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12756ce3c4c0SChristoph Lameter 
12760aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12770aedadf9SChristoph Lameter 
12780aedadf9SChristoph Lameter 		err = migrate_prep();
12790aedadf9SChristoph Lameter 		if (err)
1280b05ca738SKOSAKI Motohiro 			goto mpol_out;
12810aedadf9SChristoph Lameter 	}
12824bfc4495SKAMEZAWA Hiroyuki 	{
12834bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12844bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12856ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
128658568d2aSMiao Xie 			task_lock(current);
12874bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
128858568d2aSMiao Xie 			task_unlock(current);
12894bfc4495SKAMEZAWA Hiroyuki 			if (err)
129058568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12914bfc4495SKAMEZAWA Hiroyuki 		} else
12924bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12934bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12944bfc4495SKAMEZAWA Hiroyuki 	}
1295b05ca738SKOSAKI Motohiro 	if (err)
1296b05ca738SKOSAKI Motohiro 		goto mpol_out;
1297b05ca738SKOSAKI Motohiro 
129898094945SNaoya Horiguchi 	vma = queue_pages_range(mm, start, end, nmask,
12996ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
13006ce3c4c0SChristoph Lameter 
1301b24f53a0SLee Schermerhorn 	err = PTR_ERR(vma);	/* maybe ... */
1302a720094dSMel Gorman 	if (!IS_ERR(vma))
13039d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
13047e2ab150SChristoph Lameter 
1305b24f53a0SLee Schermerhorn 	if (!err) {
1306b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1307b24f53a0SLee Schermerhorn 
1308cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1309b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
131095a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
13117f0f2496SMel Gorman 					(unsigned long)vma,
13129c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1313cf608ac1SMinchan Kim 			if (nr_failed)
131474060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1315cf608ac1SMinchan Kim 		}
13166ce3c4c0SChristoph Lameter 
1317b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
13186ce3c4c0SChristoph Lameter 			err = -EIO;
1319ab8a3e14SKOSAKI Motohiro 	} else
1320b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1321b20a3503SChristoph Lameter 
13226ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1323b05ca738SKOSAKI Motohiro  mpol_out:
1324f0be3d32SLee Schermerhorn 	mpol_put(new);
13256ce3c4c0SChristoph Lameter 	return err;
13266ce3c4c0SChristoph Lameter }
13276ce3c4c0SChristoph Lameter 
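/*
 * Illustrative only, not part of this file: a minimal userspace sketch of
 * the mbind(2) interface that do_mbind() implements.  Assumes libnuma's
 * <numaif.h> wrapper and that node 0 exists; link with -lnuma.
 */
#if 0
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	size_t len = 4096 * 16;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Bind the range to node 0; pages fault in on first touch. */
	if (mbind(p, len, MPOL_BIND, &nodemask,
		  8 * sizeof(nodemask), MPOL_MF_STRICT))
		perror("mbind");
	return 0;
}
#endif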
132839743889SChristoph Lameter /*
13298bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13308bccd85fSChristoph Lameter  */
13318bccd85fSChristoph Lameter 
13328bccd85fSChristoph Lameter /* Copy a node mask from user space. */
133339743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13348bccd85fSChristoph Lameter 		     unsigned long maxnode)
13358bccd85fSChristoph Lameter {
13368bccd85fSChristoph Lameter 	unsigned long k;
13378bccd85fSChristoph Lameter 	unsigned long nlongs;
13388bccd85fSChristoph Lameter 	unsigned long endmask;
13398bccd85fSChristoph Lameter 
13408bccd85fSChristoph Lameter 	--maxnode;
13418bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13428bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13438bccd85fSChristoph Lameter 		return 0;
1344a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1345636f13c1SChris Wright 		return -EINVAL;
13468bccd85fSChristoph Lameter 
13478bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13488bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13498bccd85fSChristoph Lameter 		endmask = ~0UL;
13508bccd85fSChristoph Lameter 	else
13518bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13528bccd85fSChristoph Lameter 
13538bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
13548bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
13558bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13568bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
13578bccd85fSChristoph Lameter 			return -EINVAL;
13588bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13598bccd85fSChristoph Lameter 			unsigned long t;
13608bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13618bccd85fSChristoph Lameter 				return -EFAULT;
13628bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13638bccd85fSChristoph Lameter 				if (t & endmask)
13648bccd85fSChristoph Lameter 					return -EINVAL;
13658bccd85fSChristoph Lameter 			} else if (t)
13668bccd85fSChristoph Lameter 				return -EINVAL;
13678bccd85fSChristoph Lameter 		}
13688bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13698bccd85fSChristoph Lameter 		endmask = ~0UL;
13708bccd85fSChristoph Lameter 	}
13718bccd85fSChristoph Lameter 
13728bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13738bccd85fSChristoph Lameter 		return -EFAULT;
13748bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13758bccd85fSChristoph Lameter 	return 0;
13768bccd85fSChristoph Lameter }
13778bccd85fSChristoph Lameter 
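/*
 * Illustrative only, not part of this file: how userspace lays out the
 * variable-sized nodemask that get_nodes() parses.  Node n lives at bit
 * (n % bits-per-long) of word (n / bits-per-long), and maxnode is a bit
 * count, not a byte count (the kernel trims one bit off the top).
 */
#if 0
#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BPL		(CHAR_BIT * sizeof(unsigned long))
#define NODE_WORDS(max)	(((max) + BPL - 1) / BPL)

int main(void)
{
	unsigned long mask[NODE_WORDS(128)];
	int node;

	memset(mask, 0, sizeof(mask));
	for (node = 1; node <= 3; node += 2)	/* set nodes 1 and 3 */
		mask[node / BPL] |= 1UL << (node % BPL);
	printf("word0 = %#lx\n", mask[0]);	/* prints 0xa */
	return 0;
}
#endif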
13788bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13798bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13808bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13818bccd85fSChristoph Lameter {
13828bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13838bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13848bccd85fSChristoph Lameter 
13858bccd85fSChristoph Lameter 	if (copy > nbytes) {
13868bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13878bccd85fSChristoph Lameter 			return -EINVAL;
13888bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13898bccd85fSChristoph Lameter 			return -EFAULT;
13908bccd85fSChristoph Lameter 		copy = nbytes;
13918bccd85fSChristoph Lameter 	}
13928bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13938bccd85fSChristoph Lameter }
13948bccd85fSChristoph Lameter 
1395938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1396938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1397938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13988bccd85fSChristoph Lameter {
13998bccd85fSChristoph Lameter 	nodemask_t nodes;
14008bccd85fSChristoph Lameter 	int err;
1401028fec41SDavid Rientjes 	unsigned short mode_flags;
14028bccd85fSChristoph Lameter 
1403028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1404028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1405a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1406a3b51e01SDavid Rientjes 		return -EINVAL;
14074c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14084c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14094c50bc01SDavid Rientjes 		return -EINVAL;
14108bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14118bccd85fSChristoph Lameter 	if (err)
14128bccd85fSChristoph Lameter 		return err;
1413028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14148bccd85fSChristoph Lameter }
14158bccd85fSChristoph Lameter 
14168bccd85fSChristoph Lameter /* Set the process memory policy */
1417938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1418938bb9f5SHeiko Carstens 		unsigned long, maxnode)
14198bccd85fSChristoph Lameter {
14208bccd85fSChristoph Lameter 	int err;
14218bccd85fSChristoph Lameter 	nodemask_t nodes;
1422028fec41SDavid Rientjes 	unsigned short flags;
14238bccd85fSChristoph Lameter 
1424028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1425028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1426028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14278bccd85fSChristoph Lameter 		return -EINVAL;
14284c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
14294c50bc01SDavid Rientjes 		return -EINVAL;
14308bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14318bccd85fSChristoph Lameter 	if (err)
14328bccd85fSChristoph Lameter 		return err;
1433028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14348bccd85fSChristoph Lameter }
14358bccd85fSChristoph Lameter 
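/*
 * Illustrative only: setting a process-wide interleave policy via the
 * syscall defined above.  Assumes libnuma's <numaif.h> wrapper and at
 * least two nodes; link with -lnuma.
 */
#if 0
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);	/* nodes 0,1 */

	/* Subsequent allocations by this task interleave over nodes 0,1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask)))
		perror("set_mempolicy");
	return 0;
}
#endif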
1436938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1437938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1438938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
143939743889SChristoph Lameter {
1440c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1441596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
144239743889SChristoph Lameter 	struct task_struct *task;
144339743889SChristoph Lameter 	nodemask_t task_nodes;
144439743889SChristoph Lameter 	int err;
1445596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1446596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1447596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
144839743889SChristoph Lameter 
1449596d7cfaSKOSAKI Motohiro 	if (!scratch)
1450596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
145139743889SChristoph Lameter 
1452596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1453596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1454596d7cfaSKOSAKI Motohiro 
1455596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
145639743889SChristoph Lameter 	if (err)
1457596d7cfaSKOSAKI Motohiro 		goto out;
1458596d7cfaSKOSAKI Motohiro 
1459596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1460596d7cfaSKOSAKI Motohiro 	if (err)
1461596d7cfaSKOSAKI Motohiro 		goto out;
146239743889SChristoph Lameter 
146339743889SChristoph Lameter 	/* Find the mm_struct */
146455cfaa3cSZeng Zhaoming 	rcu_read_lock();
1465228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
146639743889SChristoph Lameter 	if (!task) {
146755cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1468596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1469596d7cfaSKOSAKI Motohiro 		goto out;
147039743889SChristoph Lameter 	}
14713268c63eSChristoph Lameter 	get_task_struct(task);
147239743889SChristoph Lameter 
1473596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
147439743889SChristoph Lameter 
147539743889SChristoph Lameter 	/*
147639743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
147739743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14787f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
147939743889SChristoph Lameter 	 * userid as the target process.
148039743889SChristoph Lameter 	 */
1481c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1482b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1483b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
148474c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1485c69e8d9cSDavid Howells 		rcu_read_unlock();
148639743889SChristoph Lameter 		err = -EPERM;
14873268c63eSChristoph Lameter 		goto out_put;
148839743889SChristoph Lameter 	}
1489c69e8d9cSDavid Howells 	rcu_read_unlock();
149039743889SChristoph Lameter 
149139743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
149239743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1493596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
149439743889SChristoph Lameter 		err = -EPERM;
14953268c63eSChristoph Lameter 		goto out_put;
149639743889SChristoph Lameter 	}
149739743889SChristoph Lameter 
149801f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14993b42d28bSChristoph Lameter 		err = -EINVAL;
15003268c63eSChristoph Lameter 		goto out_put;
15013b42d28bSChristoph Lameter 	}
15023b42d28bSChristoph Lameter 
150386c3a764SDavid Quigley 	err = security_task_movememory(task);
150486c3a764SDavid Quigley 	if (err)
15053268c63eSChristoph Lameter 		goto out_put;
150686c3a764SDavid Quigley 
15073268c63eSChristoph Lameter 	mm = get_task_mm(task);
15083268c63eSChristoph Lameter 	put_task_struct(task);
1509f2a9ef88SSasha Levin 
1510f2a9ef88SSasha Levin 	if (!mm) {
1511f2a9ef88SSasha Levin 		err = -EINVAL;
1512f2a9ef88SSasha Levin 		goto out;
1513f2a9ef88SSasha Levin 	}
1514f2a9ef88SSasha Levin 
1515596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
151674c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15173268c63eSChristoph Lameter 
151839743889SChristoph Lameter 	mmput(mm);
15193268c63eSChristoph Lameter out:
1520596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1521596d7cfaSKOSAKI Motohiro 
152239743889SChristoph Lameter 	return err;
15233268c63eSChristoph Lameter 
15243268c63eSChristoph Lameter out_put:
15253268c63eSChristoph Lameter 	put_task_struct(task);
15263268c63eSChristoph Lameter 	goto out;
15273268c63eSChristoph Lameter 
152839743889SChristoph Lameter }
152939743889SChristoph Lameter 
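/*
 * Illustrative only: the userspace side of the migrate_pages(2) syscall
 * above, moving a process's pages from node 0 to node 1.  Assumes
 * libnuma's <numaif.h> wrapper and CAP_SYS_NICE or a matching uid;
 * link with -lnuma.
 */
#if 0
#include <numaif.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long old_nodes = 1UL << 0;		/* from node 0 */
	unsigned long new_nodes = 1UL << 1;		/* to node 1 */
	pid_t pid = argc > 1 ? atoi(argv[1]) : 0;	/* 0 == self */
	long left;

	left = migrate_pages(pid, 8 * sizeof(unsigned long),
			     &old_nodes, &new_nodes);
	if (left < 0)
		perror("migrate_pages");
	else if (left > 0)
		printf("%ld pages could not be moved\n", left);
	return 0;
}
#endif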
153039743889SChristoph Lameter 
15318bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1532938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1533938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1534938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
15358bccd85fSChristoph Lameter {
1536dbcb0f19SAdrian Bunk 	int err;
1537dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15388bccd85fSChristoph Lameter 	nodemask_t nodes;
15398bccd85fSChristoph Lameter 
15408bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
15418bccd85fSChristoph Lameter 		return -EINVAL;
15428bccd85fSChristoph Lameter 
15438bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15448bccd85fSChristoph Lameter 
15458bccd85fSChristoph Lameter 	if (err)
15468bccd85fSChristoph Lameter 		return err;
15478bccd85fSChristoph Lameter 
15488bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15498bccd85fSChristoph Lameter 		return -EFAULT;
15508bccd85fSChristoph Lameter 
15518bccd85fSChristoph Lameter 	if (nmask)
15528bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15538bccd85fSChristoph Lameter 
15548bccd85fSChristoph Lameter 	return err;
15558bccd85fSChristoph Lameter }
15568bccd85fSChristoph Lameter 
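/*
 * Illustrative only: querying the policy in effect for an address via the
 * syscall above.  MPOL_F_ADDR selects the VMA policy for @addr rather
 * than the task policy.  Assumes libnuma's <numaif.h> and a kernel with
 * MAX_NUMNODES <= 1024 (the buffer must cover MAX_NUMNODES bits);
 * link with -lnuma.
 */
#if 0
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask[16] = { 0 };	/* room for 1024 node bits */
	int mode;

	/* &mode is a stack address, so it lies inside some VMA. */
	if (get_mempolicy(&mode, nodemask, 8 * sizeof(nodemask),
			  &mode, MPOL_F_ADDR))
		perror("get_mempolicy");
	else
		printf("mode %d word0 %#lx\n", mode, nodemask[0]);
	return 0;
}
#endif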
15571da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15581da177e4SLinus Torvalds 
15591da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
15601da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
15611da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
15621da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
15631da177e4SLinus Torvalds {
15641da177e4SLinus Torvalds 	long err;
15651da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15661da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15671da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15701da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15711da177e4SLinus Torvalds 
15721da177e4SLinus Torvalds 	if (nmask)
15731da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15761da177e4SLinus Torvalds 
15771da177e4SLinus Torvalds 	if (!err && nmask) {
15782bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15792bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15802bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15811da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15821da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15831da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15841da177e4SLinus Torvalds 	}
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds 	return err;
15871da177e4SLinus Torvalds }
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
15901da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
15911da177e4SLinus Torvalds {
15921da177e4SLinus Torvalds 	long err = 0;
15931da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15941da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15951da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15961da177e4SLinus Torvalds 
15971da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15981da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15991da177e4SLinus Torvalds 
16001da177e4SLinus Torvalds 	if (nmask) {
16011da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
16021da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16031da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
16041da177e4SLinus Torvalds 	}
16051da177e4SLinus Torvalds 
16061da177e4SLinus Torvalds 	if (err)
16071da177e4SLinus Torvalds 		return -EFAULT;
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
16101da177e4SLinus Torvalds }
16111da177e4SLinus Torvalds 
16121da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
16131da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
16141da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
16151da177e4SLinus Torvalds {
16161da177e4SLinus Torvalds 	long err = 0;
16171da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16181da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1619dfcd3c0dSAndi Kleen 	nodemask_t bm;
16201da177e4SLinus Torvalds 
16211da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16221da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16231da177e4SLinus Torvalds 
16241da177e4SLinus Torvalds 	if (nmask) {
1625dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
16261da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1627dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
16281da177e4SLinus Torvalds 	}
16291da177e4SLinus Torvalds 
16301da177e4SLinus Torvalds 	if (err)
16311da177e4SLinus Torvalds 		return -EFAULT;
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
16341da177e4SLinus Torvalds }
16351da177e4SLinus Torvalds 
16361da177e4SLinus Torvalds #endif
16371da177e4SLinus Torvalds 
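/*
 * Illustrative only: the bitmap repacking the compat wrappers above rely
 * on -- a 32-bit userland hands in an array of 32-bit words, which must
 * be repacked into native unsigned longs.  Hypothetical helper, shown for
 * a 64-bit kernel (compat_get_bitmap() does the real work).
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static void repack_compat_bitmap(unsigned long *dst, const uint32_t *src,
				 size_t nr_bits)
{
	size_t i, words32 = (nr_bits + 31) / 32;

	for (i = 0; i < words32; i++) {
		if (i % 2 == 0)
			dst[i / 2] = src[i];			  /* low half */
		else
			dst[i / 2] |= (unsigned long)src[i] << 32; /* high half */
	}
}
#endif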
1638480eccf9SLee Schermerhorn /*
1639480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1640480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1641480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1642480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1643480eccf9SLee Schermerhorn  *
1644480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1645480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
164632f8516aSDavid Rientjes  * Current or other task's task mempolicy and non-shared vma policies must be
164732f8516aSDavid Rientjes  * protected by task_lock(task) by the caller.
164852cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
164952cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
165052cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
165152cd3b07SLee Schermerhorn  * extra reference for shared policies.
1652480eccf9SLee Schermerhorn  */
1653d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task,
165448fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
16551da177e4SLinus Torvalds {
16565606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(task);
16571da177e4SLinus Torvalds 
16581da177e4SLinus Torvalds 	if (vma) {
1659480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1660ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1661ae4d8c16SLee Schermerhorn 									addr);
1662ae4d8c16SLee Schermerhorn 			if (vpol)
1663ae4d8c16SLee Schermerhorn 				pol = vpol;
166400442ad0SMel Gorman 		} else if (vma->vm_policy) {
16651da177e4SLinus Torvalds 			pol = vma->vm_policy;
166600442ad0SMel Gorman 
166700442ad0SMel Gorman 			/*
166800442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
166900442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
167000442ad0SMel Gorman 			 * count on these policies which will be dropped by
167100442ad0SMel Gorman 			 * mpol_cond_put() later
167200442ad0SMel Gorman 			 */
167300442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
167400442ad0SMel Gorman 				mpol_get(pol);
167500442ad0SMel Gorman 		}
16761da177e4SLinus Torvalds 	}
16771da177e4SLinus Torvalds 	if (!pol)
16781da177e4SLinus Torvalds 		pol = &default_policy;
16791da177e4SLinus Torvalds 	return pol;
16801da177e4SLinus Torvalds }
16811da177e4SLinus Torvalds 
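/*
 * Usage sketch only (not compiled): the reference-counting contract of
 * get_vma_policy() described above.  Shared policies carry an extra
 * reference that the caller must drop; mpol_cond_put() drops it only when
 * MPOL_F_SHARED is set, so this pairing is safe for all fallback cases.
 */
#if 0
	struct mempolicy *pol = get_vma_policy(current, vma, addr);

	/* ... consult pol->mode / pol->v.nodes for an allocation ... */

	mpol_cond_put(pol);	/* no-op unless pol is a shared policy */
#endif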
1682fc314724SMel Gorman bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
1683fc314724SMel Gorman {
1684fc314724SMel Gorman 	struct mempolicy *pol = get_task_policy(task);
1685fc314724SMel Gorman 	if (vma) {
1686fc314724SMel Gorman 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1687fc314724SMel Gorman 			bool ret = false;
1688fc314724SMel Gorman 
1689fc314724SMel Gorman 			pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1690fc314724SMel Gorman 			if (pol && (pol->flags & MPOL_F_MOF))
1691fc314724SMel Gorman 				ret = true;
1692fc314724SMel Gorman 			mpol_cond_put(pol);
1693fc314724SMel Gorman 
1694fc314724SMel Gorman 			return ret;
1695fc314724SMel Gorman 		} else if (vma->vm_policy) {
1696fc314724SMel Gorman 			pol = vma->vm_policy;
1697fc314724SMel Gorman 		}
1698fc314724SMel Gorman 	}
1699fc314724SMel Gorman 
1700fc314724SMel Gorman 	if (!pol)
1701fc314724SMel Gorman 		return default_policy.flags & MPOL_F_MOF;
1702fc314724SMel Gorman 
1703fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1704fc314724SMel Gorman }
1705fc314724SMel Gorman 
1706d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1707d3eb1570SLai Jiangshan {
1708d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1709d3eb1570SLai Jiangshan 
1710d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1711d3eb1570SLai Jiangshan 
1712d3eb1570SLai Jiangshan 	/*
1713d3eb1570SLai Jiangshan 	 * If policy->v.nodes contains only movable memory, apply the
1714d3eb1570SLai Jiangshan 	 * policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1715d3eb1570SLai Jiangshan 	 *
1716d3eb1570SLai Jiangshan 	 * policy->v.nodes has already been intersected with
1717d3eb1570SLai Jiangshan 	 * node_states[N_MEMORY], so if the following test fails, it
1718d3eb1570SLai Jiangshan 	 * implies policy->v.nodes contains only movable memory.
1719d3eb1570SLai Jiangshan 	 */
1720d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1721d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1722d3eb1570SLai Jiangshan 
1723d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1724d3eb1570SLai Jiangshan }
1725d3eb1570SLai Jiangshan 
172652cd3b07SLee Schermerhorn /*
172752cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
172852cd3b07SLee Schermerhorn  * page allocation
172952cd3b07SLee Schermerhorn  */
173052cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
173119770b32SMel Gorman {
173219770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
173345c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1734d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
173519770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
173619770b32SMel Gorman 		return &policy->v.nodes;
173719770b32SMel Gorman 
173819770b32SMel Gorman 	return NULL;
173919770b32SMel Gorman }
174019770b32SMel Gorman 
174152cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
17422f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
17432f5f9486SAndi Kleen 	int nd)
17441da177e4SLinus Torvalds {
174545c4745aSLee Schermerhorn 	switch (policy->mode) {
17461da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1747fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
17481da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
17491da177e4SLinus Torvalds 		break;
17501da177e4SLinus Torvalds 	case MPOL_BIND:
175119770b32SMel Gorman 		/*
175252cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
175352cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
17546eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
175552cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
175619770b32SMel Gorman 		 */
175719770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
175819770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
175919770b32SMel Gorman 			nd = first_node(policy->v.nodes);
176019770b32SMel Gorman 		break;
17611da177e4SLinus Torvalds 	default:
17621da177e4SLinus Torvalds 		BUG();
17631da177e4SLinus Torvalds 	}
17640e88460dSMel Gorman 	return node_zonelist(nd, gfp);
17651da177e4SLinus Torvalds }
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17681da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17691da177e4SLinus Torvalds {
17701da177e4SLinus Torvalds 	unsigned nid, next;
17711da177e4SLinus Torvalds 	struct task_struct *me = current;
17721da177e4SLinus Torvalds 
17731da177e4SLinus Torvalds 	nid = me->il_next;
1774dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
17751da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1776dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1777f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17781da177e4SLinus Torvalds 		me->il_next = next;
17791da177e4SLinus Torvalds 	return nid;
17801da177e4SLinus Torvalds }
17811da177e4SLinus Torvalds 
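/*
 * Illustrative only: the round-robin walk interleave_nodes() performs,
 * modeled in userspace with a plain 64-bit mask.  With nodes {0,2,3} the
 * cursor visits 0, 2, 3, 0, 2, 3, ...  All names are hypothetical.
 */
#if 0
#include <stdio.h>

static int next_set_bit(unsigned long mask, int after)
{
	int i;

	for (i = after + 1; i < 64; i++)
		if (mask & (1UL << i))
			return i;
	return 64;			/* plays the MAX_NUMNODES role */
}

int main(void)
{
	unsigned long nodes = 0x0dUL;	/* {0,2,3} */
	int il_next = 0, i;

	for (i = 0; i < 6; i++) {
		int nid = il_next;
		int next = next_set_bit(nodes, nid);

		if (next >= 64)
			next = next_set_bit(nodes, -1);	/* wrap to first */
		il_next = next;
		printf("allocate on node %d\n", nid);
	}
	return 0;
}
#endif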
1782dc85da15SChristoph Lameter /*
1783dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1784dc85da15SChristoph Lameter  * next slab entry.
178552cd3b07SLee Schermerhorn  * @policy must be protected by freeing by the caller.  If @policy is
178652cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
178752cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
178852cd3b07SLee Schermerhorn  * such protection.
1789dc85da15SChristoph Lameter  */
1790e7b691b0SAndi Kleen unsigned slab_node(void)
1791dc85da15SChristoph Lameter {
1792e7b691b0SAndi Kleen 	struct mempolicy *policy;
1793e7b691b0SAndi Kleen 
1794e7b691b0SAndi Kleen 	if (in_interrupt())
1795e7b691b0SAndi Kleen 		return numa_node_id();
1796e7b691b0SAndi Kleen 
1797e7b691b0SAndi Kleen 	policy = current->mempolicy;
1798fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1799bea904d5SLee Schermerhorn 		return numa_node_id();
1800765c4507SChristoph Lameter 
1801bea904d5SLee Schermerhorn 	switch (policy->mode) {
1802bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1803fc36b8d3SLee Schermerhorn 		/*
1804fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1805fc36b8d3SLee Schermerhorn 		 */
1806bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1807bea904d5SLee Schermerhorn 
1808dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1809dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1810dc85da15SChristoph Lameter 
1811dd1a239fSMel Gorman 	case MPOL_BIND: {
1812dc85da15SChristoph Lameter 		/*
1813dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1814dc85da15SChristoph Lameter 		 * first node.
1815dc85da15SChristoph Lameter 		 */
181619770b32SMel Gorman 		struct zonelist *zonelist;
181719770b32SMel Gorman 		struct zone *zone;
181819770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
181919770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
182019770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
182119770b32SMel Gorman 							&policy->v.nodes,
182219770b32SMel Gorman 							&zone);
1823800416f7SEric Dumazet 		return zone ? zone->node : numa_node_id();
1824dd1a239fSMel Gorman 	}
1825dc85da15SChristoph Lameter 
1826dc85da15SChristoph Lameter 	default:
1827bea904d5SLee Schermerhorn 		BUG();
1828dc85da15SChristoph Lameter 	}
1829dc85da15SChristoph Lameter }
1830dc85da15SChristoph Lameter 
18311da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
18321da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
18331da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
18341da177e4SLinus Torvalds {
1835dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1836f5b087b5SDavid Rientjes 	unsigned target;
18371da177e4SLinus Torvalds 	int c;
1838b76ac7e7SJianguo Wu 	int nid = NUMA_NO_NODE;
18391da177e4SLinus Torvalds 
1840f5b087b5SDavid Rientjes 	if (!nnodes)
1841f5b087b5SDavid Rientjes 		return numa_node_id();
1842f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
18431da177e4SLinus Torvalds 	c = 0;
18441da177e4SLinus Torvalds 	do {
1845dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18461da177e4SLinus Torvalds 		c++;
18471da177e4SLinus Torvalds 	} while (c <= target);
18481da177e4SLinus Torvalds 	return nid;
18491da177e4SLinus Torvalds }
18501da177e4SLinus Torvalds 
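/*
 * Illustrative only: the static interleave computation above, worked for
 * a three-node mask.  With the mask {1,3,4} and off = 7, target is
 * 7 % 3 = 1 and the walk lands on the second set bit, node 3 -- the same
 * offset always maps to the same node.  Hypothetical userspace model.
 */
#if 0
#include <stdio.h>

static int offset_to_node(unsigned long nodes, int nnodes, unsigned off)
{
	unsigned target = off % nnodes;
	int nid = -1, c = 0;

	do {
		do
			nid++;
		while (!(nodes & (1UL << nid)));
		c++;
	} while (c <= target);
	return nid;
}

int main(void)
{
	printf("%d\n", offset_to_node(0x1aUL, 3, 7));	/* prints 3 */
	return 0;
}
#endif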
18515da7ca86SChristoph Lameter /* Determine a node number for interleave */
18525da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18535da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18545da7ca86SChristoph Lameter {
18555da7ca86SChristoph Lameter 	if (vma) {
18565da7ca86SChristoph Lameter 		unsigned long off;
18575da7ca86SChristoph Lameter 
18583b98b087SNishanth Aravamudan 		/*
18593b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18603b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18613b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18623b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18633b98b087SNishanth Aravamudan 		 * a useful offset.
18643b98b087SNishanth Aravamudan 		 */
18653b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18663b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18675da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
18685da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
18695da7ca86SChristoph Lameter 	} else
18705da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18715da7ca86SChristoph Lameter }
18725da7ca86SChristoph Lameter 
1873778d3b0fSMichal Hocko /*
1874778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1875b76ac7e7SJianguo Wu  * (returns NUMA_NO_NODE if nodemask is empty)
1876778d3b0fSMichal Hocko  */
1877778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1878778d3b0fSMichal Hocko {
1879b76ac7e7SJianguo Wu 	int w, bit = NUMA_NO_NODE;
1880778d3b0fSMichal Hocko 
1881778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1882778d3b0fSMichal Hocko 	if (w)
1883778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1884778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1885778d3b0fSMichal Hocko 	return bit;
1886778d3b0fSMichal Hocko }
1887778d3b0fSMichal Hocko 
188800ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1889480eccf9SLee Schermerhorn /*
1890480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1891480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1892480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1893480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
189419770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
189519770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1896480eccf9SLee Schermerhorn  *
189752cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
189852cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
189952cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
190052cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1901c0ff7453SMiao Xie  *
1902c0ff7453SMiao Xie  * Must be protected by get_mems_allowed()
1903480eccf9SLee Schermerhorn  */
1904396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
190519770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
190619770b32SMel Gorman 				nodemask_t **nodemask)
19075da7ca86SChristoph Lameter {
1908480eccf9SLee Schermerhorn 	struct zonelist *zl;
19095da7ca86SChristoph Lameter 
191052cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
191119770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
19125da7ca86SChristoph Lameter 
191352cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
191452cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1915a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
191652cd3b07SLee Schermerhorn 	} else {
19172f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
191852cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
191952cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1920480eccf9SLee Schermerhorn 	}
1921480eccf9SLee Schermerhorn 	return zl;
19225da7ca86SChristoph Lameter }
192306808b08SLee Schermerhorn 
192406808b08SLee Schermerhorn /*
192506808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
192606808b08SLee Schermerhorn  *
192706808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
192806808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
192906808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
193006808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
193106808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
193206808b08SLee Schermerhorn  * of non-default mempolicy.
193306808b08SLee Schermerhorn  *
193406808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
193506808b08SLee Schermerhorn  * because the current task is examining it's own mempolicy and a task's
193606808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
193706808b08SLee Schermerhorn  *
193806808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
193906808b08SLee Schermerhorn  */
194006808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
194106808b08SLee Schermerhorn {
194206808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
194306808b08SLee Schermerhorn 	int nid;
194406808b08SLee Schermerhorn 
194506808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
194606808b08SLee Schermerhorn 		return false;
194706808b08SLee Schermerhorn 
1948c0ff7453SMiao Xie 	task_lock(current);
194906808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
195006808b08SLee Schermerhorn 	switch (mempolicy->mode) {
195106808b08SLee Schermerhorn 	case MPOL_PREFERRED:
195206808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
195306808b08SLee Schermerhorn 			nid = numa_node_id();
195406808b08SLee Schermerhorn 		else
195506808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
195606808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
195706808b08SLee Schermerhorn 		break;
195806808b08SLee Schermerhorn 
195906808b08SLee Schermerhorn 	case MPOL_BIND:
196006808b08SLee Schermerhorn 		/* Fall through */
196106808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
196206808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
196306808b08SLee Schermerhorn 		break;
196406808b08SLee Schermerhorn 
196506808b08SLee Schermerhorn 	default:
196606808b08SLee Schermerhorn 		BUG();
196706808b08SLee Schermerhorn 	}
1968c0ff7453SMiao Xie 	task_unlock(current);
196906808b08SLee Schermerhorn 
197006808b08SLee Schermerhorn 	return true;
197106808b08SLee Schermerhorn }
197200ac59adSChen, Kenneth W #endif
19735da7ca86SChristoph Lameter 
19746f48d0ebSDavid Rientjes /*
19756f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19766f48d0ebSDavid Rientjes  *
19776f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19786f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19796f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'perferred' or 'local'
19806f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19816f48d0ebSDavid Rientjes  *
19826f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19836f48d0ebSDavid Rientjes  */
19846f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19856f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19866f48d0ebSDavid Rientjes {
19876f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19886f48d0ebSDavid Rientjes 	bool ret = true;
19896f48d0ebSDavid Rientjes 
19906f48d0ebSDavid Rientjes 	if (!mask)
19916f48d0ebSDavid Rientjes 		return ret;
19926f48d0ebSDavid Rientjes 	task_lock(tsk);
19936f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19946f48d0ebSDavid Rientjes 	if (!mempolicy)
19956f48d0ebSDavid Rientjes 		goto out;
19966f48d0ebSDavid Rientjes 
19976f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19986f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19996f48d0ebSDavid Rientjes 		/*
20006f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
20016f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
20026f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
20036f48d0ebSDavid Rientjes 		 * nodes in mask.
20046f48d0ebSDavid Rientjes 		 */
20056f48d0ebSDavid Rientjes 		break;
20066f48d0ebSDavid Rientjes 	case MPOL_BIND:
20076f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
20086f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
20096f48d0ebSDavid Rientjes 		break;
20106f48d0ebSDavid Rientjes 	default:
20116f48d0ebSDavid Rientjes 		BUG();
20126f48d0ebSDavid Rientjes 	}
20136f48d0ebSDavid Rientjes out:
20146f48d0ebSDavid Rientjes 	task_unlock(tsk);
20156f48d0ebSDavid Rientjes 	return ret;
20166f48d0ebSDavid Rientjes }
20176f48d0ebSDavid Rientjes 
20181da177e4SLinus Torvalds /* Allocate a page with interleave policy.
20191da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2020662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2021662f3a0bSAndi Kleen 					unsigned nid)
20221da177e4SLinus Torvalds {
20231da177e4SLinus Torvalds 	struct zonelist *zl;
20241da177e4SLinus Torvalds 	struct page *page;
20251da177e4SLinus Torvalds 
20260e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
20271da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
2028dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
2029ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
20301da177e4SLinus Torvalds 	return page;
20311da177e4SLinus Torvalds }
20321da177e4SLinus Torvalds 
20331da177e4SLinus Torvalds /**
20340bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20351da177e4SLinus Torvalds  *
20361da177e4SLinus Torvalds  * 	@gfp:
20371da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20381da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20391da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20401da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20411da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20421da177e4SLinus Torvalds  *
20430bbbc0b3SAndrea Arcangeli  *	@order:Order of the GFP allocation.
20441da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20451da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
20461da177e4SLinus Torvalds  *
20471da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20481da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20491da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20501da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
20511da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
20521da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
20531da177e4SLinus Torvalds  *
20541da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
20551da177e4SLinus Torvalds  */
20561da177e4SLinus Torvalds struct page *
20570bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
20582f5f9486SAndi Kleen 		unsigned long addr, int node)
20591da177e4SLinus Torvalds {
2060cc9a6c87SMel Gorman 	struct mempolicy *pol;
2061c0ff7453SMiao Xie 	struct page *page;
2062cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20631da177e4SLinus Torvalds 
2064cc9a6c87SMel Gorman retry_cpuset:
2065cc9a6c87SMel Gorman 	pol = get_vma_policy(current, vma, addr);
2066cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2067cc9a6c87SMel Gorman 
206845c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
20691da177e4SLinus Torvalds 		unsigned nid;
20705da7ca86SChristoph Lameter 
20718eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
207252cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20730bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2074cc9a6c87SMel Gorman 		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2075cc9a6c87SMel Gorman 			goto retry_cpuset;
2076cc9a6c87SMel Gorman 
2077c0ff7453SMiao Xie 		return page;
20781da177e4SLinus Torvalds 	}
2079212a0a6fSDavid Rientjes 	page = __alloc_pages_nodemask(gfp, order,
2080212a0a6fSDavid Rientjes 				      policy_zonelist(gfp, pol, node),
20810bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
2082212a0a6fSDavid Rientjes 	if (unlikely(mpol_needs_cond_ref(pol)))
2083212a0a6fSDavid Rientjes 		__mpol_put(pol);
2084cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2085cc9a6c87SMel Gorman 		goto retry_cpuset;
2086c0ff7453SMiao Xie 	return page;
20871da177e4SLinus Torvalds }
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds /**
20901da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20911da177e4SLinus Torvalds  *
20921da177e4SLinus Torvalds  *	@gfp:
20931da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20941da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20951da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20961da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20971da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20981da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20991da177e4SLinus Torvalds  *
21001da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
21011da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
21021da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
21031da177e4SLinus Torvalds  *
2104cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
21051da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
21061da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
21071da177e4SLinus Torvalds  */
2108dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
21091da177e4SLinus Torvalds {
21105606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(current);
2111c0ff7453SMiao Xie 	struct page *page;
2112cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
21131da177e4SLinus Torvalds 
21149b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
21151da177e4SLinus Torvalds 		pol = &default_policy;
211652cd3b07SLee Schermerhorn 
2117cc9a6c87SMel Gorman retry_cpuset:
2118cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2119cc9a6c87SMel Gorman 
212052cd3b07SLee Schermerhorn 	/*
212152cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
212252cd3b07SLee Schermerhorn 	 * nor system default_policy
212352cd3b07SLee Schermerhorn 	 */
212445c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2125c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2126c0ff7453SMiao Xie 	else
2127c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
21285c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
21295c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2130cc9a6c87SMel Gorman 
2131cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2132cc9a6c87SMel Gorman 		goto retry_cpuset;
2133cc9a6c87SMel Gorman 
2134c0ff7453SMiao Xie 	return page;
21351da177e4SLinus Torvalds }
21361da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
21371da177e4SLinus Torvalds 
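/*
 * Illustrative sketch (assumed userspace usage, not from this file): the
 * process policy consulted above is what set_mempolicy(2) installs, here
 * via libnuma's <numaif.h> wrapper.
 */
#include <numaif.h>

static int interleave_this_process(void)
{
	unsigned long nodemask = 0x3;	/* interleave over nodes 0 and 1 */

	/* later allocations in this task round-robin over the mask */
	return set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask));
}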
2138ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2139ef0855d3SOleg Nesterov {
2140ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2141ef0855d3SOleg Nesterov 
2142ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2143ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2144ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2145ef0855d3SOleg Nesterov 	return 0;
2146ef0855d3SOleg Nesterov }
2147ef0855d3SOleg Nesterov 
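/*
 * Sketch of an assumed caller (not from this file): a path that clones a
 * VMA, such as a split, must give the copy its own policy reference via
 * vma_dup_policy() before the two VMAs go their separate ways.
 */
static int example_clone_vma_policy(struct vm_area_struct *old,
				    struct vm_area_struct *new)
{
	int err = vma_dup_policy(old, new);	/* -ENOMEM on failure */

	if (err)
		return err;
	/* ... link "new" into the mm's VMA tree ... */
	return 0;
}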
21484225399aSPaul Jackson /*
2149846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21504225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
21514225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21524225399aSPaul Jackson  * keeps mempolicies cpuset relative after their cpuset moves.  See
21534225399aSPaul Jackson  * also kernel/cpuset.c update_nodemask().
2154708c1bbcSMiao Xie  *
2155708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2156708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
21574225399aSPaul Jackson  */
21584225399aSPaul Jackson 
2159846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2160846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21611da177e4SLinus Torvalds {
21621da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21631da177e4SLinus Torvalds 
21641da177e4SLinus Torvalds 	if (!new)
21651da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2166708c1bbcSMiao Xie 
2167708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2168708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2169708c1bbcSMiao Xie 		task_lock(current);
2170708c1bbcSMiao Xie 		*new = *old;
2171708c1bbcSMiao Xie 		task_unlock(current);
2172708c1bbcSMiao Xie 	} else
2173708c1bbcSMiao Xie 		*new = *old;
2174708c1bbcSMiao Xie 
217599ee4ca7SPaul E. McKenney 	rcu_read_lock();
21764225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21774225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2178708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2179708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2180708c1bbcSMiao Xie 		else
2181708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21824225399aSPaul Jackson 	}
218399ee4ca7SPaul E. McKenney 	rcu_read_unlock();
21841da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21851da177e4SLinus Torvalds 	return new;
21861da177e4SLinus Torvalds }
21871da177e4SLinus Torvalds 
21881da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2189fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21901da177e4SLinus Torvalds {
21911da177e4SLinus Torvalds 	if (!a || !b)
2192fcfb4dccSKOSAKI Motohiro 		return false;
219345c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2194fcfb4dccSKOSAKI Motohiro 		return false;
219519800502SBob Liu 	if (a->flags != b->flags)
2196fcfb4dccSKOSAKI Motohiro 		return false;
219719800502SBob Liu 	if (mpol_store_user_nodemask(a))
219819800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2199fcfb4dccSKOSAKI Motohiro 			return false;
220019800502SBob Liu 
220145c4745aSLee Schermerhorn 	switch (a->mode) {
220219770b32SMel Gorman 	case MPOL_BIND:
220319770b32SMel Gorman 		/* Fall through */
22041da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2205fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
22061da177e4SLinus Torvalds 	case MPOL_PREFERRED:
220775719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
22081da177e4SLinus Torvalds 	default:
22091da177e4SLinus Torvalds 		BUG();
2210fcfb4dccSKOSAKI Motohiro 		return false;
22111da177e4SLinus Torvalds 	}
22121da177e4SLinus Torvalds }
22131da177e4SLinus Torvalds 
22141da177e4SLinus Torvalds /*
22151da177e4SLinus Torvalds  * Shared memory backing store policy support.
22161da177e4SLinus Torvalds  *
22171da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
22181da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
22191da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
22201da177e4SLinus Torvalds  * for any accesses to the tree.
22211da177e4SLinus Torvalds  */
22221da177e4SLinus Torvalds 
22231da177e4SLinus Torvalds /* lookup first element intersecting start-end */
222442288fe3SMel Gorman /* Caller holds sp->lock */
22251da177e4SLinus Torvalds static struct sp_node *
22261da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
22271da177e4SLinus Torvalds {
22281da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
22291da177e4SLinus Torvalds 
22301da177e4SLinus Torvalds 	while (n) {
22311da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22321da177e4SLinus Torvalds 
22331da177e4SLinus Torvalds 		if (start >= p->end)
22341da177e4SLinus Torvalds 			n = n->rb_right;
22351da177e4SLinus Torvalds 		else if (end <= p->start)
22361da177e4SLinus Torvalds 			n = n->rb_left;
22371da177e4SLinus Torvalds 		else
22381da177e4SLinus Torvalds 			break;
22391da177e4SLinus Torvalds 	}
22401da177e4SLinus Torvalds 	if (!n)
22411da177e4SLinus Torvalds 		return NULL;
22421da177e4SLinus Torvalds 	for (;;) {
22431da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22441da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22451da177e4SLinus Torvalds 		if (!prev)
22461da177e4SLinus Torvalds 			break;
22471da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22481da177e4SLinus Torvalds 		if (w->end <= start)
22491da177e4SLinus Torvalds 			break;
22501da177e4SLinus Torvalds 		n = prev;
22511da177e4SLinus Torvalds 	}
22521da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22531da177e4SLinus Torvalds }
22541da177e4SLinus Torvalds 
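/*
 * Worked example of the lookup above (illustrative): with ranges [0,4)
 * and [4,8) in the tree, sp_lookup(sp, 2, 6) first descends to some node
 * intersecting [2,6); the rb_prev() loop then backs up while the
 * predecessor still overlaps the start, so [0,4) is returned -- the
 * first range intersecting the query.
 */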
22551da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
22561da177e4SLinus Torvalds /* Caller holds sp->lock */
22571da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22581da177e4SLinus Torvalds {
22591da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22601da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22611da177e4SLinus Torvalds 	struct sp_node *nd;
22621da177e4SLinus Torvalds 
22631da177e4SLinus Torvalds 	while (*p) {
22641da177e4SLinus Torvalds 		parent = *p;
22651da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22661da177e4SLinus Torvalds 		if (new->start < nd->start)
22671da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22681da177e4SLinus Torvalds 		else if (new->end > nd->end)
22691da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22701da177e4SLinus Torvalds 		else
22711da177e4SLinus Torvalds 			BUG();
22721da177e4SLinus Torvalds 	}
22731da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22741da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2275140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
227645c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22771da177e4SLinus Torvalds }
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22801da177e4SLinus Torvalds struct mempolicy *
22811da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22821da177e4SLinus Torvalds {
22831da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22841da177e4SLinus Torvalds 	struct sp_node *sn;
22851da177e4SLinus Torvalds 
22861da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22871da177e4SLinus Torvalds 		return NULL;
228842288fe3SMel Gorman 	spin_lock(&sp->lock);
22891da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22901da177e4SLinus Torvalds 	if (sn) {
22911da177e4SLinus Torvalds 		mpol_get(sn->policy);
22921da177e4SLinus Torvalds 		pol = sn->policy;
22931da177e4SLinus Torvalds 	}
229442288fe3SMel Gorman 	spin_unlock(&sp->lock);
22951da177e4SLinus Torvalds 	return pol;
22961da177e4SLinus Torvalds }
22971da177e4SLinus Torvalds 
229863f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
229963f74ca2SKOSAKI Motohiro {
230063f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
230163f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
230263f74ca2SKOSAKI Motohiro }
230363f74ca2SKOSAKI Motohiro 
2304de1c9ce6SRik van Riel #ifdef CONFIG_NUMA_BALANCING
2305de1c9ce6SRik van Riel static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
2306de1c9ce6SRik van Riel {
2307de1c9ce6SRik van Riel 	/* Never defer a private fault */
2308de1c9ce6SRik van Riel 	if (cpupid_match_pid(p, last_cpupid))
2309de1c9ce6SRik van Riel 		return false;
2310de1c9ce6SRik van Riel 
2311de1c9ce6SRik van Riel 	if (p->numa_migrate_deferred) {
2312de1c9ce6SRik van Riel 		p->numa_migrate_deferred--;
2313de1c9ce6SRik van Riel 		return true;
2314de1c9ce6SRik van Riel 	}
2315de1c9ce6SRik van Riel 	return false;
2316de1c9ce6SRik van Riel }
2317de1c9ce6SRik van Riel 
2318de1c9ce6SRik van Riel static inline void defer_numa_migrate(struct task_struct *p)
2319de1c9ce6SRik van Riel {
2320de1c9ce6SRik van Riel 	p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
2321de1c9ce6SRik van Riel }
2322de1c9ce6SRik van Riel #else
2323de1c9ce6SRik van Riel static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
2324de1c9ce6SRik van Riel {
2325de1c9ce6SRik van Riel 	return false;
2326de1c9ce6SRik van Riel }
2327de1c9ce6SRik van Riel 
2328de1c9ce6SRik van Riel static inline void defer_numa_migrate(struct task_struct *p)
2329de1c9ce6SRik van Riel {
2330de1c9ce6SRik van Riel }
2331de1c9ce6SRik van Riel #endif /* CONFIG_NUMA_BALANCING */
2332de1c9ce6SRik van Riel 
2333771fb4d8SLee Schermerhorn /**
2334771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2335771fb4d8SLee Schermerhorn  *
2336771fb4d8SLee Schermerhorn  * @page: page to be checked
2337771fb4d8SLee Schermerhorn  * @vma: vm area where the page is mapped
2338771fb4d8SLee Schermerhorn  * @addr: virtual address where the page is mapped
2339771fb4d8SLee Schermerhorn  *
2340771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2341771fb4d8SLee Schermerhorn  * page's node id.
2342771fb4d8SLee Schermerhorn  *
2343771fb4d8SLee Schermerhorn  * Returns:
2344771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2345771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2346771fb4d8SLee Schermerhorn  *
2347771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2348771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2349771fb4d8SLee Schermerhorn  */
2350771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2351771fb4d8SLee Schermerhorn {
2352771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2353771fb4d8SLee Schermerhorn 	struct zone *zone;
2354771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2355771fb4d8SLee Schermerhorn 	unsigned long pgoff;
235690572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
235790572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2358771fb4d8SLee Schermerhorn 	int polnid = -1;
2359771fb4d8SLee Schermerhorn 	int ret = -1;
2360771fb4d8SLee Schermerhorn 
2361771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2362771fb4d8SLee Schermerhorn 
2363771fb4d8SLee Schermerhorn 	pol = get_vma_policy(current, vma, addr);
2364771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2365771fb4d8SLee Schermerhorn 		goto out;
2366771fb4d8SLee Schermerhorn 
2367771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2368771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2369771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2370771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2371771fb4d8SLee Schermerhorn 
2372771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2373771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2374771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2375771fb4d8SLee Schermerhorn 		break;
2376771fb4d8SLee Schermerhorn 
2377771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2378771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2379771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2380771fb4d8SLee Schermerhorn 		else
2381771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2382771fb4d8SLee Schermerhorn 		break;
2383771fb4d8SLee Schermerhorn 
2384771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2385771fb4d8SLee Schermerhorn 		/*
2386771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2387771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy
2388771fb4d8SLee Schermerhorn 		 * nodemask, else select the nearest allowed node, if any.
2389771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2390771fb4d8SLee Schermerhorn 		 */
2391771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2392771fb4d8SLee Schermerhorn 			goto out;
2393771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2394771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2395771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2396771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2397771fb4d8SLee Schermerhorn 		polnid = zone->node;
2398771fb4d8SLee Schermerhorn 		break;
2399771fb4d8SLee Schermerhorn 
2400771fb4d8SLee Schermerhorn 	default:
2401771fb4d8SLee Schermerhorn 		BUG();
2402771fb4d8SLee Schermerhorn 	}
24035606e387SMel Gorman 
24045606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2405e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
240690572890SPeter Zijlstra 		int last_cpupid;
240790572890SPeter Zijlstra 		int this_cpupid;
2408e42c8ff2SMel Gorman 
240990572890SPeter Zijlstra 		polnid = thisnid;
241090572890SPeter Zijlstra 		this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
24115606e387SMel Gorman 
2412e42c8ff2SMel Gorman 		/*
2413e42c8ff2SMel Gorman 		 * Multi-stage node selection is used in conjunction
2414e42c8ff2SMel Gorman 		 * with a periodic migration fault to build a temporal
2415e42c8ff2SMel Gorman 		 * task<->page relation. By using a two-stage filter we
2416e42c8ff2SMel Gorman 		 * remove short/unlikely relations.
2417e42c8ff2SMel Gorman 		 *
2418e42c8ff2SMel Gorman 		 * Using P(p) ~ n_p / n_t as per frequentist
2419e42c8ff2SMel Gorman 		 * probability, we can equate a task's usage of a
2420e42c8ff2SMel Gorman 		 * particular page (n_p) per total usage of this
2421e42c8ff2SMel Gorman 		 * page (n_t) (in a given time-span) to a probability.
2422e42c8ff2SMel Gorman 		 *
2423e42c8ff2SMel Gorman 		 * Our periodic faults will sample this probability and
2424e42c8ff2SMel Gorman 		 * getting the same result twice in a row, given these
2425e42c8ff2SMel Gorman 		 * samples are fully independent, is then given by
2426e42c8ff2SMel Gorman 		 * P(n)^2, provided our sample period is sufficiently
2427e42c8ff2SMel Gorman 		 * short compared to the usage pattern.
2428e42c8ff2SMel Gorman 		 *
2429e42c8ff2SMel Gorman 		 * This quadratic squishes small probabilities, making
2430e42c8ff2SMel Gorman 		 * it less likely we act on an unlikely task<->page
2431e42c8ff2SMel Gorman 		 * relation.
2432e42c8ff2SMel Gorman 		 */
243390572890SPeter Zijlstra 		last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
2434de1c9ce6SRik van Riel 		if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
2435de1c9ce6SRik van Riel 
2436de1c9ce6SRik van Riel 			/* See sysctl_numa_balancing_migrate_deferred comment */
2437de1c9ce6SRik van Riel 			if (!cpupid_match_pid(current, last_cpupid))
2438de1c9ce6SRik van Riel 				defer_numa_migrate(current);
2439de1c9ce6SRik van Riel 
2440de1c9ce6SRik van Riel 			goto out;
2441de1c9ce6SRik van Riel 		}
2442de1c9ce6SRik van Riel 
2443de1c9ce6SRik van Riel 		/*
2444de1c9ce6SRik van Riel 		 * The quadratic filter above reduces extraneous migration
2445de1c9ce6SRik van Riel 		 * of shared pages somewhat. This code reduces it further,
2446de1c9ce6SRik van Riel 		 * cutting the overhead of migrating shared pages.
2447de1c9ce6SRik van Riel 		 * This makes workloads with shared pages rely more on
2448de1c9ce6SRik van Riel 		 * "move task near its memory", and less on "move memory
2449de1c9ce6SRik van Riel 		 * towards its task", which is exactly what we want.
2450de1c9ce6SRik van Riel 		 */
2451de1c9ce6SRik van Riel 		if (numa_migrate_deferred(current, last_cpupid))
2452e42c8ff2SMel Gorman 			goto out;
2453e42c8ff2SMel Gorman 	}
2454e42c8ff2SMel Gorman 
2455771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2456771fb4d8SLee Schermerhorn 		ret = polnid;
2457771fb4d8SLee Schermerhorn out:
2458771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2459771fb4d8SLee Schermerhorn 
2460771fb4d8SLee Schermerhorn 	return ret;
2461771fb4d8SLee Schermerhorn }
2462771fb4d8SLee Schermerhorn 
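/*
 * Worked example for the two-stage filter above (illustrative numbers):
 * a task doing 30% of the accesses to a shared page has P(p) = 0.3, so
 * it wins two consecutive samples with probability 0.3^2 = 0.09, while a
 * task with P(p) = 0.9 passes with 0.81. Squaring therefore suppresses
 * weak task<->page relations far more than strong ones.
 */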
24631da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
24641da177e4SLinus Torvalds {
2465140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
24661da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
246763f74ca2SKOSAKI Motohiro 	sp_free(n);
24681da177e4SLinus Torvalds }
24691da177e4SLinus Torvalds 
247042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
247142288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
247242288fe3SMel Gorman {
247342288fe3SMel Gorman 	node->start = start;
247442288fe3SMel Gorman 	node->end = end;
247542288fe3SMel Gorman 	node->policy = pol;
247642288fe3SMel Gorman }
247742288fe3SMel Gorman 
2478dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2479dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
24801da177e4SLinus Torvalds {
2481869833f2SKOSAKI Motohiro 	struct sp_node *n;
2482869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24831da177e4SLinus Torvalds 
2484869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24851da177e4SLinus Torvalds 	if (!n)
24861da177e4SLinus Torvalds 		return NULL;
2487869833f2SKOSAKI Motohiro 
2488869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2489869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2490869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2491869833f2SKOSAKI Motohiro 		return NULL;
2492869833f2SKOSAKI Motohiro 	}
2493869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
249442288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2495869833f2SKOSAKI Motohiro 
24961da177e4SLinus Torvalds 	return n;
24971da177e4SLinus Torvalds }
24981da177e4SLinus Torvalds 
24991da177e4SLinus Torvalds /* Replace a policy range. */
25001da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
25011da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
25021da177e4SLinus Torvalds {
2503b22d127aSMel Gorman 	struct sp_node *n;
250442288fe3SMel Gorman 	struct sp_node *n_new = NULL;
250542288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2506b22d127aSMel Gorman 	int ret = 0;
25071da177e4SLinus Torvalds 
250842288fe3SMel Gorman restart:
250942288fe3SMel Gorman 	spin_lock(&sp->lock);
25101da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
25111da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
25121da177e4SLinus Torvalds 	while (n && n->start < end) {
25131da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
25141da177e4SLinus Torvalds 		if (n->start >= start) {
25151da177e4SLinus Torvalds 			if (n->end <= end)
25161da177e4SLinus Torvalds 				sp_delete(sp, n);
25171da177e4SLinus Torvalds 			else
25181da177e4SLinus Torvalds 				n->start = end;
25191da177e4SLinus Torvalds 		} else {
25201da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
25211da177e4SLinus Torvalds 			if (n->end > end) {
252242288fe3SMel Gorman 				if (!n_new)
252342288fe3SMel Gorman 					goto alloc_new;
252442288fe3SMel Gorman 
252542288fe3SMel Gorman 				*mpol_new = *n->policy;
252642288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
25277880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
25281da177e4SLinus Torvalds 				n->end = start;
25295ca39575SHillf Danton 				sp_insert(sp, n_new);
253042288fe3SMel Gorman 				n_new = NULL;
253142288fe3SMel Gorman 				mpol_new = NULL;
25321da177e4SLinus Torvalds 				break;
25331da177e4SLinus Torvalds 			} else
25341da177e4SLinus Torvalds 				n->end = start;
25351da177e4SLinus Torvalds 		}
25361da177e4SLinus Torvalds 		if (!next)
25371da177e4SLinus Torvalds 			break;
25381da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25391da177e4SLinus Torvalds 	}
25401da177e4SLinus Torvalds 	if (new)
25411da177e4SLinus Torvalds 		sp_insert(sp, new);
254242288fe3SMel Gorman 	spin_unlock(&sp->lock);
254342288fe3SMel Gorman 	ret = 0;
254442288fe3SMel Gorman 
254542288fe3SMel Gorman err_out:
254642288fe3SMel Gorman 	if (mpol_new)
254742288fe3SMel Gorman 		mpol_put(mpol_new);
254842288fe3SMel Gorman 	if (n_new)
254942288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
255042288fe3SMel Gorman 
2551b22d127aSMel Gorman 	return ret;
255242288fe3SMel Gorman 
255342288fe3SMel Gorman alloc_new:
255442288fe3SMel Gorman 	spin_unlock(&sp->lock);
255542288fe3SMel Gorman 	ret = -ENOMEM;
255642288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
255742288fe3SMel Gorman 	if (!n_new)
255842288fe3SMel Gorman 		goto err_out;
255942288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
256042288fe3SMel Gorman 	if (!mpol_new)
256142288fe3SMel Gorman 		goto err_out;
256242288fe3SMel Gorman 	goto restart;
25631da177e4SLinus Torvalds }
25641da177e4SLinus Torvalds 
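/*
 * A minimal userspace distillation of the retry pattern above (a sketch,
 * not kernel code): nothing sleepable may run under a spinlock, so drop
 * the lock, allocate a spare node, and restart the whole operation with
 * the spare in hand. Assumes tree_lock was set up with pthread_spin_init().
 */
#include <pthread.h>
#include <stdlib.h>

struct enode { struct enode *next; };

static pthread_spinlock_t tree_lock;
static struct enode *head;

static int insert_with_prealloc(void)
{
	struct enode *spare = NULL;

restart:
	pthread_spin_lock(&tree_lock);
	if (!spare) {				/* a new node is needed here */
		pthread_spin_unlock(&tree_lock);
		spare = malloc(sizeof(*spare));	/* may block: lock dropped */
		if (!spare)
			return -1;		/* -ENOMEM analogue */
		goto restart;			/* state may have changed */
	}
	spare->next = head;			/* consume the prealloc'd node */
	head = spare;
	pthread_spin_unlock(&tree_lock);
	return 0;
}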
256571fe804bSLee Schermerhorn /**
256671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
256771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
256871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
256971fe804bSLee Schermerhorn  *
257071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
257171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
257271fe804bSLee Schermerhorn  * This must be released on exit.
25734bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so we can use GFP_KERNEL.
257471fe804bSLee Schermerhorn  */
257571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
25767339ff83SRobin Holt {
257758568d2aSMiao Xie 	int ret;
257858568d2aSMiao Xie 
257971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
258042288fe3SMel Gorman 	spin_lock_init(&sp->lock);
25817339ff83SRobin Holt 
258271fe804bSLee Schermerhorn 	if (mpol) {
25837339ff83SRobin Holt 		struct vm_area_struct pvma;
258471fe804bSLee Schermerhorn 		struct mempolicy *new;
25854bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25867339ff83SRobin Holt 
25874bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25885c0c1654SLee Schermerhorn 			goto put_mpol;
258971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
259071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
259115d77835SLee Schermerhorn 		if (IS_ERR(new))
25920cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
259358568d2aSMiao Xie 
259458568d2aSMiao Xie 		task_lock(current);
25954bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
259658568d2aSMiao Xie 		task_unlock(current);
259715d77835SLee Schermerhorn 		if (ret)
25985c0c1654SLee Schermerhorn 			goto put_new;
259971fe804bSLee Schermerhorn 
260071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
26017339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
260271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
260371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
260415d77835SLee Schermerhorn 
26055c0c1654SLee Schermerhorn put_new:
260671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
26070cae3457SDan Carpenter free_scratch:
26084bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
26095c0c1654SLee Schermerhorn put_mpol:
26105c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
26117339ff83SRobin Holt 	}
26127339ff83SRobin Holt }
26137339ff83SRobin Holt 
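/*
 * Illustrative origin of @mpol above (a sketch; the mount point is an
 * assumed example): tmpfs parses an "mpol=" mount option (see
 * mpol_parse_str() below) and hands the result to
 * mpol_shared_policy_init() for each inode.
 */
#include <sys/mount.h>

static int mount_interleaved_tmpfs(void)
{
	/* file pages on this mount interleave across nodes 0-3 */
	return mount("tmpfs", "/mnt/ileave", "tmpfs", 0,
		     "mpol=interleave:0-3");
}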
26141da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
26151da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
26161da177e4SLinus Torvalds {
26171da177e4SLinus Torvalds 	int err;
26181da177e4SLinus Torvalds 	struct sp_node *new = NULL;
26191da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
26201da177e4SLinus Torvalds 
2621028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
26221da177e4SLinus Torvalds 		 vma->vm_pgoff,
262345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2624028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
262500ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
26261da177e4SLinus Torvalds 
26271da177e4SLinus Torvalds 	if (npol) {
26281da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
26291da177e4SLinus Torvalds 		if (!new)
26301da177e4SLinus Torvalds 			return -ENOMEM;
26311da177e4SLinus Torvalds 	}
26321da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
26331da177e4SLinus Torvalds 	if (err && new)
263463f74ca2SKOSAKI Motohiro 		sp_free(new);
26351da177e4SLinus Torvalds 	return err;
26361da177e4SLinus Torvalds }
26371da177e4SLinus Torvalds 
26381da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
26391da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
26401da177e4SLinus Torvalds {
26411da177e4SLinus Torvalds 	struct sp_node *n;
26421da177e4SLinus Torvalds 	struct rb_node *next;
26431da177e4SLinus Torvalds 
26441da177e4SLinus Torvalds 	if (!p->root.rb_node)
26451da177e4SLinus Torvalds 		return;
264642288fe3SMel Gorman 	spin_lock(&p->lock);
26471da177e4SLinus Torvalds 	next = rb_first(&p->root);
26481da177e4SLinus Torvalds 	while (next) {
26491da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26501da177e4SLinus Torvalds 		next = rb_next(&n->nd);
265163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
26521da177e4SLinus Torvalds 	}
265342288fe3SMel Gorman 	spin_unlock(&p->lock);
26541da177e4SLinus Torvalds }
26551da177e4SLinus Torvalds 
26561a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2657c297663cSMel Gorman static int __initdata numabalancing_override;
26581a687c2eSMel Gorman 
26591a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
26601a687c2eSMel Gorman {
26611a687c2eSMel Gorman 	bool numabalancing_default = false;
26621a687c2eSMel Gorman 
26631a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
26641a687c2eSMel Gorman 		numabalancing_default = true;
26651a687c2eSMel Gorman 
2666c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2667c297663cSMel Gorman 	if (numabalancing_override)
2668c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2669c297663cSMel Gorman 
26701a687c2eSMel Gorman 	if (nr_node_ids > 1 && !numabalancing_override) {
2671*4a404beaSAndrew Morton 		pr_info("%s automatic NUMA balancing. "
2672c297663cSMel Gorman 			"Configure with numa_balancing= or the "
2673c297663cSMel Gorman 			"kernel.numa_balancing sysctl\n",
2674c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
26751a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
26761a687c2eSMel Gorman 	}
26771a687c2eSMel Gorman }
26781a687c2eSMel Gorman 
26791a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
26801a687c2eSMel Gorman {
26811a687c2eSMel Gorman 	int ret = 0;
26821a687c2eSMel Gorman 	if (!str)
26831a687c2eSMel Gorman 		goto out;
26841a687c2eSMel Gorman 
26851a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2686c297663cSMel Gorman 		numabalancing_override = 1;
26871a687c2eSMel Gorman 		ret = 1;
26881a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2689c297663cSMel Gorman 		numabalancing_override = -1;
26901a687c2eSMel Gorman 		ret = 1;
26911a687c2eSMel Gorman 	}
26921a687c2eSMel Gorman out:
26931a687c2eSMel Gorman 	if (!ret)
2694*4a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
26951a687c2eSMel Gorman 
26961a687c2eSMel Gorman 	return ret;
26971a687c2eSMel Gorman }
26981a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
26991a687c2eSMel Gorman #else
27001a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
27011a687c2eSMel Gorman {
27021a687c2eSMel Gorman }
27031a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
27041a687c2eSMel Gorman 
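/*
 * Runtime counterpart of the boot parameter above (a sketch): the same
 * switch is exposed as the kernel.numa_balancing sysctl, so it can be
 * flipped from userspace without rebooting.
 */
#include <fcntl.h>
#include <unistd.h>

static int set_numa_balancing(int on)
{
	int fd = open("/proc/sys/kernel/numa_balancing", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, on ? "1" : "0", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}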
27051da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
27061da177e4SLinus Torvalds void __init numa_policy_init(void)
27071da177e4SLinus Torvalds {
2708b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2709b71636e2SPaul Mundt 	unsigned long largest = 0;
2710b71636e2SPaul Mundt 	int nid, prefer = 0;
2711b71636e2SPaul Mundt 
27121da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
27131da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
271420c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
27151da177e4SLinus Torvalds 
27161da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
27171da177e4SLinus Torvalds 				     sizeof(struct sp_node),
271820c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
27191da177e4SLinus Torvalds 
27205606e387SMel Gorman 	for_each_node(nid) {
27215606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
27225606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
27235606e387SMel Gorman 			.mode = MPOL_PREFERRED,
27245606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
27255606e387SMel Gorman 			.v = { .preferred_node = nid, },
27265606e387SMel Gorman 		};
27275606e387SMel Gorman 	}
27285606e387SMel Gorman 
2729b71636e2SPaul Mundt 	/*
2730b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2731b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2732b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2733b71636e2SPaul Mundt 	 */
2734b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
273501f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2736b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
27371da177e4SLinus Torvalds 
2738b71636e2SPaul Mundt 		/* Preserve the largest node */
2739b71636e2SPaul Mundt 		if (largest < total_pages) {
2740b71636e2SPaul Mundt 			largest = total_pages;
2741b71636e2SPaul Mundt 			prefer = nid;
2742b71636e2SPaul Mundt 		}
2743b71636e2SPaul Mundt 
2744b71636e2SPaul Mundt 		/* Interleave this node? */
2745b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2746b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2747b71636e2SPaul Mundt 	}
2748b71636e2SPaul Mundt 
2749b71636e2SPaul Mundt 	/* All too small, use the largest */
2750b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2751b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2752b71636e2SPaul Mundt 
2753028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
27541da177e4SLinus Torvalds 		pr_err("numa_policy_init: interleaving failed\n");
27551a687c2eSMel Gorman 
27561a687c2eSMel Gorman 	check_numabalancing_enable();
27571da177e4SLinus Torvalds }
27581da177e4SLinus Torvalds 
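/*
 * Worked example for the size check above: (total_pages << PAGE_SHIFT)
 * >= (16 << 20) requires at least 16 MiB present on a node; with 4 KiB
 * pages that is total_pages >= 4096 before the node joins
 * interleave_nodes.
 */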
27598bccd85fSChristoph Lameter /* Reset policy of current process to default */
27601da177e4SLinus Torvalds void numa_default_policy(void)
27611da177e4SLinus Torvalds {
2762028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
27631da177e4SLinus Torvalds }
276468860ec1SPaul Jackson 
27654225399aSPaul Jackson /*
2766095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2767095f1fc4SLee Schermerhorn  */
2768095f1fc4SLee Schermerhorn 
2769095f1fc4SLee Schermerhorn /*
2770f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
27711a75a6c8SChristoph Lameter  */
2772345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2773345ace9cSLee Schermerhorn {
2774345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2775345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2776345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2777345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2778d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2779345ace9cSLee Schermerhorn };
27801a75a6c8SChristoph Lameter 
2781095f1fc4SLee Schermerhorn 
2782095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2783095f1fc4SLee Schermerhorn /**
2784f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2785095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
278671fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2787095f1fc4SLee Schermerhorn  *
2788095f1fc4SLee Schermerhorn  * Format of input:
2789095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2790095f1fc4SLee Schermerhorn  *
279171fe804bSLee Schermerhorn  * On success, returns 0, else 1
2792095f1fc4SLee Schermerhorn  */
2793a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2794095f1fc4SLee Schermerhorn {
279571fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2796b4652e84SLee Schermerhorn 	unsigned short mode;
2797f2a07f40SHugh Dickins 	unsigned short mode_flags;
279871fe804bSLee Schermerhorn 	nodemask_t nodes;
2799095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2800095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2801095f1fc4SLee Schermerhorn 	int err = 1;
2802095f1fc4SLee Schermerhorn 
2803095f1fc4SLee Schermerhorn 	if (nodelist) {
2804095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2805095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
280671fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2807095f1fc4SLee Schermerhorn 			goto out;
280801f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2809095f1fc4SLee Schermerhorn 			goto out;
281071fe804bSLee Schermerhorn 	} else
281171fe804bSLee Schermerhorn 		nodes_clear(nodes);
281271fe804bSLee Schermerhorn 
2813095f1fc4SLee Schermerhorn 	if (flags)
2814095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2815095f1fc4SLee Schermerhorn 
2816479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2817345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode]))
2818095f1fc4SLee Schermerhorn 			break;
2819095f1fc4SLee Schermerhorn 	}
2821a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2822095f1fc4SLee Schermerhorn 		goto out;
2823095f1fc4SLee Schermerhorn 
282471fe804bSLee Schermerhorn 	switch (mode) {
2825095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
282671fe804bSLee Schermerhorn 		/*
282771fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
282871fe804bSLee Schermerhorn 		 */
2829095f1fc4SLee Schermerhorn 		if (nodelist) {
2830095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2831095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2832095f1fc4SLee Schermerhorn 				rest++;
2833926f2ae0SKOSAKI Motohiro 			if (*rest)
2834926f2ae0SKOSAKI Motohiro 				goto out;
2835095f1fc4SLee Schermerhorn 		}
2836095f1fc4SLee Schermerhorn 		break;
2837095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2838095f1fc4SLee Schermerhorn 		/*
2839095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2840095f1fc4SLee Schermerhorn 		 */
2841095f1fc4SLee Schermerhorn 		if (!nodelist)
284201f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
28433f226aa1SLee Schermerhorn 		break;
284471fe804bSLee Schermerhorn 	case MPOL_LOCAL:
28453f226aa1SLee Schermerhorn 		/*
284671fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
28473f226aa1SLee Schermerhorn 		 */
284871fe804bSLee Schermerhorn 		if (nodelist)
28493f226aa1SLee Schermerhorn 			goto out;
285071fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
28513f226aa1SLee Schermerhorn 		break;
2852413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2853413b43deSRavikiran G Thirumalai 		/*
2854413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2855413b43deSRavikiran G Thirumalai 		 */
2856413b43deSRavikiran G Thirumalai 		if (!nodelist)
2857413b43deSRavikiran G Thirumalai 			err = 0;
2858413b43deSRavikiran G Thirumalai 		goto out;
2859d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
286071fe804bSLee Schermerhorn 		/*
2861d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
286271fe804bSLee Schermerhorn 		 */
2863d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2864d69b2e63SKOSAKI Motohiro 			goto out;
2865095f1fc4SLee Schermerhorn 	}
2866095f1fc4SLee Schermerhorn 
286771fe804bSLee Schermerhorn 	mode_flags = 0;
2868095f1fc4SLee Schermerhorn 	if (flags) {
2869095f1fc4SLee Schermerhorn 		/*
2870095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2871095f1fc4SLee Schermerhorn 		 * mode flags.
2872095f1fc4SLee Schermerhorn 		 */
2873095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
287471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2875095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
287671fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2877095f1fc4SLee Schermerhorn 		else
2878926f2ae0SKOSAKI Motohiro 			goto out;
2879095f1fc4SLee Schermerhorn 	}
288071fe804bSLee Schermerhorn 
288171fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
288271fe804bSLee Schermerhorn 	if (IS_ERR(new))
2883926f2ae0SKOSAKI Motohiro 		goto out;
2884926f2ae0SKOSAKI Motohiro 
2885f2a07f40SHugh Dickins 	/*
2886f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2887f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2888f2a07f40SHugh Dickins 	 */
2889f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2890f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2891f2a07f40SHugh Dickins 	else if (nodelist)
2892f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2893f2a07f40SHugh Dickins 	else
2894f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2895f2a07f40SHugh Dickins 
2896f2a07f40SHugh Dickins 	/*
2897f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2898f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2899f2a07f40SHugh Dickins 	 */
2900e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2901f2a07f40SHugh Dickins 
2902926f2ae0SKOSAKI Motohiro 	err = 0;
290371fe804bSLee Schermerhorn 
2904095f1fc4SLee Schermerhorn out:
2905095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2906095f1fc4SLee Schermerhorn 	if (nodelist)
2907095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2908095f1fc4SLee Schermerhorn 	if (flags)
2909095f1fc4SLee Schermerhorn 		*--flags = '=';
291071fe804bSLee Schermerhorn 	if (!err)
291171fe804bSLee Schermerhorn 		*mpol = new;
2912095f1fc4SLee Schermerhorn 	return err;
2913095f1fc4SLee Schermerhorn }
2914095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2915095f1fc4SLee Schermerhorn 
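/*
 * Example strings the parser above accepts, per the documented format
 * <mode>[=<flags>][:<nodelist>]:
 *
 *	"interleave:0-3"	interleave over nodes 0,1,2,3
 *	"prefer:1"		prefer node 1 (exactly one node allowed)
 *	"bind=static:0,2"	bind to nodes 0 and 2 with MPOL_F_STATIC_NODES
 *	"local"			becomes MPOL_PREFERRED with MPOL_F_LOCAL
 *	"default"		must carry no nodelist at all
 */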
291671fe804bSLee Schermerhorn /**
291771fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
291871fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
291971fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
292071fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
292171fe804bSLee Schermerhorn  *
2922948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2923948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2924948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
29251a75a6c8SChristoph Lameter  */
2926948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
29271a75a6c8SChristoph Lameter {
29281a75a6c8SChristoph Lameter 	char *p = buffer;
2929948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2930948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2931948927eeSDavid Rientjes 	unsigned short flags = 0;
29321a75a6c8SChristoph Lameter 
2933948927eeSDavid Rientjes 	if (pol && pol != &default_policy) {
2934bea904d5SLee Schermerhorn 		mode = pol->mode;
2935948927eeSDavid Rientjes 		flags = pol->flags;
2936948927eeSDavid Rientjes 	}
2937bea904d5SLee Schermerhorn 
29381a75a6c8SChristoph Lameter 	switch (mode) {
29391a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
29401a75a6c8SChristoph Lameter 		break;
29411a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2942fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2943f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
294453f2556bSLee Schermerhorn 		else
2945fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
29461a75a6c8SChristoph Lameter 		break;
29471a75a6c8SChristoph Lameter 	case MPOL_BIND:
29481a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
29491a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
29501a75a6c8SChristoph Lameter 		break;
29511a75a6c8SChristoph Lameter 	default:
2952948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2953948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2954948927eeSDavid Rientjes 		return;
29551a75a6c8SChristoph Lameter 	}
29561a75a6c8SChristoph Lameter 
2957b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
29581a75a6c8SChristoph Lameter 
2959fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2960948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2961f5b087b5SDavid Rientjes 
29622291990aSLee Schermerhorn 		/*
29632291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
29642291990aSLee Schermerhorn 		 */
2965f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
29662291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
29672291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
29682291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2969f5b087b5SDavid Rientjes 	}
2970f5b087b5SDavid Rientjes 
29711a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
2972948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, ":");
29731a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
29741a75a6c8SChristoph Lameter 	}
29751a75a6c8SChristoph Lameter }
2976
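/*
 * Where the string formatted above shows up (illustrative): each VMA's
 * policy is printed via mpol_to_str() in /proc/<pid>/numa_maps; the
 * second field of each line is the policy, e.g. "interleave=static:0-3".
 * A sketch of dumping it for the current task:
 */
#include <stdio.h>

static void dump_numa_maps(void)
{
	char line[1024];
	FILE *f = fopen("/proc/self/numa_maps", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}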