/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind truly restricted
 *                the allocation to the given memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
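
/*
 * Illustrative sketch (not part of this file): how the policies above are
 * typically set from userspace via the numaif.h syscall wrappers.  The
 * mapping size and node numbers below are assumptions for the example.
 */
#if 0
#include <stddef.h>
#include <sys/mman.h>
#include <numaif.h>		/* set_mempolicy(), mbind() */

static void mempolicy_example(void)
{
	unsigned long nodes = 0x3;	/* nodemask: nodes 0 and 1 */
	void *buf;

	/* Process policy: interleave all new allocations over nodes 0-1. */
	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);

	/* VMA policy: bind one mapping to node 0 only, no fallback. */
	buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	nodes = 0x1;
	mbind(buf, 1 << 20, MPOL_BIND, &nodes, sizeof(nodes) * 8, 0);
}
#endif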

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

static struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;

	if (!pol) {
		int node = numa_node_id();

		if (node != NUMA_NO_NODE) {
			pol = &preferred_node_policy[node];
			/*
			 * preferred_node_policy is not initialised early in
			 * boot
			 */
			if (!pol->mode)
				pol = NULL;
		}
	}

	return pol;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly-allowed nodes, and the second step
	 * clears all the now-disallowed nodes. In this way, we can avoid a
	 * window in which no node is available to allocate a page from.
	 * If we have a lock to protect task->mempolicy on the read side, we
	 * rebind directly.
	 *
	 * step:
	 * 	MPOL_REBIND_ONCE  - do the rebind work at once
	 * 	MPOL_REBIND_STEP1 - set all the newly-allowed nodes
	 * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
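
/*
 * Worked example of the two-step rebind (a sketch; the node numbers are
 * assumptions): a policy interleaving over nodes {0,1} whose cpuset is
 * moved to nodes {2,3} is remapped to tmp = {2,3}.  STEP1 ORs that in,
 * giving v.nodes = {0,1,2,3}, so allocations always find some node;
 * STEP2 then drops the old nodes, leaving v.nodes = {2,3}.
 */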

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, node_states[N_MEMORY]);
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
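
/*
 * For example (illustrative): with orig = {0,1} (a relative mask) and
 * rel = {4,5,6,7} (the allowed nodes), nodes_fold() wraps orig modulo
 * nodes_weight(rel) = 4, and nodes_onto() then maps bit n of the result
 * onto the n-th set bit of rel, yielding *ret = {4,5}.
 */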

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!is_valid_nodemask(nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding the task's alloc_lock to protect the task's
 * mems_allowed and mempolicy.  May also be called holding the mmap_sem
 * for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some sanity checking and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
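
/*
 * Sketch of the expected calling sequence (see do_set_mempolicy() below
 * for the real thing):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	new = mpol_new(mode, flags, &nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, &nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */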

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly-allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly-allowed nodes, and the second step clears all
 * the now-disallowed nodes. In this way, we can avoid a window in which
 * no node is available to allocate a page from.
 * If we have a lock to protect task->mempolicy on the read side, we
 * rebind directly.
 *
 * step:
 * 	MPOL_REBIND_ONCE  - do the rebind work at once
 * 	MPOL_REBIND_STEP1 - set all the newly-allowed nodes
 * 	MPOL_REBIND_STEP2 - clear all the now-disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires the task
 * pointer, and updates the task's mempolicy.
 *
 * Called with the task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}
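
/*
 * Illustrative call pattern (a sketch): when the caller cannot take a lock
 * that readers of task->mempolicy hold, a cpuset update is expected to make
 * two passes, e.g.
 *
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP1);
 *	... update tsk->mems_allowed ...
 *	mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP2);
 *
 * (cf. cpuset_change_task_nodemask() in kernel/cpuset.c)
 */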

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pte_t *orig_pte;
	pte_t *pte;
	spinlock_t *ptl;

	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, private, flags);
		else
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return addr != end;
}

static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
		const nodemask_t *nodes, unsigned long flags,
				    void *private)
{
#ifdef CONFIG_HUGETLB_PAGE
	int nid;
	struct page *page;

	spin_lock(&vma->vm_mm->page_table_lock);
	page = pte_page(huge_ptep_get((pte_t *)pmd));
	nid = page_to_nid(page);
	if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, private);
unlock:
	spin_unlock(&vma->vm_mm->page_table_lock);
#else
	BUG();
#endif
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd))
			continue;
		if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
			check_hugetlb_pmd_range(vma, pmd, nodes,
						flags, private);
			continue;
		}
		split_huge_page_pmd(vma, addr, pmd);
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			continue;
		if (check_pte_range(vma, pmd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_huge(*pud) && is_vm_hugetlb_page(vma))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		if (check_pmd_range(vma, pud, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end,
		const nodemask_t *nodes, unsigned long flags,
		void *private)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		if (check_pud_range(vma, pgd, addr, next, nodes,
				    flags, private))
			return -EIO;
	} while (pgd++, addr = next, addr != end);
	return 0;
}

#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;
	BUILD_BUG_ON(_PAGE_NUMA != _PAGE_PROTNONE);

	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		const nodemask_t *nodes, unsigned long flags, void *private)
{
	int err;
	struct vm_area_struct *first, *vma, *prev;

	first = find_vma(mm, start);
	if (!first)
		return ERR_PTR(-EFAULT);
	prev = NULL;
	for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
		unsigned long endvma = vma->vm_end;

		if (endvma > end)
			endvma = end;
		if (vma->vm_start > start)
			start = vma->vm_start;

		if (!(flags & MPOL_MF_DISCONTIG_OK)) {
			if (!vma->vm_next && vma->vm_end < end)
				return ERR_PTR(-EFAULT);
			if (prev && prev->vm_end < vma->vm_start)
				return ERR_PTR(-EFAULT);
		}

		if (flags & MPOL_MF_LAZY) {
			change_prot_numa(vma, start, endvma);
			goto next;
		}

		if ((flags & MPOL_MF_STRICT) ||
		     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
		      vma_migratable(vma))) {

			err = check_pgd_range(vma, start, endvma, nodes,
						flags, private);
			if (err) {
				first = ERR_PTR(err);
				break;
			}
		}
next:
		prev = vma;
	}
	return first;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
	if (p->mempolicy)
		p->flags |= PF_MEMPOLICY;
	else
		p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
	mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	struct mm_struct *mm = current->mm;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}
	/*
	 * prevent changing our mempolicy while show_numa_maps()
	 * is using it.
	 * Note:  do_set_mempolicy() can be called at init time
	 * with no 'mm'.
	 */
	if (mm)
		down_write(&mm->mmap_sem);
	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		if (mm)
			up_write(&mm->mmap_sem);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	mpol_set_task_struct_flag();
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	if (mm)
		up_write(&mm->mmap_sem);

	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
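
/*
 * Illustrative userspace sketch (not part of this file): querying which
 * node backs a given address with the numaif.h wrapper for get_mempolicy().
 */
#if 0
#include <numaif.h>

static int node_of(void *addr)
{
	int node = -1;

	/* MPOL_F_NODE|MPOL_F_ADDR: return the node the page at addr is on. */
	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
		return -1;
	return node;
}
#endif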

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
10657e2ab150SChristoph Lameter 
10667e2ab150SChristoph Lameter /*
10677e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10687e2ab150SChristoph Lameter  * layout as much as possible.
106939743889SChristoph Lameter  *
107039743889SChristoph Lameter  * Returns the number of page that could not be moved.
107139743889SChristoph Lameter  */
10720ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10730ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
107439743889SChristoph Lameter {
10757e2ab150SChristoph Lameter 	int busy = 0;
10760aedadf9SChristoph Lameter 	int err;
10777e2ab150SChristoph Lameter 	nodemask_t tmp;
107839743889SChristoph Lameter 
10790aedadf9SChristoph Lameter 	err = migrate_prep();
10800aedadf9SChristoph Lameter 	if (err)
10810aedadf9SChristoph Lameter 		return err;
10820aedadf9SChristoph Lameter 
108339743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1084d4984711SChristoph Lameter 
10850ce72d4fSAndrew Morton 	err = migrate_vmas(mm, from, to, flags);
10867b2259b3SChristoph Lameter 	if (err)
10877b2259b3SChristoph Lameter 		goto out;
10887b2259b3SChristoph Lameter 
10897e2ab150SChristoph Lameter 	/*
10907e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10917e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10927e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10937e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10947e2ab150SChristoph Lameter 	 *
10957e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10967e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10977e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10987e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10997e2ab150SChristoph Lameter 	 *
11007e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11017e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11027e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11037e2ab150SChristoph Lameter 	 *
11047e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11057e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11067e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11077e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11087e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
11097e2ab150SChristoph Lameter 	 *
11107e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11117e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11127e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11137e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out immediately, with that pair.
1114ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11157e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11167e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11177e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11187e2ab150SChristoph Lameter 	 */
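	/*
	 * Editor's illustration (hypothetical masks): with from = {0,1}
	 * and to = {1,2}, node_remap() maps 0 -> 1 and 1 -> 2.  Scanning
	 * tmp = {0,1}: s = 0 gives d = 1, but node 1 is still in tmp, so
	 * keep looking; s = 1 gives d = 2, which is not in tmp, so
	 * migrate 1 -> 2 first and clear node 1.  The next pass then
	 * migrates 0 -> 1 into the just-vacated node.
	 */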
11197e2ab150SChristoph Lameter 
11200ce72d4fSAndrew Morton 	tmp = *from;
11217e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11227e2ab150SChristoph Lameter 		int s, d;
11237e2ab150SChristoph Lameter 		int source = -1;
11247e2ab150SChristoph Lameter 		int dest = 0;
11257e2ab150SChristoph Lameter 
11267e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11274a5b18ccSLarry Woodman 
11284a5b18ccSLarry Woodman 			/*
11294a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11304a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11314a5b18ccSLarry Woodman 			 * threads and memory areas.
11324a5b18ccSLarry Woodman 			 *
11334a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11344a5b18ccSLarry Woodman 			 * the number of destination nodes we cannot preserve
11354a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11364a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11374a5b18ccSLarry Woodman 			 * mask.
11384a5b18ccSLarry Woodman 			 *
11394a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11404a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11414a5b18ccSLarry Woodman 			 */
11424a5b18ccSLarry Woodman 
11430ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11440ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11454a5b18ccSLarry Woodman 				continue;
11464a5b18ccSLarry Woodman 
11470ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11487e2ab150SChristoph Lameter 			if (s == d)
11497e2ab150SChristoph Lameter 				continue;
11507e2ab150SChristoph Lameter 
11517e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11527e2ab150SChristoph Lameter 			dest = d;
11537e2ab150SChristoph Lameter 
11547e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11557e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11567e2ab150SChristoph Lameter 				break;
11577e2ab150SChristoph Lameter 		}
11587e2ab150SChristoph Lameter 		if (source == -1)
11597e2ab150SChristoph Lameter 			break;
11607e2ab150SChristoph Lameter 
11617e2ab150SChristoph Lameter 		node_clear(source, tmp);
11627e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11637e2ab150SChristoph Lameter 		if (err > 0)
11647e2ab150SChristoph Lameter 			busy += err;
11657e2ab150SChristoph Lameter 		if (err < 0)
11667e2ab150SChristoph Lameter 			break;
116739743889SChristoph Lameter 	}
11687b2259b3SChristoph Lameter out:
116939743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11707e2ab150SChristoph Lameter 	if (err < 0)
11717e2ab150SChristoph Lameter 		return err;
11727e2ab150SChristoph Lameter 	return busy;
1173b20a3503SChristoph Lameter 
117439743889SChristoph Lameter }
117539743889SChristoph Lameter 
11763ad33b24SLee Schermerhorn /*
11773ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
11783ad33b24SLee Schermerhorn  * Start assuming that page is mapped by vma pointed to by @private.
11793ad33b24SLee Schermerhorn  * If it is not, search forward from there.  N.B., this assumes that the
11803ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11813ad33b24SLee Schermerhorn  * is in virtual address order.
11823ad33b24SLee Schermerhorn  */
1183742755a1SChristoph Lameter static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
118495a402c3SChristoph Lameter {
118595a402c3SChristoph Lameter 	struct vm_area_struct *vma = (struct vm_area_struct *)private;
11863ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
118795a402c3SChristoph Lameter 
11883ad33b24SLee Schermerhorn 	while (vma) {
11893ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11903ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11913ad33b24SLee Schermerhorn 			break;
11923ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11933ad33b24SLee Schermerhorn 	}
11943ad33b24SLee Schermerhorn 
1195*74060e4dSNaoya Horiguchi 	if (PageHuge(page))
1196*74060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
11973ad33b24SLee Schermerhorn 	/*
11983ad33b24SLee Schermerhorn 	 * if !vma, alloc_page_vma() will use task or system default policy
11993ad33b24SLee Schermerhorn 	 */
12003ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
120195a402c3SChristoph Lameter }
1202b20a3503SChristoph Lameter #else
1203b20a3503SChristoph Lameter 
1204b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1205b20a3503SChristoph Lameter 				unsigned long flags)
1206b20a3503SChristoph Lameter {
1207b20a3503SChristoph Lameter }
1208b20a3503SChristoph Lameter 
12090ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12100ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1211b20a3503SChristoph Lameter {
1212b20a3503SChristoph Lameter 	return -ENOSYS;
1213b20a3503SChristoph Lameter }
121495a402c3SChristoph Lameter 
121569939749SKeith Owens static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
121695a402c3SChristoph Lameter {
121795a402c3SChristoph Lameter 	return NULL;
121895a402c3SChristoph Lameter }
1219b20a3503SChristoph Lameter #endif
1220b20a3503SChristoph Lameter 
1221dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1222028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1223028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12246ce3c4c0SChristoph Lameter {
12256ce3c4c0SChristoph Lameter 	struct vm_area_struct *vma;
12266ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12276ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12286ce3c4c0SChristoph Lameter 	unsigned long end;
12296ce3c4c0SChristoph Lameter 	int err;
12306ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12316ce3c4c0SChristoph Lameter 
1232b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12336ce3c4c0SChristoph Lameter 		return -EINVAL;
123474c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12356ce3c4c0SChristoph Lameter 		return -EPERM;
12366ce3c4c0SChristoph Lameter 
12376ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12386ce3c4c0SChristoph Lameter 		return -EINVAL;
12396ce3c4c0SChristoph Lameter 
12406ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12416ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12426ce3c4c0SChristoph Lameter 
12436ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12446ce3c4c0SChristoph Lameter 	end = start + len;
12456ce3c4c0SChristoph Lameter 
12466ce3c4c0SChristoph Lameter 	if (end < start)
12476ce3c4c0SChristoph Lameter 		return -EINVAL;
12486ce3c4c0SChristoph Lameter 	if (end == start)
12496ce3c4c0SChristoph Lameter 		return 0;
12506ce3c4c0SChristoph Lameter 
1251028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12526ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12536ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12546ce3c4c0SChristoph Lameter 
1255b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1256b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1257b24f53a0SLee Schermerhorn 
12586ce3c4c0SChristoph Lameter 	/*
12596ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operating
12606ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12616ce3c4c0SChristoph Lameter 	 */
12626ce3c4c0SChristoph Lameter 	if (!new)
12636ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12646ce3c4c0SChristoph Lameter 
1265028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1266028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
126700ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12686ce3c4c0SChristoph Lameter 
12690aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12700aedadf9SChristoph Lameter 
12710aedadf9SChristoph Lameter 		err = migrate_prep();
12720aedadf9SChristoph Lameter 		if (err)
1273b05ca738SKOSAKI Motohiro 			goto mpol_out;
12740aedadf9SChristoph Lameter 	}
12754bfc4495SKAMEZAWA Hiroyuki 	{
12764bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12774bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12786ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
127958568d2aSMiao Xie 			task_lock(current);
12804bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
128158568d2aSMiao Xie 			task_unlock(current);
12824bfc4495SKAMEZAWA Hiroyuki 			if (err)
128358568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12844bfc4495SKAMEZAWA Hiroyuki 		} else
12854bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12864bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12874bfc4495SKAMEZAWA Hiroyuki 	}
1288b05ca738SKOSAKI Motohiro 	if (err)
1289b05ca738SKOSAKI Motohiro 		goto mpol_out;
1290b05ca738SKOSAKI Motohiro 
12916ce3c4c0SChristoph Lameter 	vma = check_range(mm, start, end, nmask,
12926ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
12936ce3c4c0SChristoph Lameter 
1294b24f53a0SLee Schermerhorn 	err = PTR_ERR(vma);	/* maybe ... */
1295a720094dSMel Gorman 	if (!IS_ERR(vma))
12969d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12977e2ab150SChristoph Lameter 
1298b24f53a0SLee Schermerhorn 	if (!err) {
1299b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1300b24f53a0SLee Schermerhorn 
1301cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1302b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
130395a402c3SChristoph Lameter 			nr_failed = migrate_pages(&pagelist, new_vma_page,
13047f0f2496SMel Gorman 					(unsigned long)vma,
13059c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1306cf608ac1SMinchan Kim 			if (nr_failed)
1307*74060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1308cf608ac1SMinchan Kim 		}
13096ce3c4c0SChristoph Lameter 
1310b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
13116ce3c4c0SChristoph Lameter 			err = -EIO;
1312ab8a3e14SKOSAKI Motohiro 	} else
1313ab8a3e14SKOSAKI Motohiro 		putback_lru_pages(&pagelist);
1314b20a3503SChristoph Lameter 
13156ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1316b05ca738SKOSAKI Motohiro  mpol_out:
1317f0be3d32SLee Schermerhorn 	mpol_put(new);
13186ce3c4c0SChristoph Lameter 	return err;
13196ce3c4c0SChristoph Lameter }
13206ce3c4c0SChristoph Lameter 
132139743889SChristoph Lameter /*
13228bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13238bccd85fSChristoph Lameter  */
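
/*
 * A minimal userspace sketch of driving do_mbind() through mbind(2)
 * (editor's illustration; error handling elided, values hypothetical):
 *
 *	#include <numaif.h>	// libnuma; or use syscall(__NR_mbind, ...)
 *
 *	unsigned long nodemask = 1UL << 1;	// allow node 1 only
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &nodemask,
 *	      sizeof(nodemask) * 8 + 1,	// get_nodes() decrements maxnode
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 *
 * MPOL_MF_MOVE migrates already-faulted pages that violate the new
 * policy; with MPOL_MF_STRICT, mbind() fails with -EIO if any of them
 * could not be moved (see do_mbind() above).
 */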
13248bccd85fSChristoph Lameter 
13258bccd85fSChristoph Lameter /* Copy a node mask from user space. */
132639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13278bccd85fSChristoph Lameter 		     unsigned long maxnode)
13288bccd85fSChristoph Lameter {
13298bccd85fSChristoph Lameter 	unsigned long k;
13308bccd85fSChristoph Lameter 	unsigned long nlongs;
13318bccd85fSChristoph Lameter 	unsigned long endmask;
13328bccd85fSChristoph Lameter 
13338bccd85fSChristoph Lameter 	--maxnode;
13348bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13358bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13368bccd85fSChristoph Lameter 		return 0;
1337a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1338636f13c1SChris Wright 		return -EINVAL;
13398bccd85fSChristoph Lameter 
13408bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13418bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13428bccd85fSChristoph Lameter 		endmask = ~0UL;
13438bccd85fSChristoph Lameter 	else
13448bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
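	/*
	 * Example (editor's note): on a 64-bit machine, maxnode == 70
	 * at this point (after the decrement above) gives nlongs == 2
	 * and endmask == (1UL << 6) - 1: only the low six bits of the
	 * second long are valid user-supplied node bits.
	 */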
13458bccd85fSChristoph Lameter 
13468bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
13478bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
13488bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13498bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
13508bccd85fSChristoph Lameter 			return -EINVAL;
13518bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13528bccd85fSChristoph Lameter 			unsigned long t;
13538bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13548bccd85fSChristoph Lameter 				return -EFAULT;
13558bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13568bccd85fSChristoph Lameter 				if (t & endmask)
13578bccd85fSChristoph Lameter 					return -EINVAL;
13588bccd85fSChristoph Lameter 			} else if (t)
13598bccd85fSChristoph Lameter 				return -EINVAL;
13608bccd85fSChristoph Lameter 		}
13618bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13628bccd85fSChristoph Lameter 		endmask = ~0UL;
13638bccd85fSChristoph Lameter 	}
13648bccd85fSChristoph Lameter 
13658bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13668bccd85fSChristoph Lameter 		return -EFAULT;
13678bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13688bccd85fSChristoph Lameter 	return 0;
13698bccd85fSChristoph Lameter }
13708bccd85fSChristoph Lameter 
13718bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13728bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13738bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13748bccd85fSChristoph Lameter {
13758bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13768bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13778bccd85fSChristoph Lameter 
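	/*
	 * Example (editor's note): on 64-bit with MAX_NUMNODES == 1024,
	 * nbytes == 128.  A caller passing maxnode == 2048 gets
	 * copy == 256, so the upper 128 bytes of the user buffer are
	 * zeroed and only the real 128-byte nodemask is copied out.
	 */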
13788bccd85fSChristoph Lameter 	if (copy > nbytes) {
13798bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13808bccd85fSChristoph Lameter 			return -EINVAL;
13818bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13828bccd85fSChristoph Lameter 			return -EFAULT;
13838bccd85fSChristoph Lameter 		copy = nbytes;
13848bccd85fSChristoph Lameter 	}
13858bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13868bccd85fSChristoph Lameter }
13878bccd85fSChristoph Lameter 
1388938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1389938bb9f5SHeiko Carstens 		unsigned long, mode, unsigned long __user *, nmask,
1390938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13918bccd85fSChristoph Lameter {
13928bccd85fSChristoph Lameter 	nodemask_t nodes;
13938bccd85fSChristoph Lameter 	int err;
1394028fec41SDavid Rientjes 	unsigned short mode_flags;
13958bccd85fSChristoph Lameter 
1396028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1397028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1398a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1399a3b51e01SDavid Rientjes 		return -EINVAL;
14004c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14014c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14024c50bc01SDavid Rientjes 		return -EINVAL;
14038bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14048bccd85fSChristoph Lameter 	if (err)
14058bccd85fSChristoph Lameter 		return err;
1406028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14078bccd85fSChristoph Lameter }
14088bccd85fSChristoph Lameter 
14098bccd85fSChristoph Lameter /* Set the process memory policy */
1410938bb9f5SHeiko Carstens SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1411938bb9f5SHeiko Carstens 		unsigned long, maxnode)
14128bccd85fSChristoph Lameter {
14138bccd85fSChristoph Lameter 	int err;
14148bccd85fSChristoph Lameter 	nodemask_t nodes;
1415028fec41SDavid Rientjes 	unsigned short flags;
14168bccd85fSChristoph Lameter 
1417028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1418028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1419028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14208bccd85fSChristoph Lameter 		return -EINVAL;
14214c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
14224c50bc01SDavid Rientjes 		return -EINVAL;
14238bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14248bccd85fSChristoph Lameter 	if (err)
14258bccd85fSChristoph Lameter 		return err;
1426028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14278bccd85fSChristoph Lameter }
14288bccd85fSChristoph Lameter 
1429938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1430938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1431938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
143239743889SChristoph Lameter {
1433c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1434596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
143539743889SChristoph Lameter 	struct task_struct *task;
143639743889SChristoph Lameter 	nodemask_t task_nodes;
143739743889SChristoph Lameter 	int err;
1438596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1439596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1440596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
144139743889SChristoph Lameter 
1442596d7cfaSKOSAKI Motohiro 	if (!scratch)
1443596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
144439743889SChristoph Lameter 
1445596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1446596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1447596d7cfaSKOSAKI Motohiro 
1448596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
144939743889SChristoph Lameter 	if (err)
1450596d7cfaSKOSAKI Motohiro 		goto out;
1451596d7cfaSKOSAKI Motohiro 
1452596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1453596d7cfaSKOSAKI Motohiro 	if (err)
1454596d7cfaSKOSAKI Motohiro 		goto out;
145539743889SChristoph Lameter 
145639743889SChristoph Lameter 	/* Find the mm_struct */
145755cfaa3cSZeng Zhaoming 	rcu_read_lock();
1458228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
145939743889SChristoph Lameter 	if (!task) {
146055cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1461596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1462596d7cfaSKOSAKI Motohiro 		goto out;
146339743889SChristoph Lameter 	}
14643268c63eSChristoph Lameter 	get_task_struct(task);
146539743889SChristoph Lameter 
1466596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
146739743889SChristoph Lameter 
146839743889SChristoph Lameter 	/*
146939743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
147039743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14717f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
147239743889SChristoph Lameter 	 * userid as the target process.
147339743889SChristoph Lameter 	 */
1474c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1475b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1476b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
147774c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1478c69e8d9cSDavid Howells 		rcu_read_unlock();
147939743889SChristoph Lameter 		err = -EPERM;
14803268c63eSChristoph Lameter 		goto out_put;
148139743889SChristoph Lameter 	}
1482c69e8d9cSDavid Howells 	rcu_read_unlock();
148339743889SChristoph Lameter 
148439743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
148539743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1486596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
148739743889SChristoph Lameter 		err = -EPERM;
14883268c63eSChristoph Lameter 		goto out_put;
148939743889SChristoph Lameter 	}
149039743889SChristoph Lameter 
149101f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14923b42d28bSChristoph Lameter 		err = -EINVAL;
14933268c63eSChristoph Lameter 		goto out_put;
14943b42d28bSChristoph Lameter 	}
14953b42d28bSChristoph Lameter 
149686c3a764SDavid Quigley 	err = security_task_movememory(task);
149786c3a764SDavid Quigley 	if (err)
14983268c63eSChristoph Lameter 		goto out_put;
149986c3a764SDavid Quigley 
15003268c63eSChristoph Lameter 	mm = get_task_mm(task);
15013268c63eSChristoph Lameter 	put_task_struct(task);
1502f2a9ef88SSasha Levin 
1503f2a9ef88SSasha Levin 	if (!mm) {
1504f2a9ef88SSasha Levin 		err = -EINVAL;
1505f2a9ef88SSasha Levin 		goto out;
1506f2a9ef88SSasha Levin 	}
1507f2a9ef88SSasha Levin 
1508596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
150974c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15103268c63eSChristoph Lameter 
151139743889SChristoph Lameter 	mmput(mm);
15123268c63eSChristoph Lameter out:
1513596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1514596d7cfaSKOSAKI Motohiro 
151539743889SChristoph Lameter 	return err;
15163268c63eSChristoph Lameter 
15173268c63eSChristoph Lameter out_put:
15183268c63eSChristoph Lameter 	put_task_struct(task);
15193268c63eSChristoph Lameter 	goto out;
15203268c63eSChristoph Lameter 
152139743889SChristoph Lameter }
152239743889SChristoph Lameter 
152339743889SChristoph Lameter 
15248bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1525938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1526938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1527938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
15288bccd85fSChristoph Lameter {
1529dbcb0f19SAdrian Bunk 	int err;
1530dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15318bccd85fSChristoph Lameter 	nodemask_t nodes;
15328bccd85fSChristoph Lameter 
15338bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
15348bccd85fSChristoph Lameter 		return -EINVAL;
15358bccd85fSChristoph Lameter 
15368bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15378bccd85fSChristoph Lameter 
15388bccd85fSChristoph Lameter 	if (err)
15398bccd85fSChristoph Lameter 		return err;
15408bccd85fSChristoph Lameter 
15418bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15428bccd85fSChristoph Lameter 		return -EFAULT;
15438bccd85fSChristoph Lameter 
15448bccd85fSChristoph Lameter 	if (nmask)
15458bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15468bccd85fSChristoph Lameter 
15478bccd85fSChristoph Lameter 	return err;
15488bccd85fSChristoph Lameter }
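
/*
 * Userspace sketch (editor's illustration, hypothetical variables):
 * ask which node currently backs a given address:
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * Note that passing a non-NULL nmask requires maxnode to cover the
 * kernel's MAX_NUMNODES (checked above), so userspace libraries
 * typically probe the kernel's nodemask size first.
 */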
15498bccd85fSChristoph Lameter 
15501da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15511da177e4SLinus Torvalds 
15521da177e4SLinus Torvalds asmlinkage long compat_sys_get_mempolicy(int __user *policy,
15531da177e4SLinus Torvalds 				     compat_ulong_t __user *nmask,
15541da177e4SLinus Torvalds 				     compat_ulong_t maxnode,
15551da177e4SLinus Torvalds 				     compat_ulong_t addr, compat_ulong_t flags)
15561da177e4SLinus Torvalds {
15571da177e4SLinus Torvalds 	long err;
15581da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15591da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15601da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15631da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds 	if (nmask)
15661da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15671da177e4SLinus Torvalds 
15681da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds 	if (!err && nmask) {
15712bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15722bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15732bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15741da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15751da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15761da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15771da177e4SLinus Torvalds 	}
15781da177e4SLinus Torvalds 
15791da177e4SLinus Torvalds 	return err;
15801da177e4SLinus Torvalds }
15811da177e4SLinus Torvalds 
15821da177e4SLinus Torvalds asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
15831da177e4SLinus Torvalds 				     compat_ulong_t maxnode)
15841da177e4SLinus Torvalds {
15851da177e4SLinus Torvalds 	long err = 0;
15861da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15871da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15881da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15891da177e4SLinus Torvalds 
15901da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15911da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds 	if (nmask) {
15941da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15951da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15961da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15971da177e4SLinus Torvalds 	}
15981da177e4SLinus Torvalds 
15991da177e4SLinus Torvalds 	if (err)
16001da177e4SLinus Torvalds 		return -EFAULT;
16011da177e4SLinus Torvalds 
16021da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
16031da177e4SLinus Torvalds }
16041da177e4SLinus Torvalds 
16051da177e4SLinus Torvalds asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
16061da177e4SLinus Torvalds 			     compat_ulong_t mode, compat_ulong_t __user *nmask,
16071da177e4SLinus Torvalds 			     compat_ulong_t maxnode, compat_ulong_t flags)
16081da177e4SLinus Torvalds {
16091da177e4SLinus Torvalds 	long err = 0;
16101da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16111da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1612dfcd3c0dSAndi Kleen 	nodemask_t bm;
16131da177e4SLinus Torvalds 
16141da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16151da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds 	if (nmask) {
1618dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
16191da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1620dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
16211da177e4SLinus Torvalds 	}
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 	if (err)
16241da177e4SLinus Torvalds 		return -EFAULT;
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
16271da177e4SLinus Torvalds }
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds #endif
16301da177e4SLinus Torvalds 
1631480eccf9SLee Schermerhorn /*
1632480eccf9SLee Schermerhorn  * get_vma_policy(@task, @vma, @addr)
1633480eccf9SLee Schermerhorn  * @task - task for fallback if vma policy == default
1634480eccf9SLee Schermerhorn  * @vma   - virtual memory area whose policy is sought
1635480eccf9SLee Schermerhorn  * @addr  - address in @vma for shared policy lookup
1636480eccf9SLee Schermerhorn  *
1637480eccf9SLee Schermerhorn  * Returns effective policy for a VMA at specified address.
1638480eccf9SLee Schermerhorn  * Falls back to @task or system default policy, as necessary.
163932f8516aSDavid Rientjes  * Current or other task's task mempolicy and non-shared vma policies must be
164032f8516aSDavid Rientjes  * protected by task_lock(task) by the caller.
164152cd3b07SLee Schermerhorn  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
164252cd3b07SLee Schermerhorn  * count--added by the get_policy() vm_op, as appropriate--to protect against
164352cd3b07SLee Schermerhorn  * freeing by another task.  It is the caller's responsibility to free the
164452cd3b07SLee Schermerhorn  * extra reference for shared policies.
1645480eccf9SLee Schermerhorn  */
1646d98f6cb6SStephen Wilson struct mempolicy *get_vma_policy(struct task_struct *task,
164748fce342SChristoph Lameter 		struct vm_area_struct *vma, unsigned long addr)
16481da177e4SLinus Torvalds {
16495606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(task);
16501da177e4SLinus Torvalds 
16511da177e4SLinus Torvalds 	if (vma) {
1652480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
1653ae4d8c16SLee Schermerhorn 			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1654ae4d8c16SLee Schermerhorn 									addr);
1655ae4d8c16SLee Schermerhorn 			if (vpol)
1656ae4d8c16SLee Schermerhorn 				pol = vpol;
165700442ad0SMel Gorman 		} else if (vma->vm_policy) {
16581da177e4SLinus Torvalds 			pol = vma->vm_policy;
165900442ad0SMel Gorman 
166000442ad0SMel Gorman 			/*
166100442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
166200442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
166300442ad0SMel Gorman 			 * count on these policies which will be dropped by
166400442ad0SMel Gorman 			 * mpol_cond_put() later
166500442ad0SMel Gorman 			 */
166600442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
166700442ad0SMel Gorman 				mpol_get(pol);
166800442ad0SMel Gorman 		}
16691da177e4SLinus Torvalds 	}
16701da177e4SLinus Torvalds 	if (!pol)
16711da177e4SLinus Torvalds 		pol = &default_policy;
16721da177e4SLinus Torvalds 	return pol;
16731da177e4SLinus Torvalds }
16741da177e4SLinus Torvalds 
1675d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1676d3eb1570SLai Jiangshan {
1677d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1678d3eb1570SLai Jiangshan 
1679d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1680d3eb1570SLai Jiangshan 
1681d3eb1570SLai Jiangshan 	/*
1682d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1683d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) is ZONE_MOVABLE.
1684d3eb1570SLai Jiangshan 	 *
1685d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY], so
1686d3eb1570SLai Jiangshan 	 * if the following test fails, it implies that
1687d3eb1570SLai Jiangshan 	 * policy->v.nodes contains movable memory only.
1688d3eb1570SLai Jiangshan 	 */
1689d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1690d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1691d3eb1570SLai Jiangshan 
1692d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1693d3eb1570SLai Jiangshan }
1694d3eb1570SLai Jiangshan 
169552cd3b07SLee Schermerhorn /*
169652cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
169752cd3b07SLee Schermerhorn  * page allocation
169852cd3b07SLee Schermerhorn  */
169952cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
170019770b32SMel Gorman {
170119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
170245c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1703d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
170419770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
170519770b32SMel Gorman 		return &policy->v.nodes;
170619770b32SMel Gorman 
170719770b32SMel Gorman 	return NULL;
170819770b32SMel Gorman }
170919770b32SMel Gorman 
171052cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
17112f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
17122f5f9486SAndi Kleen 	int nd)
17131da177e4SLinus Torvalds {
171445c4745aSLee Schermerhorn 	switch (policy->mode) {
17151da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1716fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
17171da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
17181da177e4SLinus Torvalds 		break;
17191da177e4SLinus Torvalds 	case MPOL_BIND:
172019770b32SMel Gorman 		/*
172152cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
172252cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
17236eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
172452cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
172519770b32SMel Gorman 		 */
172619770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
172719770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
172819770b32SMel Gorman 			nd = first_node(policy->v.nodes);
172919770b32SMel Gorman 		break;
17301da177e4SLinus Torvalds 	default:
17311da177e4SLinus Torvalds 		BUG();
17321da177e4SLinus Torvalds 	}
17330e88460dSMel Gorman 	return node_zonelist(nd, gfp);
17341da177e4SLinus Torvalds }
17351da177e4SLinus Torvalds 
17361da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
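/*
 * Editor's illustration (hypothetical policy): with v.nodes = {0,2,5}
 * and il_next starting at 0, successive calls return 0, 2, 5, 0, ...
 * because next_node() past the last set bit returns MAX_NUMNODES and
 * we wrap around with first_node().
 */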
17371da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17381da177e4SLinus Torvalds {
17391da177e4SLinus Torvalds 	unsigned nid, next;
17401da177e4SLinus Torvalds 	struct task_struct *me = current;
17411da177e4SLinus Torvalds 
17421da177e4SLinus Torvalds 	nid = me->il_next;
1743dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
17441da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1745dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1746f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17471da177e4SLinus Torvalds 		me->il_next = next;
17481da177e4SLinus Torvalds 	return nid;
17491da177e4SLinus Torvalds }
17501da177e4SLinus Torvalds 
1751dc85da15SChristoph Lameter /*
1752dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1753dc85da15SChristoph Lameter  * next slab entry.
175452cd3b07SLee Schermerhorn  * @policy must be protected from freeing by the caller.  If @policy is
175552cd3b07SLee Schermerhorn  * the current task's mempolicy, this protection is implicit, as only the
175652cd3b07SLee Schermerhorn  * task can change its policy.  The system default policy requires no
175752cd3b07SLee Schermerhorn  * such protection.
1758dc85da15SChristoph Lameter  */
1759e7b691b0SAndi Kleen unsigned slab_node(void)
1760dc85da15SChristoph Lameter {
1761e7b691b0SAndi Kleen 	struct mempolicy *policy;
1762e7b691b0SAndi Kleen 
1763e7b691b0SAndi Kleen 	if (in_interrupt())
1764e7b691b0SAndi Kleen 		return numa_node_id();
1765e7b691b0SAndi Kleen 
1766e7b691b0SAndi Kleen 	policy = current->mempolicy;
1767fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
1768bea904d5SLee Schermerhorn 		return numa_node_id();
1769765c4507SChristoph Lameter 
1770bea904d5SLee Schermerhorn 	switch (policy->mode) {
1771bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1772fc36b8d3SLee Schermerhorn 		/*
1773fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1774fc36b8d3SLee Schermerhorn 		 */
1775bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1776bea904d5SLee Schermerhorn 
1777dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1778dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1779dc85da15SChristoph Lameter 
1780dd1a239fSMel Gorman 	case MPOL_BIND: {
1781dc85da15SChristoph Lameter 		/*
1782dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1783dc85da15SChristoph Lameter 		 * first node.
1784dc85da15SChristoph Lameter 		 */
178519770b32SMel Gorman 		struct zonelist *zonelist;
178619770b32SMel Gorman 		struct zone *zone;
178719770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
178819770b32SMel Gorman 		zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
178919770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
179019770b32SMel Gorman 							&policy->v.nodes,
179119770b32SMel Gorman 							&zone);
1792800416f7SEric Dumazet 		return zone ? zone->node : numa_node_id();
1793dd1a239fSMel Gorman 	}
1794dc85da15SChristoph Lameter 
1795dc85da15SChristoph Lameter 	default:
1796bea904d5SLee Schermerhorn 		BUG();
1797dc85da15SChristoph Lameter 	}
1798dc85da15SChristoph Lameter }
1799dc85da15SChristoph Lameter 
18001da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
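/*
 * Editor's illustration (hypothetical values): with pol->v.nodes =
 * {1,3,4} (nnodes == 3) and off == 7, target == 7 % 3 == 1 and the
 * do/while walk below stops on the second set node, i.e. node 3.
 */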
18011da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
18021da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
18031da177e4SLinus Torvalds {
1804dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1805f5b087b5SDavid Rientjes 	unsigned target;
18061da177e4SLinus Torvalds 	int c;
18071da177e4SLinus Torvalds 	int nid = -1;
18081da177e4SLinus Torvalds 
1809f5b087b5SDavid Rientjes 	if (!nnodes)
1810f5b087b5SDavid Rientjes 		return numa_node_id();
1811f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
18121da177e4SLinus Torvalds 	c = 0;
18131da177e4SLinus Torvalds 	do {
1814dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18151da177e4SLinus Torvalds 		c++;
18161da177e4SLinus Torvalds 	} while (c <= target);
18171da177e4SLinus Torvalds 	return nid;
18181da177e4SLinus Torvalds }
18191da177e4SLinus Torvalds 
18205da7ca86SChristoph Lameter /* Determine a node number for interleave */
18215da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18225da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18235da7ca86SChristoph Lameter {
18245da7ca86SChristoph Lameter 	if (vma) {
18255da7ca86SChristoph Lameter 		unsigned long off;
18265da7ca86SChristoph Lameter 
18273b98b087SNishanth Aravamudan 		/*
18283b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18293b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18303b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18313b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18323b98b087SNishanth Aravamudan 		 * a useful offset.
18333b98b087SNishanth Aravamudan 		 */
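		/*
		 * Example (editor's note): for a 2MB hugepage VMA on
		 * x86-64, shift == 21 and PAGE_SHIFT == 12, so
		 * off = vm_pgoff >> 9 counts whole hugepages into the
		 * mapping, and the addr term below is added in the
		 * same units.
		 */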
18343b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18353b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18365da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
18375da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
18385da7ca86SChristoph Lameter 	} else
18395da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18405da7ca86SChristoph Lameter }
18415da7ca86SChristoph Lameter 
1842778d3b0fSMichal Hocko /*
1843778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1844778d3b0fSMichal Hocko  * (returns -1 if nodemask is empty)
1845778d3b0fSMichal Hocko  */
1846778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1847778d3b0fSMichal Hocko {
1848778d3b0fSMichal Hocko 	int w, bit = -1;
1849778d3b0fSMichal Hocko 
1850778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1851778d3b0fSMichal Hocko 	if (w)
1852778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1853778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1854778d3b0fSMichal Hocko 	return bit;
1855778d3b0fSMichal Hocko }
1856778d3b0fSMichal Hocko 
185700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1858480eccf9SLee Schermerhorn /*
1859480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1860480eccf9SLee Schermerhorn  * @vma = virtual memory area whose policy is sought
1861480eccf9SLee Schermerhorn  * @addr = address in @vma for shared policy lookup and interleave policy
1862480eccf9SLee Schermerhorn  * @gfp_flags = for requested zone
186319770b32SMel Gorman  * @mpol = pointer to mempolicy pointer for reference counted mempolicy
186419770b32SMel Gorman  * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1865480eccf9SLee Schermerhorn  *
186652cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
186752cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
186852cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
186952cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1870c0ff7453SMiao Xie  *
1871c0ff7453SMiao Xie  * Must be protected by get_mems_allowed()
1872480eccf9SLee Schermerhorn  */
1873396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
187419770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
187519770b32SMel Gorman 				nodemask_t **nodemask)
18765da7ca86SChristoph Lameter {
1877480eccf9SLee Schermerhorn 	struct zonelist *zl;
18785da7ca86SChristoph Lameter 
187952cd3b07SLee Schermerhorn 	*mpol = get_vma_policy(current, vma, addr);
188019770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18815da7ca86SChristoph Lameter 
188252cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
188352cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1884a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
188552cd3b07SLee Schermerhorn 	} else {
18862f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
188752cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
188852cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1889480eccf9SLee Schermerhorn 	}
1890480eccf9SLee Schermerhorn 	return zl;
18915da7ca86SChristoph Lameter }
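
/*
 * Typical calling pattern (editor's sketch based on the reference
 * counting rules above; dequeue_from_zonelist() is a hypothetical
 * stand-in for the caller's allocation loop):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl;
 *
 *	zl = huge_zonelist(vma, addr, htlb_alloc_mask, &mpol, &nodemask);
 *	page = dequeue_from_zonelist(zl, nodemask);
 *	mpol_cond_put(mpol);	// drop the conditional reference
 */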
189206808b08SLee Schermerhorn 
189306808b08SLee Schermerhorn /*
189406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
189506808b08SLee Schermerhorn  *
189606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
189706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
189806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
189906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
190006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
190106808b08SLee Schermerhorn  * of non-default mempolicy.
190206808b08SLee Schermerhorn  *
190306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
190406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
190506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
190606808b08SLee Schermerhorn  *
190706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
190806808b08SLee Schermerhorn  */
190906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
191006808b08SLee Schermerhorn {
191106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
191206808b08SLee Schermerhorn 	int nid;
191306808b08SLee Schermerhorn 
191406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
191506808b08SLee Schermerhorn 		return false;
191606808b08SLee Schermerhorn 
1917c0ff7453SMiao Xie 	task_lock(current);
191806808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
191906808b08SLee Schermerhorn 	switch (mempolicy->mode) {
192006808b08SLee Schermerhorn 	case MPOL_PREFERRED:
192106808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
192206808b08SLee Schermerhorn 			nid = numa_node_id();
192306808b08SLee Schermerhorn 		else
192406808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
192506808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
192606808b08SLee Schermerhorn 		break;
192706808b08SLee Schermerhorn 
192806808b08SLee Schermerhorn 	case MPOL_BIND:
192906808b08SLee Schermerhorn 		/* Fall through */
193006808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
193106808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
193206808b08SLee Schermerhorn 		break;
193306808b08SLee Schermerhorn 
193406808b08SLee Schermerhorn 	default:
193506808b08SLee Schermerhorn 		BUG();
193606808b08SLee Schermerhorn 	}
1937c0ff7453SMiao Xie 	task_unlock(current);
193806808b08SLee Schermerhorn 
193906808b08SLee Schermerhorn 	return true;
194006808b08SLee Schermerhorn }
194100ac59adSChen, Kenneth W #endif
19425da7ca86SChristoph Lameter 
19436f48d0ebSDavid Rientjes /*
19446f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19456f48d0ebSDavid Rientjes  *
19466f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19476f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19486f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19496f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19506f48d0ebSDavid Rientjes  *
19516f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19526f48d0ebSDavid Rientjes  */
19536f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19546f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19556f48d0ebSDavid Rientjes {
19566f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19576f48d0ebSDavid Rientjes 	bool ret = true;
19586f48d0ebSDavid Rientjes 
19596f48d0ebSDavid Rientjes 	if (!mask)
19606f48d0ebSDavid Rientjes 		return ret;
19616f48d0ebSDavid Rientjes 	task_lock(tsk);
19626f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19636f48d0ebSDavid Rientjes 	if (!mempolicy)
19646f48d0ebSDavid Rientjes 		goto out;
19656f48d0ebSDavid Rientjes 
19666f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19676f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19686f48d0ebSDavid Rientjes 		/*
19696f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express a preference for
19706f48d0ebSDavid Rientjes 		 * nodes to allocate from; they may fall back to other nodes when OOM.
19716f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19726f48d0ebSDavid Rientjes 		 * nodes in mask.
19736f48d0ebSDavid Rientjes 		 */
19746f48d0ebSDavid Rientjes 		break;
19756f48d0ebSDavid Rientjes 	case MPOL_BIND:
19766f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19776f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19786f48d0ebSDavid Rientjes 		break;
19796f48d0ebSDavid Rientjes 	default:
19806f48d0ebSDavid Rientjes 		BUG();
19816f48d0ebSDavid Rientjes 	}
19826f48d0ebSDavid Rientjes out:
19836f48d0ebSDavid Rientjes 	task_unlock(tsk);
19846f48d0ebSDavid Rientjes 	return ret;
19856f48d0ebSDavid Rientjes }
19866f48d0ebSDavid Rientjes 
19871da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19881da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1989662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1990662f3a0bSAndi Kleen 					unsigned nid)
19911da177e4SLinus Torvalds {
19921da177e4SLinus Torvalds 	struct zonelist *zl;
19931da177e4SLinus Torvalds 	struct page *page;
19941da177e4SLinus Torvalds 
19950e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19961da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1997dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1998ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19991da177e4SLinus Torvalds 	return page;
20001da177e4SLinus Torvalds }
20011da177e4SLinus Torvalds 
20021da177e4SLinus Torvalds /**
20030bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20041da177e4SLinus Torvalds  *
20051da177e4SLinus Torvalds  * 	@gfp:
20061da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20071da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20081da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20091da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20101da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20111da177e4SLinus Torvalds  *
20120bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20131da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20141da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
20151da177e4SLinus Torvalds  *
20161da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20171da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20181da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20191da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
20201da177e4SLinus Torvalds  *	all allocations for pages that will be mapped into
20211da177e4SLinus Torvalds  * 	user space. Returns NULL when no page can be allocated.
20221da177e4SLinus Torvalds  *
20231da177e4SLinus Torvalds  *	Should be called with the mmap_sem of the vma held.
20241da177e4SLinus Torvalds  */
20251da177e4SLinus Torvalds struct page *
20260bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
20272f5f9486SAndi Kleen 		unsigned long addr, int node)
20281da177e4SLinus Torvalds {
2029cc9a6c87SMel Gorman 	struct mempolicy *pol;
2030c0ff7453SMiao Xie 	struct page *page;
2031cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20321da177e4SLinus Torvalds 
2033cc9a6c87SMel Gorman retry_cpuset:
2034cc9a6c87SMel Gorman 	pol = get_vma_policy(current, vma, addr);
2035cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2036cc9a6c87SMel Gorman 
203745c4745aSLee Schermerhorn 	if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
20381da177e4SLinus Torvalds 		unsigned nid;
20395da7ca86SChristoph Lameter 
20408eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
204152cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20420bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2043cc9a6c87SMel Gorman 		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2044cc9a6c87SMel Gorman 			goto retry_cpuset;
2045cc9a6c87SMel Gorman 
2046c0ff7453SMiao Xie 		return page;
20471da177e4SLinus Torvalds 	}
2048212a0a6fSDavid Rientjes 	page = __alloc_pages_nodemask(gfp, order,
2049212a0a6fSDavid Rientjes 				      policy_zonelist(gfp, pol, node),
20500bbbc0b3SAndrea Arcangeli 				      policy_nodemask(gfp, pol));
2051212a0a6fSDavid Rientjes 	if (unlikely(mpol_needs_cond_ref(pol)))
2052212a0a6fSDavid Rientjes 		__mpol_put(pol);
2053cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2054cc9a6c87SMel Gorman 		goto retry_cpuset;
2055c0ff7453SMiao Xie 	return page;
20561da177e4SLinus Torvalds }
20571da177e4SLinus Torvalds 
20581da177e4SLinus Torvalds /**
20591da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20601da177e4SLinus Torvalds  *
20611da177e4SLinus Torvalds  *	@gfp:
20621da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20631da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20641da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20651da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20661da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20671da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20681da177e4SLinus Torvalds  *
20691da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20701da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20711da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20721da177e4SLinus Torvalds  *
2073cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20741da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20751da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20761da177e4SLinus Torvalds  */
2077dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20781da177e4SLinus Torvalds {
20795606e387SMel Gorman 	struct mempolicy *pol = get_task_policy(current);
2080c0ff7453SMiao Xie 	struct page *page;
2081cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20821da177e4SLinus Torvalds 
20839b819d20SChristoph Lameter 	if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
20841da177e4SLinus Torvalds 		pol = &default_policy;
208552cd3b07SLee Schermerhorn 
2086cc9a6c87SMel Gorman retry_cpuset:
2087cc9a6c87SMel Gorman 	cpuset_mems_cookie = get_mems_allowed();
2088cc9a6c87SMel Gorman 
208952cd3b07SLee Schermerhorn 	/*
209052cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
209152cd3b07SLee Schermerhorn 	 * nor system default_policy
209252cd3b07SLee Schermerhorn 	 */
209345c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2094c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2095c0ff7453SMiao Xie 	else
2096c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20975c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20985c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2099cc9a6c87SMel Gorman 
2100cc9a6c87SMel Gorman 	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2101cc9a6c87SMel Gorman 		goto retry_cpuset;
2102cc9a6c87SMel Gorman 
2103c0ff7453SMiao Xie 	return page;
21041da177e4SLinus Torvalds }
21051da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
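/*
 * Illustrative usage sketch (editor's example, not part of the original
 * file): on NUMA kernels the generic alloc_pages() wrapper typically
 * resolves to alloc_pages_current(), so a policy-respecting order-0
 * allocation and release look like:
 *
 *	struct page *page = alloc_pages_current(GFP_KERNEL, 0);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 0);
 */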
21061da177e4SLinus Torvalds 
2107ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2108ef0855d3SOleg Nesterov {
2109ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2110ef0855d3SOleg Nesterov 
2111ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2112ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2113ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2114ef0855d3SOleg Nesterov 	return 0;
2115ef0855d3SOleg Nesterov }
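/*
 * Illustrative sketch (editor's example, names hypothetical): a
 * VMA-splitting path would duplicate the old VMA's policy into the new
 * VMA and unwind on failure:
 *
 *	err = vma_dup_policy(vma, new_vma);
 *	if (err)
 *		goto out_free_vma;
 */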
2116ef0855d3SOleg Nesterov 
21174225399aSPaul Jackson /*
2118846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21194225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
21204225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21214225399aSPaul Jackson  * keeps mempolicies cpuset-relative after the task's cpuset moves.  See
21224225399aSPaul Jackson  * kernel/cpuset.c update_nodemask() for the details.
2123708c1bbcSMiao Xie  *
2124708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2125708c1bbcSMiao Xie  * the cpuset's mems), so we need not do the rebind work for the current task.
21264225399aSPaul Jackson  */
21274225399aSPaul Jackson 
2128846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2129846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21301da177e4SLinus Torvalds {
21311da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 	if (!new)
21341da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2135708c1bbcSMiao Xie 
2136708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2137708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2138708c1bbcSMiao Xie 		task_lock(current);
2139708c1bbcSMiao Xie 		*new = *old;
2140708c1bbcSMiao Xie 		task_unlock(current);
2141708c1bbcSMiao Xie 	} else
2142708c1bbcSMiao Xie 		*new = *old;
2143708c1bbcSMiao Xie 
214499ee4ca7SPaul E. McKenney 	rcu_read_lock();
21454225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21464225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2147708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2148708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2149708c1bbcSMiao Xie 		else
2150708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21514225399aSPaul Jackson 	}
215299ee4ca7SPaul E. McKenney 	rcu_read_unlock();
21531da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21541da177e4SLinus Torvalds 	return new;
21551da177e4SLinus Torvalds }
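/*
 * Illustrative sketch (editor's example): callers duplicate through the
 * mpol_dup() wrapper and must check for an ERR_PTR-encoded failure:
 *
 *	struct mempolicy *new = mpol_dup(old);
 *
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	...
 *	mpol_put(new);
 */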
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2158fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21591da177e4SLinus Torvalds {
21601da177e4SLinus Torvalds 	if (!a || !b)
2161fcfb4dccSKOSAKI Motohiro 		return false;
216245c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2163fcfb4dccSKOSAKI Motohiro 		return false;
216419800502SBob Liu 	if (a->flags != b->flags)
2165fcfb4dccSKOSAKI Motohiro 		return false;
216619800502SBob Liu 	if (mpol_store_user_nodemask(a))
216719800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2168fcfb4dccSKOSAKI Motohiro 			return false;
216919800502SBob Liu 
217045c4745aSLee Schermerhorn 	switch (a->mode) {
217119770b32SMel Gorman 	case MPOL_BIND:
217219770b32SMel Gorman 		/* Fall through */
21731da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2174fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21751da177e4SLinus Torvalds 	case MPOL_PREFERRED:
217675719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21771da177e4SLinus Torvalds 	default:
21781da177e4SLinus Torvalds 		BUG();
2179fcfb4dccSKOSAKI Motohiro 		return false;
21801da177e4SLinus Torvalds 	}
21811da177e4SLinus Torvalds }
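/*
 * Illustrative sketch (editor's example): VMA merging decisions compare
 * neighbouring policies through the mpol_equal() wrapper, which only
 * falls back to this slow path when the two pointers differ:
 *
 *	if (!mpol_equal(vma_policy(prev), vma_policy(next)))
 *		return 0;	(policies differ; do not merge)
 */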
21821da177e4SLinus Torvalds 
21831da177e4SLinus Torvalds /*
21841da177e4SLinus Torvalds  * Shared memory backing store policy support.
21851da177e4SLinus Torvalds  *
21861da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21871da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21881da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
21891da177e4SLinus Torvalds  * for any accesses to the tree.
21901da177e4SLinus Torvalds  */
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds /* lookup first element intersecting start-end */
219342288fe3SMel Gorman /* Caller holds sp->lock */
21941da177e4SLinus Torvalds static struct sp_node *
21951da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21961da177e4SLinus Torvalds {
21971da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21981da177e4SLinus Torvalds 
21991da177e4SLinus Torvalds 	while (n) {
22001da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22011da177e4SLinus Torvalds 
22021da177e4SLinus Torvalds 		if (start >= p->end)
22031da177e4SLinus Torvalds 			n = n->rb_right;
22041da177e4SLinus Torvalds 		else if (end <= p->start)
22051da177e4SLinus Torvalds 			n = n->rb_left;
22061da177e4SLinus Torvalds 		else
22071da177e4SLinus Torvalds 			break;
22081da177e4SLinus Torvalds 	}
22091da177e4SLinus Torvalds 	if (!n)
22101da177e4SLinus Torvalds 		return NULL;
22111da177e4SLinus Torvalds 	for (;;) {
22121da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22131da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22141da177e4SLinus Torvalds 		if (!prev)
22151da177e4SLinus Torvalds 			break;
22161da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22171da177e4SLinus Torvalds 		if (w->end <= start)
22181da177e4SLinus Torvalds 			break;
22191da177e4SLinus Torvalds 		n = prev;
22201da177e4SLinus Torvalds 	}
22211da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22221da177e4SLinus Torvalds }
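/*
 * Worked example (editor's illustration): with ranges [2,4) and [5,8)
 * in the tree, sp_lookup(sp, 3, 6) first descends to some node
 * intersecting [3,6) -- possibly [5,8) -- and then walks rb_prev()
 * back until the predecessor no longer intersects, returning the
 * leftmost intersecting node [2,4).
 */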
22231da177e4SLinus Torvalds 
22241da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
22251da177e4SLinus Torvalds /* Caller holds sp->lock */
22261da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22271da177e4SLinus Torvalds {
22281da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22291da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22301da177e4SLinus Torvalds 	struct sp_node *nd;
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds 	while (*p) {
22331da177e4SLinus Torvalds 		parent = *p;
22341da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22351da177e4SLinus Torvalds 		if (new->start < nd->start)
22361da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22371da177e4SLinus Torvalds 		else if (new->end > nd->end)
22381da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22391da177e4SLinus Torvalds 		else
22401da177e4SLinus Torvalds 			BUG();
22411da177e4SLinus Torvalds 	}
22421da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22431da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2244140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
224545c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22461da177e4SLinus Torvalds }
22471da177e4SLinus Torvalds 
22481da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22491da177e4SLinus Torvalds struct mempolicy *
22501da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22511da177e4SLinus Torvalds {
22521da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22531da177e4SLinus Torvalds 	struct sp_node *sn;
22541da177e4SLinus Torvalds 
22551da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22561da177e4SLinus Torvalds 		return NULL;
225742288fe3SMel Gorman 	spin_lock(&sp->lock);
22581da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22591da177e4SLinus Torvalds 	if (sn) {
22601da177e4SLinus Torvalds 		mpol_get(sn->policy);
22611da177e4SLinus Torvalds 		pol = sn->policy;
22621da177e4SLinus Torvalds 	}
226342288fe3SMel Gorman 	spin_unlock(&sp->lock);
22641da177e4SLinus Torvalds 	return pol;
22651da177e4SLinus Torvalds }
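/*
 * Illustrative sketch (editor's example, caller names hypothetical): a
 * shared-memory mapping looks up the policy for a file page index and,
 * because tree policies carry MPOL_F_SHARED, later drops the reference
 * conditionally:
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, index);
 *	...
 *	mpol_cond_put(pol);
 */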
22661da177e4SLinus Torvalds 
226763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
226863f74ca2SKOSAKI Motohiro {
226963f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
227063f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
227163f74ca2SKOSAKI Motohiro }
227263f74ca2SKOSAKI Motohiro 
2273771fb4d8SLee Schermerhorn /**
2274771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2275771fb4d8SLee Schermerhorn  *
2276771fb4d8SLee Schermerhorn  * @page: page to be checked
2277771fb4d8SLee Schermerhorn  * @vma: vm area where page mapped
2278771fb4d8SLee Schermerhorn  * @addr: virtual address where page mapped
2279771fb4d8SLee Schermerhorn  *
2280771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2281771fb4d8SLee Schermerhorn  * page's node id.
2282771fb4d8SLee Schermerhorn  *
2283771fb4d8SLee Schermerhorn  * Returns:
2284771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2285771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2286771fb4d8SLee Schermerhorn  *
2287771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2288771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2289771fb4d8SLee Schermerhorn  */
2290771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2291771fb4d8SLee Schermerhorn {
2292771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2293771fb4d8SLee Schermerhorn 	struct zone *zone;
2294771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2295771fb4d8SLee Schermerhorn 	unsigned long pgoff;
2296771fb4d8SLee Schermerhorn 	int polnid = -1;
2297771fb4d8SLee Schermerhorn 	int ret = -1;
2298771fb4d8SLee Schermerhorn 
2299771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2300771fb4d8SLee Schermerhorn 
2301771fb4d8SLee Schermerhorn 	pol = get_vma_policy(current, vma, addr);
2302771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2303771fb4d8SLee Schermerhorn 		goto out;
2304771fb4d8SLee Schermerhorn 
2305771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2306771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2307771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2308771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2309771fb4d8SLee Schermerhorn 
2310771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2311771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2312771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2313771fb4d8SLee Schermerhorn 		break;
2314771fb4d8SLee Schermerhorn 
2315771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2316771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2317771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2318771fb4d8SLee Schermerhorn 		else
2319771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2320771fb4d8SLee Schermerhorn 		break;
2321771fb4d8SLee Schermerhorn 
2322771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2323771fb4d8SLee Schermerhorn 		/*
2324771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2325771fb4d8SLee Schermerhorn 		 * Keep the page on its current node if that node is in the
2326771fb4d8SLee Schermerhorn 		 * policy nodemask; else select the nearest allowed node, if any.
2327771fb4d8SLee Schermerhorn 		 * If no node is allowed, keep the current one [!misplaced].
2328771fb4d8SLee Schermerhorn 		 */
2329771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2330771fb4d8SLee Schermerhorn 			goto out;
2331771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2332771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2333771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2334771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2335771fb4d8SLee Schermerhorn 		polnid = zone->node;
2336771fb4d8SLee Schermerhorn 		break;
2337771fb4d8SLee Schermerhorn 
2338771fb4d8SLee Schermerhorn 	default:
2339771fb4d8SLee Schermerhorn 		BUG();
2340771fb4d8SLee Schermerhorn 	}
23415606e387SMel Gorman 
23425606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2343e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
2344e42c8ff2SMel Gorman 		int last_nid;
2345e42c8ff2SMel Gorman 
23465606e387SMel Gorman 		polnid = numa_node_id();
23475606e387SMel Gorman 
2348e42c8ff2SMel Gorman 		/*
2349e42c8ff2SMel Gorman 		 * Multi-stage node selection is used in conjunction
2350e42c8ff2SMel Gorman 		 * with a periodic migration fault to build a temporal
2351e42c8ff2SMel Gorman 		 * task<->page relation. By using a two-stage filter we
2352e42c8ff2SMel Gorman 		 * remove short/unlikely relations.
2353e42c8ff2SMel Gorman 		 *
2354e42c8ff2SMel Gorman 		 * Using P(p) ~ n_p / n_t as per frequentist
2355e42c8ff2SMel Gorman 		 * probability, we can equate a task's usage of a
2356e42c8ff2SMel Gorman 		 * particular page (n_p), out of the total usage of
2357e42c8ff2SMel Gorman 		 * that page (n_t) in a given time-span, to a probability.
2358e42c8ff2SMel Gorman 		 *
2359e42c8ff2SMel Gorman 		 * Our periodic faults will sample this probability and
2360e42c8ff2SMel Gorman 		 * getting the same result twice in a row, given these
2361e42c8ff2SMel Gorman 		 * samples are fully independent, is then given by
2362e42c8ff2SMel Gorman 		 * P(p)^2, provided our sample period is sufficiently
2363e42c8ff2SMel Gorman 		 * short compared to the usage pattern.
2364e42c8ff2SMel Gorman 		 *
2365e42c8ff2SMel Gorman 		 * This quadratic squishes small probabilities, making
2366e42c8ff2SMel Gorman 		 * it less likely we act on an unlikely task<->page
2367e42c8ff2SMel Gorman 		 * relation.
2368e42c8ff2SMel Gorman 		 */
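		/*
		 * Numeric illustration (editor's note, not in the original
		 * source): if a remote task touches this page only p = 10%
		 * of the time, two consecutive identical samples occur with
		 * probability p^2 = 1%, while a dominant p = 90% user still
		 * passes the filter with roughly 81% probability.
		 */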
236922b751c3SMel Gorman 		last_nid = page_nid_xchg_last(page, polnid);
2370e42c8ff2SMel Gorman 		if (last_nid != polnid)
2371e42c8ff2SMel Gorman 			goto out;
2372e42c8ff2SMel Gorman 	}
2373e42c8ff2SMel Gorman 
2374771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2375771fb4d8SLee Schermerhorn 		ret = polnid;
2376771fb4d8SLee Schermerhorn out:
2377771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2378771fb4d8SLee Schermerhorn 
2379771fb4d8SLee Schermerhorn 	return ret;
2380771fb4d8SLee Schermerhorn }
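/*
 * Illustrative sketch (editor's example): a NUMA-hinting fault handler
 * asks whether the faulting page sits on an acceptable node and, when a
 * better node comes back, tries to migrate the page there:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		(attempt migration of the page towards target_nid)
 */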
2381771fb4d8SLee Schermerhorn 
23821da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23831da177e4SLinus Torvalds {
2384140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23851da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
238663f74ca2SKOSAKI Motohiro 	sp_free(n);
23871da177e4SLinus Torvalds }
23881da177e4SLinus Torvalds 
238942288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
239042288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
239142288fe3SMel Gorman {
239242288fe3SMel Gorman 	node->start = start;
239342288fe3SMel Gorman 	node->end = end;
239442288fe3SMel Gorman 	node->policy = pol;
239542288fe3SMel Gorman }
239642288fe3SMel Gorman 
2397dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2398dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23991da177e4SLinus Torvalds {
2400869833f2SKOSAKI Motohiro 	struct sp_node *n;
2401869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24021da177e4SLinus Torvalds 
2403869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24041da177e4SLinus Torvalds 	if (!n)
24051da177e4SLinus Torvalds 		return NULL;
2406869833f2SKOSAKI Motohiro 
2407869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2408869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2409869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2410869833f2SKOSAKI Motohiro 		return NULL;
2411869833f2SKOSAKI Motohiro 	}
2412869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
241342288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2414869833f2SKOSAKI Motohiro 
24151da177e4SLinus Torvalds 	return n;
24161da177e4SLinus Torvalds }
24171da177e4SLinus Torvalds 
24181da177e4SLinus Torvalds /* Replace a policy range. */
24191da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
24201da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
24211da177e4SLinus Torvalds {
2422b22d127aSMel Gorman 	struct sp_node *n;
242342288fe3SMel Gorman 	struct sp_node *n_new = NULL;
242442288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2425b22d127aSMel Gorman 	int ret = 0;
24261da177e4SLinus Torvalds 
242742288fe3SMel Gorman restart:
242842288fe3SMel Gorman 	spin_lock(&sp->lock);
24291da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24301da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24311da177e4SLinus Torvalds 	while (n && n->start < end) {
24321da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24331da177e4SLinus Torvalds 		if (n->start >= start) {
24341da177e4SLinus Torvalds 			if (n->end <= end)
24351da177e4SLinus Torvalds 				sp_delete(sp, n);
24361da177e4SLinus Torvalds 			else
24371da177e4SLinus Torvalds 				n->start = end;
24381da177e4SLinus Torvalds 		} else {
24391da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24401da177e4SLinus Torvalds 			if (n->end > end) {
244142288fe3SMel Gorman 				if (!n_new)
244242288fe3SMel Gorman 					goto alloc_new;
244342288fe3SMel Gorman 
244442288fe3SMel Gorman 				*mpol_new = *n->policy;
244542288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24467880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24471da177e4SLinus Torvalds 				n->end = start;
24485ca39575SHillf Danton 				sp_insert(sp, n_new);
244942288fe3SMel Gorman 				n_new = NULL;
245042288fe3SMel Gorman 				mpol_new = NULL;
24511da177e4SLinus Torvalds 				break;
24521da177e4SLinus Torvalds 			} else
24531da177e4SLinus Torvalds 				n->end = start;
24541da177e4SLinus Torvalds 		}
24551da177e4SLinus Torvalds 		if (!next)
24561da177e4SLinus Torvalds 			break;
24571da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24581da177e4SLinus Torvalds 	}
24591da177e4SLinus Torvalds 	if (new)
24601da177e4SLinus Torvalds 		sp_insert(sp, new);
246142288fe3SMel Gorman 	spin_unlock(&sp->lock);
246242288fe3SMel Gorman 	ret = 0;
246342288fe3SMel Gorman 
246442288fe3SMel Gorman err_out:
246542288fe3SMel Gorman 	if (mpol_new)
246642288fe3SMel Gorman 		mpol_put(mpol_new);
246742288fe3SMel Gorman 	if (n_new)
246842288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
246942288fe3SMel Gorman 
2470b22d127aSMel Gorman 	return ret;
247142288fe3SMel Gorman 
247242288fe3SMel Gorman alloc_new:
247342288fe3SMel Gorman 	spin_unlock(&sp->lock);
247442288fe3SMel Gorman 	ret = -ENOMEM;
247542288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
247642288fe3SMel Gorman 	if (!n_new)
247742288fe3SMel Gorman 		goto err_out;
247842288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
247942288fe3SMel Gorman 	if (!mpol_new)
248042288fe3SMel Gorman 		goto err_out;
248142288fe3SMel Gorman 	goto restart;
24821da177e4SLinus Torvalds }
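/*
 * Worked example (editor's illustration): replacing [2,5) in a tree
 * holding one policy over [0,8) trims the old node to [0,2), installs a
 * pre-allocated copy of its policy over [5,8), and then inserts the new
 * node for [2,5), leaving [0,2) [2,5) [5,8).
 */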
24831da177e4SLinus Torvalds 
248471fe804bSLee Schermerhorn /**
248571fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
248671fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
248771fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
248871fe804bSLee Schermerhorn  *
248971fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
249071fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
249171fe804bSLee Schermerhorn  * This must be released on exit.
24924bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so GFP_KERNEL allocations may be used.
249371fe804bSLee Schermerhorn  */
249471fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24957339ff83SRobin Holt {
249658568d2aSMiao Xie 	int ret;
249758568d2aSMiao Xie 
249871fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
249942288fe3SMel Gorman 	spin_lock_init(&sp->lock);
25007339ff83SRobin Holt 
250171fe804bSLee Schermerhorn 	if (mpol) {
25027339ff83SRobin Holt 		struct vm_area_struct pvma;
250371fe804bSLee Schermerhorn 		struct mempolicy *new;
25044bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25057339ff83SRobin Holt 
25064bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25075c0c1654SLee Schermerhorn 			goto put_mpol;
250871fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
250971fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
251015d77835SLee Schermerhorn 		if (IS_ERR(new))
25110cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
251258568d2aSMiao Xie 
251358568d2aSMiao Xie 		task_lock(current);
25144bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
251558568d2aSMiao Xie 		task_unlock(current);
251615d77835SLee Schermerhorn 		if (ret)
25175c0c1654SLee Schermerhorn 			goto put_new;
251871fe804bSLee Schermerhorn 
251971fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
25207339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
252171fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
252271fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
252315d77835SLee Schermerhorn 
25245c0c1654SLee Schermerhorn put_new:
252571fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
25260cae3457SDan Carpenter free_scratch:
25274bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
25285c0c1654SLee Schermerhorn put_mpol:
25295c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25307339ff83SRobin Holt 	}
25317339ff83SRobin Holt }
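/*
 * Illustrative sketch (editor's example, names as used by tmpfs): inode
 * creation hands the superblock's mount mempolicy reference straight to
 * this initializer, which consumes it:
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 */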
25327339ff83SRobin Holt 
25331da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25341da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25351da177e4SLinus Torvalds {
25361da177e4SLinus Torvalds 	int err;
25371da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25381da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25391da177e4SLinus Torvalds 
2540028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25411da177e4SLinus Torvalds 		 vma->vm_pgoff,
254245c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2543028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
254400ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25451da177e4SLinus Torvalds 
25461da177e4SLinus Torvalds 	if (npol) {
25471da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25481da177e4SLinus Torvalds 		if (!new)
25491da177e4SLinus Torvalds 			return -ENOMEM;
25501da177e4SLinus Torvalds 	}
25511da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25521da177e4SLinus Torvalds 	if (err && new)
255363f74ca2SKOSAKI Motohiro 		sp_free(new);
25541da177e4SLinus Torvalds 	return err;
25551da177e4SLinus Torvalds }
25561da177e4SLinus Torvalds 
25571da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25581da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25591da177e4SLinus Torvalds {
25601da177e4SLinus Torvalds 	struct sp_node *n;
25611da177e4SLinus Torvalds 	struct rb_node *next;
25621da177e4SLinus Torvalds 
25631da177e4SLinus Torvalds 	if (!p->root.rb_node)
25641da177e4SLinus Torvalds 		return;
256542288fe3SMel Gorman 	spin_lock(&p->lock);
25661da177e4SLinus Torvalds 	next = rb_first(&p->root);
25671da177e4SLinus Torvalds 	while (next) {
25681da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25691da177e4SLinus Torvalds 		next = rb_next(&n->nd);
257063f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25711da177e4SLinus Torvalds 	}
257242288fe3SMel Gorman 	spin_unlock(&p->lock);
25731da177e4SLinus Torvalds }
25741da177e4SLinus Torvalds 
25751a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
25761a687c2eSMel Gorman static bool __initdata numabalancing_override;
25771a687c2eSMel Gorman 
25781a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25791a687c2eSMel Gorman {
25801a687c2eSMel Gorman 	bool numabalancing_default = false;
25811a687c2eSMel Gorman 
25821a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25831a687c2eSMel Gorman 		numabalancing_default = true;
25841a687c2eSMel Gorman 
25851a687c2eSMel Gorman 	if (nr_node_ids > 1 && !numabalancing_override) {
25861a687c2eSMel Gorman 		printk(KERN_INFO "Enabling automatic NUMA balancing. "
25871a687c2eSMel Gorman 			"Configure with numa_balancing= or the kernel.numa_balancing sysctl\n");
25881a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25891a687c2eSMel Gorman 	}
25901a687c2eSMel Gorman }
25911a687c2eSMel Gorman 
25921a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25931a687c2eSMel Gorman {
25941a687c2eSMel Gorman 	int ret = 0;
25951a687c2eSMel Gorman 	if (!str)
25961a687c2eSMel Gorman 		goto out;
25971a687c2eSMel Gorman 	numabalancing_override = true;
25981a687c2eSMel Gorman 
25991a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
26001a687c2eSMel Gorman 		set_numabalancing_state(true);
26011a687c2eSMel Gorman 		ret = 1;
26021a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
26031a687c2eSMel Gorman 		set_numabalancing_state(false);
26041a687c2eSMel Gorman 		ret = 1;
26051a687c2eSMel Gorman 	}
26061a687c2eSMel Gorman out:
26071a687c2eSMel Gorman 	if (!ret)
26081a687c2eSMel Gorman 		printk(KERN_WARNING "Unable to parse numa_balancing=\n");
26091a687c2eSMel Gorman 
26101a687c2eSMel Gorman 	return ret;
26111a687c2eSMel Gorman }
26121a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
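/*
 * Editor's note: given the parser above, automatic NUMA balancing can
 * be forced on or off from the kernel command line, overriding the
 * Kconfig default, e.g.:
 *
 *	linux ... numa_balancing=disable
 */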
26131a687c2eSMel Gorman #else
26141a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
26151a687c2eSMel Gorman {
26161a687c2eSMel Gorman }
26171a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26181a687c2eSMel Gorman 
26191da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
26201da177e4SLinus Torvalds void __init numa_policy_init(void)
26211da177e4SLinus Torvalds {
2622b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2623b71636e2SPaul Mundt 	unsigned long largest = 0;
2624b71636e2SPaul Mundt 	int nid, prefer = 0;
2625b71636e2SPaul Mundt 
26261da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26271da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
262820c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26291da177e4SLinus Torvalds 
26301da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26311da177e4SLinus Torvalds 				     sizeof(struct sp_node),
263220c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26331da177e4SLinus Torvalds 
26345606e387SMel Gorman 	for_each_node(nid) {
26355606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26365606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26375606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26385606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26395606e387SMel Gorman 			.v = { .preferred_node = nid, },
26405606e387SMel Gorman 		};
26415606e387SMel Gorman 	}
26425606e387SMel Gorman 
2643b71636e2SPaul Mundt 	/*
2644b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2645b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2646b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2647b71636e2SPaul Mundt 	 */
2648b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
264901f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2650b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26511da177e4SLinus Torvalds 
2652b71636e2SPaul Mundt 		/* Preserve the largest node */
2653b71636e2SPaul Mundt 		if (largest < total_pages) {
2654b71636e2SPaul Mundt 			largest = total_pages;
2655b71636e2SPaul Mundt 			prefer = nid;
2656b71636e2SPaul Mundt 		}
2657b71636e2SPaul Mundt 
2658b71636e2SPaul Mundt 		/* Interleave this node? */
2659b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2660b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2661b71636e2SPaul Mundt 	}
2662b71636e2SPaul Mundt 
2663b71636e2SPaul Mundt 	/* All too small, use the largest */
2664b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2665b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2666b71636e2SPaul Mundt 
2667028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
26681da177e4SLinus Torvalds 		printk(KERN_ERR "numa_policy_init: interleaving failed\n");
26691a687c2eSMel Gorman 
26701a687c2eSMel Gorman 	check_numabalancing_enable();
26711da177e4SLinus Torvalds }
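/*
 * Editor's note on the sizing check above: with 4 KB pages, the
 * (total_pages << PAGE_SHIFT) >= (16 << 20) test admits a node into the
 * boot-time interleave set once it has at least 4096 present pages
 * (4096 * 4 KB = 16 MB).
 */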
26721da177e4SLinus Torvalds 
26738bccd85fSChristoph Lameter /* Reset policy of current process to default */
26741da177e4SLinus Torvalds void numa_default_policy(void)
26751da177e4SLinus Torvalds {
2676028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26771da177e4SLinus Torvalds }
267868860ec1SPaul Jackson 
26794225399aSPaul Jackson /*
2680095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2681095f1fc4SLee Schermerhorn  */
2682095f1fc4SLee Schermerhorn 
2683095f1fc4SLee Schermerhorn /*
2684f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
26851a75a6c8SChristoph Lameter  */
2686345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2687345ace9cSLee Schermerhorn {
2688345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2689345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2690345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2691345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2692d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2693345ace9cSLee Schermerhorn };
26941a75a6c8SChristoph Lameter 
2695095f1fc4SLee Schermerhorn 
2696095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2697095f1fc4SLee Schermerhorn /**
2698f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2699095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
270071fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2701095f1fc4SLee Schermerhorn  *
2702095f1fc4SLee Schermerhorn  * Format of input:
2703095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2704095f1fc4SLee Schermerhorn  *
270571fe804bSLee Schermerhorn  * On success, returns 0, else 1
2706095f1fc4SLee Schermerhorn  */
2707a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2708095f1fc4SLee Schermerhorn {
270971fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2710b4652e84SLee Schermerhorn 	unsigned short mode;
2711f2a07f40SHugh Dickins 	unsigned short mode_flags;
271271fe804bSLee Schermerhorn 	nodemask_t nodes;
2713095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2714095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2715095f1fc4SLee Schermerhorn 	int err = 1;
2716095f1fc4SLee Schermerhorn 
2717095f1fc4SLee Schermerhorn 	if (nodelist) {
2718095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2719095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
272071fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2721095f1fc4SLee Schermerhorn 			goto out;
272201f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2723095f1fc4SLee Schermerhorn 			goto out;
272471fe804bSLee Schermerhorn 	} else
272571fe804bSLee Schermerhorn 		nodes_clear(nodes);
272671fe804bSLee Schermerhorn 
2727095f1fc4SLee Schermerhorn 	if (flags)
2728095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2729095f1fc4SLee Schermerhorn 
2730479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2731345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode]))
2732095f1fc4SLee Schermerhorn 			break;
2734095f1fc4SLee Schermerhorn 	}
2735a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2736095f1fc4SLee Schermerhorn 		goto out;
2737095f1fc4SLee Schermerhorn 
273871fe804bSLee Schermerhorn 	switch (mode) {
2739095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
274071fe804bSLee Schermerhorn 		/*
274171fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
274271fe804bSLee Schermerhorn 		 */
2743095f1fc4SLee Schermerhorn 		if (nodelist) {
2744095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2745095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2746095f1fc4SLee Schermerhorn 				rest++;
2747926f2ae0SKOSAKI Motohiro 			if (*rest)
2748926f2ae0SKOSAKI Motohiro 				goto out;
2749095f1fc4SLee Schermerhorn 		}
2750095f1fc4SLee Schermerhorn 		break;
2751095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2752095f1fc4SLee Schermerhorn 		/*
2753095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2754095f1fc4SLee Schermerhorn 		 */
2755095f1fc4SLee Schermerhorn 		if (!nodelist)
275601f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27573f226aa1SLee Schermerhorn 		break;
275871fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27593f226aa1SLee Schermerhorn 		/*
276071fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27613f226aa1SLee Schermerhorn 		 */
276271fe804bSLee Schermerhorn 		if (nodelist)
27633f226aa1SLee Schermerhorn 			goto out;
276471fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27653f226aa1SLee Schermerhorn 		break;
2766413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2767413b43deSRavikiran G Thirumalai 		/*
2768413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2769413b43deSRavikiran G Thirumalai 		 */
2770413b43deSRavikiran G Thirumalai 		if (!nodelist)
2771413b43deSRavikiran G Thirumalai 			err = 0;
2772413b43deSRavikiran G Thirumalai 		goto out;
2773d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
277471fe804bSLee Schermerhorn 		/*
2775d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
277671fe804bSLee Schermerhorn 		 */
2777d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2778d69b2e63SKOSAKI Motohiro 			goto out;
2779095f1fc4SLee Schermerhorn 	}
2780095f1fc4SLee Schermerhorn 
278171fe804bSLee Schermerhorn 	mode_flags = 0;
2782095f1fc4SLee Schermerhorn 	if (flags) {
2783095f1fc4SLee Schermerhorn 		/*
2784095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2785095f1fc4SLee Schermerhorn 		 * mode flags.
2786095f1fc4SLee Schermerhorn 		 */
2787095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
278871fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2789095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
279071fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2791095f1fc4SLee Schermerhorn 		else
2792926f2ae0SKOSAKI Motohiro 			goto out;
2793095f1fc4SLee Schermerhorn 	}
279471fe804bSLee Schermerhorn 
279571fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
279671fe804bSLee Schermerhorn 	if (IS_ERR(new))
2797926f2ae0SKOSAKI Motohiro 		goto out;
2798926f2ae0SKOSAKI Motohiro 
2799f2a07f40SHugh Dickins 	/*
2800f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2801f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2802f2a07f40SHugh Dickins 	 */
2803f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2804f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2805f2a07f40SHugh Dickins 	else if (nodelist)
2806f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2807f2a07f40SHugh Dickins 	else
2808f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2809f2a07f40SHugh Dickins 
2810f2a07f40SHugh Dickins 	/*
2811f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2812f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2813f2a07f40SHugh Dickins 	 */
2814e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2815f2a07f40SHugh Dickins 
2816926f2ae0SKOSAKI Motohiro 	err = 0;
281771fe804bSLee Schermerhorn 
2818095f1fc4SLee Schermerhorn out:
2819095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2820095f1fc4SLee Schermerhorn 	if (nodelist)
2821095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2822095f1fc4SLee Schermerhorn 	if (flags)
2823095f1fc4SLee Schermerhorn 		*--flags = '=';
282471fe804bSLee Schermerhorn 	if (!err)
282571fe804bSLee Schermerhorn 		*mpol = new;
2826095f1fc4SLee Schermerhorn 	return err;
2827095f1fc4SLee Schermerhorn }
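/*
 * Illustrative sketch (editor's example): parsing the tmpfs mount
 * option "interleave=static:0-3".  The buffer must be writable, since
 * the parser NUL-terminates the mode/flags substrings and restores the
 * separators before returning:
 *
 *	char buf[] = "interleave=static:0-3";
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(buf, &mpol)) {
 *		...
 *		mpol_put(mpol);
 *	}
 */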
2828095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2829095f1fc4SLee Schermerhorn 
283071fe804bSLee Schermerhorn /**
283171fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
283271fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
283371fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
283471fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
283571fe804bSLee Schermerhorn  *
28361a75a6c8SChristoph Lameter  * Convert a mempolicy into a string.
28371a75a6c8SChristoph Lameter  * Returns the number of characters in buffer (if positive)
28381a75a6c8SChristoph Lameter  * or an error (negative)
28391a75a6c8SChristoph Lameter  */
2840a7a88b23SHugh Dickins int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28411a75a6c8SChristoph Lameter {
28421a75a6c8SChristoph Lameter 	char *p = buffer;
28431a75a6c8SChristoph Lameter 	int l;
28441a75a6c8SChristoph Lameter 	nodemask_t nodes;
2845bea904d5SLee Schermerhorn 	unsigned short mode;
2846f5b087b5SDavid Rientjes 	unsigned short flags = pol ? pol->flags : 0;
28471a75a6c8SChristoph Lameter 
28482291990aSLee Schermerhorn 	/*
28492291990aSLee Schermerhorn 	 * Sanity check:  room for longest mode, flag and some nodes
28502291990aSLee Schermerhorn 	 */
28512291990aSLee Schermerhorn 	VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
28522291990aSLee Schermerhorn 
2853bea904d5SLee Schermerhorn 	if (!pol || pol == &default_policy)
2854bea904d5SLee Schermerhorn 		mode = MPOL_DEFAULT;
2855bea904d5SLee Schermerhorn 	else
2856bea904d5SLee Schermerhorn 		mode = pol->mode;
2857bea904d5SLee Schermerhorn 
28581a75a6c8SChristoph Lameter 	switch (mode) {
28591a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28601a75a6c8SChristoph Lameter 		nodes_clear(nodes);
28611a75a6c8SChristoph Lameter 		break;
28621a75a6c8SChristoph Lameter 
28631a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
28641a75a6c8SChristoph Lameter 		nodes_clear(nodes);
2865fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2866f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
286753f2556bSLee Schermerhorn 		else
2868fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28691a75a6c8SChristoph Lameter 		break;
28701a75a6c8SChristoph Lameter 
28711a75a6c8SChristoph Lameter 	case MPOL_BIND:
287219770b32SMel Gorman 		/* Fall through */
28731a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28741a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28751a75a6c8SChristoph Lameter 		break;
28761a75a6c8SChristoph Lameter 
28771a75a6c8SChristoph Lameter 	default:
287880de7c31SDave Jones 		return -EINVAL;
28791a75a6c8SChristoph Lameter 	}
28801a75a6c8SChristoph Lameter 
2881345ace9cSLee Schermerhorn 	l = strlen(policy_modes[mode]);
28821a75a6c8SChristoph Lameter 	if (buffer + maxlen < p + l + 1)
28831a75a6c8SChristoph Lameter 		return -ENOSPC;
28841a75a6c8SChristoph Lameter 
2885345ace9cSLee Schermerhorn 	strcpy(p, policy_modes[mode]);
28861a75a6c8SChristoph Lameter 	p += l;
28871a75a6c8SChristoph Lameter 
2888fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2889f5b087b5SDavid Rientjes 		if (buffer + maxlen < p + 2)
2890f5b087b5SDavid Rientjes 			return -ENOSPC;
2891f5b087b5SDavid Rientjes 		*p++ = '=';
2892f5b087b5SDavid Rientjes 
28932291990aSLee Schermerhorn 		/*
28942291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28952291990aSLee Schermerhorn 		 */
2896f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28972291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28982291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28992291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2900f5b087b5SDavid Rientjes 	}
2901f5b087b5SDavid Rientjes 
29021a75a6c8SChristoph Lameter 	if (!nodes_empty(nodes)) {
29031a75a6c8SChristoph Lameter 		if (buffer + maxlen < p + 2)
29041a75a6c8SChristoph Lameter 			return -ENOSPC;
2905095f1fc4SLee Schermerhorn 		*p++ = ':';
29061a75a6c8SChristoph Lameter 		p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
29071a75a6c8SChristoph Lameter 	}
29081a75a6c8SChristoph Lameter 	return p - buffer;
29091a75a6c8SChristoph Lameter }
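/*
 * Illustrative sketch (editor's example): formatting a policy back into
 * the same syntax mpol_parse_str() accepts; a static interleave over
 * nodes 0-3 would come out as "interleave=static:0-3":
 *
 *	char buf[64];
 *
 *	if (mpol_to_str(buf, sizeof(buf), pol) > 0)
 *		pr_debug("mempolicy: %s\n", buf);
 */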
2910