/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non-default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
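
/*
 * For orientation, a minimal (untested) userspace sketch of the
 * interfaces this file backs, using the raw set_mempolicy(2) and
 * mbind(2) wrappers declared in libnuma's <numaif.h>.  The node
 * numbers are arbitrary example values, not anything this file
 * mandates:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes = 1UL << 0;		// node 0 only
 *
 *	// process policy: bind all future allocations to node 0
 *	set_mempolicy(MPOL_BIND, &nodes, sizeof(nodes) * 8);
 *
 *	// VMA policy: interleave one mapping over nodes 0 and 1
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	nodes = (1UL << 0) | (1UL << 1);
 *	mbind(p, 1 << 20, MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8, 0);
 */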

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always graceful about that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock to protect task->mempolicy,
	 * the write-side task will rebind task->mempolicy in two steps.
	 * The first step sets all the newly allowed nodes, and the second
	 * step clears all the disallowed nodes. This way we never pass
	 * through a state with no node left to allocate a page from.
	 * If the read side holds a lock protecting task->mempolicy, we
	 * rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
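
/*
 * Rough worked example of the fold/onto remap above (semantics per
 * nodes_fold()/nodes_onto(); the concrete node numbers are made up):
 * with rel = {4,6} (weight 2) and orig = {0,3}, nodes_fold() maps each
 * bit b of orig to b % 2, giving tmp = {0,1}; nodes_onto() then maps
 * bit m of tmp to the m-th set bit of rel, giving ret = {4,6}.  So a
 * MPOL_F_RELATIVE_NODES mask is interpreted relative to whichever nodes
 * are currently allowed, rather than as absolute node numbers.
 */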

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears
 * all the disallowed nodes. This way we never pass through a state
 * with no node left to allocate a page from.
 * If the read side holds a lock protecting task->mempolicy, we rebind
 * directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}
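
/*
 * Sketch of the two-step rebind on a concrete (made-up) case: a task
 * interleaving over nodes {0,1} is moved by cpuset to mems {2,3}.
 * STEP1 computes the remap and ORs it in, so pol->v.nodes briefly
 * becomes {0,1,2,3}; any allocation racing with the rebind still finds
 * a usable node.  STEP2 then drops the old nodes, leaving {2,3}.  A
 * caller holding the appropriate lock gets the same net effect from a
 * single MPOL_REBIND_ONCE call.
 */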

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through the pages, checking whether each one satisfies the given
 * conditions, and move those that do to the pagelist.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	if ((flags & MPOL_MF_STRICT) ||
	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
	     vma_migratable(vma)))
		/* queue pages from current vma */
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on the set of nodes (determined by
 * @nodes and @flags), they are isolated and queued onto the pagelist
 * passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				  vma->anon_vma, vma->vm_file, pgoff,
				  new_pol);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
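
/*
 * Shape of the loop above on a made-up layout: with one vma covering
 * [0x1000, 0x5000) and a new policy applied to [0x2000, 0x4000), the
 * first split_vma() cuts at 0x2000 and the second at 0x4000, so only
 * the middle vma receives the new policy.  If a neighbour already
 * carries an equal policy, vma_merge() instead coalesces with it,
 * keeping the vma list from fragmenting into policy-only splits.
 */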

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
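
/*
 * From userspace the query above is reached via get_mempolicy(2).  A
 * minimal (untested) sketch using the wrapper from libnuma's
 * <numaif.h>; `p` stands for any address in the caller's mapping:
 *
 *	#include <numaif.h>
 *
 *	int node;
 *	// MPOL_F_NODE | MPOL_F_ADDR: which node backs the page at p?
 *	if (get_mempolicy(&node, NULL, 0, p, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", p, node);
 */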

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}
9847e2ab150SChristoph Lameter 
9857e2ab150SChristoph Lameter /*
9867e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
9877e2ab150SChristoph Lameter  * layout as much as possible.
98839743889SChristoph Lameter  *
98939743889SChristoph Lameter  * Returns the number of page that could not be moved.
99039743889SChristoph Lameter  */
9910ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
9920ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
99339743889SChristoph Lameter {
9947e2ab150SChristoph Lameter 	int busy = 0;
9950aedadf9SChristoph Lameter 	int err;
9967e2ab150SChristoph Lameter 	nodemask_t tmp;
99739743889SChristoph Lameter 
9980aedadf9SChristoph Lameter 	err = migrate_prep();
9990aedadf9SChristoph Lameter 	if (err)
10000aedadf9SChristoph Lameter 		return err;
10010aedadf9SChristoph Lameter 
100239743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1003d4984711SChristoph Lameter 
10047e2ab150SChristoph Lameter 	/*
10057e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10067e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10077e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10087e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10097e2ab150SChristoph Lameter 	 *
10107e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10117e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10127e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10137e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10147e2ab150SChristoph Lameter 	 *
10157e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10167e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10177e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10187e2ab150SChristoph Lameter 	 *
10197e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10207e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10217e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10227e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10237e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10247e2ab150SChristoph Lameter 	 *
10257e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10267e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10277e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10287e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1029ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning from_tmp, we at least have the
10307e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10317e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10327e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10337e2ab150SChristoph Lameter 	 */
10347e2ab150SChristoph Lameter 
10350ce72d4fSAndrew Morton 	tmp = *from;
10367e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10377e2ab150SChristoph Lameter 		int s,d;
1038b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10397e2ab150SChristoph Lameter 		int dest = 0;
10407e2ab150SChristoph Lameter 
10417e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10424a5b18ccSLarry Woodman 
10434a5b18ccSLarry Woodman 			/*
10444a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10454a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10464a5b18ccSLarry Woodman 			 * threads and memory areas.
10474a5b18ccSLarry Woodman                          *
10484a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
10494a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
10504a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
10514a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10524a5b18ccSLarry Woodman 			 * mask.
10534a5b18ccSLarry Woodman 			 *
10544a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10554a5b18ccSLarry Woodman 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
10564a5b18ccSLarry Woodman 			 */
10574a5b18ccSLarry Woodman 
10580ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10590ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10604a5b18ccSLarry Woodman 				continue;
10614a5b18ccSLarry Woodman 
10620ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10637e2ab150SChristoph Lameter 			if (s == d)
10647e2ab150SChristoph Lameter 				continue;
10657e2ab150SChristoph Lameter 
10667e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10677e2ab150SChristoph Lameter 			dest = d;
10687e2ab150SChristoph Lameter 
10697e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
10707e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
10717e2ab150SChristoph Lameter 				break;
10727e2ab150SChristoph Lameter 		}
1073b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
10747e2ab150SChristoph Lameter 			break;
10757e2ab150SChristoph Lameter 
10767e2ab150SChristoph Lameter 		node_clear(source, tmp);
10777e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
10787e2ab150SChristoph Lameter 		if (err > 0)
10797e2ab150SChristoph Lameter 			busy += err;
10807e2ab150SChristoph Lameter 		if (err < 0)
10817e2ab150SChristoph Lameter 			break;
108239743889SChristoph Lameter 	}
108339743889SChristoph Lameter 	up_read(&mm->mmap_sem);
10847e2ab150SChristoph Lameter 	if (err < 0)
10857e2ab150SChristoph Lameter 		return err;
10867e2ab150SChristoph Lameter 	return busy;
1087b20a3503SChristoph Lameter 
108839743889SChristoph Lameter }
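/*
 * Illustrative sketch (an addition for exposition, not part of this
 * file): the pair-picking scan above, reduced to plain unsigned-int
 * bitmasks in userspace C.  remap() and pick_pair() are hypothetical
 * stand-ins for node_remap() and the for_each_node_mask() loop, shown
 * only to make the <source, dest> selection order concrete.
 */
#if 0
/* node_remap() rule on a 32-node universe: map s's ordinal position
 * among the set bits of 'from' to the same ordinal (mod weight) among
 * the set bits of 'to'. */
static int remap(int s, unsigned int from, unsigned int to)
{
	int ord = __builtin_popcount(from & ((1u << s) - 1));
	int w = __builtin_popcount(to);
	int d;

	if (!w || !((from >> s) & 1))
		return s;
	ord %= w;
	for (d = 0; ; d++)
		if (((to >> d) & 1) && ord-- == 0)
			return d;
}

/* One pass of the scan above: returns 1 and fills <*src, *dst> with the
 * preferred pair, ideally one whose dest has no pending outgoing pages. */
static int pick_pair(unsigned int tmp, unsigned int from, unsigned int to,
		     int *src, int *dst)
{
	int s;

	*src = -1;
	for (s = 0; s < 32; s++) {
		int d;

		if (!((tmp >> s) & 1))
			continue;
		d = remap(s, from, to);
		if (s == d)
			continue;	/* would migrate to itself */
		*src = s;
		*dst = d;
		if (!((tmp >> d) & 1))
			break;		/* empty destination: best case */
	}
	return *src != -1;
}
#endif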
108939743889SChristoph Lameter 
10903ad33b24SLee Schermerhorn /*
10913ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1092d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
10933ad33b24SLee Schermerhorn  * If it is not, search forward from there.  N.B., this assumes that the
10943ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
10953ad33b24SLee Schermerhorn  * is in virtual address order.
10963ad33b24SLee Schermerhorn  */
1097d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
109895a402c3SChristoph Lameter {
1099d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11003ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
110195a402c3SChristoph Lameter 
1102d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11033ad33b24SLee Schermerhorn 	while (vma) {
11043ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11053ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11063ad33b24SLee Schermerhorn 			break;
11073ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11083ad33b24SLee Schermerhorn 	}
11093ad33b24SLee Schermerhorn 
111011c731e8SWanpeng Li 	if (PageHuge(page)) {
1111cc81717eSMichal Hocko 		BUG_ON(!vma);
111274060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
111311c731e8SWanpeng Li 	}
111411c731e8SWanpeng Li 	/*
111511c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
111611c731e8SWanpeng Li 	 */
11173ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
111895a402c3SChristoph Lameter }
1119b20a3503SChristoph Lameter #else
1120b20a3503SChristoph Lameter 
1121b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1122b20a3503SChristoph Lameter 				unsigned long flags)
1123b20a3503SChristoph Lameter {
1124b20a3503SChristoph Lameter }
1125b20a3503SChristoph Lameter 
11260ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11270ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1128b20a3503SChristoph Lameter {
1129b20a3503SChristoph Lameter 	return -ENOSYS;
1130b20a3503SChristoph Lameter }
113195a402c3SChristoph Lameter 
1132d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
113395a402c3SChristoph Lameter {
113495a402c3SChristoph Lameter 	return NULL;
113595a402c3SChristoph Lameter }
1136b20a3503SChristoph Lameter #endif
1137b20a3503SChristoph Lameter 
1138dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1139028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1140028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11416ce3c4c0SChristoph Lameter {
11426ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11436ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11446ce3c4c0SChristoph Lameter 	unsigned long end;
11456ce3c4c0SChristoph Lameter 	int err;
11466ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11476ce3c4c0SChristoph Lameter 
1148b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11496ce3c4c0SChristoph Lameter 		return -EINVAL;
115074c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11516ce3c4c0SChristoph Lameter 		return -EPERM;
11526ce3c4c0SChristoph Lameter 
11536ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11546ce3c4c0SChristoph Lameter 		return -EINVAL;
11556ce3c4c0SChristoph Lameter 
11566ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11576ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11586ce3c4c0SChristoph Lameter 
11596ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11606ce3c4c0SChristoph Lameter 	end = start + len;
11616ce3c4c0SChristoph Lameter 
11626ce3c4c0SChristoph Lameter 	if (end < start)
11636ce3c4c0SChristoph Lameter 		return -EINVAL;
11646ce3c4c0SChristoph Lameter 	if (end == start)
11656ce3c4c0SChristoph Lameter 		return 0;
11666ce3c4c0SChristoph Lameter 
1167028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11686ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11696ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11706ce3c4c0SChristoph Lameter 
1171b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1172b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1173b24f53a0SLee Schermerhorn 
11746ce3c4c0SChristoph Lameter 	/*
11756ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
11766ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
11776ce3c4c0SChristoph Lameter 	 */
11786ce3c4c0SChristoph Lameter 	if (!new)
11796ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
11806ce3c4c0SChristoph Lameter 
1181028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1182028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
118300ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
11846ce3c4c0SChristoph Lameter 
11850aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
11860aedadf9SChristoph Lameter 
11870aedadf9SChristoph Lameter 		err = migrate_prep();
11880aedadf9SChristoph Lameter 		if (err)
1189b05ca738SKOSAKI Motohiro 			goto mpol_out;
11900aedadf9SChristoph Lameter 	}
11914bfc4495SKAMEZAWA Hiroyuki 	{
11924bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
11934bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
11946ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
119558568d2aSMiao Xie 			task_lock(current);
11964bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
119758568d2aSMiao Xie 			task_unlock(current);
11984bfc4495SKAMEZAWA Hiroyuki 			if (err)
119958568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12004bfc4495SKAMEZAWA Hiroyuki 		} else
12014bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12024bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12034bfc4495SKAMEZAWA Hiroyuki 	}
1204b05ca738SKOSAKI Motohiro 	if (err)
1205b05ca738SKOSAKI Motohiro 		goto mpol_out;
1206b05ca738SKOSAKI Motohiro 
1207d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12086ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1209d05f0cdcSHugh Dickins 	if (!err)
12109d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12117e2ab150SChristoph Lameter 
1212b24f53a0SLee Schermerhorn 	if (!err) {
1213b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1214b24f53a0SLee Schermerhorn 
1215cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1216b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1217d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1218d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1219cf608ac1SMinchan Kim 			if (nr_failed)
122074060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1221cf608ac1SMinchan Kim 		}
12226ce3c4c0SChristoph Lameter 
1223b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12246ce3c4c0SChristoph Lameter 			err = -EIO;
1225ab8a3e14SKOSAKI Motohiro 	} else
1226b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1227b20a3503SChristoph Lameter 
12286ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1229b05ca738SKOSAKI Motohiro  mpol_out:
1230f0be3d32SLee Schermerhorn 	mpol_put(new);
12316ce3c4c0SChristoph Lameter 	return err;
12326ce3c4c0SChristoph Lameter }
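/*
 * Minimal userspace sketch of the path into do_mbind() (an illustrative
 * addition, not part of this file): bind an anonymous mapping to node 0
 * and ask the kernel to move any pages already allocated elsewhere.
 * Assumes libnuma's <numaif.h> wrapper for the mbind(2) syscall; build
 * with -lnuma.
 */
#if 0
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t len = 4UL << 20;			/* a 4MB region */
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	if (mbind(p, len, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_MOVE)) {
		perror("mbind");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
#endif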
12336ce3c4c0SChristoph Lameter 
123439743889SChristoph Lameter /*
12358bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12368bccd85fSChristoph Lameter  */
12378bccd85fSChristoph Lameter 
12388bccd85fSChristoph Lameter /* Copy a node mask from user space. */
123939743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12408bccd85fSChristoph Lameter 		     unsigned long maxnode)
12418bccd85fSChristoph Lameter {
12428bccd85fSChristoph Lameter 	unsigned long k;
12438bccd85fSChristoph Lameter 	unsigned long nlongs;
12448bccd85fSChristoph Lameter 	unsigned long endmask;
12458bccd85fSChristoph Lameter 
12468bccd85fSChristoph Lameter 	--maxnode;
12478bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12488bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12498bccd85fSChristoph Lameter 		return 0;
1250a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1251636f13c1SChris Wright 		return -EINVAL;
12528bccd85fSChristoph Lameter 
12538bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12548bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12558bccd85fSChristoph Lameter 		endmask = ~0UL;
12568bccd85fSChristoph Lameter 	else
12578bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12588bccd85fSChristoph Lameter 
12598bccd85fSChristoph Lameter 	/* When the user specifies more nodes than supported, just check
12608bccd85fSChristoph Lameter 	   whether the unsupported part is all zero. */
12618bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12628bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12638bccd85fSChristoph Lameter 			return -EINVAL;
12648bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12658bccd85fSChristoph Lameter 			unsigned long t;
12668bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12678bccd85fSChristoph Lameter 				return -EFAULT;
12688bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12698bccd85fSChristoph Lameter 				if (t & endmask)
12708bccd85fSChristoph Lameter 					return -EINVAL;
12718bccd85fSChristoph Lameter 			} else if (t)
12728bccd85fSChristoph Lameter 				return -EINVAL;
12738bccd85fSChristoph Lameter 		}
12748bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
12758bccd85fSChristoph Lameter 		endmask = ~0UL;
12768bccd85fSChristoph Lameter 	}
12778bccd85fSChristoph Lameter 
12788bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
12798bccd85fSChristoph Lameter 		return -EFAULT;
12808bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
12818bccd85fSChristoph Lameter 	return 0;
12828bccd85fSChristoph Lameter }
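/*
 * Worked example of the masking above (an illustrative addition,
 * assuming 64-bit longs): a caller passing maxnode == 35 describes node
 * bits 0..33, so a single long is copied and the tail is masked off.
 */
#if 0
	unsigned long maxnode = 35 - 1;			/* after --maxnode: 34 bits */
	unsigned long nlongs  = (maxnode + 63) / 64;	/* BITS_TO_LONGS(34) == 1 */
	unsigned long endmask = (1UL << (maxnode % 64)) - 1;
							/* keeps bits 0..33 only */
#endif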
12838bccd85fSChristoph Lameter 
12848bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
12858bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
12868bccd85fSChristoph Lameter 			      nodemask_t *nodes)
12878bccd85fSChristoph Lameter {
12888bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
12898bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
12908bccd85fSChristoph Lameter 
12918bccd85fSChristoph Lameter 	if (copy > nbytes) {
12928bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
12938bccd85fSChristoph Lameter 			return -EINVAL;
12948bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
12958bccd85fSChristoph Lameter 			return -EFAULT;
12968bccd85fSChristoph Lameter 		copy = nbytes;
12978bccd85fSChristoph Lameter 	}
12988bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
12998bccd85fSChristoph Lameter }
13008bccd85fSChristoph Lameter 
1301938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1302f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1303938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13048bccd85fSChristoph Lameter {
13058bccd85fSChristoph Lameter 	nodemask_t nodes;
13068bccd85fSChristoph Lameter 	int err;
1307028fec41SDavid Rientjes 	unsigned short mode_flags;
13088bccd85fSChristoph Lameter 
1309028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1310028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1311a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1312a3b51e01SDavid Rientjes 		return -EINVAL;
13134c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13144c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13154c50bc01SDavid Rientjes 		return -EINVAL;
13168bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13178bccd85fSChristoph Lameter 	if (err)
13188bccd85fSChristoph Lameter 		return err;
1319028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13208bccd85fSChristoph Lameter }
13218bccd85fSChristoph Lameter 
13228bccd85fSChristoph Lameter /* Set the process memory policy */
132323c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1324938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13258bccd85fSChristoph Lameter {
13268bccd85fSChristoph Lameter 	int err;
13278bccd85fSChristoph Lameter 	nodemask_t nodes;
1328028fec41SDavid Rientjes 	unsigned short flags;
13298bccd85fSChristoph Lameter 
1330028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1331028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1332028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13338bccd85fSChristoph Lameter 		return -EINVAL;
13344c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13354c50bc01SDavid Rientjes 		return -EINVAL;
13368bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13378bccd85fSChristoph Lameter 	if (err)
13388bccd85fSChristoph Lameter 		return err;
1339028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13408bccd85fSChristoph Lameter }
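/*
 * Userspace sketch of the syscall above (an illustrative addition,
 * using libnuma's <numaif.h> wrapper): interleave all future
 * allocations of the calling task across nodes 0 and 1.
 */
#if 0
#include <numaif.h>
#include <stdio.h>

static void interleave_self(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");
}
#endif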
13418bccd85fSChristoph Lameter 
1342938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1343938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1344938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
134539743889SChristoph Lameter {
1346c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1347596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
134839743889SChristoph Lameter 	struct task_struct *task;
134939743889SChristoph Lameter 	nodemask_t task_nodes;
135039743889SChristoph Lameter 	int err;
1351596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1352596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1353596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
135439743889SChristoph Lameter 
1355596d7cfaSKOSAKI Motohiro 	if (!scratch)
1356596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
135739743889SChristoph Lameter 
1358596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1359596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1360596d7cfaSKOSAKI Motohiro 
1361596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
136239743889SChristoph Lameter 	if (err)
1363596d7cfaSKOSAKI Motohiro 		goto out;
1364596d7cfaSKOSAKI Motohiro 
1365596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1366596d7cfaSKOSAKI Motohiro 	if (err)
1367596d7cfaSKOSAKI Motohiro 		goto out;
136839743889SChristoph Lameter 
136939743889SChristoph Lameter 	/* Find the mm_struct */
137055cfaa3cSZeng Zhaoming 	rcu_read_lock();
1371228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
137239743889SChristoph Lameter 	if (!task) {
137355cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1374596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1375596d7cfaSKOSAKI Motohiro 		goto out;
137639743889SChristoph Lameter 	}
13773268c63eSChristoph Lameter 	get_task_struct(task);
137839743889SChristoph Lameter 
1379596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
138039743889SChristoph Lameter 
138139743889SChristoph Lameter 	/*
138239743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
138339743889SChristoph Lameter 	 * process. The right exists if the process has administrative
13847f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
138539743889SChristoph Lameter 	 * userid as the target process.
138639743889SChristoph Lameter 	 */
1387c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1388b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1389b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
139074c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1391c69e8d9cSDavid Howells 		rcu_read_unlock();
139239743889SChristoph Lameter 		err = -EPERM;
13933268c63eSChristoph Lameter 		goto out_put;
139439743889SChristoph Lameter 	}
1395c69e8d9cSDavid Howells 	rcu_read_unlock();
139639743889SChristoph Lameter 
139739743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
139839743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1399596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
140039743889SChristoph Lameter 		err = -EPERM;
14013268c63eSChristoph Lameter 		goto out_put;
140239743889SChristoph Lameter 	}
140339743889SChristoph Lameter 
140401f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14053b42d28bSChristoph Lameter 		err = -EINVAL;
14063268c63eSChristoph Lameter 		goto out_put;
14073b42d28bSChristoph Lameter 	}
14083b42d28bSChristoph Lameter 
140986c3a764SDavid Quigley 	err = security_task_movememory(task);
141086c3a764SDavid Quigley 	if (err)
14113268c63eSChristoph Lameter 		goto out_put;
141286c3a764SDavid Quigley 
14133268c63eSChristoph Lameter 	mm = get_task_mm(task);
14143268c63eSChristoph Lameter 	put_task_struct(task);
1415f2a9ef88SSasha Levin 
1416f2a9ef88SSasha Levin 	if (!mm) {
1417f2a9ef88SSasha Levin 		err = -EINVAL;
1418f2a9ef88SSasha Levin 		goto out;
1419f2a9ef88SSasha Levin 	}
1420f2a9ef88SSasha Levin 
1421596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
142274c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14233268c63eSChristoph Lameter 
142439743889SChristoph Lameter 	mmput(mm);
14253268c63eSChristoph Lameter out:
1426596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1427596d7cfaSKOSAKI Motohiro 
142839743889SChristoph Lameter 	return err;
14293268c63eSChristoph Lameter 
14303268c63eSChristoph Lameter out_put:
14313268c63eSChristoph Lameter 	put_task_struct(task);
14323268c63eSChristoph Lameter 	goto out;
14333268c63eSChristoph Lameter 
143439743889SChristoph Lameter }
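/*
 * Userspace sketch of the syscall above (an illustrative addition,
 * using libnuma's <numaif.h> wrapper): move a target task's pages from
 * node 0 to node 1, subject to the permission checks implemented above.
 */
#if 0
#include <numaif.h>
#include <sys/types.h>
#include <stdio.h>

static long move_task_pages(pid_t pid)
{
	unsigned long old_nodes = 1UL << 0;	/* migrate from node 0 */
	unsigned long new_nodes = 1UL << 1;	/* ... to node 1 */
	long ret = migrate_pages(pid, sizeof(old_nodes) * 8,
				 &old_nodes, &new_nodes);

	if (ret < 0)
		perror("migrate_pages");
	/* ret >= 0 is the number of pages that could not be moved */
	return ret;
}
#endif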
143539743889SChristoph Lameter 
143639743889SChristoph Lameter 
14378bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1438938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1439938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1440938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14418bccd85fSChristoph Lameter {
1442dbcb0f19SAdrian Bunk 	int err;
1443dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14448bccd85fSChristoph Lameter 	nodemask_t nodes;
14458bccd85fSChristoph Lameter 
14468bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14478bccd85fSChristoph Lameter 		return -EINVAL;
14488bccd85fSChristoph Lameter 
14498bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14508bccd85fSChristoph Lameter 
14518bccd85fSChristoph Lameter 	if (err)
14528bccd85fSChristoph Lameter 		return err;
14538bccd85fSChristoph Lameter 
14548bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
14558bccd85fSChristoph Lameter 		return -EFAULT;
14568bccd85fSChristoph Lameter 
14578bccd85fSChristoph Lameter 	if (nmask)
14588bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
14598bccd85fSChristoph Lameter 
14608bccd85fSChristoph Lameter 	return err;
14618bccd85fSChristoph Lameter }
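/*
 * Userspace sketch of the syscall above (an illustrative addition,
 * using libnuma's <numaif.h> wrapper): query the calling task's policy
 * mode and nodemask.  Note the check above: when a nodemask is
 * requested, maxnode must cover the kernel's MAX_NUMNODES.
 */
#if 0
#include <numaif.h>
#include <stdio.h>

static void show_policy(void)
{
	int mode;
	unsigned long nodemask[16] = { 0 };	/* room for 1024 node bits */

	if (get_mempolicy(&mode, nodemask, sizeof(nodemask) * 8, NULL, 0))
		perror("get_mempolicy");
	else
		printf("mode=%d, first mask word=%#lx\n", mode, nodemask[0]);
}
#endif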
14628bccd85fSChristoph Lameter 
14631da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14641da177e4SLinus Torvalds 
1465c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1466c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1467c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1468c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
14691da177e4SLinus Torvalds {
14701da177e4SLinus Torvalds 	long err;
14711da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14721da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
14731da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14761da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14771da177e4SLinus Torvalds 
14781da177e4SLinus Torvalds 	if (nmask)
14791da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
14821da177e4SLinus Torvalds 
14831da177e4SLinus Torvalds 	if (!err && nmask) {
14842bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
14852bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
14862bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
14871da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
14881da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
14891da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
14901da177e4SLinus Torvalds 	}
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds 	return err;
14931da177e4SLinus Torvalds }
14941da177e4SLinus Torvalds 
1495c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1496c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
14971da177e4SLinus Torvalds {
14981da177e4SLinus Torvalds 	long err = 0;
14991da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15001da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15011da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15021da177e4SLinus Torvalds 
15031da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15041da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15051da177e4SLinus Torvalds 
15061da177e4SLinus Torvalds 	if (nmask) {
15071da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15081da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15091da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15101da177e4SLinus Torvalds 	}
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds 	if (err)
15131da177e4SLinus Torvalds 		return -EFAULT;
15141da177e4SLinus Torvalds 
15151da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15161da177e4SLinus Torvalds }
15171da177e4SLinus Torvalds 
1518c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1519c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1520c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15211da177e4SLinus Torvalds {
15221da177e4SLinus Torvalds 	long err = 0;
15231da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15241da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1525dfcd3c0dSAndi Kleen 	nodemask_t bm;
15261da177e4SLinus Torvalds 
15271da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15281da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds 	if (nmask) {
1531dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15321da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1533dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15341da177e4SLinus Torvalds 	}
15351da177e4SLinus Torvalds 
15361da177e4SLinus Torvalds 	if (err)
15371da177e4SLinus Torvalds 		return -EFAULT;
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15401da177e4SLinus Torvalds }
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds #endif
15431da177e4SLinus Torvalds 
154474d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
154574d2c3a0SOleg Nesterov 						unsigned long addr)
15461da177e4SLinus Torvalds {
15478d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15481da177e4SLinus Torvalds 
15491da177e4SLinus Torvalds 	if (vma) {
1550480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
15518d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
155200442ad0SMel Gorman 		} else if (vma->vm_policy) {
15531da177e4SLinus Torvalds 			pol = vma->vm_policy;
155400442ad0SMel Gorman 
155500442ad0SMel Gorman 			/*
155600442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
155700442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
155800442ad0SMel Gorman 			 * count on these policies which will be dropped by
155900442ad0SMel Gorman 			 * mpol_cond_put() later
156000442ad0SMel Gorman 			 */
156100442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
156200442ad0SMel Gorman 				mpol_get(pol);
156300442ad0SMel Gorman 		}
15641da177e4SLinus Torvalds 	}
1565f15ca78eSOleg Nesterov 
156674d2c3a0SOleg Nesterov 	return pol;
156774d2c3a0SOleg Nesterov }
156874d2c3a0SOleg Nesterov 
156974d2c3a0SOleg Nesterov /*
1570dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
157174d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
157274d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
157374d2c3a0SOleg Nesterov  *
157474d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1575dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
157674d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
157774d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
157874d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
157974d2c3a0SOleg Nesterov  * extra reference for shared policies.
158074d2c3a0SOleg Nesterov  */
1581dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1582dd6eecb9SOleg Nesterov 						unsigned long addr)
158374d2c3a0SOleg Nesterov {
158474d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
158574d2c3a0SOleg Nesterov 
15868d90274bSOleg Nesterov 	if (!pol)
1587dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
15888d90274bSOleg Nesterov 
15891da177e4SLinus Torvalds 	return pol;
15901da177e4SLinus Torvalds }
15911da177e4SLinus Torvalds 
15926b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1593fc314724SMel Gorman {
15946b6482bbSOleg Nesterov 	struct mempolicy *pol;
1595f15ca78eSOleg Nesterov 
1596fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1597fc314724SMel Gorman 		bool ret = false;
1598fc314724SMel Gorman 
1599fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1600fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1601fc314724SMel Gorman 			ret = true;
1602fc314724SMel Gorman 		mpol_cond_put(pol);
1603fc314724SMel Gorman 
1604fc314724SMel Gorman 		return ret;
16058d90274bSOleg Nesterov 	}
16068d90274bSOleg Nesterov 
1607fc314724SMel Gorman 	pol = vma->vm_policy;
16088d90274bSOleg Nesterov 	if (!pol)
16096b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1610fc314724SMel Gorman 
1611fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1612fc314724SMel Gorman }
1613fc314724SMel Gorman 
1614d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1615d3eb1570SLai Jiangshan {
1616d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1617d3eb1570SLai Jiangshan 
1618d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1619d3eb1570SLai Jiangshan 
1620d3eb1570SLai Jiangshan 	/*
1621d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1622d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1623d3eb1570SLai Jiangshan 	 *
1624d3eb1570SLai Jiangshan 	 * policy->v.nodes has already been intersected with node_states[N_MEMORY],
1625d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1626d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1627d3eb1570SLai Jiangshan 	 */
1628d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1629d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1630d3eb1570SLai Jiangshan 
1631d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1632d3eb1570SLai Jiangshan }
1633d3eb1570SLai Jiangshan 
163452cd3b07SLee Schermerhorn /*
163552cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
163652cd3b07SLee Schermerhorn  * page allocation
163752cd3b07SLee Schermerhorn  */
163852cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
163919770b32SMel Gorman {
164019770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
164145c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1642d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
164319770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
164419770b32SMel Gorman 		return &policy->v.nodes;
164519770b32SMel Gorman 
164619770b32SMel Gorman 	return NULL;
164719770b32SMel Gorman }
164819770b32SMel Gorman 
164952cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
16502f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
16512f5f9486SAndi Kleen 	int nd)
16521da177e4SLinus Torvalds {
165345c4745aSLee Schermerhorn 	switch (policy->mode) {
16541da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1655fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
16561da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
16571da177e4SLinus Torvalds 		break;
16581da177e4SLinus Torvalds 	case MPOL_BIND:
165919770b32SMel Gorman 		/*
166052cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
166152cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
16626eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
166352cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
166419770b32SMel Gorman 		 */
166519770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
166619770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
166719770b32SMel Gorman 			nd = first_node(policy->v.nodes);
166819770b32SMel Gorman 		break;
16691da177e4SLinus Torvalds 	default:
16701da177e4SLinus Torvalds 		BUG();
16711da177e4SLinus Torvalds 	}
16720e88460dSMel Gorman 	return node_zonelist(nd, gfp);
16731da177e4SLinus Torvalds }
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
16761da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
16771da177e4SLinus Torvalds {
16781da177e4SLinus Torvalds 	unsigned nid, next;
16791da177e4SLinus Torvalds 	struct task_struct *me = current;
16801da177e4SLinus Torvalds 
16811da177e4SLinus Torvalds 	nid = me->il_next;
1682dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
16831da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1684dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1685f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
16861da177e4SLinus Torvalds 		me->il_next = next;
16871da177e4SLinus Torvalds 	return nid;
16881da177e4SLinus Torvalds }
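/*
 * The round-robin step above, sketched on a plain bitmask (an
 * illustrative addition in userspace C; next_bit() is a hypothetical
 * stand-in for next_node(), and a non-empty mask is assumed).  With
 * nodes {1,3} and il_next == 1, successive calls return 1, 3, 1, 3, ...
 */
#if 0
static int next_bit(int n, unsigned int mask)
{
	for (n++; n < 32; n++)
		if ((mask >> n) & 1)
			return n;
	return 32;			/* plays the role of MAX_NUMNODES */
}

static int il_next = 1;

static int interleave_step(unsigned int mask)	/* e.g. mask = 0xa */
{
	int nid = il_next;
	int next = next_bit(nid, mask);

	if (next >= 32)			/* wrap to the first set bit */
		next = __builtin_ctz(mask);
	il_next = next;
	return nid;
}
#endif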
16891da177e4SLinus Torvalds 
1690dc85da15SChristoph Lameter /*
1691dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1692dc85da15SChristoph Lameter  * next slab entry.
1693dc85da15SChristoph Lameter  */
16942a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1695dc85da15SChristoph Lameter {
1696e7b691b0SAndi Kleen 	struct mempolicy *policy;
16972a389610SDavid Rientjes 	int node = numa_mem_id();
1698e7b691b0SAndi Kleen 
1699e7b691b0SAndi Kleen 	if (in_interrupt())
17002a389610SDavid Rientjes 		return node;
1701e7b691b0SAndi Kleen 
1702e7b691b0SAndi Kleen 	policy = current->mempolicy;
1703fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17042a389610SDavid Rientjes 		return node;
1705765c4507SChristoph Lameter 
1706bea904d5SLee Schermerhorn 	switch (policy->mode) {
1707bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1708fc36b8d3SLee Schermerhorn 		/*
1709fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1710fc36b8d3SLee Schermerhorn 		 */
1711bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1712bea904d5SLee Schermerhorn 
1713dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1714dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1715dc85da15SChristoph Lameter 
1716dd1a239fSMel Gorman 	case MPOL_BIND: {
1717dc85da15SChristoph Lameter 		/*
1718dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1719dc85da15SChristoph Lameter 		 * first node.
1720dc85da15SChristoph Lameter 		 */
172119770b32SMel Gorman 		struct zonelist *zonelist;
172219770b32SMel Gorman 		struct zone *zone;
172319770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
17242a389610SDavid Rientjes 		zonelist = &NODE_DATA(node)->node_zonelists[0];
172519770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
172619770b32SMel Gorman 							&policy->v.nodes,
172719770b32SMel Gorman 							&zone);
17282a389610SDavid Rientjes 		return zone ? zone->node : node;
1729dd1a239fSMel Gorman 	}
1730dc85da15SChristoph Lameter 
1731dc85da15SChristoph Lameter 	default:
1732bea904d5SLee Schermerhorn 		BUG();
1733dc85da15SChristoph Lameter 	}
1734dc85da15SChristoph Lameter }
1735dc85da15SChristoph Lameter 
17361da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
17371da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
17381da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
17391da177e4SLinus Torvalds {
1740dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1741f5b087b5SDavid Rientjes 	unsigned target;
17421da177e4SLinus Torvalds 	int c;
1743b76ac7e7SJianguo Wu 	int nid = NUMA_NO_NODE;
17441da177e4SLinus Torvalds 
1745f5b087b5SDavid Rientjes 	if (!nnodes)
1746f5b087b5SDavid Rientjes 		return numa_node_id();
1747f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
17481da177e4SLinus Torvalds 	c = 0;
17491da177e4SLinus Torvalds 	do {
1750dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17511da177e4SLinus Torvalds 		c++;
17521da177e4SLinus Torvalds 	} while (c <= target);
17531da177e4SLinus Torvalds 	return nid;
17541da177e4SLinus Torvalds }
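/*
 * Worked example of the modulo walk above (an illustrative addition):
 * with pol->v.nodes = {2,5,7} (nnodes == 3) and off == 10, target is
 * 10 % 3 == 1, so the do/while lands on the second set bit, node 5.
 */
#if 0
	unsigned int nodes = (1u << 2) | (1u << 5) | (1u << 7);
	unsigned int target = 10 % 3;		/* == 1 */
	int c = 0, nid = -1;

	do {
		nid = next_bit(nid, nodes);	/* next_bit() as sketched earlier */
		c++;
	} while (c <= target);
	/* nid == 5 */
#endif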
17551da177e4SLinus Torvalds 
17565da7ca86SChristoph Lameter /* Determine a node number for interleave */
17575da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17585da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17595da7ca86SChristoph Lameter {
17605da7ca86SChristoph Lameter 	if (vma) {
17615da7ca86SChristoph Lameter 		unsigned long off;
17625da7ca86SChristoph Lameter 
17633b98b087SNishanth Aravamudan 		/*
17643b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17653b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17663b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17673b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17683b98b087SNishanth Aravamudan 		 * a useful offset.
17693b98b087SNishanth Aravamudan 		 */
17703b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17713b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17725da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
17735da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
17745da7ca86SChristoph Lameter 	} else
17755da7ca86SChristoph Lameter 		return interleave_nodes(pol);
17765da7ca86SChristoph Lameter }
17775da7ca86SChristoph Lameter 
1778778d3b0fSMichal Hocko /*
1779778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1780b76ac7e7SJianguo Wu  * (returns NUMA_NO_NODE if nodemask is empty)
1781778d3b0fSMichal Hocko  */
1782778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1783778d3b0fSMichal Hocko {
1784b76ac7e7SJianguo Wu 	int w, bit = NUMA_NO_NODE;
1785778d3b0fSMichal Hocko 
1786778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1787778d3b0fSMichal Hocko 	if (w)
1788778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1789778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1790778d3b0fSMichal Hocko 	return bit;
1791778d3b0fSMichal Hocko }
1792778d3b0fSMichal Hocko 
179300ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1794480eccf9SLee Schermerhorn /*
1795480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1796b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1797b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1798b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1799b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1800b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1801480eccf9SLee Schermerhorn  *
180252cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
180352cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
180452cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
180552cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1806c0ff7453SMiao Xie  *
1807d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1808480eccf9SLee Schermerhorn  */
1809396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
181019770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
181119770b32SMel Gorman 				nodemask_t **nodemask)
18125da7ca86SChristoph Lameter {
1813480eccf9SLee Schermerhorn 	struct zonelist *zl;
18145da7ca86SChristoph Lameter 
1815dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
181619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18175da7ca86SChristoph Lameter 
181852cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
181952cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1820a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
182152cd3b07SLee Schermerhorn 	} else {
18222f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
182352cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
182452cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1825480eccf9SLee Schermerhorn 	}
1826480eccf9SLee Schermerhorn 	return zl;
18275da7ca86SChristoph Lameter }
182806808b08SLee Schermerhorn 
182906808b08SLee Schermerhorn /*
183006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
183106808b08SLee Schermerhorn  *
183206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
183306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
183406808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
183506808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
183606808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
183706808b08SLee Schermerhorn  * of non-default mempolicy.
183806808b08SLee Schermerhorn  *
183906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
184006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
184106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
184206808b08SLee Schermerhorn  *
184306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
184406808b08SLee Schermerhorn  */
184506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
184606808b08SLee Schermerhorn {
184706808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
184806808b08SLee Schermerhorn 	int nid;
184906808b08SLee Schermerhorn 
185006808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
185106808b08SLee Schermerhorn 		return false;
185206808b08SLee Schermerhorn 
1853c0ff7453SMiao Xie 	task_lock(current);
185406808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
185506808b08SLee Schermerhorn 	switch (mempolicy->mode) {
185606808b08SLee Schermerhorn 	case MPOL_PREFERRED:
185706808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
185806808b08SLee Schermerhorn 			nid = numa_node_id();
185906808b08SLee Schermerhorn 		else
186006808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
186106808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
186206808b08SLee Schermerhorn 		break;
186306808b08SLee Schermerhorn 
186406808b08SLee Schermerhorn 	case MPOL_BIND:
186506808b08SLee Schermerhorn 		/* Fall through */
186606808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
186706808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
186806808b08SLee Schermerhorn 		break;
186906808b08SLee Schermerhorn 
187006808b08SLee Schermerhorn 	default:
187106808b08SLee Schermerhorn 		BUG();
187206808b08SLee Schermerhorn 	}
1873c0ff7453SMiao Xie 	task_unlock(current);
187406808b08SLee Schermerhorn 
187506808b08SLee Schermerhorn 	return true;
187606808b08SLee Schermerhorn }
187700ac59adSChen, Kenneth W #endif
18785da7ca86SChristoph Lameter 
18796f48d0ebSDavid Rientjes /*
18806f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18816f48d0ebSDavid Rientjes  *
18826f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18836f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18846f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18856f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
18866f48d0ebSDavid Rientjes  *
18876f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18886f48d0ebSDavid Rientjes  */
18896f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
18906f48d0ebSDavid Rientjes 					const nodemask_t *mask)
18916f48d0ebSDavid Rientjes {
18926f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
18936f48d0ebSDavid Rientjes 	bool ret = true;
18946f48d0ebSDavid Rientjes 
18956f48d0ebSDavid Rientjes 	if (!mask)
18966f48d0ebSDavid Rientjes 		return ret;
18976f48d0ebSDavid Rientjes 	task_lock(tsk);
18986f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
18996f48d0ebSDavid Rientjes 	if (!mempolicy)
19006f48d0ebSDavid Rientjes 		goto out;
19016f48d0ebSDavid Rientjes 
19026f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19036f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19046f48d0ebSDavid Rientjes 		/*
19056f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
19066f48d0ebSDavid Rientjes 		 * allocate from; they may fall back to other nodes when OOM.
19076f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19086f48d0ebSDavid Rientjes 		 * nodes in mask.
19096f48d0ebSDavid Rientjes 		 */
19106f48d0ebSDavid Rientjes 		break;
19116f48d0ebSDavid Rientjes 	case MPOL_BIND:
19126f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19136f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19146f48d0ebSDavid Rientjes 		break;
19156f48d0ebSDavid Rientjes 	default:
19166f48d0ebSDavid Rientjes 		BUG();
19176f48d0ebSDavid Rientjes 	}
19186f48d0ebSDavid Rientjes out:
19196f48d0ebSDavid Rientjes 	task_unlock(tsk);
19206f48d0ebSDavid Rientjes 	return ret;
19216f48d0ebSDavid Rientjes }
19226f48d0ebSDavid Rientjes 
19231da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19241da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1925662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1926662f3a0bSAndi Kleen 					unsigned nid)
19271da177e4SLinus Torvalds {
19281da177e4SLinus Torvalds 	struct zonelist *zl;
19291da177e4SLinus Torvalds 	struct page *page;
19301da177e4SLinus Torvalds 
19310e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19321da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1933dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1934ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19351da177e4SLinus Torvalds 	return page;
19361da177e4SLinus Torvalds }
19371da177e4SLinus Torvalds 
19381da177e4SLinus Torvalds /**
19390bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19401da177e4SLinus Torvalds  *
19411da177e4SLinus Torvalds  * 	@gfp:
19421da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19431da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19441da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19451da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19461da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19471da177e4SLinus Torvalds  *
19480bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19491da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19501da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1951be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
1952be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
19531da177e4SLinus Torvalds  *
19541da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19551da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19561da177e4SLinus Torvalds  *	When VMA is not NULL, the caller must hold down_read on the mmap_sem of the
19571da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
1958be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
1959be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
19601da177e4SLinus Torvalds  */
19611da177e4SLinus Torvalds struct page *
19620bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1963be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
19641da177e4SLinus Torvalds {
1965cc9a6c87SMel Gorman 	struct mempolicy *pol;
1966c0ff7453SMiao Xie 	struct page *page;
1967cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
1968be97a41bSVlastimil Babka 	struct zonelist *zl;
1969be97a41bSVlastimil Babka 	nodemask_t *nmask;
19701da177e4SLinus Torvalds 
1971cc9a6c87SMel Gorman retry_cpuset:
1972dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
1973d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
1974cc9a6c87SMel Gorman 
1975be97a41bSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage &&
1976be97a41bSVlastimil Babka 					pol->mode != MPOL_INTERLEAVE)) {
1977be97a41bSVlastimil Babka 		/*
1978be97a41bSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
1979be97a41bSVlastimil Babka 		 * allows the current node, we only try to allocate from the
1980be97a41bSVlastimil Babka 		 * current node and don't fall back to other nodes, as the
1981be97a41bSVlastimil Babka 		 * cost of remote accesses would likely offset THP benefits.
1982be97a41bSVlastimil Babka 		 *
1983be97a41bSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
1984be97a41bSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
1985be97a41bSVlastimil Babka 		 */
1986be97a41bSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
1987be97a41bSVlastimil Babka 		if (!nmask || node_isset(node, *nmask)) {
1988be97a41bSVlastimil Babka 			mpol_cond_put(pol);
19895265047aSDavid Rientjes 			page = alloc_pages_exact_node(node,
19905265047aSDavid Rientjes 						gfp | __GFP_THISNODE, order);
1991be97a41bSVlastimil Babka 			goto out;
1992be97a41bSVlastimil Babka 		}
1993be97a41bSVlastimil Babka 	}
1994be97a41bSVlastimil Babka 
1995be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
19961da177e4SLinus Torvalds 		unsigned nid;
19975da7ca86SChristoph Lameter 
19988eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
199952cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20000bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2001be97a41bSVlastimil Babka 		goto out;
20021da177e4SLinus Torvalds 	}
20031da177e4SLinus Torvalds 
2004077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
2005be97a41bSVlastimil Babka 	zl = policy_zonelist(gfp, pol, node);
2006077fcf11SAneesh Kumar K.V 	mpol_cond_put(pol);
2007be97a41bSVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2008be97a41bSVlastimil Babka out:
2009be97a41bSVlastimil Babka 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2010077fcf11SAneesh Kumar K.V 		goto retry_cpuset;
2011077fcf11SAneesh Kumar K.V 	return page;
2012077fcf11SAneesh Kumar K.V }
2013077fcf11SAneesh Kumar K.V 
20141da177e4SLinus Torvalds /**
20151da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20161da177e4SLinus Torvalds  *
20171da177e4SLinus Torvalds  *	@gfp:
20181da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20191da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20201da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20211da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20221da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20231da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20241da177e4SLinus Torvalds  *
20251da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20261da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20271da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20281da177e4SLinus Torvalds  *
2029cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20301da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20311da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20321da177e4SLinus Torvalds  */
2033dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20341da177e4SLinus Torvalds {
20358d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2036c0ff7453SMiao Xie 	struct page *page;
2037cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20381da177e4SLinus Torvalds 
20398d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20408d90274bSOleg Nesterov 		pol = get_task_policy(current);
204152cd3b07SLee Schermerhorn 
2042cc9a6c87SMel Gorman retry_cpuset:
2043d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2044cc9a6c87SMel Gorman 
204552cd3b07SLee Schermerhorn 	/*
204652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
204752cd3b07SLee Schermerhorn 	 * nor system default_policy
204852cd3b07SLee Schermerhorn 	 */
204945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2050c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2051c0ff7453SMiao Xie 	else
2052c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20535c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20545c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2055cc9a6c87SMel Gorman 
2056d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2057cc9a6c87SMel Gorman 		goto retry_cpuset;
2058cc9a6c87SMel Gorman 
2059c0ff7453SMiao Xie 	return page;
20601da177e4SLinus Torvalds }
20611da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20621da177e4SLinus Torvalds 
2063ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2064ef0855d3SOleg Nesterov {
2065ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2066ef0855d3SOleg Nesterov 
2067ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2068ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2069ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2070ef0855d3SOleg Nesterov 	return 0;
2071ef0855d3SOleg Nesterov }
2072ef0855d3SOleg Nesterov 
20734225399aSPaul Jackson /*
2074846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20754225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
20764225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20774225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20784225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2079708c1bbcSMiao Xie  *
2080708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2081708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
20824225399aSPaul Jackson  */
20834225399aSPaul Jackson 
2084846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2085846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
20861da177e4SLinus Torvalds {
20871da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
20881da177e4SLinus Torvalds 
20891da177e4SLinus Torvalds 	if (!new)
20901da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2091708c1bbcSMiao Xie 
2092708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2093708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2094708c1bbcSMiao Xie 		task_lock(current);
2095708c1bbcSMiao Xie 		*new = *old;
2096708c1bbcSMiao Xie 		task_unlock(current);
2097708c1bbcSMiao Xie 	} else
2098708c1bbcSMiao Xie 		*new = *old;
2099708c1bbcSMiao Xie 
21004225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21014225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2102708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2103708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2104708c1bbcSMiao Xie 		else
2105708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21064225399aSPaul Jackson 	}
21071da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21081da177e4SLinus Torvalds 	return new;
21091da177e4SLinus Torvalds }
21101da177e4SLinus Torvalds 
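/*
 * For reference: the fast path is an inline wrapper in
 * include/linux/mempolicy.h, roughly (paraphrased, not verbatim):
 *
 *	static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 *	{
 *		if (pol)
 *			pol = __mpol_dup(pol);
 *		return pol;
 *	}
 */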
21111da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2112fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21131da177e4SLinus Torvalds {
21141da177e4SLinus Torvalds 	if (!a || !b)
2115fcfb4dccSKOSAKI Motohiro 		return false;
211645c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2117fcfb4dccSKOSAKI Motohiro 		return false;
211819800502SBob Liu 	if (a->flags != b->flags)
2119fcfb4dccSKOSAKI Motohiro 		return false;
212019800502SBob Liu 	if (mpol_store_user_nodemask(a))
212119800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2122fcfb4dccSKOSAKI Motohiro 			return false;
212319800502SBob Liu 
212445c4745aSLee Schermerhorn 	switch (a->mode) {
212519770b32SMel Gorman 	case MPOL_BIND:
212619770b32SMel Gorman 		/* Fall through */
21271da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2128fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21291da177e4SLinus Torvalds 	case MPOL_PREFERRED:
213075719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21311da177e4SLinus Torvalds 	default:
21321da177e4SLinus Torvalds 		BUG();
2133fcfb4dccSKOSAKI Motohiro 		return false;
21341da177e4SLinus Torvalds 	}
21351da177e4SLinus Torvalds }
21361da177e4SLinus Torvalds 
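/*
 * For reference: callers reach this through an inline wrapper in
 * include/linux/mempolicy.h that short-circuits trivial cases, roughly:
 *
 *	static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 *	{
 *		if (a == b)
 *			return true;
 *		return __mpol_equal(a, b);
 *	}
 */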
21371da177e4SLinus Torvalds /*
21381da177e4SLinus Torvalds  * Shared memory backing store policy support.
21391da177e4SLinus Torvalds  *
21401da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21411da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21421da177e4SLinus Torvalds  * They are protected by the sp->lock spinlock, which should be held
21431da177e4SLinus Torvalds  * for any accesses to the tree.
21441da177e4SLinus Torvalds  */
21451da177e4SLinus Torvalds 
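/*
 * For reference, the node and root types (declared in
 * include/linux/mempolicy.h) look roughly like:
 *
 *	struct sp_node {
 *		struct rb_node		nd;
 *		unsigned long		start, end;
 *		struct mempolicy	*policy;
 *	};
 *
 *	struct shared_policy {
 *		struct rb_root		root;
 *		spinlock_t		lock;
 *	};
 *
 * [start, end) is a range of page offsets into the backing object.
 */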
21461da177e4SLinus Torvalds /* lookup first element intersecting start-end */
214742288fe3SMel Gorman /* Caller holds sp->lock */
21481da177e4SLinus Torvalds static struct sp_node *
21491da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21501da177e4SLinus Torvalds {
21511da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds 	while (n) {
21541da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21551da177e4SLinus Torvalds 
21561da177e4SLinus Torvalds 		if (start >= p->end)
21571da177e4SLinus Torvalds 			n = n->rb_right;
21581da177e4SLinus Torvalds 		else if (end <= p->start)
21591da177e4SLinus Torvalds 			n = n->rb_left;
21601da177e4SLinus Torvalds 		else
21611da177e4SLinus Torvalds 			break;
21621da177e4SLinus Torvalds 	}
21631da177e4SLinus Torvalds 	if (!n)
21641da177e4SLinus Torvalds 		return NULL;
21651da177e4SLinus Torvalds 	for (;;) {
21661da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21671da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21681da177e4SLinus Torvalds 		if (!prev)
21691da177e4SLinus Torvalds 			break;
21701da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21711da177e4SLinus Torvalds 		if (w->end <= start)
21721da177e4SLinus Torvalds 			break;
21731da177e4SLinus Torvalds 		n = prev;
21741da177e4SLinus Torvalds 	}
21751da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21761da177e4SLinus Torvalds }
21771da177e4SLinus Torvalds 
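/*
 * Worked example: with ranges [0,4) and [4,10) in the tree, a call to
 * sp_lookup(sp, 2, 6) finds an intersecting node by binary search, then
 * the backward walk returns the node for [0,4) -- the lowest-starting
 * range that still intersects [2,6).
 */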
21781da177e4SLinus Torvalds /* Insert a new shared policy into the list. */
21791da177e4SLinus Torvalds /* Caller holds sp->lock */
21801da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
21811da177e4SLinus Torvalds {
21821da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
21831da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
21841da177e4SLinus Torvalds 	struct sp_node *nd;
21851da177e4SLinus Torvalds 
21861da177e4SLinus Torvalds 	while (*p) {
21871da177e4SLinus Torvalds 		parent = *p;
21881da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
21891da177e4SLinus Torvalds 		if (new->start < nd->start)
21901da177e4SLinus Torvalds 			p = &(*p)->rb_left;
21911da177e4SLinus Torvalds 		else if (new->end > nd->end)
21921da177e4SLinus Torvalds 			p = &(*p)->rb_right;
21931da177e4SLinus Torvalds 		else
21941da177e4SLinus Torvalds 			BUG();
21951da177e4SLinus Torvalds 	}
21961da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
21971da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2198140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
219945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22001da177e4SLinus Torvalds }
22011da177e4SLinus Torvalds 
22021da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22031da177e4SLinus Torvalds struct mempolicy *
22041da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22051da177e4SLinus Torvalds {
22061da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22071da177e4SLinus Torvalds 	struct sp_node *sn;
22081da177e4SLinus Torvalds 
22091da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22101da177e4SLinus Torvalds 		return NULL;
221142288fe3SMel Gorman 	spin_lock(&sp->lock);
22121da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22131da177e4SLinus Torvalds 	if (sn) {
22141da177e4SLinus Torvalds 		mpol_get(sn->policy);
22151da177e4SLinus Torvalds 		pol = sn->policy;
22161da177e4SLinus Torvalds 	}
221742288fe3SMel Gorman 	spin_unlock(&sp->lock);
22181da177e4SLinus Torvalds 	return pol;
22191da177e4SLinus Torvalds }
22201da177e4SLinus Torvalds 
222163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
222263f74ca2SKOSAKI Motohiro {
222363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
222463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
222563f74ca2SKOSAKI Motohiro }
222663f74ca2SKOSAKI Motohiro 
2227771fb4d8SLee Schermerhorn /**
2228771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2229771fb4d8SLee Schermerhorn  *
2230b46e14acSFabian Frederick  * @page: page to be checked
2231b46e14acSFabian Frederick  * @vma: vm area where page mapped
2232b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2233771fb4d8SLee Schermerhorn  *
2234771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2235771fb4d8SLee Schermerhorn  * page's node id.
2236771fb4d8SLee Schermerhorn  *
2237771fb4d8SLee Schermerhorn  * Returns:
2238771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2239771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2240771fb4d8SLee Schermerhorn  *
2241771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2242771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2243771fb4d8SLee Schermerhorn  */
2244771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2245771fb4d8SLee Schermerhorn {
2246771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2247771fb4d8SLee Schermerhorn 	struct zone *zone;
2248771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2249771fb4d8SLee Schermerhorn 	unsigned long pgoff;
225090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
225190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2252771fb4d8SLee Schermerhorn 	int polnid = -1;
2253771fb4d8SLee Schermerhorn 	int ret = -1;
2254771fb4d8SLee Schermerhorn 
2255771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2256771fb4d8SLee Schermerhorn 
2257dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2258771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2259771fb4d8SLee Schermerhorn 		goto out;
2260771fb4d8SLee Schermerhorn 
2261771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2262771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2263771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2264771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2265771fb4d8SLee Schermerhorn 
2266771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2267771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2268771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2269771fb4d8SLee Schermerhorn 		break;
2270771fb4d8SLee Schermerhorn 
2271771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2272771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2273771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2274771fb4d8SLee Schermerhorn 		else
2275771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2276771fb4d8SLee Schermerhorn 		break;
2277771fb4d8SLee Schermerhorn 
2278771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2279771fb4d8SLee Schermerhorn 		/*
2280771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2281771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2282771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2283771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [not misplaced].
2284771fb4d8SLee Schermerhorn 		 */
2285771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2286771fb4d8SLee Schermerhorn 			goto out;
2287771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2288771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2289771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2290771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2291771fb4d8SLee Schermerhorn 		polnid = zone->node;
2292771fb4d8SLee Schermerhorn 		break;
2293771fb4d8SLee Schermerhorn 
2294771fb4d8SLee Schermerhorn 	default:
2295771fb4d8SLee Schermerhorn 		BUG();
2296771fb4d8SLee Schermerhorn 	}
22975606e387SMel Gorman 
22985606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2299e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
230090572890SPeter Zijlstra 		polnid = thisnid;
23015606e387SMel Gorman 
230210f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2303de1c9ce6SRik van Riel 			goto out;
2304de1c9ce6SRik van Riel 	}
2305e42c8ff2SMel Gorman 
2306771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2307771fb4d8SLee Schermerhorn 		ret = polnid;
2308771fb4d8SLee Schermerhorn out:
2309771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2310771fb4d8SLee Schermerhorn 
2311771fb4d8SLee Schermerhorn 	return ret;
2312771fb4d8SLee Schermerhorn }
2313771fb4d8SLee Schermerhorn 
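/*
 * A sketch of the caller's pattern (simplified from do_numa_page() in
 * mm/memory.c; reference handling and statistics omitted):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid == -1)
 *		;			// page already on the right node
 *	else if (migrate_misplaced_page(page, vma, target_nid))
 *		page_nid = target_nid;	// migrated towards target_nid
 */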
23141da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23151da177e4SLinus Torvalds {
2316140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23171da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
231863f74ca2SKOSAKI Motohiro 	sp_free(n);
23191da177e4SLinus Torvalds }
23201da177e4SLinus Torvalds 
232142288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
232242288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
232342288fe3SMel Gorman {
232442288fe3SMel Gorman 	node->start = start;
232542288fe3SMel Gorman 	node->end = end;
232642288fe3SMel Gorman 	node->policy = pol;
232742288fe3SMel Gorman }
232842288fe3SMel Gorman 
2329dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2330dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23311da177e4SLinus Torvalds {
2332869833f2SKOSAKI Motohiro 	struct sp_node *n;
2333869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23341da177e4SLinus Torvalds 
2335869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23361da177e4SLinus Torvalds 	if (!n)
23371da177e4SLinus Torvalds 		return NULL;
2338869833f2SKOSAKI Motohiro 
2339869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2340869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2341869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2342869833f2SKOSAKI Motohiro 		return NULL;
2343869833f2SKOSAKI Motohiro 	}
2344869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
234542288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2346869833f2SKOSAKI Motohiro 
23471da177e4SLinus Torvalds 	return n;
23481da177e4SLinus Torvalds }
23491da177e4SLinus Torvalds 
23501da177e4SLinus Torvalds /* Replace a policy range. */
23511da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23521da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23531da177e4SLinus Torvalds {
2354b22d127aSMel Gorman 	struct sp_node *n;
235542288fe3SMel Gorman 	struct sp_node *n_new = NULL;
235642288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2357b22d127aSMel Gorman 	int ret = 0;
23581da177e4SLinus Torvalds 
235942288fe3SMel Gorman restart:
236042288fe3SMel Gorman 	spin_lock(&sp->lock);
23611da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23621da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23631da177e4SLinus Torvalds 	while (n && n->start < end) {
23641da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23651da177e4SLinus Torvalds 		if (n->start >= start) {
23661da177e4SLinus Torvalds 			if (n->end <= end)
23671da177e4SLinus Torvalds 				sp_delete(sp, n);
23681da177e4SLinus Torvalds 			else
23691da177e4SLinus Torvalds 				n->start = end;
23701da177e4SLinus Torvalds 		} else {
23711da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
23721da177e4SLinus Torvalds 			if (n->end > end) {
237342288fe3SMel Gorman 				if (!n_new)
237442288fe3SMel Gorman 					goto alloc_new;
237542288fe3SMel Gorman 
237642288fe3SMel Gorman 				*mpol_new = *n->policy;
237742288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
23787880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
23791da177e4SLinus Torvalds 				n->end = start;
23805ca39575SHillf Danton 				sp_insert(sp, n_new);
238142288fe3SMel Gorman 				n_new = NULL;
238242288fe3SMel Gorman 				mpol_new = NULL;
23831da177e4SLinus Torvalds 				break;
23841da177e4SLinus Torvalds 			} else
23851da177e4SLinus Torvalds 				n->end = start;
23861da177e4SLinus Torvalds 		}
23871da177e4SLinus Torvalds 		if (!next)
23881da177e4SLinus Torvalds 			break;
23891da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
23901da177e4SLinus Torvalds 	}
23911da177e4SLinus Torvalds 	if (new)
23921da177e4SLinus Torvalds 		sp_insert(sp, new);
239342288fe3SMel Gorman 	spin_unlock(&sp->lock);
239442288fe3SMel Gorman 	ret = 0;
239542288fe3SMel Gorman 
239642288fe3SMel Gorman err_out:
239742288fe3SMel Gorman 	if (mpol_new)
239842288fe3SMel Gorman 		mpol_put(mpol_new);
239942288fe3SMel Gorman 	if (n_new)
240042288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
240142288fe3SMel Gorman 
2402b22d127aSMel Gorman 	return ret;
240342288fe3SMel Gorman 
240442288fe3SMel Gorman alloc_new:
240542288fe3SMel Gorman 	spin_unlock(&sp->lock);
240642288fe3SMel Gorman 	ret = -ENOMEM;
240742288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
240842288fe3SMel Gorman 	if (!n_new)
240942288fe3SMel Gorman 		goto err_out;
241042288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
241142288fe3SMel Gorman 	if (!mpol_new)
241242288fe3SMel Gorman 		goto err_out;
241342288fe3SMel Gorman 	goto restart;
24141da177e4SLinus Torvalds }
24151da177e4SLinus Torvalds 
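/*
 * Worked example: if the tree holds a single node spanning [0,10) and the
 * new range is [2,6), the old node is truncated to [0,2), a freshly
 * allocated (n_new, mpol_new) pair takes over [6,10), and the new policy
 * is inserted for [2,6).  The allocations happen with sp->lock dropped
 * (see alloc_new:), after which the lookup restarts.
 */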
241671fe804bSLee Schermerhorn /**
241771fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
241871fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
241971fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
242071fe804bSLee Schermerhorn  *
242171fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
242271fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
242371fe804bSLee Schermerhorn  * This must be released on exit.
24244bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so we can use GFP_KERNEL.
242571fe804bSLee Schermerhorn  */
242671fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24277339ff83SRobin Holt {
242858568d2aSMiao Xie 	int ret;
242958568d2aSMiao Xie 
243071fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
243142288fe3SMel Gorman 	spin_lock_init(&sp->lock);
24327339ff83SRobin Holt 
243371fe804bSLee Schermerhorn 	if (mpol) {
24347339ff83SRobin Holt 		struct vm_area_struct pvma;
243571fe804bSLee Schermerhorn 		struct mempolicy *new;
24364bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24377339ff83SRobin Holt 
24384bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24395c0c1654SLee Schermerhorn 			goto put_mpol;
244071fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
244171fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
244215d77835SLee Schermerhorn 		if (IS_ERR(new))
24430cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
244458568d2aSMiao Xie 
244558568d2aSMiao Xie 		task_lock(current);
24464bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
244758568d2aSMiao Xie 		task_unlock(current);
244815d77835SLee Schermerhorn 		if (ret)
24495c0c1654SLee Schermerhorn 			goto put_new;
245071fe804bSLee Schermerhorn 
245171fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24527339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
245371fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
245471fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
245515d77835SLee Schermerhorn 
24565c0c1654SLee Schermerhorn put_new:
245771fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24580cae3457SDan Carpenter free_scratch:
24594bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24605c0c1654SLee Schermerhorn put_mpol:
24615c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24627339ff83SRobin Holt 	}
24637339ff83SRobin Holt }
24647339ff83SRobin Holt 
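/*
 * Example caller: tmpfs hands its mount-point policy down when creating
 * an inode, roughly (cf. shmem_get_inode() in mm/shmem.c):
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 *
 * where shmem_get_sbmpol() takes the reference on the superblock mpol
 * that this function then drops.
 */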
24651da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24661da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24671da177e4SLinus Torvalds {
24681da177e4SLinus Torvalds 	int err;
24691da177e4SLinus Torvalds 	struct sp_node *new = NULL;
24701da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
24711da177e4SLinus Torvalds 
2472028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
24731da177e4SLinus Torvalds 		 vma->vm_pgoff,
247445c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2475028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
247600ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds 	if (npol) {
24791da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
24801da177e4SLinus Torvalds 		if (!new)
24811da177e4SLinus Torvalds 			return -ENOMEM;
24821da177e4SLinus Torvalds 	}
24831da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
24841da177e4SLinus Torvalds 	if (err && new)
248563f74ca2SKOSAKI Motohiro 		sp_free(new);
24861da177e4SLinus Torvalds 	return err;
24871da177e4SLinus Torvalds }
24881da177e4SLinus Torvalds 
24891da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
24901da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
24911da177e4SLinus Torvalds {
24921da177e4SLinus Torvalds 	struct sp_node *n;
24931da177e4SLinus Torvalds 	struct rb_node *next;
24941da177e4SLinus Torvalds 
24951da177e4SLinus Torvalds 	if (!p->root.rb_node)
24961da177e4SLinus Torvalds 		return;
249742288fe3SMel Gorman 	spin_lock(&p->lock);
24981da177e4SLinus Torvalds 	next = rb_first(&p->root);
24991da177e4SLinus Torvalds 	while (next) {
25001da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25011da177e4SLinus Torvalds 		next = rb_next(&n->nd);
250263f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25031da177e4SLinus Torvalds 	}
250442288fe3SMel Gorman 	spin_unlock(&p->lock);
25051da177e4SLinus Torvalds }
25061da177e4SLinus Torvalds 
25071a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2508c297663cSMel Gorman static int __initdata numabalancing_override;
25091a687c2eSMel Gorman 
25101a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25111a687c2eSMel Gorman {
25121a687c2eSMel Gorman 	bool numabalancing_default = false;
25131a687c2eSMel Gorman 
25141a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25151a687c2eSMel Gorman 		numabalancing_default = true;
25161a687c2eSMel Gorman 
2517c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2518c297663cSMel Gorman 	if (numabalancing_override)
2519c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2520c297663cSMel Gorman 
2521*b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
25224a404beaSAndrew Morton 		pr_info("%s automatic NUMA balancing. "
2523c297663cSMel Gorman 			"Configure with numa_balancing= or the "
2524c297663cSMel Gorman 			"kernel.numa_balancing sysctl\n",
2525c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25261a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25271a687c2eSMel Gorman 	}
25281a687c2eSMel Gorman }
25291a687c2eSMel Gorman 
25301a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25311a687c2eSMel Gorman {
25321a687c2eSMel Gorman 	int ret = 0;
25331a687c2eSMel Gorman 	if (!str)
25341a687c2eSMel Gorman 		goto out;
25351a687c2eSMel Gorman 
25361a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2537c297663cSMel Gorman 		numabalancing_override = 1;
25381a687c2eSMel Gorman 		ret = 1;
25391a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2540c297663cSMel Gorman 		numabalancing_override = -1;
25411a687c2eSMel Gorman 		ret = 1;
25421a687c2eSMel Gorman 	}
25431a687c2eSMel Gorman out:
25441a687c2eSMel Gorman 	if (!ret)
25454a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25461a687c2eSMel Gorman 
25471a687c2eSMel Gorman 	return ret;
25481a687c2eSMel Gorman }
25491a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
25501a687c2eSMel Gorman #else
25511a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25521a687c2eSMel Gorman {
25531a687c2eSMel Gorman }
25541a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25551a687c2eSMel Gorman 
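/*
 * Example: automatic NUMA balancing can be forced off at boot with the
 * "numa_balancing=disable" kernel parameter, or toggled at runtime via
 * the sysctl named in the message above:
 *
 *	# echo 0 > /proc/sys/kernel/numa_balancing
 */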
25561da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25571da177e4SLinus Torvalds void __init numa_policy_init(void)
25581da177e4SLinus Torvalds {
2559b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2560b71636e2SPaul Mundt 	unsigned long largest = 0;
2561b71636e2SPaul Mundt 	int nid, prefer = 0;
2562b71636e2SPaul Mundt 
25631da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25641da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
256520c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25661da177e4SLinus Torvalds 
25671da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25681da177e4SLinus Torvalds 				     sizeof(struct sp_node),
256920c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
25701da177e4SLinus Torvalds 
25715606e387SMel Gorman 	for_each_node(nid) {
25725606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
25735606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
25745606e387SMel Gorman 			.mode = MPOL_PREFERRED,
25755606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
25765606e387SMel Gorman 			.v = { .preferred_node = nid, },
25775606e387SMel Gorman 		};
25785606e387SMel Gorman 	}
25795606e387SMel Gorman 
2580b71636e2SPaul Mundt 	/*
2581b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2582b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2583b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2584b71636e2SPaul Mundt 	 */
2585b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
258601f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2587b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
25881da177e4SLinus Torvalds 
2589b71636e2SPaul Mundt 		/* Preserve the largest node */
2590b71636e2SPaul Mundt 		if (largest < total_pages) {
2591b71636e2SPaul Mundt 			largest = total_pages;
2592b71636e2SPaul Mundt 			prefer = nid;
2593b71636e2SPaul Mundt 		}
2594b71636e2SPaul Mundt 
2595b71636e2SPaul Mundt 		/* Interleave this node? */
2596b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2597b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2598b71636e2SPaul Mundt 	}
2599b71636e2SPaul Mundt 
2600b71636e2SPaul Mundt 	/* All too small, use the largest */
2601b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2602b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2603b71636e2SPaul Mundt 
2604028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2605b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26061a687c2eSMel Gorman 
26071a687c2eSMel Gorman 	check_numabalancing_enable();
26081da177e4SLinus Torvalds }
26091da177e4SLinus Torvalds 
26108bccd85fSChristoph Lameter /* Reset policy of current process to default */
26111da177e4SLinus Torvalds void numa_default_policy(void)
26121da177e4SLinus Torvalds {
2613028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26141da177e4SLinus Torvalds }
261568860ec1SPaul Jackson 
26164225399aSPaul Jackson /*
2617095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2618095f1fc4SLee Schermerhorn  */
2619095f1fc4SLee Schermerhorn 
2620095f1fc4SLee Schermerhorn /*
2621f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
26221a75a6c8SChristoph Lameter  */
2623345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2624345ace9cSLee Schermerhorn {
2625345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2626345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2627345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2628345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2629d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2630345ace9cSLee Schermerhorn };
26311a75a6c8SChristoph Lameter 
2632095f1fc4SLee Schermerhorn 
2633095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2634095f1fc4SLee Schermerhorn /**
2635f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2636095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
263771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2638095f1fc4SLee Schermerhorn  *
2639095f1fc4SLee Schermerhorn  * Format of input:
2640095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2641095f1fc4SLee Schermerhorn  *
264271fe804bSLee Schermerhorn  * On success, returns 0, else 1
2643095f1fc4SLee Schermerhorn  */
2644a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2645095f1fc4SLee Schermerhorn {
264671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2647b4652e84SLee Schermerhorn 	unsigned short mode;
2648f2a07f40SHugh Dickins 	unsigned short mode_flags;
264971fe804bSLee Schermerhorn 	nodemask_t nodes;
2650095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2651095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2652095f1fc4SLee Schermerhorn 	int err = 1;
2653095f1fc4SLee Schermerhorn 
2654095f1fc4SLee Schermerhorn 	if (nodelist) {
2655095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2656095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
265771fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2658095f1fc4SLee Schermerhorn 			goto out;
265901f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2660095f1fc4SLee Schermerhorn 			goto out;
266171fe804bSLee Schermerhorn 	} else
266271fe804bSLee Schermerhorn 		nodes_clear(nodes);
266371fe804bSLee Schermerhorn 
2664095f1fc4SLee Schermerhorn 	if (flags)
2665095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2666095f1fc4SLee Schermerhorn 
2667479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2668345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode]))
2669095f1fc4SLee Schermerhorn 			break;
2671095f1fc4SLee Schermerhorn 	}
2672a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2673095f1fc4SLee Schermerhorn 		goto out;
2674095f1fc4SLee Schermerhorn 
267571fe804bSLee Schermerhorn 	switch (mode) {
2676095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
267771fe804bSLee Schermerhorn 		/*
267871fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
267971fe804bSLee Schermerhorn 		 */
2680095f1fc4SLee Schermerhorn 		if (nodelist) {
2681095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2682095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2683095f1fc4SLee Schermerhorn 				rest++;
2684926f2ae0SKOSAKI Motohiro 			if (*rest)
2685926f2ae0SKOSAKI Motohiro 				goto out;
2686095f1fc4SLee Schermerhorn 		}
2687095f1fc4SLee Schermerhorn 		break;
2688095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2689095f1fc4SLee Schermerhorn 		/*
2690095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2691095f1fc4SLee Schermerhorn 		 */
2692095f1fc4SLee Schermerhorn 		if (!nodelist)
269301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
26943f226aa1SLee Schermerhorn 		break;
269571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
26963f226aa1SLee Schermerhorn 		/*
269771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
26983f226aa1SLee Schermerhorn 		 */
269971fe804bSLee Schermerhorn 		if (nodelist)
27003f226aa1SLee Schermerhorn 			goto out;
270171fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27023f226aa1SLee Schermerhorn 		break;
2703413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2704413b43deSRavikiran G Thirumalai 		/*
2705413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2706413b43deSRavikiran G Thirumalai 		 */
2707413b43deSRavikiran G Thirumalai 		if (!nodelist)
2708413b43deSRavikiran G Thirumalai 			err = 0;
2709413b43deSRavikiran G Thirumalai 		goto out;
2710d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
271171fe804bSLee Schermerhorn 		/*
2712d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
271371fe804bSLee Schermerhorn 		 */
2714d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2715d69b2e63SKOSAKI Motohiro 			goto out;
2716095f1fc4SLee Schermerhorn 	}
2717095f1fc4SLee Schermerhorn 
271871fe804bSLee Schermerhorn 	mode_flags = 0;
2719095f1fc4SLee Schermerhorn 	if (flags) {
2720095f1fc4SLee Schermerhorn 		/*
2721095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2722095f1fc4SLee Schermerhorn 		 * mode flags.
2723095f1fc4SLee Schermerhorn 		 */
2724095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
272571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2726095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
272771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2728095f1fc4SLee Schermerhorn 		else
2729926f2ae0SKOSAKI Motohiro 			goto out;
2730095f1fc4SLee Schermerhorn 	}
273171fe804bSLee Schermerhorn 
273271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
273371fe804bSLee Schermerhorn 	if (IS_ERR(new))
2734926f2ae0SKOSAKI Motohiro 		goto out;
2735926f2ae0SKOSAKI Motohiro 
2736f2a07f40SHugh Dickins 	/*
2737f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2738f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2739f2a07f40SHugh Dickins 	 */
2740f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2741f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2742f2a07f40SHugh Dickins 	else if (nodelist)
2743f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2744f2a07f40SHugh Dickins 	else
2745f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2746f2a07f40SHugh Dickins 
2747f2a07f40SHugh Dickins 	/*
2748f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2749f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2750f2a07f40SHugh Dickins 	 */
2751e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2752f2a07f40SHugh Dickins 
2753926f2ae0SKOSAKI Motohiro 	err = 0;
275471fe804bSLee Schermerhorn 
2755095f1fc4SLee Schermerhorn out:
2756095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2757095f1fc4SLee Schermerhorn 	if (nodelist)
2758095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2759095f1fc4SLee Schermerhorn 	if (flags)
2760095f1fc4SLee Schermerhorn 		*--flags = '=';
276171fe804bSLee Schermerhorn 	if (!err)
276271fe804bSLee Schermerhorn 		*mpol = new;
2763095f1fc4SLee Schermerhorn 	return err;
2764095f1fc4SLee Schermerhorn }
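/*
 * Example inputs, e.g. from a tmpfs "mpol=" mount option (node numbers
 * are illustrative):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED, node 1, MPOL_F_STATIC_NODES
 *	"bind=relative:0,2"	MPOL_BIND, nodes 0 and 2, MPOL_F_RELATIVE_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 *	"default"		MPOL_DEFAULT, nodelist must be empty
 */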
2765095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2766095f1fc4SLee Schermerhorn 
276771fe804bSLee Schermerhorn /**
276871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
276971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
277071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
277171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
277271fe804bSLee Schermerhorn  *
2773948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2774948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2775948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
27761a75a6c8SChristoph Lameter  */
2777948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
27781a75a6c8SChristoph Lameter {
27791a75a6c8SChristoph Lameter 	char *p = buffer;
2780948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2781948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2782948927eeSDavid Rientjes 	unsigned short flags = 0;
27831a75a6c8SChristoph Lameter 
27848790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2785bea904d5SLee Schermerhorn 		mode = pol->mode;
2786948927eeSDavid Rientjes 		flags = pol->flags;
2787948927eeSDavid Rientjes 	}
2788bea904d5SLee Schermerhorn 
27891a75a6c8SChristoph Lameter 	switch (mode) {
27901a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
27911a75a6c8SChristoph Lameter 		break;
27921a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2793fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2794f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
279553f2556bSLee Schermerhorn 		else
2796fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
27971a75a6c8SChristoph Lameter 		break;
27981a75a6c8SChristoph Lameter 	case MPOL_BIND:
27991a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28001a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28011a75a6c8SChristoph Lameter 		break;
28021a75a6c8SChristoph Lameter 	default:
2803948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2804948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2805948927eeSDavid Rientjes 		return;
28061a75a6c8SChristoph Lameter 	}
28071a75a6c8SChristoph Lameter 
2808b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28091a75a6c8SChristoph Lameter 
2810fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2811948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2812f5b087b5SDavid Rientjes 
28132291990aSLee Schermerhorn 		/*
28142291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28152291990aSLee Schermerhorn 		 */
2816f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28172291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28182291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28192291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2820f5b087b5SDavid Rientjes 	}
2821f5b087b5SDavid Rientjes 
28229e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28239e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28249e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28251a75a6c8SChristoph Lameter }
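/*
 * Example: this is how the policy string in /proc/<pid>/numa_maps gets
 * built, roughly (cf. show_numa_map() in fs/proc/task_mmu.c):
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	// e.g. buffer now holds "interleave=static:0-3"
 */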
2826