/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                and proceeding to the last. It would be better if bind
 *                truly restricted the allocation to the given memory nodes.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
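
/*
 * Illustrative userspace sketch (not part of this file): the policies
 * above are requested from user space via set_mempolicy(2) and mbind(2).
 * This assumes the set_mempolicy() wrapper and MPOL_* constants from
 * libnuma's <numaif.h>; error handling is minimal.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8))
 *			perror("set_mempolicy");
 *		return 0;
 *	}
 */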

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <linux/random.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If the read-side task has no lock protecting task->mempolicy, the
	 * write-side task rebinds task->mempolicy in two steps: first it sets
	 * all the newly allowed nodes, then it clears all the disallowed
	 * nodes. This way a reader can never observe an empty nodemask while
	 * a rebind is in progress.
	 * If we do hold a lock protecting task->mempolicy on the read side,
	 * we rebind directly.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
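
/*
 * Illustrative example (not from the original source): with
 * orig = {1,3} and rel = {4,5,6,7}, nodes_fold() first wraps orig
 * modulo nodes_weight(rel) = 4, leaving {1,3}, and nodes_onto() then
 * maps bit n to the n-th set bit of rel, so *ret becomes {5,7}.
 */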

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}
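
/*
 * Sketch of the intended call sequence (see do_set_mempolicy() below for
 * the real thing); error handling is omitted here for brevity:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	task_lock(current);
 *	mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */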

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
		 * result
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = first_node(tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}
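
/*
 * Illustrative outcomes (not from the original source), for a policy
 * created with user_nodemask = {1,3} while the cpuset allowed {0-3},
 * after the cpuset is moved to {4-7} (MPOL_REBIND_ONCE):
 *
 *	MPOL_F_STATIC_NODES:	{1,3} & {4-7} is empty, so fall back
 *				to the new mems {4-7}
 *	MPOL_F_RELATIVE_NODES:	{1,3} mapped onto {4-7} yields {5,7}
 *	default (remap):	nodes_remap() moves each node to the
 *				same position in the new mask: {5,7}
 */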

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read-side task has no lock protecting task->mempolicy, the
 * write-side task rebinds task->mempolicy in two steps: first it sets
 * all the newly allowed nodes, then it clears all the disallowed nodes.
 * This way a reader can never observe an empty nodemask while a rebind
 * is in progress.
 * If we do hold a lock protecting task->mempolicy on the read side,
 * we rebind directly.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through the pages, checking whether they satisfy the given
 * conditions, and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

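	/* Try to split a trans-huge pmd so the pte-level scan below sees base pages. */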
	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				split_huge_pmd(vma, pmd, addr);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
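		/* Anonymous THP tail page: split the compound page and rescan. */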
		if (PageTail(page) && PageAnon(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
			migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This assumes that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	if ((flags & MPOL_MF_STRICT) ||
	    ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
	     vma_migratable(vma)))
		/* queue pages from current vma */
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist
 * passed via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}
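
/*
 * Typical usage (see migrate_to_node() below): collect everything in a
 * range that sits on the "wrong" nodes, then hand the list to
 * migrate_pages():
 *
 *	LIST_HEAD(pagelist);
 *
 *	queue_pages_range(mm, start, end, &nmask,
 *			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *	if (!list_empty(&pagelist))
 *		migrate_pages(&pagelist, new_node_page, NULL, dest,
 *				MIGRATE_SYNC, MR_SYSCALL);
 */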

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
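
/*
 * Illustrative userspace sketch (not part of this file): querying which
 * node backs a given address via get_mempolicy(2). Assumes the wrapper
 * and MPOL_* constants from libnuma's <numaif.h>:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("%p is on node %d\n", addr, node);
 */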

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

10357e2ab150SChristoph Lameter 	/*
10367e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10377e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10387e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10397e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10407e2ab150SChristoph Lameter 	 *
10417e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10427e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10437e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10447e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10457e2ab150SChristoph Lameter 	 *
10467e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10477e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10487e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10497e2ab150SChristoph Lameter 	 *
10507e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10517e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10527e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10537e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10547e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10557e2ab150SChristoph Lameter 	 *
10567e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10577e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10587e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10597e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1060ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning 'tmp', we at least have the
10617e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10627e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10637e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10647e2ab150SChristoph Lameter 	 */
10657e2ab150SChristoph Lameter 
10660ce72d4fSAndrew Morton 	tmp = *from;
10677e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10687e2ab150SChristoph Lameter 		int s, d;
1069b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10707e2ab150SChristoph Lameter 		int dest = 0;
10717e2ab150SChristoph Lameter 
10727e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10734a5b18ccSLarry Woodman 
10744a5b18ccSLarry Woodman 			/*
10754a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10764a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10774a5b18ccSLarry Woodman 			 * threads and memory areas.
10784a5b18ccSLarry Woodman 			 *
10794a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
10804a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
10814a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
10824a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10834a5b18ccSLarry Woodman 			 * mask.
10844a5b18ccSLarry Woodman 			 *
10854a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10864a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10874a5b18ccSLarry Woodman 			 */
10884a5b18ccSLarry Woodman 
10890ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10900ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10914a5b18ccSLarry Woodman 				continue;
10924a5b18ccSLarry Woodman 
10930ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10947e2ab150SChristoph Lameter 			if (s == d)
10957e2ab150SChristoph Lameter 				continue;
10967e2ab150SChristoph Lameter 
10977e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10987e2ab150SChristoph Lameter 			dest = d;
10997e2ab150SChristoph Lameter 
11007e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11017e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11027e2ab150SChristoph Lameter 				break;
11037e2ab150SChristoph Lameter 		}
1104b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11057e2ab150SChristoph Lameter 			break;
11067e2ab150SChristoph Lameter 
11077e2ab150SChristoph Lameter 		node_clear(source, tmp);
11087e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11097e2ab150SChristoph Lameter 		if (err > 0)
11107e2ab150SChristoph Lameter 			busy += err;
11117e2ab150SChristoph Lameter 		if (err < 0)
11127e2ab150SChristoph Lameter 			break;
111339743889SChristoph Lameter 	}
111439743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11157e2ab150SChristoph Lameter 	if (err < 0)
11167e2ab150SChristoph Lameter 		return err;
11177e2ab150SChristoph Lameter 	return busy;
1118b20a3503SChristoph Lameter 
111939743889SChristoph Lameter }
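
/*
 * Illustrative userspace sketch (not part of this file): a minimal model of
 * the <source, dest> pair selection above, with small unsigned bitmasks
 * standing in for nodemask_t and remap() standing in for node_remap().  A
 * source node maps to the destination node at the same ordinal position,
 * modulo the destination weight.  Running it reproduces the second example
 * in the comment: [0-7] -> [3,4,5] moves only nodes 0, 1, 2, 6 and 7.
 */
#include <stdio.h>

static int weight(unsigned mask) { return __builtin_popcount(mask); }

/* ordinal position of set bit 'b' within 'mask' */
static int ordinal(unsigned mask, int b)
{
	return __builtin_popcount(mask & ((1u << b) - 1));
}

/* the n-th set bit of 'mask' */
static int nth_bit(unsigned mask, int n)
{
	for (int b = 0; b < 32; b++)
		if ((mask & (1u << b)) && n-- == 0)
			return b;
	return -1;
}

/* models node_remap(s, from, to) */
static int remap(int s, unsigned from, unsigned to)
{
	return nth_bit(to, ordinal(from, s) % weight(to));
}

int main(void)
{
	unsigned from = 0xff;			/* nodes 0-7 */
	unsigned to = (1u << 3) | (1u << 4) | (1u << 5);

	for (int s = 0; s < 8; s++) {
		/* the skip rule above: unequal weights and s already in 'to' */
		if (weight(from) != weight(to) && (to & (1u << s))) {
			printf("node %d: skipped\n", s);
			continue;
		}
		printf("node %d -> node %d\n", s, remap(s, from, to));
	}
	return 0;
}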
112039743889SChristoph Lameter 
11213ad33b24SLee Schermerhorn /*
11223ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1123d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11243ad33b24SLee Schermerhorn  * If it is not, search forward from there.  N.B., this assumes that the
11253ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11263ad33b24SLee Schermerhorn  * is in virtual address order.
11273ad33b24SLee Schermerhorn  */
1128d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
112995a402c3SChristoph Lameter {
1130d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11313ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
113295a402c3SChristoph Lameter 
1133d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11343ad33b24SLee Schermerhorn 	while (vma) {
11353ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11363ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11373ad33b24SLee Schermerhorn 			break;
11383ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11393ad33b24SLee Schermerhorn 	}
11403ad33b24SLee Schermerhorn 
114111c731e8SWanpeng Li 	if (PageHuge(page)) {
1142cc81717eSMichal Hocko 		BUG_ON(!vma);
114374060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
114411c731e8SWanpeng Li 	}
114511c731e8SWanpeng Li 	/*
114611c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
114711c731e8SWanpeng Li 	 */
11483ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
114995a402c3SChristoph Lameter }
1150b20a3503SChristoph Lameter #else
1151b20a3503SChristoph Lameter 
1152b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1153b20a3503SChristoph Lameter 				unsigned long flags)
1154b20a3503SChristoph Lameter {
1155b20a3503SChristoph Lameter }
1156b20a3503SChristoph Lameter 
11570ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11580ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1159b20a3503SChristoph Lameter {
1160b20a3503SChristoph Lameter 	return -ENOSYS;
1161b20a3503SChristoph Lameter }
116295a402c3SChristoph Lameter 
1163d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
116495a402c3SChristoph Lameter {
116595a402c3SChristoph Lameter 	return NULL;
116695a402c3SChristoph Lameter }
1167b20a3503SChristoph Lameter #endif
1168b20a3503SChristoph Lameter 
1169dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1170028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1171028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11726ce3c4c0SChristoph Lameter {
11736ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11746ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11756ce3c4c0SChristoph Lameter 	unsigned long end;
11766ce3c4c0SChristoph Lameter 	int err;
11776ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11786ce3c4c0SChristoph Lameter 
1179b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11806ce3c4c0SChristoph Lameter 		return -EINVAL;
118174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11826ce3c4c0SChristoph Lameter 		return -EPERM;
11836ce3c4c0SChristoph Lameter 
11846ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11856ce3c4c0SChristoph Lameter 		return -EINVAL;
11866ce3c4c0SChristoph Lameter 
11876ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11886ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11896ce3c4c0SChristoph Lameter 
11906ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11916ce3c4c0SChristoph Lameter 	end = start + len;
11926ce3c4c0SChristoph Lameter 
11936ce3c4c0SChristoph Lameter 	if (end < start)
11946ce3c4c0SChristoph Lameter 		return -EINVAL;
11956ce3c4c0SChristoph Lameter 	if (end == start)
11966ce3c4c0SChristoph Lameter 		return 0;
11976ce3c4c0SChristoph Lameter 
1198028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11996ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12006ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12016ce3c4c0SChristoph Lameter 
1202b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1203b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1204b24f53a0SLee Schermerhorn 
12056ce3c4c0SChristoph Lameter 	/*
12066ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operating
12076ce3c4c0SChristoph Lameter 	 * on discontiguous address spaces is okay after all.
12086ce3c4c0SChristoph Lameter 	 */
12096ce3c4c0SChristoph Lameter 	if (!new)
12106ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12116ce3c4c0SChristoph Lameter 
1212028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1213028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
121400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12156ce3c4c0SChristoph Lameter 
12160aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12170aedadf9SChristoph Lameter 
12180aedadf9SChristoph Lameter 		err = migrate_prep();
12190aedadf9SChristoph Lameter 		if (err)
1220b05ca738SKOSAKI Motohiro 			goto mpol_out;
12210aedadf9SChristoph Lameter 	}
12224bfc4495SKAMEZAWA Hiroyuki 	{
12234bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12244bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12256ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
122658568d2aSMiao Xie 			task_lock(current);
12274bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
122858568d2aSMiao Xie 			task_unlock(current);
12294bfc4495SKAMEZAWA Hiroyuki 			if (err)
123058568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12314bfc4495SKAMEZAWA Hiroyuki 		} else
12324bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12334bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12344bfc4495SKAMEZAWA Hiroyuki 	}
1235b05ca738SKOSAKI Motohiro 	if (err)
1236b05ca738SKOSAKI Motohiro 		goto mpol_out;
1237b05ca738SKOSAKI Motohiro 
1238d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12396ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1240d05f0cdcSHugh Dickins 	if (!err)
12419d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12427e2ab150SChristoph Lameter 
1243b24f53a0SLee Schermerhorn 	if (!err) {
1244b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1245b24f53a0SLee Schermerhorn 
1246cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1247b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1248d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1249d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1250cf608ac1SMinchan Kim 			if (nr_failed)
125174060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1252cf608ac1SMinchan Kim 		}
12536ce3c4c0SChristoph Lameter 
1254b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12556ce3c4c0SChristoph Lameter 			err = -EIO;
1256ab8a3e14SKOSAKI Motohiro 	} else
1257b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1258b20a3503SChristoph Lameter 
12596ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1260b05ca738SKOSAKI Motohiro  mpol_out:
1261f0be3d32SLee Schermerhorn 	mpol_put(new);
12626ce3c4c0SChristoph Lameter 	return err;
12636ce3c4c0SChristoph Lameter }
12646ce3c4c0SChristoph Lameter 
126539743889SChristoph Lameter /*
12668bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12678bccd85fSChristoph Lameter  */
12688bccd85fSChristoph Lameter 
12698bccd85fSChristoph Lameter /* Copy a node mask from user space. */
127039743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12718bccd85fSChristoph Lameter 		     unsigned long maxnode)
12728bccd85fSChristoph Lameter {
12738bccd85fSChristoph Lameter 	unsigned long k;
12748bccd85fSChristoph Lameter 	unsigned long nlongs;
12758bccd85fSChristoph Lameter 	unsigned long endmask;
12768bccd85fSChristoph Lameter 
12778bccd85fSChristoph Lameter 	--maxnode;
12788bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12798bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12808bccd85fSChristoph Lameter 		return 0;
1281a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1282636f13c1SChris Wright 		return -EINVAL;
12838bccd85fSChristoph Lameter 
12848bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12858bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12868bccd85fSChristoph Lameter 		endmask = ~0UL;
12878bccd85fSChristoph Lameter 	else
12888bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12898bccd85fSChristoph Lameter 
12908bccd85fSChristoph Lameter 	/* When the user specifies more nodes than supported, just check
12918bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
12928bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12938bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12948bccd85fSChristoph Lameter 			return -EINVAL;
12958bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12968bccd85fSChristoph Lameter 			unsigned long t;
12978bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12988bccd85fSChristoph Lameter 				return -EFAULT;
12998bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13008bccd85fSChristoph Lameter 				if (t & endmask)
13018bccd85fSChristoph Lameter 					return -EINVAL;
13028bccd85fSChristoph Lameter 			} else if (t)
13038bccd85fSChristoph Lameter 				return -EINVAL;
13048bccd85fSChristoph Lameter 		}
13058bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13068bccd85fSChristoph Lameter 		endmask = ~0UL;
13078bccd85fSChristoph Lameter 	}
13088bccd85fSChristoph Lameter 
13098bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13108bccd85fSChristoph Lameter 		return -EFAULT;
13118bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13128bccd85fSChristoph Lameter 	return 0;
13138bccd85fSChristoph Lameter }
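
/*
 * Illustrative userspace sketch (not part of this file): the nlongs/endmask
 * arithmetic used by get_nodes() above.  A userspace maxnode of 17 leaves 16
 * usable bits after the --maxnode, so the final long is masked with
 * (1UL << 16) - 1; a maxnode landing exactly on a long boundary keeps every
 * bit (~0UL).
 */
#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long maxnode = 17;	/* as passed in from userspace */
	unsigned long nlongs, endmask;

	--maxnode;			/* get_nodes() counts usable bits */
	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* on a 64-bit box this prints: nlongs=1 endmask=0xffff */
	printf("nlongs=%lu endmask=%#lx\n", nlongs, endmask);
	return 0;
}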
13148bccd85fSChristoph Lameter 
13158bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13168bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13178bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13188bccd85fSChristoph Lameter {
13198bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13208bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13218bccd85fSChristoph Lameter 
13228bccd85fSChristoph Lameter 	if (copy > nbytes) {
13238bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13248bccd85fSChristoph Lameter 			return -EINVAL;
13258bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13268bccd85fSChristoph Lameter 			return -EFAULT;
13278bccd85fSChristoph Lameter 		copy = nbytes;
13288bccd85fSChristoph Lameter 	}
13298bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13308bccd85fSChristoph Lameter }
13318bccd85fSChristoph Lameter 
1332938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1333f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1334938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13358bccd85fSChristoph Lameter {
13368bccd85fSChristoph Lameter 	nodemask_t nodes;
13378bccd85fSChristoph Lameter 	int err;
1338028fec41SDavid Rientjes 	unsigned short mode_flags;
13398bccd85fSChristoph Lameter 
1340028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1341028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1342a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1343a3b51e01SDavid Rientjes 		return -EINVAL;
13444c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13454c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13464c50bc01SDavid Rientjes 		return -EINVAL;
13478bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13488bccd85fSChristoph Lameter 	if (err)
13498bccd85fSChristoph Lameter 		return err;
1350028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13518bccd85fSChristoph Lameter }
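
/*
 * Illustrative userspace sketch (not part of this file): exercising the
 * mbind(2) entry point above through the libnuma wrapper in <numaif.h>
 * (link with -lnuma).  The node number is an assumption; any machine with
 * a node 1 will do.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 4UL << 20;		/* 4 MiB, page aligned via mmap */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	unsigned long nodemask = 1UL << 1;	/* bind to node 1 only */
	/* MPOL_MF_MOVE lets do_mbind() migrate pages already faulted in;
	 * MPOL_MF_STRICT turns unmovable stragglers into -EIO */
	if (mbind(buf, len, MPOL_BIND, &nodemask,
		  8 * sizeof(nodemask) + 1, MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind");
	return 0;
}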
13528bccd85fSChristoph Lameter 
13538bccd85fSChristoph Lameter /* Set the process memory policy */
135423c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1355938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13568bccd85fSChristoph Lameter {
13578bccd85fSChristoph Lameter 	int err;
13588bccd85fSChristoph Lameter 	nodemask_t nodes;
1359028fec41SDavid Rientjes 	unsigned short flags;
13608bccd85fSChristoph Lameter 
1361028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1362028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1363028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13648bccd85fSChristoph Lameter 		return -EINVAL;
13654c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13664c50bc01SDavid Rientjes 		return -EINVAL;
13678bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13688bccd85fSChristoph Lameter 	if (err)
13698bccd85fSChristoph Lameter 		return err;
1370028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13718bccd85fSChristoph Lameter }
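
/*
 * Illustrative userspace sketch (not part of this file): setting a
 * process-wide interleave policy through the <numaif.h> wrapper (-lnuma).
 * The mode flag is OR-ed into 'mode' exactly as the syscall above unpacks
 * it; the define is copied from the uapi header in case the installed
 * numaif.h predates the flag.  Nodes 0 and 1 are assumed to exist.
 */
#include <numaif.h>
#include <stdio.h>

#ifndef MPOL_F_STATIC_NODES
#define MPOL_F_STATIC_NODES	(1 << 15)	/* from <linux/mempolicy.h> */
#endif

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	/* MPOL_F_STATIC_NODES: keep the mask as-is across cpuset rebinds */
	if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
			  &nodemask, 8 * sizeof(nodemask) + 1))
		perror("set_mempolicy");
	return 0;
}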
13728bccd85fSChristoph Lameter 
1373938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1374938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1375938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
137639743889SChristoph Lameter {
1377c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1378596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
137939743889SChristoph Lameter 	struct task_struct *task;
138039743889SChristoph Lameter 	nodemask_t task_nodes;
138139743889SChristoph Lameter 	int err;
1382596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1383596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1384596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
138539743889SChristoph Lameter 
1386596d7cfaSKOSAKI Motohiro 	if (!scratch)
1387596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
138839743889SChristoph Lameter 
1389596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1390596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1391596d7cfaSKOSAKI Motohiro 
1392596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
139339743889SChristoph Lameter 	if (err)
1394596d7cfaSKOSAKI Motohiro 		goto out;
1395596d7cfaSKOSAKI Motohiro 
1396596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1397596d7cfaSKOSAKI Motohiro 	if (err)
1398596d7cfaSKOSAKI Motohiro 		goto out;
139939743889SChristoph Lameter 
140039743889SChristoph Lameter 	/* Find the mm_struct */
140155cfaa3cSZeng Zhaoming 	rcu_read_lock();
1402228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
140339743889SChristoph Lameter 	if (!task) {
140455cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1405596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1406596d7cfaSKOSAKI Motohiro 		goto out;
140739743889SChristoph Lameter 	}
14083268c63eSChristoph Lameter 	get_task_struct(task);
140939743889SChristoph Lameter 
1410596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
141139743889SChristoph Lameter 
141239743889SChristoph Lameter 	/*
141339743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
141439743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14157f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
141639743889SChristoph Lameter 	 * userid as the target process.
141739743889SChristoph Lameter 	 */
1418c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1419b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1420b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
142174c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1422c69e8d9cSDavid Howells 		rcu_read_unlock();
142339743889SChristoph Lameter 		err = -EPERM;
14243268c63eSChristoph Lameter 		goto out_put;
142539743889SChristoph Lameter 	}
1426c69e8d9cSDavid Howells 	rcu_read_unlock();
142739743889SChristoph Lameter 
142839743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
142939743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1430596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
143139743889SChristoph Lameter 		err = -EPERM;
14323268c63eSChristoph Lameter 		goto out_put;
143339743889SChristoph Lameter 	}
143439743889SChristoph Lameter 
143501f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14363b42d28bSChristoph Lameter 		err = -EINVAL;
14373268c63eSChristoph Lameter 		goto out_put;
14383b42d28bSChristoph Lameter 	}
14393b42d28bSChristoph Lameter 
144086c3a764SDavid Quigley 	err = security_task_movememory(task);
144186c3a764SDavid Quigley 	if (err)
14423268c63eSChristoph Lameter 		goto out_put;
144386c3a764SDavid Quigley 
14443268c63eSChristoph Lameter 	mm = get_task_mm(task);
14453268c63eSChristoph Lameter 	put_task_struct(task);
1446f2a9ef88SSasha Levin 
1447f2a9ef88SSasha Levin 	if (!mm) {
1448f2a9ef88SSasha Levin 		err = -EINVAL;
1449f2a9ef88SSasha Levin 		goto out;
1450f2a9ef88SSasha Levin 	}
1451f2a9ef88SSasha Levin 
1452596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
145374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14543268c63eSChristoph Lameter 
145539743889SChristoph Lameter 	mmput(mm);
14563268c63eSChristoph Lameter out:
1457596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1458596d7cfaSKOSAKI Motohiro 
145939743889SChristoph Lameter 	return err;
14603268c63eSChristoph Lameter 
14613268c63eSChristoph Lameter out_put:
14623268c63eSChristoph Lameter 	put_task_struct(task);
14633268c63eSChristoph Lameter 	goto out;
14643268c63eSChristoph Lameter 
146539743889SChristoph Lameter }
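
/*
 * Illustrative userspace sketch (not part of this file): draining another
 * process's pages from node 0 to node 1 through the migrate_pages(2) entry
 * point above, via the <numaif.h> wrapper (-lnuma).  The target pid comes
 * from argv and is hypothetical; moving someone else's pages needs
 * CAP_SYS_NICE or a matching uid, as checked above.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	int pid = argc > 1 ? atoi(argv[1]) : 0;	/* 0 means the caller */
	unsigned long from = 1UL << 0;
	unsigned long to = 1UL << 1;

	/* a positive return is the busy count: pages that could not move */
	long rc = migrate_pages(pid, 8 * sizeof(from) + 1, &from, &to);
	if (rc < 0)
		perror("migrate_pages");
	else if (rc > 0)
		printf("%ld pages could not be moved\n", rc);
	return 0;
}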
146639743889SChristoph Lameter 
146739743889SChristoph Lameter 
14688bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1469938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1470938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1471938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14728bccd85fSChristoph Lameter {
1473dbcb0f19SAdrian Bunk 	int err;
1474dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14758bccd85fSChristoph Lameter 	nodemask_t nodes;
14768bccd85fSChristoph Lameter 
14778bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14788bccd85fSChristoph Lameter 		return -EINVAL;
14798bccd85fSChristoph Lameter 
14808bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14818bccd85fSChristoph Lameter 
14828bccd85fSChristoph Lameter 	if (err)
14838bccd85fSChristoph Lameter 		return err;
14848bccd85fSChristoph Lameter 
14858bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
14868bccd85fSChristoph Lameter 		return -EFAULT;
14878bccd85fSChristoph Lameter 
14888bccd85fSChristoph Lameter 	if (nmask)
14898bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
14908bccd85fSChristoph Lameter 
14918bccd85fSChristoph Lameter 	return err;
14928bccd85fSChristoph Lameter }
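
/*
 * Illustrative userspace sketch (not part of this file): querying the
 * policy that governs one address through the get_mempolicy(2) entry point
 * above, via <numaif.h> (-lnuma).  The nodemask buffer is sized to 1024
 * bits because, as checked above, maxnode must be at least the kernel's
 * MAX_NUMNODES when a mask is requested; very large configs may need more.
 */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	int mode;
	unsigned long nodes[16] = { 0 };	/* 16 * 64 = 1024 bits */
	int probe = 42;				/* any mapped address works */

	if (get_mempolicy(&mode, nodes, 8 * sizeof(nodes),
			  &probe, MPOL_F_ADDR))
		perror("get_mempolicy");
	else
		printf("mode=%d first mask word=%#lx\n", mode, nodes[0]);
	return 0;
}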
14938bccd85fSChristoph Lameter 
14941da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14951da177e4SLinus Torvalds 
1496c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1497c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1498c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1499c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15001da177e4SLinus Torvalds {
15011da177e4SLinus Torvalds 	long err;
15021da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15031da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15041da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15051da177e4SLinus Torvalds 
15061da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15071da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 	if (nmask)
15101da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15111da177e4SLinus Torvalds 
15121da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds 	if (!err && nmask) {
15152bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15162bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15172bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15181da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15191da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15201da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15211da177e4SLinus Torvalds 	}
15221da177e4SLinus Torvalds 
15231da177e4SLinus Torvalds 	return err;
15241da177e4SLinus Torvalds }
15251da177e4SLinus Torvalds 
1526c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1527c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15281da177e4SLinus Torvalds {
15291da177e4SLinus Torvalds 	long err = 0;
15301da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15311da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15321da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15331da177e4SLinus Torvalds 
15341da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15351da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15361da177e4SLinus Torvalds 
15371da177e4SLinus Torvalds 	if (nmask) {
15381da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15391da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15401da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 
15431da177e4SLinus Torvalds 	if (err)
15441da177e4SLinus Torvalds 		return -EFAULT;
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15471da177e4SLinus Torvalds }
15481da177e4SLinus Torvalds 
1549c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1550c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1551c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	long err = 0;
15541da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15551da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1556dfcd3c0dSAndi Kleen 	nodemask_t bm;
15571da177e4SLinus Torvalds 
15581da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15591da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15601da177e4SLinus Torvalds 
15611da177e4SLinus Torvalds 	if (nmask) {
1562dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15631da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1564dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15651da177e4SLinus Torvalds 	}
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds 	if (err)
15681da177e4SLinus Torvalds 		return -EFAULT;
15691da177e4SLinus Torvalds 
15701da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15711da177e4SLinus Torvalds }
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds #endif
15741da177e4SLinus Torvalds 
157574d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
157674d2c3a0SOleg Nesterov 						unsigned long addr)
15771da177e4SLinus Torvalds {
15788d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15791da177e4SLinus Torvalds 
15801da177e4SLinus Torvalds 	if (vma) {
1581480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
15828d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
158300442ad0SMel Gorman 		} else if (vma->vm_policy) {
15841da177e4SLinus Torvalds 			pol = vma->vm_policy;
158500442ad0SMel Gorman 
158600442ad0SMel Gorman 			/*
158700442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
158800442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
158900442ad0SMel Gorman 			 * count on these policies which will be dropped by
159000442ad0SMel Gorman 			 * mpol_cond_put() later
159100442ad0SMel Gorman 			 */
159200442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
159300442ad0SMel Gorman 				mpol_get(pol);
159400442ad0SMel Gorman 		}
15951da177e4SLinus Torvalds 	}
1596f15ca78eSOleg Nesterov 
159774d2c3a0SOleg Nesterov 	return pol;
159874d2c3a0SOleg Nesterov }
159974d2c3a0SOleg Nesterov 
160074d2c3a0SOleg Nesterov /*
1601dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
160274d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
160374d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
160474d2c3a0SOleg Nesterov  *
160574d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1606dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
160774d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
160874d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
160974d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
161074d2c3a0SOleg Nesterov  * extra reference for shared policies.
161174d2c3a0SOleg Nesterov  */
1612dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1613dd6eecb9SOleg Nesterov 						unsigned long addr)
161474d2c3a0SOleg Nesterov {
161574d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
161674d2c3a0SOleg Nesterov 
16178d90274bSOleg Nesterov 	if (!pol)
1618dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16198d90274bSOleg Nesterov 
16201da177e4SLinus Torvalds 	return pol;
16211da177e4SLinus Torvalds }
16221da177e4SLinus Torvalds 
16236b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1624fc314724SMel Gorman {
16256b6482bbSOleg Nesterov 	struct mempolicy *pol;
1626f15ca78eSOleg Nesterov 
1627fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1628fc314724SMel Gorman 		bool ret = false;
1629fc314724SMel Gorman 
1630fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1631fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1632fc314724SMel Gorman 			ret = true;
1633fc314724SMel Gorman 		mpol_cond_put(pol);
1634fc314724SMel Gorman 
1635fc314724SMel Gorman 		return ret;
16368d90274bSOleg Nesterov 	}
16378d90274bSOleg Nesterov 
1638fc314724SMel Gorman 	pol = vma->vm_policy;
16398d90274bSOleg Nesterov 	if (!pol)
16406b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1641fc314724SMel Gorman 
1642fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1643fc314724SMel Gorman }
1644fc314724SMel Gorman 
1645d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1646d3eb1570SLai Jiangshan {
1647d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1648d3eb1570SLai Jiangshan 
1649d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1650d3eb1570SLai Jiangshan 
1651d3eb1570SLai Jiangshan 	/*
1652d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1653d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1654d3eb1570SLai Jiangshan 	 *
1655d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1656d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1657d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1658d3eb1570SLai Jiangshan 	 */
1659d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1660d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1661d3eb1570SLai Jiangshan 
1662d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1663d3eb1570SLai Jiangshan }
1664d3eb1570SLai Jiangshan 
166552cd3b07SLee Schermerhorn /*
166652cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
166752cd3b07SLee Schermerhorn  * page allocation
166852cd3b07SLee Schermerhorn  */
166952cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
167019770b32SMel Gorman {
167119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
167245c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1673d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
167419770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
167519770b32SMel Gorman 		return &policy->v.nodes;
167619770b32SMel Gorman 
167719770b32SMel Gorman 	return NULL;
167819770b32SMel Gorman }
167919770b32SMel Gorman 
168052cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
16812f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
16822f5f9486SAndi Kleen 	int nd)
16831da177e4SLinus Torvalds {
168445c4745aSLee Schermerhorn 	switch (policy->mode) {
16851da177e4SLinus Torvalds 	case MPOL_PREFERRED:
1686fc36b8d3SLee Schermerhorn 		if (!(policy->flags & MPOL_F_LOCAL))
16871da177e4SLinus Torvalds 			nd = policy->v.preferred_node;
16881da177e4SLinus Torvalds 		break;
16891da177e4SLinus Torvalds 	case MPOL_BIND:
169019770b32SMel Gorman 		/*
169152cd3b07SLee Schermerhorn 		 * Normally, MPOL_BIND allocations are node-local within the
169252cd3b07SLee Schermerhorn 		 * allowed nodemask.  However, if __GFP_THISNODE is set and the
16936eb27e1fSBob Liu 		 * current node isn't part of the mask, we use the zonelist for
169452cd3b07SLee Schermerhorn 		 * the first node in the mask instead.
169519770b32SMel Gorman 		 */
169619770b32SMel Gorman 		if (unlikely(gfp & __GFP_THISNODE) &&
169719770b32SMel Gorman 				unlikely(!node_isset(nd, policy->v.nodes)))
169819770b32SMel Gorman 			nd = first_node(policy->v.nodes);
169919770b32SMel Gorman 		break;
17001da177e4SLinus Torvalds 	default:
17011da177e4SLinus Torvalds 		BUG();
17021da177e4SLinus Torvalds 	}
17030e88460dSMel Gorman 	return node_zonelist(nd, gfp);
17041da177e4SLinus Torvalds }
17051da177e4SLinus Torvalds 
17061da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17071da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17081da177e4SLinus Torvalds {
17091da177e4SLinus Torvalds 	unsigned nid, next;
17101da177e4SLinus Torvalds 	struct task_struct *me = current;
17111da177e4SLinus Torvalds 
17121da177e4SLinus Torvalds 	nid = me->il_next;
1713dfcd3c0dSAndi Kleen 	next = next_node(nid, policy->v.nodes);
17141da177e4SLinus Torvalds 	if (next >= MAX_NUMNODES)
1715dfcd3c0dSAndi Kleen 		next = first_node(policy->v.nodes);
1716f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17171da177e4SLinus Torvalds 		me->il_next = next;
17181da177e4SLinus Torvalds 	return nid;
17191da177e4SLinus Torvalds }
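
/*
 * Illustrative userspace sketch (not part of this file): the round-robin
 * walk performed by interleave_nodes() above, with a file-scope cursor
 * standing in for the per-task il_next and a small bitmask for
 * pol->v.nodes.
 */
#include <stdio.h>

#define MAX_NODES 32

static int il_next;	/* models current->il_next */

static int next_node(int n, unsigned mask)
{
	for (int b = n + 1; b < MAX_NODES; b++)
		if (mask & (1u << b))
			return b;
	return MAX_NODES;	/* models MAX_NUMNODES: no bit found */
}

static int interleave(unsigned mask)
{
	int nid = il_next;
	int next = next_node(nid, mask);

	if (next >= MAX_NODES)		/* wrap back to the first node */
		next = next_node(-1, mask);
	il_next = next;
	return nid;
}

int main(void)
{
	unsigned nodes = (1u << 0) | (1u << 2) | (1u << 5);

	il_next = next_node(-1, nodes);
	for (int i = 0; i < 7; i++)	/* prints: 0 2 5 0 2 5 0 */
		printf("%d ", interleave(nodes));
	printf("\n");
	return 0;
}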
17201da177e4SLinus Torvalds 
1721dc85da15SChristoph Lameter /*
1722dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1723dc85da15SChristoph Lameter  * next slab entry.
1724dc85da15SChristoph Lameter  */
17252a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1726dc85da15SChristoph Lameter {
1727e7b691b0SAndi Kleen 	struct mempolicy *policy;
17282a389610SDavid Rientjes 	int node = numa_mem_id();
1729e7b691b0SAndi Kleen 
1730e7b691b0SAndi Kleen 	if (in_interrupt())
17312a389610SDavid Rientjes 		return node;
1732e7b691b0SAndi Kleen 
1733e7b691b0SAndi Kleen 	policy = current->mempolicy;
1734fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17352a389610SDavid Rientjes 		return node;
1736765c4507SChristoph Lameter 
1737bea904d5SLee Schermerhorn 	switch (policy->mode) {
1738bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1739fc36b8d3SLee Schermerhorn 		/*
1740fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1741fc36b8d3SLee Schermerhorn 		 */
1742bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1743bea904d5SLee Schermerhorn 
1744dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1745dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1746dc85da15SChristoph Lameter 
1747dd1a239fSMel Gorman 	case MPOL_BIND: {
1748dc85da15SChristoph Lameter 		/*
1749dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1750dc85da15SChristoph Lameter 		 * first node.
1751dc85da15SChristoph Lameter 		 */
175219770b32SMel Gorman 		struct zonelist *zonelist;
175319770b32SMel Gorman 		struct zone *zone;
175419770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
17552a389610SDavid Rientjes 		zonelist = &NODE_DATA(node)->node_zonelists[0];
175619770b32SMel Gorman 		(void)first_zones_zonelist(zonelist, highest_zoneidx,
175719770b32SMel Gorman 							&policy->v.nodes,
175819770b32SMel Gorman 							&zone);
17592a389610SDavid Rientjes 		return zone ? zone->node : node;
1760dd1a239fSMel Gorman 	}
1761dc85da15SChristoph Lameter 
1762dc85da15SChristoph Lameter 	default:
1763bea904d5SLee Schermerhorn 		BUG();
1764dc85da15SChristoph Lameter 	}
1765dc85da15SChristoph Lameter }
1766dc85da15SChristoph Lameter 
17671da177e4SLinus Torvalds /* Do static interleaving for a VMA with known offset. */
17681da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
17691da177e4SLinus Torvalds 		struct vm_area_struct *vma, unsigned long off)
17701da177e4SLinus Torvalds {
1771dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1772f5b087b5SDavid Rientjes 	unsigned target;
17731da177e4SLinus Torvalds 	int c;
1774b76ac7e7SJianguo Wu 	int nid = NUMA_NO_NODE;
17751da177e4SLinus Torvalds 
1776f5b087b5SDavid Rientjes 	if (!nnodes)
1777f5b087b5SDavid Rientjes 		return numa_node_id();
1778f5b087b5SDavid Rientjes 	target = (unsigned int)off % nnodes;
17791da177e4SLinus Torvalds 	c = 0;
17801da177e4SLinus Torvalds 	do {
1781dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17821da177e4SLinus Torvalds 		c++;
17831da177e4SLinus Torvalds 	} while (c <= target);
17841da177e4SLinus Torvalds 	return nid;
17851da177e4SLinus Torvalds }
17861da177e4SLinus Torvalds 
17875da7ca86SChristoph Lameter /* Determine a node number for interleave */
17885da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17895da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17905da7ca86SChristoph Lameter {
17915da7ca86SChristoph Lameter 	if (vma) {
17925da7ca86SChristoph Lameter 		unsigned long off;
17935da7ca86SChristoph Lameter 
17943b98b087SNishanth Aravamudan 		/*
17953b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17963b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17973b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17983b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17993b98b087SNishanth Aravamudan 		 * a useful offset.
18003b98b087SNishanth Aravamudan 		 */
18013b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18023b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18035da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
18045da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
18055da7ca86SChristoph Lameter 	} else
18065da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18075da7ca86SChristoph Lameter }
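
/*
 * Illustrative userspace sketch (not part of this file): the deterministic
 * offset -> node mapping behind offset_il_node()/interleave_nid() above.
 * The page offset is reduced modulo the interleave weight and walked to the
 * matching set bit, so a given offset always lands on the same node no
 * matter when or in what order it faults.
 */
#include <stdio.h>

static int offset_to_node(unsigned mask, unsigned long off)
{
	int nnodes = __builtin_popcount(mask);
	unsigned target = off % nnodes;
	int nid = -1;

	for (unsigned c = 0; c <= target; c++) {
		do			/* advance to the next set bit */
			nid++;
		while (!(mask & (1u << nid)));
	}
	return nid;
}

int main(void)
{
	unsigned nodes = (1u << 1) | (1u << 3) | (1u << 4);

	for (unsigned long off = 0; off < 6; off++)
		printf("off=%lu -> node %d\n", off, offset_to_node(nodes, off));
	/* prints nodes 1 3 4 1 3 4 */
	return 0;
}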
18085da7ca86SChristoph Lameter 
1809778d3b0fSMichal Hocko /*
1810778d3b0fSMichal Hocko  * Return the bit number of a random bit set in the nodemask.
1811b76ac7e7SJianguo Wu  * (returns NUMA_NO_NODE if nodemask is empty)
1812778d3b0fSMichal Hocko  */
1813778d3b0fSMichal Hocko int node_random(const nodemask_t *maskp)
1814778d3b0fSMichal Hocko {
1815b76ac7e7SJianguo Wu 	int w, bit = NUMA_NO_NODE;
1816778d3b0fSMichal Hocko 
1817778d3b0fSMichal Hocko 	w = nodes_weight(*maskp);
1818778d3b0fSMichal Hocko 	if (w)
1819778d3b0fSMichal Hocko 		bit = bitmap_ord_to_pos(maskp->bits,
1820778d3b0fSMichal Hocko 			get_random_int() % w, MAX_NUMNODES);
1821778d3b0fSMichal Hocko 	return bit;
1822778d3b0fSMichal Hocko }
1823778d3b0fSMichal Hocko 
182400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1825480eccf9SLee Schermerhorn /*
1826480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1827b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1828b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1829b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1830b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1831b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1832480eccf9SLee Schermerhorn  *
183352cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
183452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
183552cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
183652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1837c0ff7453SMiao Xie  *
1838d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1839480eccf9SLee Schermerhorn  */
1840396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
184119770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
184219770b32SMel Gorman 				nodemask_t **nodemask)
18435da7ca86SChristoph Lameter {
1844480eccf9SLee Schermerhorn 	struct zonelist *zl;
18455da7ca86SChristoph Lameter 
1846dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
184719770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18485da7ca86SChristoph Lameter 
184952cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
185052cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1851a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
185252cd3b07SLee Schermerhorn 	} else {
18532f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
185452cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
185552cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1856480eccf9SLee Schermerhorn 	}
1857480eccf9SLee Schermerhorn 	return zl;
18585da7ca86SChristoph Lameter }
185906808b08SLee Schermerhorn 
186006808b08SLee Schermerhorn /*
186106808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
186206808b08SLee Schermerhorn  *
186306808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
186406808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
186506808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
186606808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
186706808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
186806808b08SLee Schermerhorn  * of non-default mempolicy.
186906808b08SLee Schermerhorn  *
187006808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
187106808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
187206808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
187306808b08SLee Schermerhorn  *
187406808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
187506808b08SLee Schermerhorn  */
187606808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
187706808b08SLee Schermerhorn {
187806808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
187906808b08SLee Schermerhorn 	int nid;
188006808b08SLee Schermerhorn 
188106808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
188206808b08SLee Schermerhorn 		return false;
188306808b08SLee Schermerhorn 
1884c0ff7453SMiao Xie 	task_lock(current);
188506808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
188606808b08SLee Schermerhorn 	switch (mempolicy->mode) {
188706808b08SLee Schermerhorn 	case MPOL_PREFERRED:
188806808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
188906808b08SLee Schermerhorn 			nid = numa_node_id();
189006808b08SLee Schermerhorn 		else
189106808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
189206808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
189306808b08SLee Schermerhorn 		break;
189406808b08SLee Schermerhorn 
189506808b08SLee Schermerhorn 	case MPOL_BIND:
189606808b08SLee Schermerhorn 		/* Fall through */
189706808b08SLee Schermerhorn 		*mask = mempolicy->v.nodes;
189806808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
189906808b08SLee Schermerhorn 		break;
190006808b08SLee Schermerhorn 
190106808b08SLee Schermerhorn 	default:
190206808b08SLee Schermerhorn 		BUG();
190306808b08SLee Schermerhorn 	}
1904c0ff7453SMiao Xie 	task_unlock(current);
190506808b08SLee Schermerhorn 
190606808b08SLee Schermerhorn 	return true;
190706808b08SLee Schermerhorn }
190800ac59adSChen, Kenneth W #endif
19095da7ca86SChristoph Lameter 
19106f48d0ebSDavid Rientjes /*
19116f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19126f48d0ebSDavid Rientjes  *
19136f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19146f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19156f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19166f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19176f48d0ebSDavid Rientjes  *
19186f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19196f48d0ebSDavid Rientjes  */
19206f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19216f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19226f48d0ebSDavid Rientjes {
19236f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19246f48d0ebSDavid Rientjes 	bool ret = true;
19256f48d0ebSDavid Rientjes 
19266f48d0ebSDavid Rientjes 	if (!mask)
19276f48d0ebSDavid Rientjes 		return ret;
19286f48d0ebSDavid Rientjes 	task_lock(tsk);
19296f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19306f48d0ebSDavid Rientjes 	if (!mempolicy)
19316f48d0ebSDavid Rientjes 		goto out;
19326f48d0ebSDavid Rientjes 
19336f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19346f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19356f48d0ebSDavid Rientjes 		/*
19366f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only indicate preferred nodes
19376f48d0ebSDavid Rientjes 		 * to allocate from; the task may fall back to other nodes when OOM.
19386f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19396f48d0ebSDavid Rientjes 		 * nodes in mask.
19406f48d0ebSDavid Rientjes 		 */
19416f48d0ebSDavid Rientjes 		break;
19426f48d0ebSDavid Rientjes 	case MPOL_BIND:
19436f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19446f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19456f48d0ebSDavid Rientjes 		break;
19466f48d0ebSDavid Rientjes 	default:
19476f48d0ebSDavid Rientjes 		BUG();
19486f48d0ebSDavid Rientjes 	}
19496f48d0ebSDavid Rientjes out:
19506f48d0ebSDavid Rientjes 	task_unlock(tsk);
19516f48d0ebSDavid Rientjes 	return ret;
19526f48d0ebSDavid Rientjes }
19536f48d0ebSDavid Rientjes 
19541da177e4SLinus Torvalds /* Allocate a page under interleave policy.
19551da177e4SLinus Torvalds    Separate path because it needs to do special accounting. */
1956662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1957662f3a0bSAndi Kleen 					unsigned nid)
19581da177e4SLinus Torvalds {
19591da177e4SLinus Torvalds 	struct zonelist *zl;
19601da177e4SLinus Torvalds 	struct page *page;
19611da177e4SLinus Torvalds 
19620e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19631da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1964dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1965ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19661da177e4SLinus Torvalds 	return page;
19671da177e4SLinus Torvalds }
19681da177e4SLinus Torvalds 
19691da177e4SLinus Torvalds /**
19700bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19711da177e4SLinus Torvalds  *
19721da177e4SLinus Torvalds  * 	@gfp:
19731da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19741da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19751da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19761da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19771da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19781da177e4SLinus Torvalds  *
19790bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19801da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19811da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1982be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
1983be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
19841da177e4SLinus Torvalds  *
19851da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19861da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19871da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19881da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
1989be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
1990be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
19911da177e4SLinus Torvalds  */
19921da177e4SLinus Torvalds struct page *
19930bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1994be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
19951da177e4SLinus Torvalds {
1996cc9a6c87SMel Gorman 	struct mempolicy *pol;
1997c0ff7453SMiao Xie 	struct page *page;
1998cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
1999be97a41bSVlastimil Babka 	struct zonelist *zl;
2000be97a41bSVlastimil Babka 	nodemask_t *nmask;
20011da177e4SLinus Torvalds 
2002cc9a6c87SMel Gorman retry_cpuset:
2003dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2004d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2005cc9a6c87SMel Gorman 
2006be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
20071da177e4SLinus Torvalds 		unsigned nid;
20085da7ca86SChristoph Lameter 
20098eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
201052cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20110bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2012be97a41bSVlastimil Babka 		goto out;
20131da177e4SLinus Torvalds 	}
20141da177e4SLinus Torvalds 
20150867a57cSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
20160867a57cSVlastimil Babka 		int hpage_node = node;
20170867a57cSVlastimil Babka 
20180867a57cSVlastimil Babka 		/*
20190867a57cSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
20200867a57cSVlastimil Babka 		 * allows the current node (or other explicitly preferred
20210867a57cSVlastimil Babka 		 * node) we only try to allocate from the current/preferred
20220867a57cSVlastimil Babka 		 * node and don't fall back to other nodes, as the cost of
20230867a57cSVlastimil Babka 		 * remote accesses would likely offset THP benefits.
20240867a57cSVlastimil Babka 		 *
20250867a57cSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
20260867a57cSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
20270867a57cSVlastimil Babka 		 */
20280867a57cSVlastimil Babka 		if (pol->mode == MPOL_PREFERRED &&
20290867a57cSVlastimil Babka 						!(pol->flags & MPOL_F_LOCAL))
20300867a57cSVlastimil Babka 			hpage_node = pol->v.preferred_node;
20310867a57cSVlastimil Babka 
20320867a57cSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
20330867a57cSVlastimil Babka 		if (!nmask || node_isset(hpage_node, *nmask)) {
20340867a57cSVlastimil Babka 			mpol_cond_put(pol);
203596db800fSVlastimil Babka 			page = __alloc_pages_node(hpage_node,
20360867a57cSVlastimil Babka 						gfp | __GFP_THISNODE, order);
20370867a57cSVlastimil Babka 			goto out;
20380867a57cSVlastimil Babka 		}
20390867a57cSVlastimil Babka 	}
20400867a57cSVlastimil Babka 
2041077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
2042be97a41bSVlastimil Babka 	zl = policy_zonelist(gfp, pol, node);
2043077fcf11SAneesh Kumar K.V 	mpol_cond_put(pol);
2044be97a41bSVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2045be97a41bSVlastimil Babka out:
2046be97a41bSVlastimil Babka 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2047077fcf11SAneesh Kumar K.V 		goto retry_cpuset;
2048077fcf11SAneesh Kumar K.V 	return page;
2049077fcf11SAneesh Kumar K.V }
2050077fcf11SAneesh Kumar K.V 
20511da177e4SLinus Torvalds /**
20521da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20531da177e4SLinus Torvalds  *
20541da177e4SLinus Torvalds  *	@gfp:
20551da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20561da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20571da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20581da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20591da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20601da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20611da177e4SLinus Torvalds  *
20621da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20631da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20641da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20651da177e4SLinus Torvalds  *
2066cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20671da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20681da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20691da177e4SLinus Torvalds  */
2070dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20711da177e4SLinus Torvalds {
20728d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2073c0ff7453SMiao Xie 	struct page *page;
2074cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20751da177e4SLinus Torvalds 
20768d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20778d90274bSOleg Nesterov 		pol = get_task_policy(current);
207852cd3b07SLee Schermerhorn 
2079cc9a6c87SMel Gorman retry_cpuset:
2080d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2081cc9a6c87SMel Gorman 
208252cd3b07SLee Schermerhorn 	/*
208352cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
208452cd3b07SLee Schermerhorn 	 * nor system default_policy
208552cd3b07SLee Schermerhorn 	 */
208645c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2087c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2088c0ff7453SMiao Xie 	else
2089c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20905c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20915c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2092cc9a6c87SMel Gorman 
2093d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2094cc9a6c87SMel Gorman 		goto retry_cpuset;
2095cc9a6c87SMel Gorman 
2096c0ff7453SMiao Xie 	return page;
20971da177e4SLinus Torvalds }
20981da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20991da177e4SLinus Torvalds 
2100ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2101ef0855d3SOleg Nesterov {
2102ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2103ef0855d3SOleg Nesterov 
2104ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2105ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2106ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2107ef0855d3SOleg Nesterov 	return 0;
2108ef0855d3SOleg Nesterov }
2109ef0855d3SOleg Nesterov 
21104225399aSPaul Jackson /*
2111846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21124225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
21134225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21144225399aSPaul Jackson  * keeps mempolicies cpuset-relative after its cpuset moves.  See
21154225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2116708c1bbcSMiao Xie  *
2117708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2118708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
21194225399aSPaul Jackson  */
21204225399aSPaul Jackson 
2121846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2122846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21231da177e4SLinus Torvalds {
21241da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21251da177e4SLinus Torvalds 
21261da177e4SLinus Torvalds 	if (!new)
21271da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2128708c1bbcSMiao Xie 
2129708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2130708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2131708c1bbcSMiao Xie 		task_lock(current);
2132708c1bbcSMiao Xie 		*new = *old;
2133708c1bbcSMiao Xie 		task_unlock(current);
2134708c1bbcSMiao Xie 	} else
2135708c1bbcSMiao Xie 		*new = *old;
2136708c1bbcSMiao Xie 
21374225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21384225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2139708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2140708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2141708c1bbcSMiao Xie 		else
2142708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21434225399aSPaul Jackson 	}
21441da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21451da177e4SLinus Torvalds 	return new;
21461da177e4SLinus Torvalds }
21471da177e4SLinus Torvalds 
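/*
 * Illustrative sketch, not part of the original file: mpol_dup()
 * (a wrapper around __mpol_dup() above) reports failure with
 * ERR_PTR(), so callers test with IS_ERR() rather than for NULL,
 * as vma_dup_policy() does.  example_clone() is hypothetical.
 */
static struct mempolicy *example_clone(struct mempolicy *src)
{
	struct mempolicy *copy = mpol_dup(src);

	if (IS_ERR(copy))
		return NULL;	/* or propagate PTR_ERR(copy) */
	return copy;		/* drop with mpol_put() when done */
}
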
21481da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2149fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21501da177e4SLinus Torvalds {
21511da177e4SLinus Torvalds 	if (!a || !b)
2152fcfb4dccSKOSAKI Motohiro 		return false;
215345c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2154fcfb4dccSKOSAKI Motohiro 		return false;
215519800502SBob Liu 	if (a->flags != b->flags)
2156fcfb4dccSKOSAKI Motohiro 		return false;
215719800502SBob Liu 	if (mpol_store_user_nodemask(a))
215819800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2159fcfb4dccSKOSAKI Motohiro 			return false;
216019800502SBob Liu 
216145c4745aSLee Schermerhorn 	switch (a->mode) {
216219770b32SMel Gorman 	case MPOL_BIND:
216319770b32SMel Gorman 		/* Fall through */
21641da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2165fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21661da177e4SLinus Torvalds 	case MPOL_PREFERRED:
216775719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21681da177e4SLinus Torvalds 	default:
21691da177e4SLinus Torvalds 		BUG();
2170fcfb4dccSKOSAKI Motohiro 		return false;
21711da177e4SLinus Torvalds 	}
21721da177e4SLinus Torvalds }
21731da177e4SLinus Torvalds 
21741da177e4SLinus Torvalds /*
21751da177e4SLinus Torvalds  * Shared memory backing store policy support.
21761da177e4SLinus Torvalds  *
21771da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21781da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21794a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
21801da177e4SLinus Torvalds  * for any accesses to the tree.
21811da177e4SLinus Torvalds  */
21821da177e4SLinus Torvalds 
21834a8c7bb5SNathan Zimmer /*
21844a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
21854a8c7bb5SNathan Zimmer  * reading or for writing
21864a8c7bb5SNathan Zimmer  */
21871da177e4SLinus Torvalds static struct sp_node *
21881da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21891da177e4SLinus Torvalds {
21901da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21911da177e4SLinus Torvalds 
21921da177e4SLinus Torvalds 	while (n) {
21931da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21941da177e4SLinus Torvalds 
21951da177e4SLinus Torvalds 		if (start >= p->end)
21961da177e4SLinus Torvalds 			n = n->rb_right;
21971da177e4SLinus Torvalds 		else if (end <= p->start)
21981da177e4SLinus Torvalds 			n = n->rb_left;
21991da177e4SLinus Torvalds 		else
22001da177e4SLinus Torvalds 			break;
22011da177e4SLinus Torvalds 	}
22021da177e4SLinus Torvalds 	if (!n)
22031da177e4SLinus Torvalds 		return NULL;
22041da177e4SLinus Torvalds 	for (;;) {
22051da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22061da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22071da177e4SLinus Torvalds 		if (!prev)
22081da177e4SLinus Torvalds 			break;
22091da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22101da177e4SLinus Torvalds 		if (w->end <= start)
22111da177e4SLinus Torvalds 			break;
22121da177e4SLinus Torvalds 		n = prev;
22131da177e4SLinus Torvalds 	}
22141da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22151da177e4SLinus Torvalds }
22161da177e4SLinus Torvalds 
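/*
 * Illustrative walk-through, not part of the original file: with
 * stored ranges [0,5) [5,8) [8,12) and a lookup of start=4, end=9,
 * the binary search above may stop at [5,8); the rb_prev() loop then
 * steps back to [0,5), whose end (5) is still greater than start (4),
 * and returns it as the first intersecting node.
 */
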
22174a8c7bb5SNathan Zimmer /*
22184a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
22194a8c7bb5SNathan Zimmer  * writing.
22204a8c7bb5SNathan Zimmer  */
22211da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22221da177e4SLinus Torvalds {
22231da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22241da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22251da177e4SLinus Torvalds 	struct sp_node *nd;
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 	while (*p) {
22281da177e4SLinus Torvalds 		parent = *p;
22291da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22301da177e4SLinus Torvalds 		if (new->start < nd->start)
22311da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22321da177e4SLinus Torvalds 		else if (new->end > nd->end)
22331da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22341da177e4SLinus Torvalds 		else
22351da177e4SLinus Torvalds 			BUG();
22361da177e4SLinus Torvalds 	}
22371da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22381da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2239140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
224045c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22411da177e4SLinus Torvalds }
22421da177e4SLinus Torvalds 
22431da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22441da177e4SLinus Torvalds struct mempolicy *
22451da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22461da177e4SLinus Torvalds {
22471da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22481da177e4SLinus Torvalds 	struct sp_node *sn;
22491da177e4SLinus Torvalds 
22501da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22511da177e4SLinus Torvalds 		return NULL;
22524a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22531da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22541da177e4SLinus Torvalds 	if (sn) {
22551da177e4SLinus Torvalds 		mpol_get(sn->policy);
22561da177e4SLinus Torvalds 		pol = sn->policy;
22571da177e4SLinus Torvalds 	}
22584a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22591da177e4SLinus Torvalds 	return pol;
22601da177e4SLinus Torvalds }
22611da177e4SLinus Torvalds 
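/*
 * Illustrative sketch, not part of the original file: shmem-style
 * lookup by page index.  The returned policy carries a reference
 * taken under sp->lock; shared policies have MPOL_F_SHARED set, so
 * mpol_cond_put() drops it (and is a no-op on NULL).
 * example_lookup() is hypothetical.
 */
static void example_lookup(struct shared_policy *sp, unsigned long index)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, index);

	/* NULL means no range covers @index: fall back to defaults */
	mpol_cond_put(pol);
}
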
226263f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
226363f74ca2SKOSAKI Motohiro {
226463f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
226563f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
226663f74ca2SKOSAKI Motohiro }
226763f74ca2SKOSAKI Motohiro 
2268771fb4d8SLee Schermerhorn /**
2269771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2270771fb4d8SLee Schermerhorn  *
2271b46e14acSFabian Frederick  * @page: page to be checked
2272b46e14acSFabian Frederick  * @vma: vm area where page mapped
2273b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2274771fb4d8SLee Schermerhorn  *
2275771fb4d8SLee Schermerhorn  * Lookup the current policy node id for vma/addr and "compare to" the page's
2276771fb4d8SLee Schermerhorn  * node id.
2277771fb4d8SLee Schermerhorn  *
2278771fb4d8SLee Schermerhorn  * Returns:
2279771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2280771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2281771fb4d8SLee Schermerhorn  *
2282771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2283771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2284771fb4d8SLee Schermerhorn  */
2285771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2286771fb4d8SLee Schermerhorn {
2287771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2288771fb4d8SLee Schermerhorn 	struct zone *zone;
2289771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2290771fb4d8SLee Schermerhorn 	unsigned long pgoff;
229190572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
229290572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2293771fb4d8SLee Schermerhorn 	int polnid = -1;
2294771fb4d8SLee Schermerhorn 	int ret = -1;
2295771fb4d8SLee Schermerhorn 
2296771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2297771fb4d8SLee Schermerhorn 
2298dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2299771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2300771fb4d8SLee Schermerhorn 		goto out;
2301771fb4d8SLee Schermerhorn 
2302771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2303771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2304771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2305771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2306771fb4d8SLee Schermerhorn 
2307771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2308771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2309771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2310771fb4d8SLee Schermerhorn 		break;
2311771fb4d8SLee Schermerhorn 
2312771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2313771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2314771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2315771fb4d8SLee Schermerhorn 		else
2316771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2317771fb4d8SLee Schermerhorn 		break;
2318771fb4d8SLee Schermerhorn 
2319771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2320771fb4d8SLee Schermerhorn 		/*
2321771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2322771fb4d8SLee Schermerhorn 		 * Use the current page if it is in the policy nodemask,
2323771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2324771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use the current node [!misplaced].
2325771fb4d8SLee Schermerhorn 		 */
2326771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2327771fb4d8SLee Schermerhorn 			goto out;
2328771fb4d8SLee Schermerhorn 		(void)first_zones_zonelist(
2329771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2330771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2331771fb4d8SLee Schermerhorn 				&pol->v.nodes, &zone);
2332771fb4d8SLee Schermerhorn 		polnid = zone->node;
2333771fb4d8SLee Schermerhorn 		break;
2334771fb4d8SLee Schermerhorn 
2335771fb4d8SLee Schermerhorn 	default:
2336771fb4d8SLee Schermerhorn 		BUG();
2337771fb4d8SLee Schermerhorn 	}
23385606e387SMel Gorman 
23395606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2340e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
234190572890SPeter Zijlstra 		polnid = thisnid;
23425606e387SMel Gorman 
234310f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2344de1c9ce6SRik van Riel 			goto out;
2345de1c9ce6SRik van Riel 	}
2346e42c8ff2SMel Gorman 
2347771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2348771fb4d8SLee Schermerhorn 		ret = polnid;
2349771fb4d8SLee Schermerhorn out:
2350771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2351771fb4d8SLee Schermerhorn 
2352771fb4d8SLee Schermerhorn 	return ret;
2353771fb4d8SLee Schermerhorn }
2354771fb4d8SLee Schermerhorn 
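/*
 * Illustrative sketch, not part of the original file: how a NUMA
 * hinting fault path might consume mpol_misplaced()'s return value.
 * example_numa_fault() is hypothetical and the migration step is
 * elided.
 */
static void example_numa_fault(struct page *page,
			       struct vm_area_struct *vma,
			       unsigned long addr)
{
	int target = mpol_misplaced(page, vma, addr);

	if (target == -1)
		return;		/* already on an acceptable node */
	/* ... attempt migration of @page towards node @target here ... */
}
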
23551da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23561da177e4SLinus Torvalds {
2357140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23581da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
235963f74ca2SKOSAKI Motohiro 	sp_free(n);
23601da177e4SLinus Torvalds }
23611da177e4SLinus Torvalds 
236242288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
236342288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
236442288fe3SMel Gorman {
236542288fe3SMel Gorman 	node->start = start;
236642288fe3SMel Gorman 	node->end = end;
236742288fe3SMel Gorman 	node->policy = pol;
236842288fe3SMel Gorman }
236942288fe3SMel Gorman 
2370dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2371dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23721da177e4SLinus Torvalds {
2373869833f2SKOSAKI Motohiro 	struct sp_node *n;
2374869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23751da177e4SLinus Torvalds 
2376869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23771da177e4SLinus Torvalds 	if (!n)
23781da177e4SLinus Torvalds 		return NULL;
2379869833f2SKOSAKI Motohiro 
2380869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2381869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2382869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2383869833f2SKOSAKI Motohiro 		return NULL;
2384869833f2SKOSAKI Motohiro 	}
2385869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
238642288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2387869833f2SKOSAKI Motohiro 
23881da177e4SLinus Torvalds 	return n;
23891da177e4SLinus Torvalds }
23901da177e4SLinus Torvalds 
23911da177e4SLinus Torvalds /* Replace a policy range. */
23921da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23931da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23941da177e4SLinus Torvalds {
2395b22d127aSMel Gorman 	struct sp_node *n;
239642288fe3SMel Gorman 	struct sp_node *n_new = NULL;
239742288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2398b22d127aSMel Gorman 	int ret = 0;
23991da177e4SLinus Torvalds 
240042288fe3SMel Gorman restart:
24014a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
24021da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24031da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24041da177e4SLinus Torvalds 	while (n && n->start < end) {
24051da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24061da177e4SLinus Torvalds 		if (n->start >= start) {
24071da177e4SLinus Torvalds 			if (n->end <= end)
24081da177e4SLinus Torvalds 				sp_delete(sp, n);
24091da177e4SLinus Torvalds 			else
24101da177e4SLinus Torvalds 				n->start = end;
24111da177e4SLinus Torvalds 		} else {
24121da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24131da177e4SLinus Torvalds 			if (n->end > end) {
241442288fe3SMel Gorman 				if (!n_new)
241542288fe3SMel Gorman 					goto alloc_new;
241642288fe3SMel Gorman 
241742288fe3SMel Gorman 				*mpol_new = *n->policy;
241842288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24197880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24201da177e4SLinus Torvalds 				n->end = start;
24215ca39575SHillf Danton 				sp_insert(sp, n_new);
242242288fe3SMel Gorman 				n_new = NULL;
242342288fe3SMel Gorman 				mpol_new = NULL;
24241da177e4SLinus Torvalds 				break;
24251da177e4SLinus Torvalds 			} else
24261da177e4SLinus Torvalds 				n->end = start;
24271da177e4SLinus Torvalds 		}
24281da177e4SLinus Torvalds 		if (!next)
24291da177e4SLinus Torvalds 			break;
24301da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24311da177e4SLinus Torvalds 	}
24321da177e4SLinus Torvalds 	if (new)
24331da177e4SLinus Torvalds 		sp_insert(sp, new);
24344a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
243542288fe3SMel Gorman 	ret = 0;
243642288fe3SMel Gorman 
243742288fe3SMel Gorman err_out:
243842288fe3SMel Gorman 	if (mpol_new)
243942288fe3SMel Gorman 		mpol_put(mpol_new);
244042288fe3SMel Gorman 	if (n_new)
244142288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
244242288fe3SMel Gorman 
2443b22d127aSMel Gorman 	return ret;
244442288fe3SMel Gorman 
244542288fe3SMel Gorman alloc_new:
24464a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
244742288fe3SMel Gorman 	ret = -ENOMEM;
244842288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
244942288fe3SMel Gorman 	if (!n_new)
245042288fe3SMel Gorman 		goto err_out;
245142288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
245242288fe3SMel Gorman 	if (!mpol_new)
245342288fe3SMel Gorman 		goto err_out;
245442288fe3SMel Gorman 	goto restart;
24551da177e4SLinus Torvalds }
24561da177e4SLinus Torvalds 
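/*
 * Illustrative walk-through, not part of the original file: given an
 * existing node over pages [0,10) and a replacement over [3,7), the
 * loop above trims the old node to [0,3), inserts a preallocated
 * copy of the old policy over [7,10), and finally inserts @new over
 * [3,7).  The n_new/mpol_new pair is allocated outside the lock (at
 * alloc_new:) precisely for this old-policy-spans-new-range case.
 */
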
245771fe804bSLee Schermerhorn /**
245871fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
245971fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
246071fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
246171fe804bSLee Schermerhorn  *
246271fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
246371fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
246471fe804bSLee Schermerhorn  * This must be released on exit.
24654bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
246671fe804bSLee Schermerhorn  */
246771fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24687339ff83SRobin Holt {
246958568d2aSMiao Xie 	int ret;
247058568d2aSMiao Xie 
247171fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
24724a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
24737339ff83SRobin Holt 
247471fe804bSLee Schermerhorn 	if (mpol) {
24757339ff83SRobin Holt 		struct vm_area_struct pvma;
247671fe804bSLee Schermerhorn 		struct mempolicy *new;
24774bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24787339ff83SRobin Holt 
24794bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24805c0c1654SLee Schermerhorn 			goto put_mpol;
248171fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
248271fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
248315d77835SLee Schermerhorn 		if (IS_ERR(new))
24840cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
248558568d2aSMiao Xie 
248658568d2aSMiao Xie 		task_lock(current);
24874bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
248858568d2aSMiao Xie 		task_unlock(current);
248915d77835SLee Schermerhorn 		if (ret)
24905c0c1654SLee Schermerhorn 			goto put_new;
249171fe804bSLee Schermerhorn 
249271fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24937339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
249471fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
249571fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
249615d77835SLee Schermerhorn 
24975c0c1654SLee Schermerhorn put_new:
249871fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24990cae3457SDan Carpenter free_scratch:
25004bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
25015c0c1654SLee Schermerhorn put_mpol:
25025c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25037339ff83SRobin Holt 	}
25047339ff83SRobin Holt }
25057339ff83SRobin Holt 
25061da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25071da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25081da177e4SLinus Torvalds {
25091da177e4SLinus Torvalds 	int err;
25101da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25111da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25121da177e4SLinus Torvalds 
2513028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25141da177e4SLinus Torvalds 		 vma->vm_pgoff,
251545c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2516028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
251700ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds 	if (npol) {
25201da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25211da177e4SLinus Torvalds 		if (!new)
25221da177e4SLinus Torvalds 			return -ENOMEM;
25231da177e4SLinus Torvalds 	}
25241da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25251da177e4SLinus Torvalds 	if (err && new)
252663f74ca2SKOSAKI Motohiro 		sp_free(new);
25271da177e4SLinus Torvalds 	return err;
25281da177e4SLinus Torvalds }
25291da177e4SLinus Torvalds 
25301da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25311da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25321da177e4SLinus Torvalds {
25331da177e4SLinus Torvalds 	struct sp_node *n;
25341da177e4SLinus Torvalds 	struct rb_node *next;
25351da177e4SLinus Torvalds 
25361da177e4SLinus Torvalds 	if (!p->root.rb_node)
25371da177e4SLinus Torvalds 		return;
25384a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
25391da177e4SLinus Torvalds 	next = rb_first(&p->root);
25401da177e4SLinus Torvalds 	while (next) {
25411da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25421da177e4SLinus Torvalds 		next = rb_next(&n->nd);
254363f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25441da177e4SLinus Torvalds 	}
25454a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
25461da177e4SLinus Torvalds }
25471da177e4SLinus Torvalds 
25481a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2549c297663cSMel Gorman static int __initdata numabalancing_override;
25501a687c2eSMel Gorman 
25511a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25521a687c2eSMel Gorman {
25531a687c2eSMel Gorman 	bool numabalancing_default = false;
25541a687c2eSMel Gorman 
25551a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25561a687c2eSMel Gorman 		numabalancing_default = true;
25571a687c2eSMel Gorman 
2558c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2559c297663cSMel Gorman 	if (numabalancing_override)
2560c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2561c297663cSMel Gorman 
2562b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
25634a404beaSAndrew Morton 		pr_info("%s automatic NUMA balancing. "
2564c297663cSMel Gorman 			"Configure with numa_balancing= or the "
2565c297663cSMel Gorman 			"kernel.numa_balancing sysctl\n",
2566c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25671a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25681a687c2eSMel Gorman 	}
25691a687c2eSMel Gorman }
25701a687c2eSMel Gorman 
25711a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25721a687c2eSMel Gorman {
25731a687c2eSMel Gorman 	int ret = 0;
25741a687c2eSMel Gorman 	if (!str)
25751a687c2eSMel Gorman 		goto out;
25761a687c2eSMel Gorman 
25771a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2578c297663cSMel Gorman 		numabalancing_override = 1;
25791a687c2eSMel Gorman 		ret = 1;
25801a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2581c297663cSMel Gorman 		numabalancing_override = -1;
25821a687c2eSMel Gorman 		ret = 1;
25831a687c2eSMel Gorman 	}
25841a687c2eSMel Gorman out:
25851a687c2eSMel Gorman 	if (!ret)
25864a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25871a687c2eSMel Gorman 
25881a687c2eSMel Gorman 	return ret;
25891a687c2eSMel Gorman }
25901a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
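/*
 * Illustrative usage, not part of the original file: booting with
 * "numa_balancing=disable" sets numabalancing_override to -1, which
 * check_numabalancing_enable() turns into
 * set_numabalancing_state(false) regardless of the Kconfig default;
 * "numa_balancing=enable" likewise forces it on.
 */
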
25911a687c2eSMel Gorman #else
25921a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25931a687c2eSMel Gorman {
25941a687c2eSMel Gorman }
25951a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25961a687c2eSMel Gorman 
25971da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25981da177e4SLinus Torvalds void __init numa_policy_init(void)
25991da177e4SLinus Torvalds {
2600b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2601b71636e2SPaul Mundt 	unsigned long largest = 0;
2602b71636e2SPaul Mundt 	int nid, prefer = 0;
2603b71636e2SPaul Mundt 
26041da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26051da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
260620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26071da177e4SLinus Torvalds 
26081da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26091da177e4SLinus Torvalds 				     sizeof(struct sp_node),
261020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26111da177e4SLinus Torvalds 
26125606e387SMel Gorman 	for_each_node(nid) {
26135606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26145606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26155606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26165606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26175606e387SMel Gorman 			.v = { .preferred_node = nid, },
26185606e387SMel Gorman 		};
26195606e387SMel Gorman 	}
26205606e387SMel Gorman 
2621b71636e2SPaul Mundt 	/*
2622b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2623b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2624b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2625b71636e2SPaul Mundt 	 */
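	/*
	 * Worked example (sketch, not in the original file): with 4KiB
	 * pages PAGE_SHIFT is 12, so the test below reads
	 * total_pages * 4096 >= 16MiB, i.e. a node needs at least 4096
	 * present pages to participate in boot-time interleaving.
	 */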
2626b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
262701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2628b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26291da177e4SLinus Torvalds 
2630b71636e2SPaul Mundt 		/* Preserve the largest node */
2631b71636e2SPaul Mundt 		if (largest < total_pages) {
2632b71636e2SPaul Mundt 			largest = total_pages;
2633b71636e2SPaul Mundt 			prefer = nid;
2634b71636e2SPaul Mundt 		}
2635b71636e2SPaul Mundt 
2636b71636e2SPaul Mundt 		/* Interleave this node? */
2637b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2638b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2639b71636e2SPaul Mundt 	}
2640b71636e2SPaul Mundt 
2641b71636e2SPaul Mundt 	/* All too small, use the largest */
2642b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2643b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2644b71636e2SPaul Mundt 
2645028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2646b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26471a687c2eSMel Gorman 
26481a687c2eSMel Gorman 	check_numabalancing_enable();
26491da177e4SLinus Torvalds }
26501da177e4SLinus Torvalds 
26518bccd85fSChristoph Lameter /* Reset policy of current process to default */
26521da177e4SLinus Torvalds void numa_default_policy(void)
26531da177e4SLinus Torvalds {
2654028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26551da177e4SLinus Torvalds }
265668860ec1SPaul Jackson 
26574225399aSPaul Jackson /*
2658095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2659095f1fc4SLee Schermerhorn  */
2660095f1fc4SLee Schermerhorn 
2661095f1fc4SLee Schermerhorn /*
2662f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
26631a75a6c8SChristoph Lameter  */
2664345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2665345ace9cSLee Schermerhorn {
2666345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2667345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2668345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2669345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2670d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2671345ace9cSLee Schermerhorn };
26721a75a6c8SChristoph Lameter 
2673095f1fc4SLee Schermerhorn 
2674095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2675095f1fc4SLee Schermerhorn /**
2676f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2677095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
267871fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2679095f1fc4SLee Schermerhorn  *
2680095f1fc4SLee Schermerhorn  * Format of input:
2681095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2682095f1fc4SLee Schermerhorn  *
268371fe804bSLee Schermerhorn  * On success, returns 0; on failure, returns 1
2684095f1fc4SLee Schermerhorn  */
2685a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2686095f1fc4SLee Schermerhorn {
268771fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2688b4652e84SLee Schermerhorn 	unsigned short mode;
2689f2a07f40SHugh Dickins 	unsigned short mode_flags;
269071fe804bSLee Schermerhorn 	nodemask_t nodes;
2691095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2692095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2693095f1fc4SLee Schermerhorn 	int err = 1;
2694095f1fc4SLee Schermerhorn 
2695095f1fc4SLee Schermerhorn 	if (nodelist) {
2696095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2697095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
269871fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2699095f1fc4SLee Schermerhorn 			goto out;
270001f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2701095f1fc4SLee Schermerhorn 			goto out;
270271fe804bSLee Schermerhorn 	} else
270371fe804bSLee Schermerhorn 		nodes_clear(nodes);
270471fe804bSLee Schermerhorn 
2705095f1fc4SLee Schermerhorn 	if (flags)
2706095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2707095f1fc4SLee Schermerhorn 
2708479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2709345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2710095f1fc4SLee Schermerhorn 			break;
2711095f1fc4SLee Schermerhorn 		}
2712095f1fc4SLee Schermerhorn 	}
2713a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2714095f1fc4SLee Schermerhorn 		goto out;
2715095f1fc4SLee Schermerhorn 
271671fe804bSLee Schermerhorn 	switch (mode) {
2717095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
271871fe804bSLee Schermerhorn 		/*
271971fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
272071fe804bSLee Schermerhorn 		 */
2721095f1fc4SLee Schermerhorn 		if (nodelist) {
2722095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2723095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2724095f1fc4SLee Schermerhorn 				rest++;
2725926f2ae0SKOSAKI Motohiro 			if (*rest)
2726926f2ae0SKOSAKI Motohiro 				goto out;
2727095f1fc4SLee Schermerhorn 		}
2728095f1fc4SLee Schermerhorn 		break;
2729095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2730095f1fc4SLee Schermerhorn 		/*
2731095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2732095f1fc4SLee Schermerhorn 		 */
2733095f1fc4SLee Schermerhorn 		if (!nodelist)
273401f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27353f226aa1SLee Schermerhorn 		break;
273671fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27373f226aa1SLee Schermerhorn 		/*
273871fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27393f226aa1SLee Schermerhorn 		 */
274071fe804bSLee Schermerhorn 		if (nodelist)
27413f226aa1SLee Schermerhorn 			goto out;
274271fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27433f226aa1SLee Schermerhorn 		break;
2744413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2745413b43deSRavikiran G Thirumalai 		/*
2746413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2747413b43deSRavikiran G Thirumalai 		 */
2748413b43deSRavikiran G Thirumalai 		if (!nodelist)
2749413b43deSRavikiran G Thirumalai 			err = 0;
2750413b43deSRavikiran G Thirumalai 		goto out;
2751d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
275271fe804bSLee Schermerhorn 		/*
2753d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
275471fe804bSLee Schermerhorn 		 */
2755d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2756d69b2e63SKOSAKI Motohiro 			goto out;
2757095f1fc4SLee Schermerhorn 	}
2758095f1fc4SLee Schermerhorn 
275971fe804bSLee Schermerhorn 	mode_flags = 0;
2760095f1fc4SLee Schermerhorn 	if (flags) {
2761095f1fc4SLee Schermerhorn 		/*
2762095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2763095f1fc4SLee Schermerhorn 		 * mode flags.
2764095f1fc4SLee Schermerhorn 		 */
2765095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
276671fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2767095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
276871fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2769095f1fc4SLee Schermerhorn 		else
2770926f2ae0SKOSAKI Motohiro 			goto out;
2771095f1fc4SLee Schermerhorn 	}
277271fe804bSLee Schermerhorn 
277371fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
277471fe804bSLee Schermerhorn 	if (IS_ERR(new))
2775926f2ae0SKOSAKI Motohiro 		goto out;
2776926f2ae0SKOSAKI Motohiro 
2777f2a07f40SHugh Dickins 	/*
2778f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2779f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2780f2a07f40SHugh Dickins 	 */
2781f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2782f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2783f2a07f40SHugh Dickins 	else if (nodelist)
2784f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2785f2a07f40SHugh Dickins 	else
2786f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2787f2a07f40SHugh Dickins 
2788f2a07f40SHugh Dickins 	/*
2789f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2790f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2791f2a07f40SHugh Dickins 	 */
2792e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2793f2a07f40SHugh Dickins 
2794926f2ae0SKOSAKI Motohiro 	err = 0;
279571fe804bSLee Schermerhorn 
2796095f1fc4SLee Schermerhorn out:
2797095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2798095f1fc4SLee Schermerhorn 	if (nodelist)
2799095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2800095f1fc4SLee Schermerhorn 	if (flags)
2801095f1fc4SLee Schermerhorn 		*--flags = '=';
280271fe804bSLee Schermerhorn 	if (!err)
280371fe804bSLee Schermerhorn 		*mpol = new;
2804095f1fc4SLee Schermerhorn 	return err;
2805095f1fc4SLee Schermerhorn }
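
/*
 * Illustrative sketch, not part of the original file: parsing a
 * tmpfs-style mount option.  "interleave:0-3" yields MPOL_INTERLEAVE
 * over nodes 0-3; "prefer=static:1" yields MPOL_PREFERRED with
 * MPOL_F_STATIC_NODES on node 1.  The string must be writable, since
 * mpol_parse_str() temporarily NUL-terminates and then restores it.
 * example_parse() is hypothetical.
 */
static struct mempolicy *example_parse(char *opt)
{
	struct mempolicy *pol;

	if (mpol_parse_str(opt, &pol))	/* 0 on success, else 1 */
		return NULL;
	return pol;
}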
2806095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2807095f1fc4SLee Schermerhorn 
280871fe804bSLee Schermerhorn /**
280971fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
281071fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
281171fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
281271fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
281371fe804bSLee Schermerhorn  *
2814948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2815948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2816948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
28171a75a6c8SChristoph Lameter  */
2818948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28191a75a6c8SChristoph Lameter {
28201a75a6c8SChristoph Lameter 	char *p = buffer;
2821948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2822948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2823948927eeSDavid Rientjes 	unsigned short flags = 0;
28241a75a6c8SChristoph Lameter 
28258790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2826bea904d5SLee Schermerhorn 		mode = pol->mode;
2827948927eeSDavid Rientjes 		flags = pol->flags;
2828948927eeSDavid Rientjes 	}
2829bea904d5SLee Schermerhorn 
28301a75a6c8SChristoph Lameter 	switch (mode) {
28311a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28321a75a6c8SChristoph Lameter 		break;
28331a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2834fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2835f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
283653f2556bSLee Schermerhorn 		else
2837fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28381a75a6c8SChristoph Lameter 		break;
28391a75a6c8SChristoph Lameter 	case MPOL_BIND:
28401a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28411a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28421a75a6c8SChristoph Lameter 		break;
28431a75a6c8SChristoph Lameter 	default:
2844948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2845948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2846948927eeSDavid Rientjes 		return;
28471a75a6c8SChristoph Lameter 	}
28481a75a6c8SChristoph Lameter 
2849b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28501a75a6c8SChristoph Lameter 
2851fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2852948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2853f5b087b5SDavid Rientjes 
28542291990aSLee Schermerhorn 		/*
28552291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28562291990aSLee Schermerhorn 		 */
2857f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28582291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28592291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28602291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2861f5b087b5SDavid Rientjes 	}
2862f5b087b5SDavid Rientjes 
28639e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28649e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28659e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28661a75a6c8SChristoph Lameter }
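
/*
 * Illustrative sketch, not part of the original file: formatting a
 * policy for display.  An MPOL_INTERLEAVE policy over nodes 0-3
 * formats as "interleave:0-3".  example_show() is hypothetical.
 */
static void example_show(struct mempolicy *pol)
{
	char buf[64];	/* >= 32 recommended above */

	mpol_to_str(buf, sizeof(buf), pol);
	pr_debug("mempolicy: %s\n", buf);
}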
2867