xref: /openbmc/linux/mm/mempolicy.c (revision 6a3827d7509cbf96b7e961f8957c1f01d1bcf894)
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about which node(s) memory
 * should be allocated on.
 *
 * Four policies are supported, per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly
 *                restrict the allocation to memory nodes instead.
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for
 * memory allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
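
/*
 * Illustration (not part of this file): the policies above are normally
 * selected from userspace via the set_mempolicy(2) and mbind(2) syscalls.
 * A minimal sketch, assuming the <numaif.h> wrappers from libnuma:
 *
 *	#include <numaif.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodes = 0x3;	// nodemask: nodes 0 and 1
 *
 *		// Interleave this process's future allocations over nodes 0-1.
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8))
 *			return 1;
 *		void *p = malloc(1 << 20);	// pages placed on first touch
 *		return !p;
 *	}
 */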

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger OOM much faster and the
   kernel does not always cope gracefully with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	/*
	 * If a read-side task has no lock to protect task->mempolicy, the
	 * write-side task will rebind task->mempolicy in two steps. The
	 * first step sets all the newly allowed nodes, and the second step
	 * clears all the disallowed nodes. This way a lockless reader can
	 * never observe an empty nodemask and fail to find a node to
	 * allocate a page from.
	 * If the read side is protected by a lock, the rebind is done
	 * directly in one pass.
	 *
	 * step:
	 *	MPOL_REBIND_ONCE  - do the rebind work at once
	 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
	 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
	 */
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
			enum mpol_rebind_step step);
} mpol_ops[MPOL_MAX];
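
/*
 * Illustrative trace of the two-step rebind (not from the source): an
 * MPOL_INTERLEAVE policy over {0,1} being rebound to new mems_allowed
 * {2,3} goes through
 *	STEP1: v.nodes = {0,1} | remap({0,1}) = {0,1,2,3}
 *	STEP2: v.nodes = {2,3}
 * so a lockless reader always sees at least one allowed node.
 */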

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}
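
/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {4,5,6},
 * nodes_fold() wraps the relative mask into a 3-bit space, tmp = {0,2},
 * and nodes_onto() maps bit N of tmp onto the Nth set bit of *rel,
 * giving *ret = {4,6}.
 */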

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_sem for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

/*
 * This function just creates a new policy, does some basic checks and
 * simple initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);

	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
				enum mpol_rebind_step step)
{
}

/*
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
				 enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		/*
		 * If step == MPOL_REBIND_STEP1, we use
		 * ->w.cpuset_mems_allowed to cache the result.
		 */
		if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
			nodes_remap(tmp, pol->v.nodes,
					pol->w.cpuset_mems_allowed, *nodes);
			pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
		} else if (step == MPOL_REBIND_STEP2) {
			tmp = pol->w.cpuset_mems_allowed;
			pol->w.cpuset_mems_allowed = *nodes;
		} else
			BUG();
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	if (step == MPOL_REBIND_STEP1)
		nodes_or(pol->v.nodes, pol->v.nodes, tmp);
	else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
		pol->v.nodes = tmp;
	else
		BUG();

	if (!node_isset(current->il_next, tmp)) {
		current->il_next = next_node_in(current->il_next, tmp);
		if (current->il_next >= MAX_NUMNODES)
			current->il_next = numa_node_id();
	}
}

static void mpol_rebind_preferred(struct mempolicy *pol,
				  const nodemask_t *nodes,
				  enum mpol_rebind_step step)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If a read-side task has no lock to protect task->mempolicy, the
 * write-side task will rebind task->mempolicy in two steps. The first
 * step sets all the newly allowed nodes, and the second step clears all
 * the disallowed nodes. This way a lockless reader can never observe an
 * empty nodemask and fail to find a node to allocate a page from.
 * If the read side is protected by a lock, the rebind is done directly
 * in one pass.
 *
 * step:
 *	MPOL_REBIND_ONCE  - do the rebind work at once
 *	MPOL_REBIND_STEP1 - set all the newly allowed nodes
 *	MPOL_REBIND_STEP2 - clear all the disallowed nodes
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
				enum mpol_rebind_step step)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
		return;

	if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
		BUG();

	if (step == MPOL_REBIND_STEP1)
		pol->flags |= MPOL_F_REBINDING;
	else if (step == MPOL_REBIND_STEP2)
		pol->flags &= ~MPOL_F_REBINDING;
	else if (step >= MPOL_REBIND_NSTEP)
		BUG();

	mpol_ops[pol->mode].rebind(pol, newmask, step);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires a task
 * pointer, and updates the task's mempolicy.
 *
 * Called with the task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
			enum mpol_rebind_step step)
{
	mpol_rebind_policy(tsk->mempolicy, new, step);
}

/*
 * Rebind each vma in mm to the new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	down_write(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
	up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	struct vm_area_struct *prev;
};

/*
 * Scan through the pages, checking whether they satisfy the required
 * conditions, and move them to the pagelist if they do.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid, ret;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge(*pmd)) {
		ptl = pmd_lock(walk->mm, pmd);
		if (pmd_trans_huge(*pmd)) {
			page = pmd_page(*pmd);
			if (is_huge_zero_page(page)) {
				spin_unlock(ptl);
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				get_page(page);
				spin_unlock(ptl);
				lock_page(page);
				ret = split_huge_page(page);
				unlock_page(page);
				put_page(page);
				if (ret)
					return 0;
			}
		} else {
			spin_unlock(ptl);
		}
	}

	if (pmd_trans_unstable(pmd))
		return 0;
retry:
	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		nid = page_to_nid(page);
		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
			continue;
		if (PageTransCompound(page)) {
			get_page(page);
			pte_unmap_unlock(pte, ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			/* Failed to split -- skip. */
			if (ret) {
				pte = pte_offset_map_lock(walk->mm, pmd,
						addr, &ptl);
				continue;
			}
			goto retry;
		}

		migrate_page_add(page, qp->pagelist, flags);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int nid;
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	nid = page_to_nid(page);
	if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
		goto unlock;
	/* With MPOL_MF_MOVE, we migrate only an unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
		isolate_huge_page(page, qp->pagelist);
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses as inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
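
/*
 * Illustrative flow (summary, not code): change_prot_numa() makes PTEs
 * PROT_NONE; the next touch of such a page takes a hinting fault, where
 * the fault handler compares the page's node with the faulting CPU's
 * node and may migrate the page closer before restoring access.
 */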

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	if (!vma_migratable(vma))
		return 1;

	if (endvma > end)
		endvma = end;
	if (vma->vm_start > start)
		start = vma->vm_start;

	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
		if (!vma->vm_next && vma->vm_end < end)
			return -EFAULT;
		if (qp->prev && qp->prev->vm_end < vma->vm_start)
			return -EFAULT;
	}

	qp->prev = vma;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) &&
			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist passed
 * via @private.
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.prev = NULL,
	};
	struct mm_walk queue_pages_walk = {
		.hugetlb_entry = queue_pages_hugetlb,
		.pmd_entry = queue_pages_pte_range,
		.test_walk = queue_pages_test_walk,
		.mm = mm,
		.private = &qp,
	};

	return walk_page_range(start, end, &queue_pages_walk);
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_sem held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_sem */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}
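
/*
 * Worked example (illustrative): an mbind_range() over [start, end) that
 * covers only the middle of one VMA first tries vma_merge() with the
 * neighbours; failing that, it split_vma()s at vmstart and vmend, leaving
 * a VMA that exactly matches the range, and applies the policy to it.
 */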

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE &&
	    nodes_weight(new->v.nodes))
		current->il_next = first_node(new->v.nodes);
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
		/* Fall through */
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(unsigned long addr)
{
	struct page *p;
	int err;

	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
	if (err >= 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		down_read(&mm->mmap_sem);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			err = lookup_node(addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = current->il_next;
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	if (vma) {
		up_read(&current->mm->mmap_sem);
		vma = NULL;
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
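
/*
 * Illustration (not part of this file): userspace reaches the path above
 * via get_mempolicy(2). A minimal sketch, assuming <numaif.h> from
 * libnuma, that asks which node backs a given address:
 *
 *	#include <numaif.h>
 *
 *	int node_of(void *addr)
 *	{
 *		int node = -1;
 *
 *		// MPOL_F_NODE|MPOL_F_ADDR: return the node of the page at addr
 *		if (get_mempolicy(&node, NULL, 0, addr,
 *				  MPOL_F_NODE | MPOL_F_ADDR))
 *			return -1;
 *		return node;
 *	}
 */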

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
			   int flags)
{
	nodemask_t nmask;
	LIST_HEAD(pagelist);
	int err = 0;

	nodes_clear(nmask);
	node_set(source, nmask);

	/*
	 * This does not "check" the range but isolates all pages that
	 * need migration.  Between passing in the full user address
	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
	 */
	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
			flags | MPOL_MF_DISCONTIG_OK, &pagelist);

	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
					MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags)
{
	int busy = 0;
	int err;
	nodemask_t tmp;

	err = migrate_prep();
	if (err)
		return err;

	down_read(&mm->mmap_sem);

	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fall back to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', return false
	 * (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by some other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory from that same node.
	 *
	 * A single scan of tmp is sufficient.  As we go, we remember the
	 * most recent <s, d> pair that moved (s != d).  If we find a pair
	 * that not only moved, but what's better, moved to an empty slot
	 * (d is not set in tmp), then we break out then, with that pair.
	 * Otherwise when we finish scanning tmp, we at least have the
	 * most recent <s, d> pair that moved.  If we get all the way through
	 * the scan of tmp without finding any node that moved, much less
	 * moved to an empty node, then there is nothing left worth migrating.
	 */
10677e2ab150SChristoph Lameter 
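	/*
	 * Worked example of the scan above (hypothetical masks, for
	 * illustration only): with from = {0,1,2} and to = {2,3,4},
	 * node_remap() maps 0->2, 1->3, 2->4.  Scanning tmp = {0,1,2},
	 * s = 0 yields d = 2, but node 2 is still a pending source, so the
	 * pair is only memorized; s = 1 yields d = 3, an empty slot, so we
	 * break out and migrate 1 -> 3 before anything lands on node 2.
	 */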
10680ce72d4fSAndrew Morton 	tmp = *from;
10697e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10707e2ab150SChristoph Lameter 		int s, d;
1071b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10727e2ab150SChristoph Lameter 		int dest = 0;
10737e2ab150SChristoph Lameter 
10747e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10754a5b18ccSLarry Woodman 
10764a5b18ccSLarry Woodman 			/*
10774a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10784a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10794a5b18ccSLarry Woodman 			 * threads and memory areas.
10804a5b18ccSLarry Woodman 			 *
10814a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
10824a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
10834a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
10844a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10854a5b18ccSLarry Woodman 			 * mask.
10864a5b18ccSLarry Woodman 			 *
10874a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10884a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10894a5b18ccSLarry Woodman 			 */
10904a5b18ccSLarry Woodman 
10910ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10920ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10934a5b18ccSLarry Woodman 				continue;
10944a5b18ccSLarry Woodman 
10950ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10967e2ab150SChristoph Lameter 			if (s == d)
10977e2ab150SChristoph Lameter 				continue;
10987e2ab150SChristoph Lameter 
10997e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11007e2ab150SChristoph Lameter 			dest = d;
11017e2ab150SChristoph Lameter 
11027e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11037e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11047e2ab150SChristoph Lameter 				break;
11057e2ab150SChristoph Lameter 		}
1106b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11077e2ab150SChristoph Lameter 			break;
11087e2ab150SChristoph Lameter 
11097e2ab150SChristoph Lameter 		node_clear(source, tmp);
11107e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11117e2ab150SChristoph Lameter 		if (err > 0)
11127e2ab150SChristoph Lameter 			busy += err;
11137e2ab150SChristoph Lameter 		if (err < 0)
11147e2ab150SChristoph Lameter 			break;
111539743889SChristoph Lameter 	}
111639743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11177e2ab150SChristoph Lameter 	if (err < 0)
11187e2ab150SChristoph Lameter 		return err;
11197e2ab150SChristoph Lameter 	return busy;
1120b20a3503SChristoph Lameter 
112139743889SChristoph Lameter }
112239743889SChristoph Lameter 
11233ad33b24SLee Schermerhorn /*
11243ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1125d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
11263ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11273ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11283ad33b24SLee Schermerhorn  * is in virtual address order.
11293ad33b24SLee Schermerhorn  */
1130d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
113195a402c3SChristoph Lameter {
1132d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11333ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
113495a402c3SChristoph Lameter 
1135d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11363ad33b24SLee Schermerhorn 	while (vma) {
11373ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11383ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11393ad33b24SLee Schermerhorn 			break;
11403ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11413ad33b24SLee Schermerhorn 	}
11423ad33b24SLee Schermerhorn 
114311c731e8SWanpeng Li 	if (PageHuge(page)) {
1144cc81717eSMichal Hocko 		BUG_ON(!vma);
114574060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
114611c731e8SWanpeng Li 	}
114711c731e8SWanpeng Li 	/*
114811c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
114911c731e8SWanpeng Li 	 */
11503ad33b24SLee Schermerhorn 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
115195a402c3SChristoph Lameter }
1152b20a3503SChristoph Lameter #else
1153b20a3503SChristoph Lameter 
1154b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1155b20a3503SChristoph Lameter 				unsigned long flags)
1156b20a3503SChristoph Lameter {
1157b20a3503SChristoph Lameter }
1158b20a3503SChristoph Lameter 
11590ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11600ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1161b20a3503SChristoph Lameter {
1162b20a3503SChristoph Lameter 	return -ENOSYS;
1163b20a3503SChristoph Lameter }
116495a402c3SChristoph Lameter 
1165d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
116695a402c3SChristoph Lameter {
116795a402c3SChristoph Lameter 	return NULL;
116895a402c3SChristoph Lameter }
1169b20a3503SChristoph Lameter #endif
1170b20a3503SChristoph Lameter 
1171dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1172028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1173028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11746ce3c4c0SChristoph Lameter {
11756ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11766ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11776ce3c4c0SChristoph Lameter 	unsigned long end;
11786ce3c4c0SChristoph Lameter 	int err;
11796ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11806ce3c4c0SChristoph Lameter 
1181b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11826ce3c4c0SChristoph Lameter 		return -EINVAL;
118374c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11846ce3c4c0SChristoph Lameter 		return -EPERM;
11856ce3c4c0SChristoph Lameter 
11866ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11876ce3c4c0SChristoph Lameter 		return -EINVAL;
11886ce3c4c0SChristoph Lameter 
11896ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11906ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11916ce3c4c0SChristoph Lameter 
11926ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11936ce3c4c0SChristoph Lameter 	end = start + len;
11946ce3c4c0SChristoph Lameter 
11956ce3c4c0SChristoph Lameter 	if (end < start)
11966ce3c4c0SChristoph Lameter 		return -EINVAL;
11976ce3c4c0SChristoph Lameter 	if (end == start)
11986ce3c4c0SChristoph Lameter 		return 0;
11996ce3c4c0SChristoph Lameter 
1200028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12016ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12026ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12036ce3c4c0SChristoph Lameter 
1204b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1205b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1206b24f53a0SLee Schermerhorn 
12076ce3c4c0SChristoph Lameter 	/*
12086ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operating
12096ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all.
12106ce3c4c0SChristoph Lameter 	 */
12116ce3c4c0SChristoph Lameter 	if (!new)
12126ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12136ce3c4c0SChristoph Lameter 
1214028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1215028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
121600ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12176ce3c4c0SChristoph Lameter 
12180aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12190aedadf9SChristoph Lameter 
12200aedadf9SChristoph Lameter 		err = migrate_prep();
12210aedadf9SChristoph Lameter 		if (err)
1222b05ca738SKOSAKI Motohiro 			goto mpol_out;
12230aedadf9SChristoph Lameter 	}
12244bfc4495SKAMEZAWA Hiroyuki 	{
12254bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12264bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12276ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
122858568d2aSMiao Xie 			task_lock(current);
12294bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
123058568d2aSMiao Xie 			task_unlock(current);
12314bfc4495SKAMEZAWA Hiroyuki 			if (err)
123258568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12334bfc4495SKAMEZAWA Hiroyuki 		} else
12344bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12354bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12364bfc4495SKAMEZAWA Hiroyuki 	}
1237b05ca738SKOSAKI Motohiro 	if (err)
1238b05ca738SKOSAKI Motohiro 		goto mpol_out;
1239b05ca738SKOSAKI Motohiro 
1240d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12416ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1242d05f0cdcSHugh Dickins 	if (!err)
12439d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12447e2ab150SChristoph Lameter 
1245b24f53a0SLee Schermerhorn 	if (!err) {
1246b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1247b24f53a0SLee Schermerhorn 
1248cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1249b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1250d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1251d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1252cf608ac1SMinchan Kim 			if (nr_failed)
125374060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1254cf608ac1SMinchan Kim 		}
12556ce3c4c0SChristoph Lameter 
1256b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12576ce3c4c0SChristoph Lameter 			err = -EIO;
1258ab8a3e14SKOSAKI Motohiro 	} else
1259b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1260b20a3503SChristoph Lameter 
12616ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1262b05ca738SKOSAKI Motohiro  mpol_out:
1263f0be3d32SLee Schermerhorn 	mpol_put(new);
12646ce3c4c0SChristoph Lameter 	return err;
12656ce3c4c0SChristoph Lameter }
12666ce3c4c0SChristoph Lameter 
126739743889SChristoph Lameter /*
12688bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12698bccd85fSChristoph Lameter  */
12708bccd85fSChristoph Lameter 
12718bccd85fSChristoph Lameter /* Copy a node mask from user space. */
127239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12738bccd85fSChristoph Lameter 		     unsigned long maxnode)
12748bccd85fSChristoph Lameter {
12758bccd85fSChristoph Lameter 	unsigned long k;
12768bccd85fSChristoph Lameter 	unsigned long nlongs;
12778bccd85fSChristoph Lameter 	unsigned long endmask;
12788bccd85fSChristoph Lameter 
12798bccd85fSChristoph Lameter 	--maxnode;
12808bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12818bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12828bccd85fSChristoph Lameter 		return 0;
1283a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1284636f13c1SChris Wright 		return -EINVAL;
12858bccd85fSChristoph Lameter 
12868bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12878bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12888bccd85fSChristoph Lameter 		endmask = ~0UL;
12898bccd85fSChristoph Lameter 	else
12908bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12918bccd85fSChristoph Lameter 
12928bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
12938bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
12948bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12958bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12968bccd85fSChristoph Lameter 			return -EINVAL;
12978bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12988bccd85fSChristoph Lameter 			unsigned long t;
12998bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13008bccd85fSChristoph Lameter 				return -EFAULT;
13018bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13028bccd85fSChristoph Lameter 				if (t & endmask)
13038bccd85fSChristoph Lameter 					return -EINVAL;
13048bccd85fSChristoph Lameter 			} else if (t)
13058bccd85fSChristoph Lameter 				return -EINVAL;
13068bccd85fSChristoph Lameter 		}
13078bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13088bccd85fSChristoph Lameter 		endmask = ~0UL;
13098bccd85fSChristoph Lameter 	}
13108bccd85fSChristoph Lameter 
13118bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13128bccd85fSChristoph Lameter 		return -EFAULT;
13138bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13148bccd85fSChristoph Lameter 	return 0;
13158bccd85fSChristoph Lameter }
13168bccd85fSChristoph Lameter 
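/*
 * Layout sketch of the userspace buffer that get_nodes() parses (node
 * numbers illustrative): node n lives at bit (n % BITS_PER_LONG) of word
 * (n / BITS_PER_LONG).  Since maxnode is decremented above before the
 * copy, callers typically pass the full bit width of their buffer:
 *
 *	unsigned long mask[2] = { 1UL << 5, 1UL << (70 - 64) };	// nodes 5, 70
 *	// hand to mbind()/set_mempolicy() with maxnode = 128
 */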
13178bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13188bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13198bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13208bccd85fSChristoph Lameter {
13218bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13228bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13238bccd85fSChristoph Lameter 
13248bccd85fSChristoph Lameter 	if (copy > nbytes) {
13258bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13268bccd85fSChristoph Lameter 			return -EINVAL;
13278bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13288bccd85fSChristoph Lameter 			return -EFAULT;
13298bccd85fSChristoph Lameter 		copy = nbytes;
13308bccd85fSChristoph Lameter 	}
13318bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13328bccd85fSChristoph Lameter }
13338bccd85fSChristoph Lameter 
1334938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1335f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1336938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13378bccd85fSChristoph Lameter {
13388bccd85fSChristoph Lameter 	nodemask_t nodes;
13398bccd85fSChristoph Lameter 	int err;
1340028fec41SDavid Rientjes 	unsigned short mode_flags;
13418bccd85fSChristoph Lameter 
1342028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1343028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1344a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1345a3b51e01SDavid Rientjes 		return -EINVAL;
13464c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13474c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13484c50bc01SDavid Rientjes 		return -EINVAL;
13498bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13508bccd85fSChristoph Lameter 	if (err)
13518bccd85fSChristoph Lameter 		return err;
1352028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13538bccd85fSChristoph Lameter }
13548bccd85fSChristoph Lameter 
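/*
 * A minimal userspace sketch of driving sys_mbind() (the wrapper comes from
 * libnuma's <numaif.h>; the mapping size here is illustrative):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodes = 1UL << 0;		// node 0 only
 *	if (mbind(buf, 1 << 20, MPOL_BIND, &nodes, sizeof(nodes) * 8,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
 *		perror("mbind");
 */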
13558bccd85fSChristoph Lameter /* Set the process memory policy */
135623c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1357938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13588bccd85fSChristoph Lameter {
13598bccd85fSChristoph Lameter 	int err;
13608bccd85fSChristoph Lameter 	nodemask_t nodes;
1361028fec41SDavid Rientjes 	unsigned short flags;
13628bccd85fSChristoph Lameter 
1363028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1364028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1365028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13668bccd85fSChristoph Lameter 		return -EINVAL;
13674c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13684c50bc01SDavid Rientjes 		return -EINVAL;
13698bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13708bccd85fSChristoph Lameter 	if (err)
13718bccd85fSChristoph Lameter 		return err;
1372028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13738bccd85fSChristoph Lameter }
13748bccd85fSChristoph Lameter 
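/*
 * Userspace sketch (via libnuma's <numaif.h>): interleave all of this
 * task's future allocations across nodes 0 and 1.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8) != 0)
 *		perror("set_mempolicy");
 */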
1375938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1376938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1377938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
137839743889SChristoph Lameter {
1379c69e8d9cSDavid Howells 	const struct cred *cred = current_cred(), *tcred;
1380596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
138139743889SChristoph Lameter 	struct task_struct *task;
138239743889SChristoph Lameter 	nodemask_t task_nodes;
138339743889SChristoph Lameter 	int err;
1384596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1385596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1386596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
138739743889SChristoph Lameter 
1388596d7cfaSKOSAKI Motohiro 	if (!scratch)
1389596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
139039743889SChristoph Lameter 
1391596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1392596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1393596d7cfaSKOSAKI Motohiro 
1394596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
139539743889SChristoph Lameter 	if (err)
1396596d7cfaSKOSAKI Motohiro 		goto out;
1397596d7cfaSKOSAKI Motohiro 
1398596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1399596d7cfaSKOSAKI Motohiro 	if (err)
1400596d7cfaSKOSAKI Motohiro 		goto out;
140139743889SChristoph Lameter 
140239743889SChristoph Lameter 	/* Find the mm_struct */
140355cfaa3cSZeng Zhaoming 	rcu_read_lock();
1404228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
140539743889SChristoph Lameter 	if (!task) {
140655cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1407596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1408596d7cfaSKOSAKI Motohiro 		goto out;
140939743889SChristoph Lameter 	}
14103268c63eSChristoph Lameter 	get_task_struct(task);
141139743889SChristoph Lameter 
1412596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
141339743889SChristoph Lameter 
141439743889SChristoph Lameter 	/*
141539743889SChristoph Lameter 	 * Check if this process has the right to modify the specified
141639743889SChristoph Lameter 	 * process. The right exists if the process has administrative
14177f927fccSAlexey Dobriyan 	 * capabilities, superuser privileges or the same
141839743889SChristoph Lameter 	 * userid as the target process.
141939743889SChristoph Lameter 	 */
1420c69e8d9cSDavid Howells 	tcred = __task_cred(task);
1421b38a86ebSEric W. Biederman 	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1422b38a86ebSEric W. Biederman 	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
142374c00241SChristoph Lameter 	    !capable(CAP_SYS_NICE)) {
1424c69e8d9cSDavid Howells 		rcu_read_unlock();
142539743889SChristoph Lameter 		err = -EPERM;
14263268c63eSChristoph Lameter 		goto out_put;
142739743889SChristoph Lameter 	}
1428c69e8d9cSDavid Howells 	rcu_read_unlock();
142939743889SChristoph Lameter 
143039743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
143139743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1432596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
143339743889SChristoph Lameter 		err = -EPERM;
14343268c63eSChristoph Lameter 		goto out_put;
143539743889SChristoph Lameter 	}
143639743889SChristoph Lameter 
143701f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14383b42d28bSChristoph Lameter 		err = -EINVAL;
14393268c63eSChristoph Lameter 		goto out_put;
14403b42d28bSChristoph Lameter 	}
14413b42d28bSChristoph Lameter 
144286c3a764SDavid Quigley 	err = security_task_movememory(task);
144386c3a764SDavid Quigley 	if (err)
14443268c63eSChristoph Lameter 		goto out_put;
144586c3a764SDavid Quigley 
14463268c63eSChristoph Lameter 	mm = get_task_mm(task);
14473268c63eSChristoph Lameter 	put_task_struct(task);
1448f2a9ef88SSasha Levin 
1449f2a9ef88SSasha Levin 	if (!mm) {
1450f2a9ef88SSasha Levin 		err = -EINVAL;
1451f2a9ef88SSasha Levin 		goto out;
1452f2a9ef88SSasha Levin 	}
1453f2a9ef88SSasha Levin 
1454596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
145574c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14563268c63eSChristoph Lameter 
145739743889SChristoph Lameter 	mmput(mm);
14583268c63eSChristoph Lameter out:
1459596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1460596d7cfaSKOSAKI Motohiro 
146139743889SChristoph Lameter 	return err;
14623268c63eSChristoph Lameter 
14633268c63eSChristoph Lameter out_put:
14643268c63eSChristoph Lameter 	put_task_struct(task);
14653268c63eSChristoph Lameter 	goto out;
14663268c63eSChristoph Lameter 
146739743889SChristoph Lameter }
146839743889SChristoph Lameter 
146939743889SChristoph Lameter 
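/*
 * Userspace sketch (via libnuma's <numaif.h>): move a target task's pages
 * from node 0 to node 1.  The pid is illustrative; a positive return value
 * is the number of pages that could not be moved.
 *
 *	unsigned long src = 1UL << 0, dst = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(src) * 8, &src, &dst);
 *	if (left < 0)
 *		perror("migrate_pages");
 */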
14708bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1471938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1472938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1473938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14748bccd85fSChristoph Lameter {
1475dbcb0f19SAdrian Bunk 	int err;
1476dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14778bccd85fSChristoph Lameter 	nodemask_t nodes;
14788bccd85fSChristoph Lameter 
14798bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14808bccd85fSChristoph Lameter 		return -EINVAL;
14818bccd85fSChristoph Lameter 
14828bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14838bccd85fSChristoph Lameter 
14848bccd85fSChristoph Lameter 	if (err)
14858bccd85fSChristoph Lameter 		return err;
14868bccd85fSChristoph Lameter 
14878bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
14888bccd85fSChristoph Lameter 		return -EFAULT;
14898bccd85fSChristoph Lameter 
14908bccd85fSChristoph Lameter 	if (nmask)
14918bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
14928bccd85fSChristoph Lameter 
14938bccd85fSChristoph Lameter 	return err;
14948bccd85fSChristoph Lameter }
14958bccd85fSChristoph Lameter 
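/*
 * Userspace sketch (via libnuma's <numaif.h>): ask which node currently
 * backs the page at addr; MPOL_F_NODE | MPOL_F_ADDR makes the syscall
 * return a node number in *policy instead of a mode.
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */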
14961da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14971da177e4SLinus Torvalds 
1498c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1499c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1500c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1501c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15021da177e4SLinus Torvalds {
15031da177e4SLinus Torvalds 	long err;
15041da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15051da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15061da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15071da177e4SLinus Torvalds 
15081da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15091da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15101da177e4SLinus Torvalds 
15111da177e4SLinus Torvalds 	if (nmask)
15121da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15151da177e4SLinus Torvalds 
15161da177e4SLinus Torvalds 	if (!err && nmask) {
15172bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15182bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15192bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15201da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15211da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15221da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15231da177e4SLinus Torvalds 	}
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds 	return err;
15261da177e4SLinus Torvalds }
15271da177e4SLinus Torvalds 
1528c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1529c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15301da177e4SLinus Torvalds {
15311da177e4SLinus Torvalds 	long err = 0;
15321da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15331da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15341da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15351da177e4SLinus Torvalds 
15361da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15371da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15381da177e4SLinus Torvalds 
15391da177e4SLinus Torvalds 	if (nmask) {
15401da177e4SLinus Torvalds 		err = compat_get_bitmap(bm, nmask, nr_bits);
15411da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15421da177e4SLinus Torvalds 		err |= copy_to_user(nm, bm, alloc_size);
15431da177e4SLinus Torvalds 	}
15441da177e4SLinus Torvalds 
15451da177e4SLinus Torvalds 	if (err)
15461da177e4SLinus Torvalds 		return -EFAULT;
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15491da177e4SLinus Torvalds }
15501da177e4SLinus Torvalds 
1551c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1552c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1553c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15541da177e4SLinus Torvalds {
15551da177e4SLinus Torvalds 	long err = 0;
15561da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15571da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1558dfcd3c0dSAndi Kleen 	nodemask_t bm;
15591da177e4SLinus Torvalds 
15601da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15611da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15621da177e4SLinus Torvalds 
15631da177e4SLinus Torvalds 	if (nmask) {
1564dfcd3c0dSAndi Kleen 		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
15651da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
1566dfcd3c0dSAndi Kleen 		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
15671da177e4SLinus Torvalds 	}
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 	if (err)
15701da177e4SLinus Torvalds 		return -EFAULT;
15711da177e4SLinus Torvalds 
15721da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15731da177e4SLinus Torvalds }
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds #endif
15761da177e4SLinus Torvalds 
157774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
157874d2c3a0SOleg Nesterov 						unsigned long addr)
15791da177e4SLinus Torvalds {
15808d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15811da177e4SLinus Torvalds 
15821da177e4SLinus Torvalds 	if (vma) {
1583480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
15848d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
158500442ad0SMel Gorman 		} else if (vma->vm_policy) {
15861da177e4SLinus Torvalds 			pol = vma->vm_policy;
158700442ad0SMel Gorman 
158800442ad0SMel Gorman 			/*
158900442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
159000442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
159100442ad0SMel Gorman 			 * count on these policies which will be dropped by
159200442ad0SMel Gorman 			 * mpol_cond_put() later
159300442ad0SMel Gorman 			 */
159400442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
159500442ad0SMel Gorman 				mpol_get(pol);
159600442ad0SMel Gorman 		}
15971da177e4SLinus Torvalds 	}
1598f15ca78eSOleg Nesterov 
159974d2c3a0SOleg Nesterov 	return pol;
160074d2c3a0SOleg Nesterov }
160174d2c3a0SOleg Nesterov 
160274d2c3a0SOleg Nesterov /*
1603dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
160474d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
160574d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
160674d2c3a0SOleg Nesterov  *
160774d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1608dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
160974d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
161074d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
161174d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
161274d2c3a0SOleg Nesterov  * extra reference for shared policies.
161374d2c3a0SOleg Nesterov  */
1614dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1615dd6eecb9SOleg Nesterov 						unsigned long addr)
161674d2c3a0SOleg Nesterov {
161774d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
161874d2c3a0SOleg Nesterov 
16198d90274bSOleg Nesterov 	if (!pol)
1620dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16218d90274bSOleg Nesterov 
16221da177e4SLinus Torvalds 	return pol;
16231da177e4SLinus Torvalds }
16241da177e4SLinus Torvalds 
16256b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1626fc314724SMel Gorman {
16276b6482bbSOleg Nesterov 	struct mempolicy *pol;
1628f15ca78eSOleg Nesterov 
1629fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1630fc314724SMel Gorman 		bool ret = false;
1631fc314724SMel Gorman 
1632fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1633fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1634fc314724SMel Gorman 			ret = true;
1635fc314724SMel Gorman 		mpol_cond_put(pol);
1636fc314724SMel Gorman 
1637fc314724SMel Gorman 		return ret;
16388d90274bSOleg Nesterov 	}
16398d90274bSOleg Nesterov 
1640fc314724SMel Gorman 	pol = vma->vm_policy;
16418d90274bSOleg Nesterov 	if (!pol)
16426b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1643fc314724SMel Gorman 
1644fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1645fc314724SMel Gorman }
1646fc314724SMel Gorman 
1647d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1648d3eb1570SLai Jiangshan {
1649d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1650d3eb1570SLai Jiangshan 
1651d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1652d3eb1570SLai Jiangshan 
1653d3eb1570SLai Jiangshan 	/*
1654d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only, we apply the policy
1655d3eb1570SLai Jiangshan 	 * only when gfp_zone(gfp) == ZONE_MOVABLE.
1656d3eb1570SLai Jiangshan 	 *
1657d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1658d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies that
1659d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1660d3eb1570SLai Jiangshan 	 */
1661d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1662d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1663d3eb1570SLai Jiangshan 
1664d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1665d3eb1570SLai Jiangshan }
1666d3eb1570SLai Jiangshan 
166752cd3b07SLee Schermerhorn /*
166852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
166952cd3b07SLee Schermerhorn  * page allocation
167052cd3b07SLee Schermerhorn  */
167152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
167219770b32SMel Gorman {
167319770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
167445c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1675d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
167619770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
167719770b32SMel Gorman 		return &policy->v.nodes;
167819770b32SMel Gorman 
167919770b32SMel Gorman 	return NULL;
168019770b32SMel Gorman }
168119770b32SMel Gorman 
168252cd3b07SLee Schermerhorn /* Return a zonelist indicated by gfp for node representing a mempolicy */
16832f5f9486SAndi Kleen static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
16842f5f9486SAndi Kleen 	int nd)
16851da177e4SLinus Torvalds {
16866d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
16871da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
16886d840958SMichal Hocko 	else {
168919770b32SMel Gorman 		/*
16906d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
16916d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
16926d840958SMichal Hocko 		 * requested node and not break the policy.
169319770b32SMel Gorman 		 */
16946d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
16951da177e4SLinus Torvalds 	}
16966d840958SMichal Hocko 
16970e88460dSMel Gorman 	return node_zonelist(nd, gfp);
16981da177e4SLinus Torvalds }
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17011da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17021da177e4SLinus Torvalds {
17031da177e4SLinus Torvalds 	unsigned nid, next;
17041da177e4SLinus Torvalds 	struct task_struct *me = current;
17051da177e4SLinus Torvalds 
17061da177e4SLinus Torvalds 	nid = me->il_next;
17070edaf86cSAndrew Morton 	next = next_node_in(nid, policy->v.nodes);
1708f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
17091da177e4SLinus Torvalds 		me->il_next = next;
17101da177e4SLinus Torvalds 	return nid;
17111da177e4SLinus Torvalds }
17121da177e4SLinus Torvalds 
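/*
 * Worked example (illustrative): with policy->v.nodes = {0,3} and il_next
 * starting at 0, successive calls return 0, 3, 0, 3, ...; next_node_in()
 * wraps around the mask and me->il_next records where to continue.
 */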
1713dc85da15SChristoph Lameter /*
1714dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1715dc85da15SChristoph Lameter  * next slab entry.
1716dc85da15SChristoph Lameter  */
17172a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1718dc85da15SChristoph Lameter {
1719e7b691b0SAndi Kleen 	struct mempolicy *policy;
17202a389610SDavid Rientjes 	int node = numa_mem_id();
1721e7b691b0SAndi Kleen 
1722e7b691b0SAndi Kleen 	if (in_interrupt())
17232a389610SDavid Rientjes 		return node;
1724e7b691b0SAndi Kleen 
1725e7b691b0SAndi Kleen 	policy = current->mempolicy;
1726fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17272a389610SDavid Rientjes 		return node;
1728765c4507SChristoph Lameter 
1729bea904d5SLee Schermerhorn 	switch (policy->mode) {
1730bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1731fc36b8d3SLee Schermerhorn 		/*
1732fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1733fc36b8d3SLee Schermerhorn 		 */
1734bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1735bea904d5SLee Schermerhorn 
1736dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1737dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1738dc85da15SChristoph Lameter 
1739dd1a239fSMel Gorman 	case MPOL_BIND: {
1740c33d6c06SMel Gorman 		struct zoneref *z;
1741c33d6c06SMel Gorman 
1742dc85da15SChristoph Lameter 		/*
1743dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1744dc85da15SChristoph Lameter 		 * first node.
1745dc85da15SChristoph Lameter 		 */
174619770b32SMel Gorman 		struct zonelist *zonelist;
174719770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1748c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1749c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1750c33d6c06SMel Gorman 							&policy->v.nodes);
1751c33d6c06SMel Gorman 		return z->zone ? z->zone->node : node;
1752dd1a239fSMel Gorman 	}
1753dc85da15SChristoph Lameter 
1754dc85da15SChristoph Lameter 	default:
1755bea904d5SLee Schermerhorn 		BUG();
1756dc85da15SChristoph Lameter 	}
1757dc85da15SChristoph Lameter }
1758dc85da15SChristoph Lameter 
1759fee83b3aSAndrew Morton /*
1760fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1761fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1762fee83b3aSAndrew Morton  * number of present nodes.
1763fee83b3aSAndrew Morton  */
17641da177e4SLinus Torvalds static unsigned offset_il_node(struct mempolicy *pol,
1765fee83b3aSAndrew Morton 			       struct vm_area_struct *vma, unsigned long n)
17661da177e4SLinus Torvalds {
1767dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1768f5b087b5SDavid Rientjes 	unsigned target;
1769fee83b3aSAndrew Morton 	int i;
1770fee83b3aSAndrew Morton 	int nid;
17711da177e4SLinus Torvalds 
1772f5b087b5SDavid Rientjes 	if (!nnodes)
1773f5b087b5SDavid Rientjes 		return numa_node_id();
1774fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1775fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1776fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1777dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17781da177e4SLinus Torvalds 	return nid;
17791da177e4SLinus Torvalds }
17801da177e4SLinus Torvalds 
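/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,5}, nnodes = 3
 * and n = 7 give target = 7 % 3 = 1, so we start at the first node (0)
 * and step once through the mask, returning node 2.
 */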
17815da7ca86SChristoph Lameter /* Determine a node number for interleave */
17825da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17835da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17845da7ca86SChristoph Lameter {
17855da7ca86SChristoph Lameter 	if (vma) {
17865da7ca86SChristoph Lameter 		unsigned long off;
17875da7ca86SChristoph Lameter 
17883b98b087SNishanth Aravamudan 		/*
17893b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17903b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17913b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17923b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17933b98b087SNishanth Aravamudan 		 * a useful offset.
17943b98b087SNishanth Aravamudan 		 */
17953b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17963b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17975da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
17985da7ca86SChristoph Lameter 		return offset_il_node(pol, vma, off);
17995da7ca86SChristoph Lameter 	} else
18005da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18015da7ca86SChristoph Lameter }
18025da7ca86SChristoph Lameter 
180300ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1804480eccf9SLee Schermerhorn /*
1805480eccf9SLee Schermerhorn  * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1806b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1807b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1808b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1809b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1810b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1811480eccf9SLee Schermerhorn  *
181252cd3b07SLee Schermerhorn  * Returns a zonelist suitable for a huge page allocation and a pointer
181352cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
181452cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
181552cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1816c0ff7453SMiao Xie  *
1817d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1818480eccf9SLee Schermerhorn  */
1819396faf03SMel Gorman struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
182019770b32SMel Gorman 				gfp_t gfp_flags, struct mempolicy **mpol,
182119770b32SMel Gorman 				nodemask_t **nodemask)
18225da7ca86SChristoph Lameter {
1823480eccf9SLee Schermerhorn 	struct zonelist *zl;
18245da7ca86SChristoph Lameter 
1825dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
182619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18275da7ca86SChristoph Lameter 
182852cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
182952cd3b07SLee Schermerhorn 		zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1830a5516438SAndi Kleen 				huge_page_shift(hstate_vma(vma))), gfp_flags);
183152cd3b07SLee Schermerhorn 	} else {
18322f5f9486SAndi Kleen 		zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
183352cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
183452cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1835480eccf9SLee Schermerhorn 	}
1836480eccf9SLee Schermerhorn 	return zl;
18375da7ca86SChristoph Lameter }
183806808b08SLee Schermerhorn 
183906808b08SLee Schermerhorn /*
184006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
184106808b08SLee Schermerhorn  *
184206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
184306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
184406808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
184506808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
184606808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
184706808b08SLee Schermerhorn  * of non-default mempolicy.
184806808b08SLee Schermerhorn  *
184906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
185006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
185106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
185206808b08SLee Schermerhorn  *
185306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
185406808b08SLee Schermerhorn  */
185506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
185606808b08SLee Schermerhorn {
185706808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
185806808b08SLee Schermerhorn 	int nid;
185906808b08SLee Schermerhorn 
186006808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
186106808b08SLee Schermerhorn 		return false;
186206808b08SLee Schermerhorn 
1863c0ff7453SMiao Xie 	task_lock(current);
186406808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
186506808b08SLee Schermerhorn 	switch (mempolicy->mode) {
186606808b08SLee Schermerhorn 	case MPOL_PREFERRED:
186706808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
186806808b08SLee Schermerhorn 			nid = numa_node_id();
186906808b08SLee Schermerhorn 		else
187006808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
187106808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
187206808b08SLee Schermerhorn 		break;
187306808b08SLee Schermerhorn 
187406808b08SLee Schermerhorn 	case MPOL_BIND:
187506808b08SLee Schermerhorn 		/* Fall through */
187606808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
187706808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
187806808b08SLee Schermerhorn 		break;
187906808b08SLee Schermerhorn 
188006808b08SLee Schermerhorn 	default:
188106808b08SLee Schermerhorn 		BUG();
188206808b08SLee Schermerhorn 	}
1883c0ff7453SMiao Xie 	task_unlock(current);
188406808b08SLee Schermerhorn 
188506808b08SLee Schermerhorn 	return true;
188606808b08SLee Schermerhorn }
188700ac59adSChen, Kenneth W #endif
18885da7ca86SChristoph Lameter 
18896f48d0ebSDavid Rientjes /*
18906f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18916f48d0ebSDavid Rientjes  *
18926f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18936f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18946f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18956f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
18966f48d0ebSDavid Rientjes  *
18976f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18986f48d0ebSDavid Rientjes  */
18996f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19006f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19016f48d0ebSDavid Rientjes {
19026f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19036f48d0ebSDavid Rientjes 	bool ret = true;
19046f48d0ebSDavid Rientjes 
19056f48d0ebSDavid Rientjes 	if (!mask)
19066f48d0ebSDavid Rientjes 		return ret;
19076f48d0ebSDavid Rientjes 	task_lock(tsk);
19086f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19096f48d0ebSDavid Rientjes 	if (!mempolicy)
19106f48d0ebSDavid Rientjes 		goto out;
19116f48d0ebSDavid Rientjes 
19126f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19136f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19146f48d0ebSDavid Rientjes 		/*
19156f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
19166f48d0ebSDavid Rientjes 		 * to allocate from; the task may fall back to other nodes on OOM.
19176f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19186f48d0ebSDavid Rientjes 		 * nodes in mask.
19196f48d0ebSDavid Rientjes 		 */
19206f48d0ebSDavid Rientjes 		break;
19216f48d0ebSDavid Rientjes 	case MPOL_BIND:
19226f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19236f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19246f48d0ebSDavid Rientjes 		break;
19256f48d0ebSDavid Rientjes 	default:
19266f48d0ebSDavid Rientjes 		BUG();
19276f48d0ebSDavid Rientjes 	}
19286f48d0ebSDavid Rientjes out:
19296f48d0ebSDavid Rientjes 	task_unlock(tsk);
19306f48d0ebSDavid Rientjes 	return ret;
19316f48d0ebSDavid Rientjes }
19326f48d0ebSDavid Rientjes 
19331da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19341da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1935662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1936662f3a0bSAndi Kleen 					unsigned nid)
19371da177e4SLinus Torvalds {
19381da177e4SLinus Torvalds 	struct zonelist *zl;
19391da177e4SLinus Torvalds 	struct page *page;
19401da177e4SLinus Torvalds 
19410e88460dSMel Gorman 	zl = node_zonelist(nid, gfp);
19421da177e4SLinus Torvalds 	page = __alloc_pages(gfp, order, zl);
1943dd1a239fSMel Gorman 	if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1944ca889e6cSChristoph Lameter 		inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
19451da177e4SLinus Torvalds 	return page;
19461da177e4SLinus Torvalds }
19471da177e4SLinus Torvalds 
19481da177e4SLinus Torvalds /**
19490bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19501da177e4SLinus Torvalds  *
19511da177e4SLinus Torvalds  * 	@gfp:
19521da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19531da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19541da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19551da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19561da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19571da177e4SLinus Torvalds  *
19580bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19591da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19601da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1961be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
1962be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
19631da177e4SLinus Torvalds  *
19641da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19651da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19661da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19671da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
1968be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
1969be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
19701da177e4SLinus Torvalds  */
19711da177e4SLinus Torvalds struct page *
19720bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1973be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
19741da177e4SLinus Torvalds {
1975cc9a6c87SMel Gorman 	struct mempolicy *pol;
1976c0ff7453SMiao Xie 	struct page *page;
1977cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
1978be97a41bSVlastimil Babka 	struct zonelist *zl;
1979be97a41bSVlastimil Babka 	nodemask_t *nmask;
19801da177e4SLinus Torvalds 
1981cc9a6c87SMel Gorman retry_cpuset:
1982dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
1983d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
1984cc9a6c87SMel Gorman 
1985be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
19861da177e4SLinus Torvalds 		unsigned nid;
19875da7ca86SChristoph Lameter 
19888eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
198952cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
19900bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
1991be97a41bSVlastimil Babka 		goto out;
19921da177e4SLinus Torvalds 	}
19931da177e4SLinus Torvalds 
19940867a57cSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
19950867a57cSVlastimil Babka 		int hpage_node = node;
19960867a57cSVlastimil Babka 
19970867a57cSVlastimil Babka 		/*
19980867a57cSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
19990867a57cSVlastimil Babka 		 * allows the current node (or other explicitly preferred
20000867a57cSVlastimil Babka 		 * node) we only try to allocate from the current/preferred
20010867a57cSVlastimil Babka 		 * node and don't fall back to other nodes, as the cost of
20020867a57cSVlastimil Babka 		 * remote accesses would likely offset THP benefits.
20030867a57cSVlastimil Babka 		 *
20040867a57cSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
20050867a57cSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
20060867a57cSVlastimil Babka 		 */
20070867a57cSVlastimil Babka 		if (pol->mode == MPOL_PREFERRED &&
20080867a57cSVlastimil Babka 						!(pol->flags & MPOL_F_LOCAL))
20090867a57cSVlastimil Babka 			hpage_node = pol->v.preferred_node;
20100867a57cSVlastimil Babka 
20110867a57cSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
20120867a57cSVlastimil Babka 		if (!nmask || node_isset(hpage_node, *nmask)) {
20130867a57cSVlastimil Babka 			mpol_cond_put(pol);
201496db800fSVlastimil Babka 			page = __alloc_pages_node(hpage_node,
20150867a57cSVlastimil Babka 						gfp | __GFP_THISNODE, order);
20160867a57cSVlastimil Babka 			goto out;
20170867a57cSVlastimil Babka 		}
20180867a57cSVlastimil Babka 	}
20190867a57cSVlastimil Babka 
2020077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
2021be97a41bSVlastimil Babka 	zl = policy_zonelist(gfp, pol, node);
2022be97a41bSVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2023d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2024be97a41bSVlastimil Babka out:
2025be97a41bSVlastimil Babka 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2026077fcf11SAneesh Kumar K.V 		goto retry_cpuset;
2027077fcf11SAneesh Kumar K.V 	return page;
2028077fcf11SAneesh Kumar K.V }
2029077fcf11SAneesh Kumar K.V 
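/*
 * A sketch of a typical call site (modeled on anonymous fault handling;
 * the gfp flags and error path are illustrative, not a fixed contract):
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, address,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */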
20301da177e4SLinus Torvalds /**
20311da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20321da177e4SLinus Torvalds  *
20331da177e4SLinus Torvalds  *	@gfp:
20341da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20351da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20361da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20371da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20381da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20391da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20401da177e4SLinus Torvalds  *
20411da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20421da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20431da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20441da177e4SLinus Torvalds  *
2045cf2a473cSPaul Jackson  *	Don't call cpuset_update_task_memory_state() unless
20461da177e4SLinus Torvalds  *	1) it's ok to take cpuset_sem (can WAIT), and
20471da177e4SLinus Torvalds  *	2) allocating for current task (not interrupt).
20481da177e4SLinus Torvalds  */
2049dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20501da177e4SLinus Torvalds {
20518d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2052c0ff7453SMiao Xie 	struct page *page;
2053cc9a6c87SMel Gorman 	unsigned int cpuset_mems_cookie;
20541da177e4SLinus Torvalds 
20558d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20568d90274bSOleg Nesterov 		pol = get_task_policy(current);
205752cd3b07SLee Schermerhorn 
2058cc9a6c87SMel Gorman retry_cpuset:
2059d26914d1SMel Gorman 	cpuset_mems_cookie = read_mems_allowed_begin();
2060cc9a6c87SMel Gorman 
206152cd3b07SLee Schermerhorn 	/*
206252cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
206352cd3b07SLee Schermerhorn 	 * nor system default_policy
206452cd3b07SLee Schermerhorn 	 */
206545c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2066c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2067c0ff7453SMiao Xie 	else
2068c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
20695c4b4be3SAndi Kleen 				policy_zonelist(gfp, pol, numa_node_id()),
20705c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2071cc9a6c87SMel Gorman 
2072d26914d1SMel Gorman 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2073cc9a6c87SMel Gorman 		goto retry_cpuset;
2074cc9a6c87SMel Gorman 
2075c0ff7453SMiao Xie 	return page;
20761da177e4SLinus Torvalds }
20771da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20781da177e4SLinus Torvalds 
2079ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2080ef0855d3SOleg Nesterov {
2081ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2082ef0855d3SOleg Nesterov 
2083ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2084ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2085ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2086ef0855d3SOleg Nesterov 	return 0;
2087ef0855d3SOleg Nesterov }
2088ef0855d3SOleg Nesterov 
20894225399aSPaul Jackson /*
2090846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20914225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
20924225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20934225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20944225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2095708c1bbcSMiao Xie  *
2096708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2097708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
20984225399aSPaul Jackson  */
20994225399aSPaul Jackson 
2100846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2101846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21021da177e4SLinus Torvalds {
21031da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21041da177e4SLinus Torvalds 
21051da177e4SLinus Torvalds 	if (!new)
21061da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2107708c1bbcSMiao Xie 
2108708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2109708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2110708c1bbcSMiao Xie 		task_lock(current);
2111708c1bbcSMiao Xie 		*new = *old;
2112708c1bbcSMiao Xie 		task_unlock(current);
2113708c1bbcSMiao Xie 	} else
2114708c1bbcSMiao Xie 		*new = *old;
2115708c1bbcSMiao Xie 
21164225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21174225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2118708c1bbcSMiao Xie 		if (new->flags & MPOL_F_REBINDING)
2119708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2120708c1bbcSMiao Xie 		else
2121708c1bbcSMiao Xie 			mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
21224225399aSPaul Jackson 	}
21231da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21241da177e4SLinus Torvalds 	return new;
21251da177e4SLinus Torvalds }
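
/*
 * For reference, the fast path guarding this slow path is an inline in
 * include/linux/mempolicy.h; a minimal sketch (assumed, not verbatim):
 *
 *	static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 *	{
 *		if (pol)
 *			pol = __mpol_dup(pol);
 *		return pol;
 *	}
 */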
21261da177e4SLinus Torvalds 
21271da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2128fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21291da177e4SLinus Torvalds {
21301da177e4SLinus Torvalds 	if (!a || !b)
2131fcfb4dccSKOSAKI Motohiro 		return false;
213245c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2133fcfb4dccSKOSAKI Motohiro 		return false;
213419800502SBob Liu 	if (a->flags != b->flags)
2135fcfb4dccSKOSAKI Motohiro 		return false;
213619800502SBob Liu 	if (mpol_store_user_nodemask(a))
213719800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2138fcfb4dccSKOSAKI Motohiro 			return false;
213919800502SBob Liu 
214045c4745aSLee Schermerhorn 	switch (a->mode) {
214119770b32SMel Gorman 	case MPOL_BIND:
214219770b32SMel Gorman 		/* Fall through */
21431da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2144fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21451da177e4SLinus Torvalds 	case MPOL_PREFERRED:
214675719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21471da177e4SLinus Torvalds 	default:
21481da177e4SLinus Torvalds 		BUG();
2149fcfb4dccSKOSAKI Motohiro 		return false;
21501da177e4SLinus Torvalds 	}
21511da177e4SLinus Torvalds }
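
/*
 * The corresponding fast path in include/linux/mempolicy.h short-circuits
 * the common pointer-identity case before falling back here; roughly
 * (a sketch, assumed):
 *
 *	static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
 *	{
 *		if (a == b)
 *			return true;
 *		return __mpol_equal(a, b);
 *	}
 */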
21521da177e4SLinus Torvalds 
21531da177e4SLinus Torvalds /*
21541da177e4SLinus Torvalds  * Shared memory backing store policy support.
21551da177e4SLinus Torvalds  *
21561da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21571da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21584a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
21591da177e4SLinus Torvalds  * for any accesses to the tree.
21601da177e4SLinus Torvalds  */
21611da177e4SLinus Torvalds 
21624a8c7bb5SNathan Zimmer /*
21634a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
21644a8c7bb5SNathan Zimmer  * for reading or for writing.
21654a8c7bb5SNathan Zimmer  */
21661da177e4SLinus Torvalds static struct sp_node *
21671da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21681da177e4SLinus Torvalds {
21691da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21701da177e4SLinus Torvalds 
21711da177e4SLinus Torvalds 	while (n) {
21721da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21731da177e4SLinus Torvalds 
21741da177e4SLinus Torvalds 		if (start >= p->end)
21751da177e4SLinus Torvalds 			n = n->rb_right;
21761da177e4SLinus Torvalds 		else if (end <= p->start)
21771da177e4SLinus Torvalds 			n = n->rb_left;
21781da177e4SLinus Torvalds 		else
21791da177e4SLinus Torvalds 			break;
21801da177e4SLinus Torvalds 	}
21811da177e4SLinus Torvalds 	if (!n)
21821da177e4SLinus Torvalds 		return NULL;
21831da177e4SLinus Torvalds 	for (;;) {
21841da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21851da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21861da177e4SLinus Torvalds 		if (!prev)
21871da177e4SLinus Torvalds 			break;
21881da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21891da177e4SLinus Torvalds 		if (w->end <= start)
21901da177e4SLinus Torvalds 			break;
21911da177e4SLinus Torvalds 		n = prev;
21921da177e4SLinus Torvalds 	}
21931da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21941da177e4SLinus Torvalds }
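
/*
 * Worked example (illustrative): with ranges [0,2) [2,5) [5,9) in the tree,
 * a lookup for [1,6) first lands on [2,5) via the binary search, then the
 * rb_prev() walk above backs up to [0,2), the first node that still
 * intersects the query, and returns it.
 */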
21951da177e4SLinus Torvalds 
21964a8c7bb5SNathan Zimmer /*
21974a8c7bb5SNathan Zimmer  * Insert a new shared policy into the tree.  Caller holds sp->lock for
21984a8c7bb5SNathan Zimmer  * writing.
21994a8c7bb5SNathan Zimmer  */
22001da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22011da177e4SLinus Torvalds {
22021da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22031da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22041da177e4SLinus Torvalds 	struct sp_node *nd;
22051da177e4SLinus Torvalds 
22061da177e4SLinus Torvalds 	while (*p) {
22071da177e4SLinus Torvalds 		parent = *p;
22081da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22091da177e4SLinus Torvalds 		if (new->start < nd->start)
22101da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22111da177e4SLinus Torvalds 		else if (new->end > nd->end)
22121da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22131da177e4SLinus Torvalds 		else
22141da177e4SLinus Torvalds 			BUG();
22151da177e4SLinus Torvalds 	}
22161da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22171da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2218140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
221945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22201da177e4SLinus Torvalds }
22211da177e4SLinus Torvalds 
22221da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22231da177e4SLinus Torvalds struct mempolicy *
22241da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22251da177e4SLinus Torvalds {
22261da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22271da177e4SLinus Torvalds 	struct sp_node *sn;
22281da177e4SLinus Torvalds 
22291da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22301da177e4SLinus Torvalds 		return NULL;
22314a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22321da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22331da177e4SLinus Torvalds 	if (sn) {
22341da177e4SLinus Torvalds 		mpol_get(sn->policy);
22351da177e4SLinus Torvalds 		pol = sn->policy;
22361da177e4SLinus Torvalds 	}
22374a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22381da177e4SLinus Torvalds 	return pol;
22391da177e4SLinus Torvalds }
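
/*
 * Consumer sketch (an assumption, modelled on mm/shmem.c): a shared-memory
 * ->get_policy() vm operation translates the faulting address into a file
 * index and queries the tree:
 *
 *	static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
 *						  unsigned long addr)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		pgoff_t index = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 *				vma->vm_pgoff;
 *
 *		return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 *	}
 */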
22401da177e4SLinus Torvalds 
224163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
224263f74ca2SKOSAKI Motohiro {
224363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
224463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
224563f74ca2SKOSAKI Motohiro }
224663f74ca2SKOSAKI Motohiro 
2247771fb4d8SLee Schermerhorn /**
2248771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2249771fb4d8SLee Schermerhorn  *
2250b46e14acSFabian Frederick  * @page: page to be checked
2251b46e14acSFabian Frederick  * @vma: vm area where page mapped
2252b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2253771fb4d8SLee Schermerhorn  *
2254771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2255771fb4d8SLee Schermerhorn  * page's node id.
2256771fb4d8SLee Schermerhorn  *
2257771fb4d8SLee Schermerhorn  * Returns:
2258771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2259771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2260771fb4d8SLee Schermerhorn  *
2261771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2262771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2263771fb4d8SLee Schermerhorn  */
2264771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2265771fb4d8SLee Schermerhorn {
2266771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2267c33d6c06SMel Gorman 	struct zoneref *z;
2268771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2269771fb4d8SLee Schermerhorn 	unsigned long pgoff;
227090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
227190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2272771fb4d8SLee Schermerhorn 	int polnid = -1;
2273771fb4d8SLee Schermerhorn 	int ret = -1;
2274771fb4d8SLee Schermerhorn 
2275771fb4d8SLee Schermerhorn 	BUG_ON(!vma);
2276771fb4d8SLee Schermerhorn 
2277dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2278771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2279771fb4d8SLee Schermerhorn 		goto out;
2280771fb4d8SLee Schermerhorn 
2281771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2282771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2283771fb4d8SLee Schermerhorn 		BUG_ON(addr >= vma->vm_end);
2284771fb4d8SLee Schermerhorn 		BUG_ON(addr < vma->vm_start);
2285771fb4d8SLee Schermerhorn 
2286771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2287771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2288771fb4d8SLee Schermerhorn 		polnid = offset_il_node(pol, vma, pgoff);
2289771fb4d8SLee Schermerhorn 		break;
2290771fb4d8SLee Schermerhorn 
2291771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2292771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2293771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2294771fb4d8SLee Schermerhorn 		else
2295771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2296771fb4d8SLee Schermerhorn 		break;
2297771fb4d8SLee Schermerhorn 
2298771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2299c33d6c06SMel Gorman 
2300771fb4d8SLee Schermerhorn 		/*
2301771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2302771fb4d8SLee Schermerhorn 		 * Use the current page if it is in the policy nodemask,
2303771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2304771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2305771fb4d8SLee Schermerhorn 		 */
2306771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2307771fb4d8SLee Schermerhorn 			goto out;
2308c33d6c06SMel Gorman 		z = first_zones_zonelist(
2309771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2310771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2311c33d6c06SMel Gorman 				&pol->v.nodes);
2312c33d6c06SMel Gorman 		polnid = z->zone->node;
2313771fb4d8SLee Schermerhorn 		break;
2314771fb4d8SLee Schermerhorn 
2315771fb4d8SLee Schermerhorn 	default:
2316771fb4d8SLee Schermerhorn 		BUG();
2317771fb4d8SLee Schermerhorn 	}
23185606e387SMel Gorman 
23195606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2320e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
232190572890SPeter Zijlstra 		polnid = thisnid;
23225606e387SMel Gorman 
232310f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2324de1c9ce6SRik van Riel 			goto out;
2325de1c9ce6SRik van Riel 	}
2326e42c8ff2SMel Gorman 
2327771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2328771fb4d8SLee Schermerhorn 		ret = polnid;
2329771fb4d8SLee Schermerhorn out:
2330771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2331771fb4d8SLee Schermerhorn 
2332771fb4d8SLee Schermerhorn 	return ret;
2333771fb4d8SLee Schermerhorn }
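
/*
 * Fault-path usage sketch (an assumption, mirroring the NUMA-hinting fault
 * handling in mm/memory.c): a return of -1 means "leave the page alone",
 * anything else is the node to migrate towards:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid == -1)
 *		put_page(page);		// correctly placed, nothing to do
 *	else
 *		migrate_misplaced_page(page, vma, target_nid);
 */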
2334771fb4d8SLee Schermerhorn 
2335c11600e4SDavid Rientjes /*
2336c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2337c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2338c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2339c11600e4SDavid Rientjes  * policy.
2340c11600e4SDavid Rientjes  */
2341c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2342c11600e4SDavid Rientjes {
2343c11600e4SDavid Rientjes 	struct mempolicy *pol;
2344c11600e4SDavid Rientjes 
2345c11600e4SDavid Rientjes 	task_lock(task);
2346c11600e4SDavid Rientjes 	pol = task->mempolicy;
2347c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2348c11600e4SDavid Rientjes 	task_unlock(task);
2349c11600e4SDavid Rientjes 	mpol_put(pol);
2350c11600e4SDavid Rientjes }
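
/*
 * Exit-path sketch (an assumption, mirroring what kernel/exit.c does once
 * the task can no longer allocate through its policy):
 *
 *	mpol_put_task_policy(tsk);
 */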
2351c11600e4SDavid Rientjes 
23521da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23531da177e4SLinus Torvalds {
2354140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23551da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
235663f74ca2SKOSAKI Motohiro 	sp_free(n);
23571da177e4SLinus Torvalds }
23581da177e4SLinus Torvalds 
235942288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
236042288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
236142288fe3SMel Gorman {
236242288fe3SMel Gorman 	node->start = start;
236342288fe3SMel Gorman 	node->end = end;
236442288fe3SMel Gorman 	node->policy = pol;
236542288fe3SMel Gorman }
236642288fe3SMel Gorman 
2367dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2368dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23691da177e4SLinus Torvalds {
2370869833f2SKOSAKI Motohiro 	struct sp_node *n;
2371869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23721da177e4SLinus Torvalds 
2373869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23741da177e4SLinus Torvalds 	if (!n)
23751da177e4SLinus Torvalds 		return NULL;
2376869833f2SKOSAKI Motohiro 
2377869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2378869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2379869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2380869833f2SKOSAKI Motohiro 		return NULL;
2381869833f2SKOSAKI Motohiro 	}
2382869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
238342288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2384869833f2SKOSAKI Motohiro 
23851da177e4SLinus Torvalds 	return n;
23861da177e4SLinus Torvalds }
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds /* Replace a policy range. */
23891da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23901da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23911da177e4SLinus Torvalds {
2392b22d127aSMel Gorman 	struct sp_node *n;
239342288fe3SMel Gorman 	struct sp_node *n_new = NULL;
239442288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2395b22d127aSMel Gorman 	int ret = 0;
23961da177e4SLinus Torvalds 
239742288fe3SMel Gorman restart:
23984a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
23991da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24001da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24011da177e4SLinus Torvalds 	while (n && n->start < end) {
24021da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24031da177e4SLinus Torvalds 		if (n->start >= start) {
24041da177e4SLinus Torvalds 			if (n->end <= end)
24051da177e4SLinus Torvalds 				sp_delete(sp, n);
24061da177e4SLinus Torvalds 			else
24071da177e4SLinus Torvalds 				n->start = end;
24081da177e4SLinus Torvalds 		} else {
24091da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24101da177e4SLinus Torvalds 			if (n->end > end) {
241142288fe3SMel Gorman 				if (!n_new)
241242288fe3SMel Gorman 					goto alloc_new;
241342288fe3SMel Gorman 
241442288fe3SMel Gorman 				*mpol_new = *n->policy;
241542288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24167880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24171da177e4SLinus Torvalds 				n->end = start;
24185ca39575SHillf Danton 				sp_insert(sp, n_new);
241942288fe3SMel Gorman 				n_new = NULL;
242042288fe3SMel Gorman 				mpol_new = NULL;
24211da177e4SLinus Torvalds 				break;
24221da177e4SLinus Torvalds 			} else
24231da177e4SLinus Torvalds 				n->end = start;
24241da177e4SLinus Torvalds 		}
24251da177e4SLinus Torvalds 		if (!next)
24261da177e4SLinus Torvalds 			break;
24271da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24281da177e4SLinus Torvalds 	}
24291da177e4SLinus Torvalds 	if (new)
24301da177e4SLinus Torvalds 		sp_insert(sp, new);
24314a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
243242288fe3SMel Gorman 	ret = 0;
243342288fe3SMel Gorman 
243442288fe3SMel Gorman err_out:
243542288fe3SMel Gorman 	if (mpol_new)
243642288fe3SMel Gorman 		mpol_put(mpol_new);
243742288fe3SMel Gorman 	if (n_new)
243842288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
243942288fe3SMel Gorman 
2440b22d127aSMel Gorman 	return ret;
244142288fe3SMel Gorman 
244242288fe3SMel Gorman alloc_new:
24434a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
244442288fe3SMel Gorman 	ret = -ENOMEM;
244542288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
244642288fe3SMel Gorman 	if (!n_new)
244742288fe3SMel Gorman 		goto err_out;
244842288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
244942288fe3SMel Gorman 	if (!mpol_new)
245042288fe3SMel Gorman 		goto err_out;
245142288fe3SMel Gorman 	goto restart;
24521da177e4SLinus Torvalds }
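
/*
 * The restart/alloc_new dance above is the standard "allocate outside the
 * lock" pattern: kmem_cache_alloc(GFP_KERNEL) may sleep, which is not
 * allowed while holding the sp->lock rwlock, so on the first conflict we
 * drop the lock, preallocate, and retry.  A generic sketch of the shape
 * (illustrative pseudo-code only):
 *
 *	retry:
 *		lock();
 *		if (need_split && !prealloc) {
 *			unlock();
 *			prealloc = alloc();	// may sleep
 *			if (!prealloc)
 *				return -ENOMEM;
 *			goto retry;		// tree may have changed
 *		}
 *		// ... consume prealloc, mutate the tree ...
 *		unlock();
 */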
24531da177e4SLinus Torvalds 
245471fe804bSLee Schermerhorn /**
245571fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
245671fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
245771fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
245871fe804bSLee Schermerhorn  *
245971fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
246071fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
246171fe804bSLee Schermerhorn  * This must be released on exit.
24624bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
246371fe804bSLee Schermerhorn  */
246471fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24657339ff83SRobin Holt {
246658568d2aSMiao Xie 	int ret;
246758568d2aSMiao Xie 
246871fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
24694a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
24707339ff83SRobin Holt 
247171fe804bSLee Schermerhorn 	if (mpol) {
24727339ff83SRobin Holt 		struct vm_area_struct pvma;
247371fe804bSLee Schermerhorn 		struct mempolicy *new;
24744bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24757339ff83SRobin Holt 
24764bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24775c0c1654SLee Schermerhorn 			goto put_mpol;
247871fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
247971fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
248015d77835SLee Schermerhorn 		if (IS_ERR(new))
24810cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
248258568d2aSMiao Xie 
248358568d2aSMiao Xie 		task_lock(current);
24844bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
248558568d2aSMiao Xie 		task_unlock(current);
248615d77835SLee Schermerhorn 		if (ret)
24875c0c1654SLee Schermerhorn 			goto put_new;
248871fe804bSLee Schermerhorn 
248971fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24907339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
249171fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
249271fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
249315d77835SLee Schermerhorn 
24945c0c1654SLee Schermerhorn put_new:
249571fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24960cae3457SDan Carpenter free_scratch:
24974bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24985c0c1654SLee Schermerhorn put_mpol:
24995c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25007339ff83SRobin Holt 	}
25017339ff83SRobin Holt }
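
/*
 * Typical call-site sketch (an assumption, modelled on tmpfs inode setup in
 * mm/shmem.c): the superblock's mount mempolicy is handed over with its
 * reference, which this function consumes:
 *
 *	mpol_shared_policy_init(&SHMEM_I(inode)->policy,
 *				shmem_get_sbmpol(sbinfo));
 */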
25027339ff83SRobin Holt 
25031da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25041da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25051da177e4SLinus Torvalds {
25061da177e4SLinus Torvalds 	int err;
25071da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25081da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25091da177e4SLinus Torvalds 
2510028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25111da177e4SLinus Torvalds 		 vma->vm_pgoff,
251245c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2513028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
251400ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25151da177e4SLinus Torvalds 
25161da177e4SLinus Torvalds 	if (npol) {
25171da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25181da177e4SLinus Torvalds 		if (!new)
25191da177e4SLinus Torvalds 			return -ENOMEM;
25201da177e4SLinus Torvalds 	}
25211da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25221da177e4SLinus Torvalds 	if (err && new)
252363f74ca2SKOSAKI Motohiro 		sp_free(new);
25241da177e4SLinus Torvalds 	return err;
25251da177e4SLinus Torvalds }
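
/*
 * Caller sketch (an assumption, modelled on mm/shmem.c): the shared-memory
 * ->set_policy() vm operation simply forwards to this helper:
 *
 *	static int shmem_set_policy(struct vm_area_struct *vma,
 *				    struct mempolicy *mpol)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
 *	}
 */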
25261da177e4SLinus Torvalds 
25271da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25281da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25291da177e4SLinus Torvalds {
25301da177e4SLinus Torvalds 	struct sp_node *n;
25311da177e4SLinus Torvalds 	struct rb_node *next;
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds 	if (!p->root.rb_node)
25341da177e4SLinus Torvalds 		return;
25354a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
25361da177e4SLinus Torvalds 	next = rb_first(&p->root);
25371da177e4SLinus Torvalds 	while (next) {
25381da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25391da177e4SLinus Torvalds 		next = rb_next(&n->nd);
254063f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25411da177e4SLinus Torvalds 	}
25424a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
25431da177e4SLinus Torvalds }
25441da177e4SLinus Torvalds 
25451a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2546c297663cSMel Gorman static int __initdata numabalancing_override;
25471a687c2eSMel Gorman 
25481a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25491a687c2eSMel Gorman {
25501a687c2eSMel Gorman 	bool numabalancing_default = false;
25511a687c2eSMel Gorman 
25521a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25531a687c2eSMel Gorman 		numabalancing_default = true;
25541a687c2eSMel Gorman 
2555c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2556c297663cSMel Gorman 	if (numabalancing_override)
2557c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2558c297663cSMel Gorman 
2559b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2560756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2561c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25621a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25631a687c2eSMel Gorman 	}
25641a687c2eSMel Gorman }
25651a687c2eSMel Gorman 
25661a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25671a687c2eSMel Gorman {
25681a687c2eSMel Gorman 	int ret = 0;
25691a687c2eSMel Gorman 	if (!str)
25701a687c2eSMel Gorman 		goto out;
25711a687c2eSMel Gorman 
25721a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2573c297663cSMel Gorman 		numabalancing_override = 1;
25741a687c2eSMel Gorman 		ret = 1;
25751a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2576c297663cSMel Gorman 		numabalancing_override = -1;
25771a687c2eSMel Gorman 		ret = 1;
25781a687c2eSMel Gorman 	}
25791a687c2eSMel Gorman out:
25801a687c2eSMel Gorman 	if (!ret)
25814a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25821a687c2eSMel Gorman 
25831a687c2eSMel Gorman 	return ret;
25841a687c2eSMel Gorman }
25851a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
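
/*
 * Boot-time usage example: passing "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line sets
 * numabalancing_override to 1 or -1, and check_numabalancing_enable() then
 * forces that state regardless of CONFIG_NUMA_BALANCING_DEFAULT_ENABLED.
 */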
25861a687c2eSMel Gorman #else
25871a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25881a687c2eSMel Gorman {
25891a687c2eSMel Gorman }
25901a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25911a687c2eSMel Gorman 
25921da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25931da177e4SLinus Torvalds void __init numa_policy_init(void)
25941da177e4SLinus Torvalds {
2595b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2596b71636e2SPaul Mundt 	unsigned long largest = 0;
2597b71636e2SPaul Mundt 	int nid, prefer = 0;
2598b71636e2SPaul Mundt 
25991da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26001da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
260120c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26021da177e4SLinus Torvalds 
26031da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26041da177e4SLinus Torvalds 				     sizeof(struct sp_node),
260520c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26061da177e4SLinus Torvalds 
26075606e387SMel Gorman 	for_each_node(nid) {
26085606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26095606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26105606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26115606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26125606e387SMel Gorman 			.v = { .preferred_node = nid, },
26135606e387SMel Gorman 		};
26145606e387SMel Gorman 	}
26155606e387SMel Gorman 
2616b71636e2SPaul Mundt 	/*
2617b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2618b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); fall
2619b71636e2SPaul Mundt 	 * back to the largest node if they're all smaller.
2620b71636e2SPaul Mundt 	 */
2621b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
262201f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2623b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26241da177e4SLinus Torvalds 
2625b71636e2SPaul Mundt 		/* Preserve the largest node */
2626b71636e2SPaul Mundt 		if (largest < total_pages) {
2627b71636e2SPaul Mundt 			largest = total_pages;
2628b71636e2SPaul Mundt 			prefer = nid;
2629b71636e2SPaul Mundt 		}
2630b71636e2SPaul Mundt 
2631b71636e2SPaul Mundt 		/* Interleave this node? */
2632b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2633b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2634b71636e2SPaul Mundt 	}
2635b71636e2SPaul Mundt 
2636b71636e2SPaul Mundt 	/* All too small, use the largest */
2637b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2638b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2639b71636e2SPaul Mundt 
2640028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2641b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26421a687c2eSMel Gorman 
26431a687c2eSMel Gorman 	check_numabalancing_enable();
26441da177e4SLinus Torvalds }
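
/*
 * Worked example for the interleave threshold above: with 4KB pages
 * (PAGE_SHIFT == 12), "(total_pages << PAGE_SHIFT) >= (16 << 20)" holds
 * once a node has at least (16 << 20) >> 12 == 4096 present pages, i.e.
 * 16MB of memory.
 */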
26451da177e4SLinus Torvalds 
26468bccd85fSChristoph Lameter /* Reset policy of current process to default */
26471da177e4SLinus Torvalds void numa_default_policy(void)
26481da177e4SLinus Torvalds {
2649028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26501da177e4SLinus Torvalds }
265168860ec1SPaul Jackson 
26524225399aSPaul Jackson /*
2653095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2654095f1fc4SLee Schermerhorn  */
2655095f1fc4SLee Schermerhorn 
2656095f1fc4SLee Schermerhorn /*
2657f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
26581a75a6c8SChristoph Lameter  */
2659345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2660345ace9cSLee Schermerhorn {
2661345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2662345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2663345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2664345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2665d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2666345ace9cSLee Schermerhorn };
26671a75a6c8SChristoph Lameter 
2668095f1fc4SLee Schermerhorn 
2669095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2670095f1fc4SLee Schermerhorn /**
2671f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2672095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
267371fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2674095f1fc4SLee Schermerhorn  *
2675095f1fc4SLee Schermerhorn  * Format of input:
2676095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2677095f1fc4SLee Schermerhorn  *
267871fe804bSLee Schermerhorn  * On success, returns 0, else 1
2679095f1fc4SLee Schermerhorn  */
2680a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2681095f1fc4SLee Schermerhorn {
268271fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2683b4652e84SLee Schermerhorn 	unsigned short mode;
2684f2a07f40SHugh Dickins 	unsigned short mode_flags;
268571fe804bSLee Schermerhorn 	nodemask_t nodes;
2686095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2687095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2688095f1fc4SLee Schermerhorn 	int err = 1;
2689095f1fc4SLee Schermerhorn 
2690095f1fc4SLee Schermerhorn 	if (nodelist) {
2691095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2692095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
269371fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2694095f1fc4SLee Schermerhorn 			goto out;
269501f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2696095f1fc4SLee Schermerhorn 			goto out;
269771fe804bSLee Schermerhorn 	} else
269871fe804bSLee Schermerhorn 		nodes_clear(nodes);
269971fe804bSLee Schermerhorn 
2700095f1fc4SLee Schermerhorn 	if (flags)
2701095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2702095f1fc4SLee Schermerhorn 
2703479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2704345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2705095f1fc4SLee Schermerhorn 			break;
2706095f1fc4SLee Schermerhorn 		}
2707095f1fc4SLee Schermerhorn 	}
2708a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2709095f1fc4SLee Schermerhorn 		goto out;
2710095f1fc4SLee Schermerhorn 
271171fe804bSLee Schermerhorn 	switch (mode) {
2712095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
271371fe804bSLee Schermerhorn 		/*
271471fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
271571fe804bSLee Schermerhorn 		 */
2716095f1fc4SLee Schermerhorn 		if (nodelist) {
2717095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2718095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2719095f1fc4SLee Schermerhorn 				rest++;
2720926f2ae0SKOSAKI Motohiro 			if (*rest)
2721926f2ae0SKOSAKI Motohiro 				goto out;
2722095f1fc4SLee Schermerhorn 		}
2723095f1fc4SLee Schermerhorn 		break;
2724095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2725095f1fc4SLee Schermerhorn 		/*
2726095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2727095f1fc4SLee Schermerhorn 		 */
2728095f1fc4SLee Schermerhorn 		if (!nodelist)
272901f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27303f226aa1SLee Schermerhorn 		break;
273171fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27323f226aa1SLee Schermerhorn 		/*
273371fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27343f226aa1SLee Schermerhorn 		 */
273571fe804bSLee Schermerhorn 		if (nodelist)
27363f226aa1SLee Schermerhorn 			goto out;
273771fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27383f226aa1SLee Schermerhorn 		break;
2739413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2740413b43deSRavikiran G Thirumalai 		/*
2741413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2742413b43deSRavikiran G Thirumalai 		 */
2743413b43deSRavikiran G Thirumalai 		if (!nodelist)
2744413b43deSRavikiran G Thirumalai 			err = 0;
2745413b43deSRavikiran G Thirumalai 		goto out;
2746d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
274771fe804bSLee Schermerhorn 		/*
2748d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
274971fe804bSLee Schermerhorn 		 */
2750d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2751d69b2e63SKOSAKI Motohiro 			goto out;
2752095f1fc4SLee Schermerhorn 	}
2753095f1fc4SLee Schermerhorn 
275471fe804bSLee Schermerhorn 	mode_flags = 0;
2755095f1fc4SLee Schermerhorn 	if (flags) {
2756095f1fc4SLee Schermerhorn 		/*
2757095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2758095f1fc4SLee Schermerhorn 		 * mode flags.
2759095f1fc4SLee Schermerhorn 		 */
2760095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
276171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2762095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
276371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2764095f1fc4SLee Schermerhorn 		else
2765926f2ae0SKOSAKI Motohiro 			goto out;
2766095f1fc4SLee Schermerhorn 	}
276771fe804bSLee Schermerhorn 
276871fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
276971fe804bSLee Schermerhorn 	if (IS_ERR(new))
2770926f2ae0SKOSAKI Motohiro 		goto out;
2771926f2ae0SKOSAKI Motohiro 
2772f2a07f40SHugh Dickins 	/*
2773f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2774f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2775f2a07f40SHugh Dickins 	 */
2776f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2777f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2778f2a07f40SHugh Dickins 	else if (nodelist)
2779f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2780f2a07f40SHugh Dickins 	else
2781f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2782f2a07f40SHugh Dickins 
2783f2a07f40SHugh Dickins 	/*
2784f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2785f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2786f2a07f40SHugh Dickins 	 */
2787e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2788f2a07f40SHugh Dickins 
2789926f2ae0SKOSAKI Motohiro 	err = 0;
279071fe804bSLee Schermerhorn 
2791095f1fc4SLee Schermerhorn out:
2792095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2793095f1fc4SLee Schermerhorn 	if (nodelist)
2794095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2795095f1fc4SLee Schermerhorn 	if (flags)
2796095f1fc4SLee Schermerhorn 		*--flags = '=';
279771fe804bSLee Schermerhorn 	if (!err)
279871fe804bSLee Schermerhorn 		*mpol = new;
2799095f1fc4SLee Schermerhorn 	return err;
2800095f1fc4SLee Schermerhorn }
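
/*
 * Example inputs this parser accepts (illustrative):
 *
 *	"interleave:0-3"	interleave across nodes 0-3
 *	"prefer=static:1"	prefer node 1, with MPOL_F_STATIC_NODES
 *	"bind=relative:0,2"	bind to relative nodes 0 and 2
 *	"local"			allocate on the faulting node
 *	"default"		drop any special policy
 *
 * Caller sketch (an assumption; the string must be writable, since the ':'
 * and '=' separators are NUL-terminated in place and restored only for the
 * error case):
 *
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		// ... use mpol, then drop the reference:
 *		mpol_put(mpol);
 *	}
 */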
2801095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2802095f1fc4SLee Schermerhorn 
280371fe804bSLee Schermerhorn /**
280471fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
280571fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
280671fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
280771fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
280871fe804bSLee Schermerhorn  *
2809948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2810948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2811948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
28121a75a6c8SChristoph Lameter  */
2813948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28141a75a6c8SChristoph Lameter {
28151a75a6c8SChristoph Lameter 	char *p = buffer;
2816948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2817948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2818948927eeSDavid Rientjes 	unsigned short flags = 0;
28191a75a6c8SChristoph Lameter 
28208790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2821bea904d5SLee Schermerhorn 		mode = pol->mode;
2822948927eeSDavid Rientjes 		flags = pol->flags;
2823948927eeSDavid Rientjes 	}
2824bea904d5SLee Schermerhorn 
28251a75a6c8SChristoph Lameter 	switch (mode) {
28261a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28271a75a6c8SChristoph Lameter 		break;
28281a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2829fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2830f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
283153f2556bSLee Schermerhorn 		else
2832fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28331a75a6c8SChristoph Lameter 		break;
28341a75a6c8SChristoph Lameter 	case MPOL_BIND:
28351a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28361a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28371a75a6c8SChristoph Lameter 		break;
28381a75a6c8SChristoph Lameter 	default:
2839948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2840948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2841948927eeSDavid Rientjes 		return;
28421a75a6c8SChristoph Lameter 	}
28431a75a6c8SChristoph Lameter 
2844b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28451a75a6c8SChristoph Lameter 
2846fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2847948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2848f5b087b5SDavid Rientjes 
28492291990aSLee Schermerhorn 		/*
28502291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28512291990aSLee Schermerhorn 		 */
2852f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28532291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28542291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28552291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2856f5b087b5SDavid Rientjes 	}
2857f5b087b5SDavid Rientjes 
28589e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28599e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28609e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28611a75a6c8SChristoph Lameter }
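
/*
 * Usage sketch (illustrative): a 64-byte buffer comfortably exceeds the
 * recommended minimum of 32 noted above:
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	pr_debug("policy: %s\n", buf);	// e.g. "interleave:0-3" or "prefer=static:1"
 */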
2862