xref: /openbmc/linux/mm/mempolicy.c (revision 313674661925ee265f16570c893ea13cb9e00b82)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a per-process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to the specified memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *                in a NUMA-aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
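
/*
 * Illustrative only: the policies above are normally installed from user
 * space with set_mempolicy(2) (task policy) or mbind(2) (per-VMA policy).
 * A rough userspace sketch, assuming the libnuma <numaif.h> prototypes are
 * available:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mask = 1UL << 0;
 *	mbind(p, 1 << 20, MPOL_BIND, &mask, sizeof(mask) * 8, 0);
 *
 * The first call interleaves the task's future anonymous allocations across
 * nodes 0 and 1; the second restricts one mapping to node 0 only.
 */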
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always graceful about that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
88*31367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
13074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1315606e387SMel Gorman {
1325606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
133f15ca78eSOleg Nesterov 	int node;
1345606e387SMel Gorman 
135f15ca78eSOleg Nesterov 	if (pol)
136f15ca78eSOleg Nesterov 		return pol;
1375606e387SMel Gorman 
138f15ca78eSOleg Nesterov 	node = numa_node_id();
1391da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1401da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
141f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
142f15ca78eSOleg Nesterov 		if (pol->mode)
143f15ca78eSOleg Nesterov 			return pol;
1441da6f0e1SJianguo Wu 	}
1455606e387SMel Gorman 
146f15ca78eSOleg Nesterov 	return &default_policy;
1475606e387SMel Gorman }
1485606e387SMel Gorman 
14937012946SDavid Rientjes static const struct mempolicy_operations {
15037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
15237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
15337012946SDavid Rientjes 
154f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155f5b087b5SDavid Rientjes {
1566d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1574c50bc01SDavid Rientjes }
1584c50bc01SDavid Rientjes 
1594c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1604c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1614c50bc01SDavid Rientjes {
1624c50bc01SDavid Rientjes 	nodemask_t tmp;
1634c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1644c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
165f5b087b5SDavid Rientjes }
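
/*
 * A worked example (illustrative): with MPOL_F_RELATIVE_NODES the user's
 * mask is interpreted relative to the allowed set.  If *rel = {4,5,6}
 * (weight 3) and *orig = {0,2}, nodes_fold() folds *orig modulo 3, leaving
 * {0,2}, and nodes_onto() then maps bit 0 to the 1st allowed node and bit 2
 * to the 3rd, so *ret = {4,6}.  An out-of-range bit such as 5 would fold to
 * 2 and likewise land on node 6.
 */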
166f5b087b5SDavid Rientjes 
16737012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
16837012946SDavid Rientjes {
16937012946SDavid Rientjes 	if (nodes_empty(*nodes))
17037012946SDavid Rientjes 		return -EINVAL;
17137012946SDavid Rientjes 	pol->v.nodes = *nodes;
17237012946SDavid Rientjes 	return 0;
17337012946SDavid Rientjes }
17437012946SDavid Rientjes 
17537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
17637012946SDavid Rientjes {
17737012946SDavid Rientjes 	if (!nodes)
178fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
17937012946SDavid Rientjes 	else if (nodes_empty(*nodes))
18037012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
18137012946SDavid Rientjes 	else
18237012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
18337012946SDavid Rientjes 	return 0;
18437012946SDavid Rientjes }
18537012946SDavid Rientjes 
18637012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
18737012946SDavid Rientjes {
188859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
18937012946SDavid Rientjes 		return -EINVAL;
19037012946SDavid Rientjes 	pol->v.nodes = *nodes;
19137012946SDavid Rientjes 	return 0;
19237012946SDavid Rientjes }
19337012946SDavid Rientjes 
19458568d2aSMiao Xie /*
19558568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
19658568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
19758568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
19858568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
19958568d2aSMiao Xie  *
20058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
20158568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_semaphore for write.
20258568d2aSMiao Xie  */
2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2044bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
20558568d2aSMiao Xie {
20658568d2aSMiao Xie 	int ret;
20758568d2aSMiao Xie 
20858568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
20958568d2aSMiao Xie 	if (pol == NULL)
21058568d2aSMiao Xie 		return 0;
21101f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2124bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
21301f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
21458568d2aSMiao Xie 
21558568d2aSMiao Xie 	VM_BUG_ON(!nodes);
21658568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
21758568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
21858568d2aSMiao Xie 	else {
21958568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2204bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
22158568d2aSMiao Xie 		else
2224bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2234bfc4495SKAMEZAWA Hiroyuki 
22458568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
22558568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
22658568d2aSMiao Xie 		else
22758568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
22858568d2aSMiao Xie 						cpuset_current_mems_allowed;
22958568d2aSMiao Xie 	}
23058568d2aSMiao Xie 
2314bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2324bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2334bfc4495SKAMEZAWA Hiroyuki 	else
2344bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
23558568d2aSMiao Xie 	return ret;
23658568d2aSMiao Xie }
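
/*
 * A sketched caller, mirroring do_set_mempolicy() below: build the policy
 * with mpol_new(), then install its nodemask under the task's alloc_lock
 * using scratch nodemasks:
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *
 *	task_lock(current);
 *	err = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 *	NODEMASK_SCRATCH_FREE(scratch);
 *
 * (Error handling for a failed mpol_new() or a NULL scratch is elided.)
 */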
23758568d2aSMiao Xie 
23858568d2aSMiao Xie /*
23958568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
24058568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
24158568d2aSMiao Xie  */
242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243028fec41SDavid Rientjes 				  nodemask_t *nodes)
2441da177e4SLinus Torvalds {
2451da177e4SLinus Torvalds 	struct mempolicy *policy;
2461da177e4SLinus Torvalds 
247028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
24800ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249140d5a49SPaul Mundt 
2503e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2513e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
25237012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
253d3a71033SLee Schermerhorn 		return NULL;
25437012946SDavid Rientjes 	}
2553e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2563e1f0645SDavid Rientjes 
2573e1f0645SDavid Rientjes 	/*
2583e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2593e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2603e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2613e1f0645SDavid Rientjes 	 */
2623e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2633e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2643e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2653e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2663e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2673e1f0645SDavid Rientjes 		}
268479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2698d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2708d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2718d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
272479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
273479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2743e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2753e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2761da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2771da177e4SLinus Torvalds 	if (!policy)
2781da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2791da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
28045c4745aSLee Schermerhorn 	policy->mode = mode;
28137012946SDavid Rientjes 	policy->flags = flags;
2823e1f0645SDavid Rientjes 
28337012946SDavid Rientjes 	return policy;
28437012946SDavid Rientjes }
28537012946SDavid Rientjes 
28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
28852cd3b07SLee Schermerhorn {
28952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
29052cd3b07SLee Schermerhorn 		return;
29152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
29252cd3b07SLee Schermerhorn }
29352cd3b07SLee Schermerhorn 
294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
29537012946SDavid Rientjes {
29637012946SDavid Rientjes }
29737012946SDavid Rientjes 
298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
2991d0d2680SDavid Rientjes {
3001d0d2680SDavid Rientjes 	nodemask_t tmp;
3011d0d2680SDavid Rientjes 
30237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
30337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
30437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
30537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3061d0d2680SDavid Rientjes 	else {
307213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308213980c0SVlastimil Babka 								*nodes);
309213980c0SVlastimil Babka 		pol->w.cpuset_mems_allowed = tmp;
3101d0d2680SDavid Rientjes 	}
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie 	if (nodes_empty(tmp))
313708c1bbcSMiao Xie 		tmp = *nodes;
314708c1bbcSMiao Xie 
3151d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
319213980c0SVlastimil Babka 						const nodemask_t *nodes)
32037012946SDavid Rientjes {
32137012946SDavid Rientjes 	nodemask_t tmp;
32237012946SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3241d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3251d0d2680SDavid Rientjes 
326fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3271d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
328fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
329fc36b8d3SLee Schermerhorn 		} else
330fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
33137012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
33237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3331d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
334fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3351d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
33637012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
33737012946SDavid Rientjes 						   *nodes);
33837012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes 	}
3401d0d2680SDavid Rientjes }
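
/*
 * A worked example (illustrative): a task sets MPOL_PREFERRED |
 * MPOL_F_STATIC_NODES with user_nodemask = {3}.  If its cpuset is later
 * rebound to {0,1}, node 3 is no longer allowed and the policy falls back
 * to MPOL_F_LOCAL; a later rebind that allows node 3 again restores it as
 * the preferred node.  With MPOL_F_RELATIVE_NODES and user_nodemask = {1},
 * a rebind to {4,5,6} makes node 5 (the second allowed node) preferred.
 */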
34137012946SDavid Rientjes 
342708c1bbcSMiao Xie /*
343708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344708c1bbcSMiao Xie  *
345213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
347213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
348708c1bbcSMiao Xie  */
349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35037012946SDavid Rientjes {
35137012946SDavid Rientjes 	if (!pol)
35237012946SDavid Rientjes 		return;
353213980c0SVlastimil Babka 	if (!mpol_store_user_nodemask(pol) &&
35437012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35537012946SDavid Rientjes 		return;
356708c1bbcSMiao Xie 
357213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3581d0d2680SDavid Rientjes }
3591d0d2680SDavid Rientjes 
3601d0d2680SDavid Rientjes /*
3611d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3621d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36358568d2aSMiao Xie  *
36458568d2aSMiao Xie  * Called with task's alloc_lock held.
3651d0d2680SDavid Rientjes  */
3661d0d2680SDavid Rientjes 
367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3681d0d2680SDavid Rientjes {
369213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3701d0d2680SDavid Rientjes }
3711d0d2680SDavid Rientjes 
3721d0d2680SDavid Rientjes /*
3731d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3741d0d2680SDavid Rientjes  *
3751d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3761d0d2680SDavid Rientjes  */
3771d0d2680SDavid Rientjes 
3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3791d0d2680SDavid Rientjes {
3801d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3811d0d2680SDavid Rientjes 
3821d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3831d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
3851d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3861d0d2680SDavid Rientjes }
3871d0d2680SDavid Rientjes 
38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38937012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39037012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39137012946SDavid Rientjes 	},
39237012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
39337012946SDavid Rientjes 		.create = mpol_new_interleave,
39437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39737012946SDavid Rientjes 		.create = mpol_new_preferred,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_BIND] = {
40137012946SDavid Rientjes 		.create = mpol_new_bind,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes };
40537012946SDavid Rientjes 
406fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
407fc301289SChristoph Lameter 				unsigned long flags);
4081a75a6c8SChristoph Lameter 
4096f4576e3SNaoya Horiguchi struct queue_pages {
4106f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4116f4576e3SNaoya Horiguchi 	unsigned long flags;
4126f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4136f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4146f4576e3SNaoya Horiguchi };
4156f4576e3SNaoya Horiguchi 
41698094945SNaoya Horiguchi /*
41788aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
41888aaa2a1SNaoya Horiguchi  *
41988aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
42088aaa2a1SNaoya Horiguchi  * not in qp->nmask instead.
42188aaa2a1SNaoya Horiguchi  */
42288aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
42388aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
42488aaa2a1SNaoya Horiguchi {
42588aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
42688aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
42788aaa2a1SNaoya Horiguchi 
42888aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
42988aaa2a1SNaoya Horiguchi }
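
/*
 * Example (illustrative): with *qp->nmask = {0,1} a page on node 2 fails
 * the test and is skipped.  With MPOL_MF_INVERT the test flips, so only
 * pages *outside* {0,1} qualify; that is how an mbind()-style caller can
 * collect exactly the pages that are not yet on an allowed node.
 */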
43088aaa2a1SNaoya Horiguchi 
431c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
433c8633798SNaoya Horiguchi {
434c8633798SNaoya Horiguchi 	int ret = 0;
435c8633798SNaoya Horiguchi 	struct page *page;
436c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
437c8633798SNaoya Horiguchi 	unsigned long flags;
438c8633798SNaoya Horiguchi 
439c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
440c8633798SNaoya Horiguchi 		ret = 1;
441c8633798SNaoya Horiguchi 		goto unlock;
442c8633798SNaoya Horiguchi 	}
443c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
444c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
445c8633798SNaoya Horiguchi 		spin_unlock(ptl);
446c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
447c8633798SNaoya Horiguchi 		goto out;
448c8633798SNaoya Horiguchi 	}
449c8633798SNaoya Horiguchi 	if (!thp_migration_supported()) {
450c8633798SNaoya Horiguchi 		get_page(page);
451c8633798SNaoya Horiguchi 		spin_unlock(ptl);
452c8633798SNaoya Horiguchi 		lock_page(page);
453c8633798SNaoya Horiguchi 		ret = split_huge_page(page);
454c8633798SNaoya Horiguchi 		unlock_page(page);
455c8633798SNaoya Horiguchi 		put_page(page);
456c8633798SNaoya Horiguchi 		goto out;
457c8633798SNaoya Horiguchi 	}
458c8633798SNaoya Horiguchi 	if (!queue_pages_required(page, qp)) {
459c8633798SNaoya Horiguchi 		ret = 1;
460c8633798SNaoya Horiguchi 		goto unlock;
461c8633798SNaoya Horiguchi 	}
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	ret = 1;
464c8633798SNaoya Horiguchi 	flags = qp->flags;
465c8633798SNaoya Horiguchi 	/* go to thp migration */
466c8633798SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
467c8633798SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
468c8633798SNaoya Horiguchi unlock:
469c8633798SNaoya Horiguchi 	spin_unlock(ptl);
470c8633798SNaoya Horiguchi out:
471c8633798SNaoya Horiguchi 	return ret;
472c8633798SNaoya Horiguchi }
473c8633798SNaoya Horiguchi 
47488aaa2a1SNaoya Horiguchi /*
47598094945SNaoya Horiguchi  * Scan through the pages, checking whether each one satisfies the node mask
47698094945SNaoya Horiguchi  * and flags in struct queue_pages, and move those that do to the pagelist.
47798094945SNaoya Horiguchi  */
4786f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4796f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4801da177e4SLinus Torvalds {
4816f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4826f4576e3SNaoya Horiguchi 	struct page *page;
4836f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4846f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
485c8633798SNaoya Horiguchi 	int ret;
48691612e0dSHugh Dickins 	pte_t *pte;
487705e87c0SHugh Dickins 	spinlock_t *ptl;
488941150a3SHugh Dickins 
489c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
490c8633798SNaoya Horiguchi 	if (ptl) {
491c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
492248db92dSKirill A. Shutemov 		if (ret)
4936f4576e3SNaoya Horiguchi 			return 0;
494248db92dSKirill A. Shutemov 	}
49591612e0dSHugh Dickins 
496337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
497337d9abfSNaoya Horiguchi 		return 0;
498248db92dSKirill A. Shutemov retry:
4996f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5006f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
50191612e0dSHugh Dickins 		if (!pte_present(*pte))
50291612e0dSHugh Dickins 			continue;
5036aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5046aab341eSLinus Torvalds 		if (!page)
50591612e0dSHugh Dickins 			continue;
506053837fcSNick Piggin 		/*
50762b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
50862b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
509053837fcSNick Piggin 		 */
510b79bc0a0SHugh Dickins 		if (PageReserved(page))
511f4598c8bSChristoph Lameter 			continue;
51288aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
51338e35860SChristoph Lameter 			continue;
514c8633798SNaoya Horiguchi 		if (PageTransCompound(page) && !thp_migration_supported()) {
515248db92dSKirill A. Shutemov 			get_page(page);
516248db92dSKirill A. Shutemov 			pte_unmap_unlock(pte, ptl);
517248db92dSKirill A. Shutemov 			lock_page(page);
518248db92dSKirill A. Shutemov 			ret = split_huge_page(page);
519248db92dSKirill A. Shutemov 			unlock_page(page);
520248db92dSKirill A. Shutemov 			put_page(page);
521248db92dSKirill A. Shutemov 			/* Failed to split -- skip. */
522248db92dSKirill A. Shutemov 			if (ret) {
523248db92dSKirill A. Shutemov 				pte = pte_offset_map_lock(walk->mm, pmd,
524248db92dSKirill A. Shutemov 						addr, &ptl);
525248db92dSKirill A. Shutemov 				continue;
526248db92dSKirill A. Shutemov 			}
527248db92dSKirill A. Shutemov 			goto retry;
528248db92dSKirill A. Shutemov 		}
52938e35860SChristoph Lameter 
5306f4576e3SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
5316f4576e3SNaoya Horiguchi 	}
5326f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5336f4576e3SNaoya Horiguchi 	cond_resched();
5346f4576e3SNaoya Horiguchi 	return 0;
53591612e0dSHugh Dickins }
53691612e0dSHugh Dickins 
5376f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5386f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5396f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
540e2d8cf40SNaoya Horiguchi {
541e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5426f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5436f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
544e2d8cf40SNaoya Horiguchi 	struct page *page;
545cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
546d4c54919SNaoya Horiguchi 	pte_t entry;
547e2d8cf40SNaoya Horiguchi 
5486f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5496f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
550d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
551d4c54919SNaoya Horiguchi 		goto unlock;
552d4c54919SNaoya Horiguchi 	page = pte_page(entry);
55388aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
554e2d8cf40SNaoya Horiguchi 		goto unlock;
555e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
556e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
557e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5586f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
559e2d8cf40SNaoya Horiguchi unlock:
560cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
561e2d8cf40SNaoya Horiguchi #else
562e2d8cf40SNaoya Horiguchi 	BUG();
563e2d8cf40SNaoya Horiguchi #endif
56491612e0dSHugh Dickins 	return 0;
5651da177e4SLinus Torvalds }
5661da177e4SLinus Torvalds 
5675877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
568b24f53a0SLee Schermerhorn /*
5694b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
5704b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5714b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5724b10e7d5SMel Gorman  *
5734b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5744b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5754b10e7d5SMel Gorman  * changes to the core.
576b24f53a0SLee Schermerhorn  */
5774b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5784b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
579b24f53a0SLee Schermerhorn {
5804b10e7d5SMel Gorman 	int nr_updated;
581b24f53a0SLee Schermerhorn 
5824d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
58303c5a6e1SMel Gorman 	if (nr_updated)
58403c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
585b24f53a0SLee Schermerhorn 
5864b10e7d5SMel Gorman 	return nr_updated;
587b24f53a0SLee Schermerhorn }
588b24f53a0SLee Schermerhorn #else
589b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
590b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
591b24f53a0SLee Schermerhorn {
592b24f53a0SLee Schermerhorn 	return 0;
593b24f53a0SLee Schermerhorn }
5945877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
595b24f53a0SLee Schermerhorn 
5966f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
5976f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
5981da177e4SLinus Torvalds {
5996f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6006f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6015b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6026f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
603dc9aa5b9SChristoph Lameter 
60477bf45e7SKirill A. Shutemov 	if (!vma_migratable(vma))
60548684a65SNaoya Horiguchi 		return 1;
60648684a65SNaoya Horiguchi 
6075b952b3cSAndi Kleen 	if (endvma > end)
6085b952b3cSAndi Kleen 		endvma = end;
6095b952b3cSAndi Kleen 	if (vma->vm_start > start)
6105b952b3cSAndi Kleen 		start = vma->vm_start;
611b24f53a0SLee Schermerhorn 
612b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
613b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
614d05f0cdcSHugh Dickins 			return -EFAULT;
6156f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
616d05f0cdcSHugh Dickins 			return -EFAULT;
617b24f53a0SLee Schermerhorn 	}
618b24f53a0SLee Schermerhorn 
6196f4576e3SNaoya Horiguchi 	qp->prev = vma;
6206f4576e3SNaoya Horiguchi 
621b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6222c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6234355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6244355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6254355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
626b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6276f4576e3SNaoya Horiguchi 		return 1;
628b24f53a0SLee Schermerhorn 	}
629b24f53a0SLee Schermerhorn 
6306f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
63177bf45e7SKirill A. Shutemov 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6326f4576e3SNaoya Horiguchi 		return 0;
6336f4576e3SNaoya Horiguchi 	return 1;
6346f4576e3SNaoya Horiguchi }
635b24f53a0SLee Schermerhorn 
6366f4576e3SNaoya Horiguchi /*
6376f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6386f4576e3SNaoya Horiguchi  *
6396f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6406f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist that is
6416f4576e3SNaoya Horiguchi  * passed via @private.
6426f4576e3SNaoya Horiguchi  */
6436f4576e3SNaoya Horiguchi static int
6446f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6456f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6466f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6476f4576e3SNaoya Horiguchi {
6486f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6496f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6506f4576e3SNaoya Horiguchi 		.flags = flags,
6516f4576e3SNaoya Horiguchi 		.nmask = nodes,
6526f4576e3SNaoya Horiguchi 		.prev = NULL,
6536f4576e3SNaoya Horiguchi 	};
6546f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6556f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6566f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6576f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6586f4576e3SNaoya Horiguchi 		.mm = mm,
6596f4576e3SNaoya Horiguchi 		.private = &qp,
6606f4576e3SNaoya Horiguchi 	};
6616f4576e3SNaoya Horiguchi 
6626f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6631da177e4SLinus Torvalds }
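
/*
 * A sketched call, modelled on migrate_to_node() below, queueing every page
 * in the address space that currently sits on node @source:
 *
 *	nodemask_t nmask;
 *	LIST_HEAD(pagelist);
 *
 *	nodes_clear(nmask);
 *	node_set(source, nmask);
 *	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *			  MPOL_MF_MOVE | MPOL_MF_DISCONTIG_OK, &pagelist);
 */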
6641da177e4SLinus Torvalds 
665869833f2SKOSAKI Motohiro /*
666869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
667869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
668869833f2SKOSAKI Motohiro  */
669869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
670869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6718d34694cSKOSAKI Motohiro {
672869833f2SKOSAKI Motohiro 	int err;
673869833f2SKOSAKI Motohiro 	struct mempolicy *old;
674869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6758d34694cSKOSAKI Motohiro 
6768d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6778d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
6788d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
6798d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
6808d34694cSKOSAKI Motohiro 
681869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
682869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
683869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
684869833f2SKOSAKI Motohiro 
685869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
6868d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
687869833f2SKOSAKI Motohiro 		if (err)
688869833f2SKOSAKI Motohiro 			goto err_out;
6898d34694cSKOSAKI Motohiro 	}
690869833f2SKOSAKI Motohiro 
691869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
692869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
693869833f2SKOSAKI Motohiro 	mpol_put(old);
694869833f2SKOSAKI Motohiro 
695869833f2SKOSAKI Motohiro 	return 0;
696869833f2SKOSAKI Motohiro  err_out:
697869833f2SKOSAKI Motohiro 	mpol_put(new);
6988d34694cSKOSAKI Motohiro 	return err;
6998d34694cSKOSAKI Motohiro }
7008d34694cSKOSAKI Motohiro 
7011da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7029d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7039d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7041da177e4SLinus Torvalds {
7051da177e4SLinus Torvalds 	struct vm_area_struct *next;
7069d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7079d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7089d8cebd4SKOSAKI Motohiro 	int err = 0;
709e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7109d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7119d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7121da177e4SLinus Torvalds 
713097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7149d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7159d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7169d8cebd4SKOSAKI Motohiro 
717097d5910SLinus Torvalds 	prev = vma->vm_prev;
718e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
719e26a5114SKOSAKI Motohiro 		prev = vma;
720e26a5114SKOSAKI Motohiro 
7219d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7221da177e4SLinus Torvalds 		next = vma->vm_next;
7239d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7249d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7259d8cebd4SKOSAKI Motohiro 
726e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
727e26a5114SKOSAKI Motohiro 			continue;
728e26a5114SKOSAKI Motohiro 
729e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
730e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7319d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
732e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
73319a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7349d8cebd4SKOSAKI Motohiro 		if (prev) {
7359d8cebd4SKOSAKI Motohiro 			vma = prev;
7369d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7373964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7389d8cebd4SKOSAKI Motohiro 				continue;
7393964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7403964acd0SOleg Nesterov 			goto replace;
7411da177e4SLinus Torvalds 		}
7429d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7439d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7449d8cebd4SKOSAKI Motohiro 			if (err)
7459d8cebd4SKOSAKI Motohiro 				goto out;
7469d8cebd4SKOSAKI Motohiro 		}
7479d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7489d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7499d8cebd4SKOSAKI Motohiro 			if (err)
7509d8cebd4SKOSAKI Motohiro 				goto out;
7519d8cebd4SKOSAKI Motohiro 		}
7523964acd0SOleg Nesterov  replace:
753869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7549d8cebd4SKOSAKI Motohiro 		if (err)
7559d8cebd4SKOSAKI Motohiro 			goto out;
7569d8cebd4SKOSAKI Motohiro 	}
7579d8cebd4SKOSAKI Motohiro 
7589d8cebd4SKOSAKI Motohiro  out:
7591da177e4SLinus Torvalds 	return err;
7601da177e4SLinus Torvalds }
7611da177e4SLinus Torvalds 
7621da177e4SLinus Torvalds /* Set the process memory policy */
763028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
764028fec41SDavid Rientjes 			     nodemask_t *nodes)
7651da177e4SLinus Torvalds {
76658568d2aSMiao Xie 	struct mempolicy *new, *old;
7674bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
76858568d2aSMiao Xie 	int ret;
7691da177e4SLinus Torvalds 
7704bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7714bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
772f4e53d91SLee Schermerhorn 
7734bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
7744bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
7754bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
7764bfc4495SKAMEZAWA Hiroyuki 		goto out;
7774bfc4495SKAMEZAWA Hiroyuki 	}
7782c7c3a7dSOleg Nesterov 
77958568d2aSMiao Xie 	task_lock(current);
7804bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
78158568d2aSMiao Xie 	if (ret) {
78258568d2aSMiao Xie 		task_unlock(current);
78358568d2aSMiao Xie 		mpol_put(new);
7844bfc4495SKAMEZAWA Hiroyuki 		goto out;
78558568d2aSMiao Xie 	}
78658568d2aSMiao Xie 	old = current->mempolicy;
7871da177e4SLinus Torvalds 	current->mempolicy = new;
78845816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
78945816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
79058568d2aSMiao Xie 	task_unlock(current);
79158568d2aSMiao Xie 	mpol_put(old);
7924bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
7934bfc4495SKAMEZAWA Hiroyuki out:
7944bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
7954bfc4495SKAMEZAWA Hiroyuki 	return ret;
7961da177e4SLinus Torvalds }
7971da177e4SLinus Torvalds 
798bea904d5SLee Schermerhorn /*
799bea904d5SLee Schermerhorn  * Return the nodemask of a policy for a get_mempolicy() query
80058568d2aSMiao Xie  *
80158568d2aSMiao Xie  * Called with task's alloc_lock held
802bea904d5SLee Schermerhorn  */
803bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8041da177e4SLinus Torvalds {
805dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
806bea904d5SLee Schermerhorn 	if (p == &default_policy)
807bea904d5SLee Schermerhorn 		return;
808bea904d5SLee Schermerhorn 
80945c4745aSLee Schermerhorn 	switch (p->mode) {
81019770b32SMel Gorman 	case MPOL_BIND:
81119770b32SMel Gorman 		/* Fall through */
8121da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
813dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8141da177e4SLinus Torvalds 		break;
8151da177e4SLinus Torvalds 	case MPOL_PREFERRED:
816fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
817dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
81853f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8191da177e4SLinus Torvalds 		break;
8201da177e4SLinus Torvalds 	default:
8211da177e4SLinus Torvalds 		BUG();
8221da177e4SLinus Torvalds 	}
8231da177e4SLinus Torvalds }
8241da177e4SLinus Torvalds 
825d4edcf0dSDave Hansen static int lookup_node(unsigned long addr)
8261da177e4SLinus Torvalds {
8271da177e4SLinus Torvalds 	struct page *p;
8281da177e4SLinus Torvalds 	int err;
8291da177e4SLinus Torvalds 
830768ae309SLorenzo Stoakes 	err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
8311da177e4SLinus Torvalds 	if (err >= 0) {
8321da177e4SLinus Torvalds 		err = page_to_nid(p);
8331da177e4SLinus Torvalds 		put_page(p);
8341da177e4SLinus Torvalds 	}
8351da177e4SLinus Torvalds 	return err;
8361da177e4SLinus Torvalds }
8371da177e4SLinus Torvalds 
8381da177e4SLinus Torvalds /* Retrieve NUMA policy */
839dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8401da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8411da177e4SLinus Torvalds {
8428bccd85fSChristoph Lameter 	int err;
8431da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8441da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8451da177e4SLinus Torvalds 	struct mempolicy *pol = current->mempolicy;
8461da177e4SLinus Torvalds 
847754af6f5SLee Schermerhorn 	if (flags &
848754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8491da177e4SLinus Torvalds 		return -EINVAL;
850754af6f5SLee Schermerhorn 
851754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
852754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
853754af6f5SLee Schermerhorn 			return -EINVAL;
854754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
85558568d2aSMiao Xie 		task_lock(current);
856754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
85758568d2aSMiao Xie 		task_unlock(current);
858754af6f5SLee Schermerhorn 		return 0;
859754af6f5SLee Schermerhorn 	}
860754af6f5SLee Schermerhorn 
8611da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
862bea904d5SLee Schermerhorn 		/*
863bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
864bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
865bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
866bea904d5SLee Schermerhorn 		 */
8671da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8681da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
8691da177e4SLinus Torvalds 		if (!vma) {
8701da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
8711da177e4SLinus Torvalds 			return -EFAULT;
8721da177e4SLinus Torvalds 		}
8731da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
8741da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
8751da177e4SLinus Torvalds 		else
8761da177e4SLinus Torvalds 			pol = vma->vm_policy;
8771da177e4SLinus Torvalds 	} else if (addr)
8781da177e4SLinus Torvalds 		return -EINVAL;
8791da177e4SLinus Torvalds 
8801da177e4SLinus Torvalds 	if (!pol)
881bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
8841da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
885d4edcf0dSDave Hansen 			err = lookup_node(addr);
8861da177e4SLinus Torvalds 			if (err < 0)
8871da177e4SLinus Torvalds 				goto out;
8888bccd85fSChristoph Lameter 			*policy = err;
8891da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
89045c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
89145816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
8921da177e4SLinus Torvalds 		} else {
8931da177e4SLinus Torvalds 			err = -EINVAL;
8941da177e4SLinus Torvalds 			goto out;
8951da177e4SLinus Torvalds 		}
896bea904d5SLee Schermerhorn 	} else {
897bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
898bea904d5SLee Schermerhorn 						pol->mode;
899d79df630SDavid Rientjes 		/*
900d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
901d79df630SDavid Rientjes 		 * the policy to userspace.
902d79df630SDavid Rientjes 		 */
903d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
904bea904d5SLee Schermerhorn 	}
9051da177e4SLinus Torvalds 
9061da177e4SLinus Torvalds 	err = 0;
90758568d2aSMiao Xie 	if (nmask) {
908c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
909c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
910c6b6ef8bSLee Schermerhorn 		} else {
91158568d2aSMiao Xie 			task_lock(current);
912bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
91358568d2aSMiao Xie 			task_unlock(current);
91458568d2aSMiao Xie 		}
915c6b6ef8bSLee Schermerhorn 	}
9161da177e4SLinus Torvalds 
9171da177e4SLinus Torvalds  out:
91852cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9191da177e4SLinus Torvalds 	if (vma)
9201da177e4SLinus Torvalds 		up_read(&current->mm->mmap_sem);
9211da177e4SLinus Torvalds 	return err;
9221da177e4SLinus Torvalds }
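
/*
 * From user space this is reached via get_mempolicy(2).  A rough sketch
 * (assuming the libnuma <numaif.h> prototype) that asks which node backs
 * the page at some address addr:
 *
 *	int node = -1;
 *
 *	err = get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * On success, node holds the NUMA node of the page at addr (the page is
 * faulted in if necessary, see lookup_node() above).
 */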
9231da177e4SLinus Torvalds 
924b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9258bccd85fSChristoph Lameter /*
926c8633798SNaoya Horiguchi  * page migration: thp tail pages can be passed; the compound head is what gets isolated.
9276ce3c4c0SChristoph Lameter  */
928fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
929fc301289SChristoph Lameter 				unsigned long flags)
9306ce3c4c0SChristoph Lameter {
931c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
9326ce3c4c0SChristoph Lameter 	/*
933fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9346ce3c4c0SChristoph Lameter 	 */
935c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
936c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
937c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
938c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
939c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
940c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
94162695a84SNick Piggin 		}
94262695a84SNick Piggin 	}
9436ce3c4c0SChristoph Lameter }
9446ce3c4c0SChristoph Lameter 
945742755a1SChristoph Lameter static struct page *new_node_page(struct page *page, unsigned long node, int **x)
94695a402c3SChristoph Lameter {
947e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
948e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
949e2d8cf40SNaoya Horiguchi 					node);
950c8633798SNaoya Horiguchi 	else if (thp_migration_supported() && PageTransHuge(page)) {
951c8633798SNaoya Horiguchi 		struct page *thp;
952c8633798SNaoya Horiguchi 
953c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
954c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
955c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
956c8633798SNaoya Horiguchi 		if (!thp)
957c8633798SNaoya Horiguchi 			return NULL;
958c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
959c8633798SNaoya Horiguchi 		return thp;
960c8633798SNaoya Horiguchi 	} else
96196db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
962b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
96395a402c3SChristoph Lameter }
96495a402c3SChristoph Lameter 
9656ce3c4c0SChristoph Lameter /*
9667e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
9677e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
9687e2ab150SChristoph Lameter  */
969dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
970dbcb0f19SAdrian Bunk 			   int flags)
9717e2ab150SChristoph Lameter {
9727e2ab150SChristoph Lameter 	nodemask_t nmask;
9737e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
9747e2ab150SChristoph Lameter 	int err = 0;
9757e2ab150SChristoph Lameter 
9767e2ab150SChristoph Lameter 	nodes_clear(nmask);
9777e2ab150SChristoph Lameter 	node_set(source, nmask);
9787e2ab150SChristoph Lameter 
97908270807SMinchan Kim 	/*
98008270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
98108270807SMinchan Kim 	 * need migration.  Between passing in the full user address
98208270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
98308270807SMinchan Kim 	 */
98408270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
98598094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
9867e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
9877e2ab150SChristoph Lameter 
988cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
98968711a74SDavid Rientjes 		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
9909c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
991cf608ac1SMinchan Kim 		if (err)
992e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
993cf608ac1SMinchan Kim 	}
99495a402c3SChristoph Lameter 
9957e2ab150SChristoph Lameter 	return err;
9967e2ab150SChristoph Lameter }
9977e2ab150SChristoph Lameter 
9987e2ab150SChristoph Lameter /*
9997e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10007e2ab150SChristoph Lameter  * layout as much as possible.
100139743889SChristoph Lameter  *
100239743889SChristoph Lameter  * Returns the number of pages that could not be moved.
100339743889SChristoph Lameter  */
10040ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10050ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
100639743889SChristoph Lameter {
10077e2ab150SChristoph Lameter 	int busy = 0;
10080aedadf9SChristoph Lameter 	int err;
10097e2ab150SChristoph Lameter 	nodemask_t tmp;
101039743889SChristoph Lameter 
10110aedadf9SChristoph Lameter 	err = migrate_prep();
10120aedadf9SChristoph Lameter 	if (err)
10130aedadf9SChristoph Lameter 		return err;
10140aedadf9SChristoph Lameter 
101539743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1016d4984711SChristoph Lameter 
10177e2ab150SChristoph Lameter 	/*
10187e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10197e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10207e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10217e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10227e2ab150SChristoph Lameter 	 *
10237e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fall back to picking some
10247e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10257e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10267e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10277e2ab150SChristoph Lameter 	 *
10287e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10297e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10307e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10317e2ab150SChristoph Lameter 	 *
10327e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10337e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10347e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10357e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10367e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10377e2ab150SChristoph Lameter 	 *
10387e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10397e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10407e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10417e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1042ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
10437e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10447e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10457e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10467e2ab150SChristoph Lameter 	 */
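
	/*
	 * A small worked example (illustrative): *from = {0,1}, *to = {1,2},
	 * so tmp starts as {0,1}.  s = 0 maps to d = 1, but node 1 is still
	 * a pending source (set in tmp), so keep scanning; s = 1 maps to
	 * d = 2, which is not in tmp, so migrate 1 -> 2 first.  After node 1
	 * is cleared from tmp, the next pass migrates 0 -> 1.  Moving into
	 * the empty slot first avoids piling two nodes' worth of pages onto
	 * node 1 at once.
	 */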
10477e2ab150SChristoph Lameter 
10480ce72d4fSAndrew Morton 	tmp = *from;
10497e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10507e2ab150SChristoph Lameter 		int s,d;
1051b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10527e2ab150SChristoph Lameter 		int dest = 0;
10537e2ab150SChristoph Lameter 
10547e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10554a5b18ccSLarry Woodman 
10564a5b18ccSLarry Woodman 			/*
10574a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10584a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10594a5b18ccSLarry Woodman 			 * threads and memory areas.
10604a5b18ccSLarry Woodman 			 *
10614a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
10624a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
10634a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
10644a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10654a5b18ccSLarry Woodman 			 * mask.
10664a5b18ccSLarry Woodman 			 *
10674a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10684a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10694a5b18ccSLarry Woodman 			 */
10704a5b18ccSLarry Woodman 
10710ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10720ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10734a5b18ccSLarry Woodman 				continue;
10744a5b18ccSLarry Woodman 
10750ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10767e2ab150SChristoph Lameter 			if (s == d)
10777e2ab150SChristoph Lameter 				continue;
10787e2ab150SChristoph Lameter 
10797e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10807e2ab150SChristoph Lameter 			dest = d;
10817e2ab150SChristoph Lameter 
10827e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
10837e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
10847e2ab150SChristoph Lameter 				break;
10857e2ab150SChristoph Lameter 		}
1086b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
10877e2ab150SChristoph Lameter 			break;
10887e2ab150SChristoph Lameter 
10897e2ab150SChristoph Lameter 		node_clear(source, tmp);
10907e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
10917e2ab150SChristoph Lameter 		if (err > 0)
10927e2ab150SChristoph Lameter 			busy += err;
10937e2ab150SChristoph Lameter 		if (err < 0)
10947e2ab150SChristoph Lameter 			break;
109539743889SChristoph Lameter 	}
109639743889SChristoph Lameter 	up_read(&mm->mmap_sem);
10977e2ab150SChristoph Lameter 	if (err < 0)
10987e2ab150SChristoph Lameter 		return err;
10997e2ab150SChristoph Lameter 	return busy;
1100b20a3503SChristoph Lameter 
110139743889SChristoph Lameter }
110239743889SChristoph Lameter 
11033ad33b24SLee Schermerhorn /*
11043ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1105d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11063ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11073ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11083ad33b24SLee Schermerhorn  * is in virtual address order.
11093ad33b24SLee Schermerhorn  */
1110d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
111195a402c3SChristoph Lameter {
1112d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11133ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
111495a402c3SChristoph Lameter 
1115d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11163ad33b24SLee Schermerhorn 	while (vma) {
11173ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11183ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11193ad33b24SLee Schermerhorn 			break;
11203ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11213ad33b24SLee Schermerhorn 	}
11223ad33b24SLee Schermerhorn 
112311c731e8SWanpeng Li 	if (PageHuge(page)) {
1124cc81717eSMichal Hocko 		BUG_ON(!vma);
112574060e4dSNaoya Horiguchi 		return alloc_huge_page_noerr(vma, address, 1);
1126c8633798SNaoya Horiguchi 	} else if (thp_migration_supported() && PageTransHuge(page)) {
1127c8633798SNaoya Horiguchi 		struct page *thp;
1128c8633798SNaoya Horiguchi 
1129c8633798SNaoya Horiguchi 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1130c8633798SNaoya Horiguchi 					 HPAGE_PMD_ORDER);
1131c8633798SNaoya Horiguchi 		if (!thp)
1132c8633798SNaoya Horiguchi 			return NULL;
1133c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1134c8633798SNaoya Horiguchi 		return thp;
113511c731e8SWanpeng Li 	}
113611c731e8SWanpeng Li 	/*
113711c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
113811c731e8SWanpeng Li 	 */
11390f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
11400f556856SMichal Hocko 			vma, address);
114195a402c3SChristoph Lameter }
1142b20a3503SChristoph Lameter #else
1143b20a3503SChristoph Lameter 
1144b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1145b20a3503SChristoph Lameter 				unsigned long flags)
1146b20a3503SChristoph Lameter {
1147b20a3503SChristoph Lameter }
1148b20a3503SChristoph Lameter 
11490ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11500ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1151b20a3503SChristoph Lameter {
1152b20a3503SChristoph Lameter 	return -ENOSYS;
1153b20a3503SChristoph Lameter }
115495a402c3SChristoph Lameter 
1155d05f0cdcSHugh Dickins static struct page *new_page(struct page *page, unsigned long start, int **x)
115695a402c3SChristoph Lameter {
115795a402c3SChristoph Lameter 	return NULL;
115895a402c3SChristoph Lameter }
1159b20a3503SChristoph Lameter #endif
1160b20a3503SChristoph Lameter 
1161dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1162028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1163028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11646ce3c4c0SChristoph Lameter {
11656ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11666ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11676ce3c4c0SChristoph Lameter 	unsigned long end;
11686ce3c4c0SChristoph Lameter 	int err;
11696ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11706ce3c4c0SChristoph Lameter 
1171b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11726ce3c4c0SChristoph Lameter 		return -EINVAL;
117374c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11746ce3c4c0SChristoph Lameter 		return -EPERM;
11756ce3c4c0SChristoph Lameter 
11766ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11776ce3c4c0SChristoph Lameter 		return -EINVAL;
11786ce3c4c0SChristoph Lameter 
11796ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11806ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11816ce3c4c0SChristoph Lameter 
11826ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11836ce3c4c0SChristoph Lameter 	end = start + len;
11846ce3c4c0SChristoph Lameter 
11856ce3c4c0SChristoph Lameter 	if (end < start)
11866ce3c4c0SChristoph Lameter 		return -EINVAL;
11876ce3c4c0SChristoph Lameter 	if (end == start)
11886ce3c4c0SChristoph Lameter 		return 0;
11896ce3c4c0SChristoph Lameter 
1190028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11916ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11926ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11936ce3c4c0SChristoph Lameter 
1194b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1195b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1196b24f53a0SLee Schermerhorn 
11976ce3c4c0SChristoph Lameter 	/*
11986ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
11996ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12006ce3c4c0SChristoph Lameter 	 */
12016ce3c4c0SChristoph Lameter 	if (!new)
12026ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12036ce3c4c0SChristoph Lameter 
1204028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1205028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
120600ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12076ce3c4c0SChristoph Lameter 
12080aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12090aedadf9SChristoph Lameter 
12100aedadf9SChristoph Lameter 		err = migrate_prep();
12110aedadf9SChristoph Lameter 		if (err)
1212b05ca738SKOSAKI Motohiro 			goto mpol_out;
12130aedadf9SChristoph Lameter 	}
12144bfc4495SKAMEZAWA Hiroyuki 	{
12154bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12164bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12176ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
121858568d2aSMiao Xie 			task_lock(current);
12194bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
122058568d2aSMiao Xie 			task_unlock(current);
12214bfc4495SKAMEZAWA Hiroyuki 			if (err)
122258568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12234bfc4495SKAMEZAWA Hiroyuki 		} else
12244bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12254bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12264bfc4495SKAMEZAWA Hiroyuki 	}
1227b05ca738SKOSAKI Motohiro 	if (err)
1228b05ca738SKOSAKI Motohiro 		goto mpol_out;
1229b05ca738SKOSAKI Motohiro 
1230d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12316ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1232d05f0cdcSHugh Dickins 	if (!err)
12339d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12347e2ab150SChristoph Lameter 
1235b24f53a0SLee Schermerhorn 	if (!err) {
1236b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1237b24f53a0SLee Schermerhorn 
1238cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1239b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1240d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1241d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1242cf608ac1SMinchan Kim 			if (nr_failed)
124374060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1244cf608ac1SMinchan Kim 		}
12456ce3c4c0SChristoph Lameter 
1246b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12476ce3c4c0SChristoph Lameter 			err = -EIO;
1248ab8a3e14SKOSAKI Motohiro 	} else
1249b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1250b20a3503SChristoph Lameter 
12516ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1252b05ca738SKOSAKI Motohiro  mpol_out:
1253f0be3d32SLee Schermerhorn 	mpol_put(new);
12546ce3c4c0SChristoph Lameter 	return err;
12556ce3c4c0SChristoph Lameter }
12566ce3c4c0SChristoph Lameter 
125739743889SChristoph Lameter /*
12588bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12598bccd85fSChristoph Lameter  */
12608bccd85fSChristoph Lameter 
12618bccd85fSChristoph Lameter /* Copy a node mask from user space. */
126239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12638bccd85fSChristoph Lameter 		     unsigned long maxnode)
12648bccd85fSChristoph Lameter {
12658bccd85fSChristoph Lameter 	unsigned long k;
12668bccd85fSChristoph Lameter 	unsigned long nlongs;
12678bccd85fSChristoph Lameter 	unsigned long endmask;
12688bccd85fSChristoph Lameter 
12698bccd85fSChristoph Lameter 	--maxnode;
12708bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12718bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12728bccd85fSChristoph Lameter 		return 0;
1273a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1274636f13c1SChris Wright 		return -EINVAL;
12758bccd85fSChristoph Lameter 
12768bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12778bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12788bccd85fSChristoph Lameter 		endmask = ~0UL;
12798bccd85fSChristoph Lameter 	else
12808bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
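	/*
	 * For example, with BITS_PER_LONG == 64 and maxnode == 68 after the
	 * decrement above: nlongs = 2 and endmask = (1UL << 4) - 1 = 0xf,
	 * i.e. only the low four bits of the last word are significant.
	 */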
12818bccd85fSChristoph Lameter 
12828bccd85fSChristoph Lameter 	/* When the user specified more nodes than supported, just check
12838bccd85fSChristoph Lameter 	   that the unsupported part is all zero. */
12848bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12858bccd85fSChristoph Lameter 		if (nlongs > PAGE_SIZE/sizeof(long))
12868bccd85fSChristoph Lameter 			return -EINVAL;
12878bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12888bccd85fSChristoph Lameter 			unsigned long t;
12898bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12908bccd85fSChristoph Lameter 				return -EFAULT;
12918bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12928bccd85fSChristoph Lameter 				if (t & endmask)
12938bccd85fSChristoph Lameter 					return -EINVAL;
12948bccd85fSChristoph Lameter 			} else if (t)
12958bccd85fSChristoph Lameter 				return -EINVAL;
12968bccd85fSChristoph Lameter 		}
12978bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
12988bccd85fSChristoph Lameter 		endmask = ~0UL;
12998bccd85fSChristoph Lameter 	}
13008bccd85fSChristoph Lameter 
13018bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13028bccd85fSChristoph Lameter 		return -EFAULT;
13038bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13048bccd85fSChristoph Lameter 	return 0;
13058bccd85fSChristoph Lameter }
13068bccd85fSChristoph Lameter 
13078bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13088bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13098bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13108bccd85fSChristoph Lameter {
13118bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13128bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13138bccd85fSChristoph Lameter 
13148bccd85fSChristoph Lameter 	if (copy > nbytes) {
13158bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13168bccd85fSChristoph Lameter 			return -EINVAL;
13178bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13188bccd85fSChristoph Lameter 			return -EFAULT;
13198bccd85fSChristoph Lameter 		copy = nbytes;
13208bccd85fSChristoph Lameter 	}
13218bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13228bccd85fSChristoph Lameter }
13238bccd85fSChristoph Lameter 
1324938bb9f5SHeiko Carstens SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1325f7f28ca9SRasmus Villemoes 		unsigned long, mode, const unsigned long __user *, nmask,
1326938bb9f5SHeiko Carstens 		unsigned long, maxnode, unsigned, flags)
13278bccd85fSChristoph Lameter {
13288bccd85fSChristoph Lameter 	nodemask_t nodes;
13298bccd85fSChristoph Lameter 	int err;
1330028fec41SDavid Rientjes 	unsigned short mode_flags;
13318bccd85fSChristoph Lameter 
1332028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1333028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1334a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1335a3b51e01SDavid Rientjes 		return -EINVAL;
13364c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13374c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13384c50bc01SDavid Rientjes 		return -EINVAL;
13398bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13408bccd85fSChristoph Lameter 	if (err)
13418bccd85fSChristoph Lameter 		return err;
1342028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13438bccd85fSChristoph Lameter }
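
/*
 * A minimal user-space sketch of calling this syscall (assuming the
 * numactl/libnuma <numaif.h> wrapper and a machine with at least two
 * nodes; names and sizes here are illustrative only):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1UL << 20;
 *	unsigned long nodemask = 1UL << 1;	// bind to node 1
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *		  MPOL_MF_STRICT | MPOL_MF_MOVE))
 *		perror("mbind");
 */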
13448bccd85fSChristoph Lameter 
13458bccd85fSChristoph Lameter /* Set the process memory policy */
134623c8902dSRasmus Villemoes SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1347938bb9f5SHeiko Carstens 		unsigned long, maxnode)
13488bccd85fSChristoph Lameter {
13498bccd85fSChristoph Lameter 	int err;
13508bccd85fSChristoph Lameter 	nodemask_t nodes;
1351028fec41SDavid Rientjes 	unsigned short flags;
13528bccd85fSChristoph Lameter 
1353028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1354028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1355028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13568bccd85fSChristoph Lameter 		return -EINVAL;
13574c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13584c50bc01SDavid Rientjes 		return -EINVAL;
13598bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13608bccd85fSChristoph Lameter 	if (err)
13618bccd85fSChristoph Lameter 		return err;
1362028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13638bccd85fSChristoph Lameter }
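
/*
 * A minimal user-space sketch (again assuming the libnuma <numaif.h>
 * wrapper; the nodemask is illustrative):
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */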
13648bccd85fSChristoph Lameter 
1365938bb9f5SHeiko Carstens SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1366938bb9f5SHeiko Carstens 		const unsigned long __user *, old_nodes,
1367938bb9f5SHeiko Carstens 		const unsigned long __user *, new_nodes)
136839743889SChristoph Lameter {
1369596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
137039743889SChristoph Lameter 	struct task_struct *task;
137139743889SChristoph Lameter 	nodemask_t task_nodes;
137239743889SChristoph Lameter 	int err;
1373596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1374596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1375596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
137639743889SChristoph Lameter 
1377596d7cfaSKOSAKI Motohiro 	if (!scratch)
1378596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
137939743889SChristoph Lameter 
1380596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1381596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1382596d7cfaSKOSAKI Motohiro 
1383596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
138439743889SChristoph Lameter 	if (err)
1385596d7cfaSKOSAKI Motohiro 		goto out;
1386596d7cfaSKOSAKI Motohiro 
1387596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1388596d7cfaSKOSAKI Motohiro 	if (err)
1389596d7cfaSKOSAKI Motohiro 		goto out;
139039743889SChristoph Lameter 
139139743889SChristoph Lameter 	/* Find the mm_struct */
139255cfaa3cSZeng Zhaoming 	rcu_read_lock();
1393228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
139439743889SChristoph Lameter 	if (!task) {
139555cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1396596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1397596d7cfaSKOSAKI Motohiro 		goto out;
139839743889SChristoph Lameter 	}
13993268c63eSChristoph Lameter 	get_task_struct(task);
140039743889SChristoph Lameter 
1401596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
140239743889SChristoph Lameter 
140339743889SChristoph Lameter 	/*
1404*31367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
1405*31367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
140639743889SChristoph Lameter 	 */
1407*31367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1408c69e8d9cSDavid Howells 		rcu_read_unlock();
140939743889SChristoph Lameter 		err = -EPERM;
14103268c63eSChristoph Lameter 		goto out_put;
141139743889SChristoph Lameter 	}
1412c69e8d9cSDavid Howells 	rcu_read_unlock();
141339743889SChristoph Lameter 
141439743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
141539743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1416596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
141739743889SChristoph Lameter 		err = -EPERM;
14183268c63eSChristoph Lameter 		goto out_put;
141939743889SChristoph Lameter 	}
142039743889SChristoph Lameter 
142101f13bd6SLai Jiangshan 	if (!nodes_subset(*new, node_states[N_MEMORY])) {
14223b42d28bSChristoph Lameter 		err = -EINVAL;
14233268c63eSChristoph Lameter 		goto out_put;
14243b42d28bSChristoph Lameter 	}
14253b42d28bSChristoph Lameter 
142686c3a764SDavid Quigley 	err = security_task_movememory(task);
142786c3a764SDavid Quigley 	if (err)
14283268c63eSChristoph Lameter 		goto out_put;
142986c3a764SDavid Quigley 
14303268c63eSChristoph Lameter 	mm = get_task_mm(task);
14313268c63eSChristoph Lameter 	put_task_struct(task);
1432f2a9ef88SSasha Levin 
1433f2a9ef88SSasha Levin 	if (!mm) {
1434f2a9ef88SSasha Levin 		err = -EINVAL;
1435f2a9ef88SSasha Levin 		goto out;
1436f2a9ef88SSasha Levin 	}
1437f2a9ef88SSasha Levin 
1438596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
143974c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14403268c63eSChristoph Lameter 
144139743889SChristoph Lameter 	mmput(mm);
14423268c63eSChristoph Lameter out:
1443596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1444596d7cfaSKOSAKI Motohiro 
144539743889SChristoph Lameter 	return err;
14463268c63eSChristoph Lameter 
14473268c63eSChristoph Lameter out_put:
14483268c63eSChristoph Lameter 	put_task_struct(task);
14493268c63eSChristoph Lameter 	goto out;
14503268c63eSChristoph Lameter 
145139743889SChristoph Lameter }
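
/*
 * A minimal user-space sketch (assuming the libnuma <numaif.h> wrapper):
 * move every page of process <pid> from node 0 to node 1.  A positive
 * return value is the number of pages that could not be moved.
 *
 *	unsigned long old_nodes = 1UL << 0, new_nodes = 1UL << 1;
 *	long rc = migrate_pages(pid, sizeof(old_nodes) * 8,
 *				&old_nodes, &new_nodes);
 *	if (rc < 0)
 *		perror("migrate_pages");
 */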
145239743889SChristoph Lameter 
145339743889SChristoph Lameter 
14548bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1455938bb9f5SHeiko Carstens SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1456938bb9f5SHeiko Carstens 		unsigned long __user *, nmask, unsigned long, maxnode,
1457938bb9f5SHeiko Carstens 		unsigned long, addr, unsigned long, flags)
14588bccd85fSChristoph Lameter {
1459dbcb0f19SAdrian Bunk 	int err;
1460dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14618bccd85fSChristoph Lameter 	nodemask_t nodes;
14628bccd85fSChristoph Lameter 
14638bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14648bccd85fSChristoph Lameter 		return -EINVAL;
14658bccd85fSChristoph Lameter 
14668bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14678bccd85fSChristoph Lameter 
14688bccd85fSChristoph Lameter 	if (err)
14698bccd85fSChristoph Lameter 		return err;
14708bccd85fSChristoph Lameter 
14718bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
14728bccd85fSChristoph Lameter 		return -EFAULT;
14738bccd85fSChristoph Lameter 
14748bccd85fSChristoph Lameter 	if (nmask)
14758bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
14768bccd85fSChristoph Lameter 
14778bccd85fSChristoph Lameter 	return err;
14788bccd85fSChristoph Lameter }
14798bccd85fSChristoph Lameter 
14801da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
14811da177e4SLinus Torvalds 
1482c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1483c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1484c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1485c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
14861da177e4SLinus Torvalds {
14871da177e4SLinus Torvalds 	long err;
14881da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
14891da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
14901da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
14911da177e4SLinus Torvalds 
14921da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
14931da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
14941da177e4SLinus Torvalds 
14951da177e4SLinus Torvalds 	if (nmask)
14961da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
14971da177e4SLinus Torvalds 
14981da177e4SLinus Torvalds 	err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
14991da177e4SLinus Torvalds 
15001da177e4SLinus Torvalds 	if (!err && nmask) {
15012bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15022bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15032bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15041da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15051da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15061da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15071da177e4SLinus Torvalds 	}
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 	return err;
15101da177e4SLinus Torvalds }
15111da177e4SLinus Torvalds 
1512c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1513c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15141da177e4SLinus Torvalds {
15151da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15161da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15171da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15201da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15211da177e4SLinus Torvalds 
15221da177e4SLinus Torvalds 	if (nmask) {
1523cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
15241da177e4SLinus Torvalds 			return -EFAULT;
1525cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1526cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1527cf01fb99SChris Salls 			return -EFAULT;
1528cf01fb99SChris Salls 	}
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds 	return sys_set_mempolicy(mode, nm, nr_bits+1);
15311da177e4SLinus Torvalds }
15321da177e4SLinus Torvalds 
1533c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1534c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1535c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15361da177e4SLinus Torvalds {
15371da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15381da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1539dfcd3c0dSAndi Kleen 	nodemask_t bm;
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15421da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds 	if (nmask) {
1545cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
15461da177e4SLinus Torvalds 			return -EFAULT;
1547cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1548cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1549cf01fb99SChris Salls 			return -EFAULT;
1550cf01fb99SChris Salls 	}
15511da177e4SLinus Torvalds 
15521da177e4SLinus Torvalds 	return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
15531da177e4SLinus Torvalds }
15541da177e4SLinus Torvalds 
15551da177e4SLinus Torvalds #endif
15561da177e4SLinus Torvalds 
155774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
155874d2c3a0SOleg Nesterov 						unsigned long addr)
15591da177e4SLinus Torvalds {
15608d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
15611da177e4SLinus Torvalds 
15621da177e4SLinus Torvalds 	if (vma) {
1563480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
15648d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
156500442ad0SMel Gorman 		} else if (vma->vm_policy) {
15661da177e4SLinus Torvalds 			pol = vma->vm_policy;
156700442ad0SMel Gorman 
156800442ad0SMel Gorman 			/*
156900442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
157000442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
157100442ad0SMel Gorman 			 * count on these policies which will be dropped by
157200442ad0SMel Gorman 			 * mpol_cond_put() later
157300442ad0SMel Gorman 			 */
157400442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
157500442ad0SMel Gorman 				mpol_get(pol);
157600442ad0SMel Gorman 		}
15771da177e4SLinus Torvalds 	}
1578f15ca78eSOleg Nesterov 
157974d2c3a0SOleg Nesterov 	return pol;
158074d2c3a0SOleg Nesterov }
158174d2c3a0SOleg Nesterov 
158274d2c3a0SOleg Nesterov /*
1583dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
158474d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
158574d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
158674d2c3a0SOleg Nesterov  *
158774d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1588dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
158974d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
159074d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
159174d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
159274d2c3a0SOleg Nesterov  * extra reference for shared policies.
159374d2c3a0SOleg Nesterov  */
1594dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1595dd6eecb9SOleg Nesterov 						unsigned long addr)
159674d2c3a0SOleg Nesterov {
159774d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
159874d2c3a0SOleg Nesterov 
15998d90274bSOleg Nesterov 	if (!pol)
1600dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16018d90274bSOleg Nesterov 
16021da177e4SLinus Torvalds 	return pol;
16031da177e4SLinus Torvalds }
16041da177e4SLinus Torvalds 
16056b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1606fc314724SMel Gorman {
16076b6482bbSOleg Nesterov 	struct mempolicy *pol;
1608f15ca78eSOleg Nesterov 
1609fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1610fc314724SMel Gorman 		bool ret = false;
1611fc314724SMel Gorman 
1612fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1613fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1614fc314724SMel Gorman 			ret = true;
1615fc314724SMel Gorman 		mpol_cond_put(pol);
1616fc314724SMel Gorman 
1617fc314724SMel Gorman 		return ret;
16188d90274bSOleg Nesterov 	}
16198d90274bSOleg Nesterov 
1620fc314724SMel Gorman 	pol = vma->vm_policy;
16218d90274bSOleg Nesterov 	if (!pol)
16226b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1623fc314724SMel Gorman 
1624fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1625fc314724SMel Gorman }
1626fc314724SMel Gorman 
1627d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1628d3eb1570SLai Jiangshan {
1629d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1630d3eb1570SLai Jiangshan 
1631d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1632d3eb1570SLai Jiangshan 
1633d3eb1570SLai Jiangshan 	/*
1634d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1635d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1636d3eb1570SLai Jiangshan 	 *
1637d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1638d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1639d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1640d3eb1570SLai Jiangshan 	 */
1641d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1642d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1643d3eb1570SLai Jiangshan 
1644d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1645d3eb1570SLai Jiangshan }
1646d3eb1570SLai Jiangshan 
164752cd3b07SLee Schermerhorn /*
164852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
164952cd3b07SLee Schermerhorn  * page allocation
165052cd3b07SLee Schermerhorn  */
165152cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
165219770b32SMel Gorman {
165319770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
165445c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1655d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
165619770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
165719770b32SMel Gorman 		return &policy->v.nodes;
165819770b32SMel Gorman 
165919770b32SMel Gorman 	return NULL;
166019770b32SMel Gorman }
166119770b32SMel Gorman 
166204ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
166304ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
16642f5f9486SAndi Kleen 								int nd)
16651da177e4SLinus Torvalds {
16666d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
16671da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
16686d840958SMichal Hocko 	else {
166919770b32SMel Gorman 		/*
16706d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
16716d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
16726d840958SMichal Hocko 		 * requested node and not break the policy.
167319770b32SMel Gorman 		 */
16746d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
16751da177e4SLinus Torvalds 	}
16766d840958SMichal Hocko 
167704ec6264SVlastimil Babka 	return nd;
16781da177e4SLinus Torvalds }
16791da177e4SLinus Torvalds 
16801da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
16811da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
16821da177e4SLinus Torvalds {
168345816682SVlastimil Babka 	unsigned next;
16841da177e4SLinus Torvalds 	struct task_struct *me = current;
16851da177e4SLinus Torvalds 
168645816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1687f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
168845816682SVlastimil Babka 		me->il_prev = next;
168945816682SVlastimil Babka 	return next;
16901da177e4SLinus Torvalds }
16911da177e4SLinus Torvalds 
1692dc85da15SChristoph Lameter /*
1693dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1694dc85da15SChristoph Lameter  * next slab entry.
1695dc85da15SChristoph Lameter  */
16962a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1697dc85da15SChristoph Lameter {
1698e7b691b0SAndi Kleen 	struct mempolicy *policy;
16992a389610SDavid Rientjes 	int node = numa_mem_id();
1700e7b691b0SAndi Kleen 
1701e7b691b0SAndi Kleen 	if (in_interrupt())
17022a389610SDavid Rientjes 		return node;
1703e7b691b0SAndi Kleen 
1704e7b691b0SAndi Kleen 	policy = current->mempolicy;
1705fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17062a389610SDavid Rientjes 		return node;
1707765c4507SChristoph Lameter 
1708bea904d5SLee Schermerhorn 	switch (policy->mode) {
1709bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1710fc36b8d3SLee Schermerhorn 		/*
1711fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1712fc36b8d3SLee Schermerhorn 		 */
1713bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1714bea904d5SLee Schermerhorn 
1715dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1716dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1717dc85da15SChristoph Lameter 
1718dd1a239fSMel Gorman 	case MPOL_BIND: {
1719c33d6c06SMel Gorman 		struct zoneref *z;
1720c33d6c06SMel Gorman 
1721dc85da15SChristoph Lameter 		/*
1722dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1723dc85da15SChristoph Lameter 		 * first node.
1724dc85da15SChristoph Lameter 		 */
172519770b32SMel Gorman 		struct zonelist *zonelist;
172619770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1727c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1728c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1729c33d6c06SMel Gorman 							&policy->v.nodes);
1730c33d6c06SMel Gorman 		return z->zone ? z->zone->node : node;
1731dd1a239fSMel Gorman 	}
1732dc85da15SChristoph Lameter 
1733dc85da15SChristoph Lameter 	default:
1734bea904d5SLee Schermerhorn 		BUG();
1735dc85da15SChristoph Lameter 	}
1736dc85da15SChristoph Lameter }
1737dc85da15SChristoph Lameter 
1738fee83b3aSAndrew Morton /*
1739fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1740fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1741fee83b3aSAndrew Morton  * number of present nodes.
1742fee83b3aSAndrew Morton  */
174398c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
17441da177e4SLinus Torvalds {
1745dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1746f5b087b5SDavid Rientjes 	unsigned target;
1747fee83b3aSAndrew Morton 	int i;
1748fee83b3aSAndrew Morton 	int nid;
17491da177e4SLinus Torvalds 
1750f5b087b5SDavid Rientjes 	if (!nnodes)
1751f5b087b5SDavid Rientjes 		return numa_node_id();
1752fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1753fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1754fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1755dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
17561da177e4SLinus Torvalds 	return nid;
17571da177e4SLinus Torvalds }
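
/*
 * A worked example of offset_il_node() above, assuming an illustrative
 * pol->v.nodes = {0,2,5} (nnodes = 3) and n = 7: target = 7 % 3 = 1, so
 * we start at node 0 and step forward once, returning node 2.
 */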
17581da177e4SLinus Torvalds 
17595da7ca86SChristoph Lameter /* Determine a node number for interleave */
17605da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
17615da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
17625da7ca86SChristoph Lameter {
17635da7ca86SChristoph Lameter 	if (vma) {
17645da7ca86SChristoph Lameter 		unsigned long off;
17655da7ca86SChristoph Lameter 
17663b98b087SNishanth Aravamudan 		/*
17673b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
17683b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
17693b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
17703b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
17713b98b087SNishanth Aravamudan 		 * a useful offset.
17723b98b087SNishanth Aravamudan 		 */
17733b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
17743b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
17755da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
177698c70baaSLaurent Dufour 		return offset_il_node(pol, off);
17775da7ca86SChristoph Lameter 	} else
17785da7ca86SChristoph Lameter 		return interleave_nodes(pol);
17795da7ca86SChristoph Lameter }
17805da7ca86SChristoph Lameter 
178100ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1782480eccf9SLee Schermerhorn /*
178304ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1784b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1785b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1786b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1787b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1788b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1789480eccf9SLee Schermerhorn  *
179004ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
179152cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
179252cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
179352cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1794c0ff7453SMiao Xie  *
1795d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1796480eccf9SLee Schermerhorn  */
179704ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
179804ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
17995da7ca86SChristoph Lameter {
180004ec6264SVlastimil Babka 	int nid;
18015da7ca86SChristoph Lameter 
1802dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
180319770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18045da7ca86SChristoph Lameter 
180552cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
180604ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
180704ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
180852cd3b07SLee Schermerhorn 	} else {
180904ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
181052cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
181152cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1812480eccf9SLee Schermerhorn 	}
181304ec6264SVlastimil Babka 	return nid;
18145da7ca86SChristoph Lameter }
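
/*
 * A minimal caller sketch (an assumption about usage, not lifted from
 * the hugetlb code): look up the policy, allocate with the returned nid
 * and nodemask, then drop any conditional reference taken on the policy:
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	page = __alloc_pages_nodemask(gfp_mask, order, nid, nodemask);
 *	mpol_cond_put(mpol);
 */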
181506808b08SLee Schermerhorn 
181606808b08SLee Schermerhorn /*
181706808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
181806808b08SLee Schermerhorn  *
181906808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
182006808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
182106808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
182206808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
182306808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
182406808b08SLee Schermerhorn  * of non-default mempolicy.
182506808b08SLee Schermerhorn  *
182606808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
182706808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
182806808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
182906808b08SLee Schermerhorn  *
183006808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
183106808b08SLee Schermerhorn  */
183206808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
183306808b08SLee Schermerhorn {
183406808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
183506808b08SLee Schermerhorn 	int nid;
183606808b08SLee Schermerhorn 
183706808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
183806808b08SLee Schermerhorn 		return false;
183906808b08SLee Schermerhorn 
1840c0ff7453SMiao Xie 	task_lock(current);
184106808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
184206808b08SLee Schermerhorn 	switch (mempolicy->mode) {
184306808b08SLee Schermerhorn 	case MPOL_PREFERRED:
184406808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
184506808b08SLee Schermerhorn 			nid = numa_node_id();
184606808b08SLee Schermerhorn 		else
184706808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
184806808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
184906808b08SLee Schermerhorn 		break;
185006808b08SLee Schermerhorn 
185106808b08SLee Schermerhorn 	case MPOL_BIND:
185206808b08SLee Schermerhorn 		/* Fall through */
185306808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
185406808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
185506808b08SLee Schermerhorn 		break;
185606808b08SLee Schermerhorn 
185706808b08SLee Schermerhorn 	default:
185806808b08SLee Schermerhorn 		BUG();
185906808b08SLee Schermerhorn 	}
1860c0ff7453SMiao Xie 	task_unlock(current);
186106808b08SLee Schermerhorn 
186206808b08SLee Schermerhorn 	return true;
186306808b08SLee Schermerhorn }
186400ac59adSChen, Kenneth W #endif
18655da7ca86SChristoph Lameter 
18666f48d0ebSDavid Rientjes /*
18676f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
18686f48d0ebSDavid Rientjes  *
18696f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
18706f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
18716f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
18726f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
18736f48d0ebSDavid Rientjes  *
18746f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
18756f48d0ebSDavid Rientjes  */
18766f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
18776f48d0ebSDavid Rientjes 					const nodemask_t *mask)
18786f48d0ebSDavid Rientjes {
18796f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
18806f48d0ebSDavid Rientjes 	bool ret = true;
18816f48d0ebSDavid Rientjes 
18826f48d0ebSDavid Rientjes 	if (!mask)
18836f48d0ebSDavid Rientjes 		return ret;
18846f48d0ebSDavid Rientjes 	task_lock(tsk);
18856f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
18866f48d0ebSDavid Rientjes 	if (!mempolicy)
18876f48d0ebSDavid Rientjes 		goto out;
18886f48d0ebSDavid Rientjes 
18896f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
18906f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
18916f48d0ebSDavid Rientjes 		/*
18926f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
18936f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
18946f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
18956f48d0ebSDavid Rientjes 		 * nodes in mask.
18966f48d0ebSDavid Rientjes 		 */
18976f48d0ebSDavid Rientjes 		break;
18986f48d0ebSDavid Rientjes 	case MPOL_BIND:
18996f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19006f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19016f48d0ebSDavid Rientjes 		break;
19026f48d0ebSDavid Rientjes 	default:
19036f48d0ebSDavid Rientjes 		BUG();
19046f48d0ebSDavid Rientjes 	}
19056f48d0ebSDavid Rientjes out:
19066f48d0ebSDavid Rientjes 	task_unlock(tsk);
19076f48d0ebSDavid Rientjes 	return ret;
19086f48d0ebSDavid Rientjes }
19096f48d0ebSDavid Rientjes 
19101da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19111da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1912662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1913662f3a0bSAndi Kleen 					unsigned nid)
19141da177e4SLinus Torvalds {
19151da177e4SLinus Torvalds 	struct page *page;
19161da177e4SLinus Torvalds 
191704ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
1918de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
1919de55c8b2SAndrey Ryabinin 		preempt_disable();
1920de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1921de55c8b2SAndrey Ryabinin 		preempt_enable();
1922de55c8b2SAndrey Ryabinin 	}
19231da177e4SLinus Torvalds 	return page;
19241da177e4SLinus Torvalds }
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds /**
19270bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
19281da177e4SLinus Torvalds  *
19291da177e4SLinus Torvalds  * 	@gfp:
19301da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
19311da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
19321da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
19331da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
19341da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
19351da177e4SLinus Torvalds  *
19360bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
19371da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
19381da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
1939be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
1940be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
19411da177e4SLinus Torvalds  *
19421da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
19431da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
19441da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
19451da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
1946be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
1947be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
19481da177e4SLinus Torvalds  */
19491da177e4SLinus Torvalds struct page *
19500bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1951be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
19521da177e4SLinus Torvalds {
1953cc9a6c87SMel Gorman 	struct mempolicy *pol;
1954c0ff7453SMiao Xie 	struct page *page;
195504ec6264SVlastimil Babka 	int preferred_nid;
1956be97a41bSVlastimil Babka 	nodemask_t *nmask;
19571da177e4SLinus Torvalds 
1958dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
1959cc9a6c87SMel Gorman 
1960be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
19611da177e4SLinus Torvalds 		unsigned nid;
19625da7ca86SChristoph Lameter 
19638eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
196452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
19650bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
1966be97a41bSVlastimil Babka 		goto out;
19671da177e4SLinus Torvalds 	}
19681da177e4SLinus Torvalds 
19690867a57cSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
19700867a57cSVlastimil Babka 		int hpage_node = node;
19710867a57cSVlastimil Babka 
19720867a57cSVlastimil Babka 		/*
19730867a57cSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
19740867a57cSVlastimil Babka 		 * allows the current node (or other explicitly preferred
19750867a57cSVlastimil Babka 		 * node) we only try to allocate from the current/preferred
19760867a57cSVlastimil Babka 		 * node and don't fall back to other nodes, as the cost of
19770867a57cSVlastimil Babka 		 * remote accesses would likely offset THP benefits.
19780867a57cSVlastimil Babka 		 *
19790867a57cSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
19800867a57cSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
19810867a57cSVlastimil Babka 		 */
19820867a57cSVlastimil Babka 		if (pol->mode == MPOL_PREFERRED &&
19830867a57cSVlastimil Babka 						!(pol->flags & MPOL_F_LOCAL))
19840867a57cSVlastimil Babka 			hpage_node = pol->v.preferred_node;
19850867a57cSVlastimil Babka 
19860867a57cSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
19870867a57cSVlastimil Babka 		if (!nmask || node_isset(hpage_node, *nmask)) {
19880867a57cSVlastimil Babka 			mpol_cond_put(pol);
198996db800fSVlastimil Babka 			page = __alloc_pages_node(hpage_node,
19900867a57cSVlastimil Babka 						gfp | __GFP_THISNODE, order);
19910867a57cSVlastimil Babka 			goto out;
19920867a57cSVlastimil Babka 		}
19930867a57cSVlastimil Babka 	}
19940867a57cSVlastimil Babka 
1995077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
199604ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
199704ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
1998d51e9894SVlastimil Babka 	mpol_cond_put(pol);
1999be97a41bSVlastimil Babka out:
2000077fcf11SAneesh Kumar K.V 	return page;
2001077fcf11SAneesh Kumar K.V }
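
/*
 * A minimal caller sketch (illustrative, not an actual call site):
 * allocate one movable user page for a fault at @addr, preferring the
 * faulting CPU's node:
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */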
2002077fcf11SAneesh Kumar K.V 
20031da177e4SLinus Torvalds /**
20041da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20051da177e4SLinus Torvalds  *
20061da177e4SLinus Torvalds  *	@gfp:
20071da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20081da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20091da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20101da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20111da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20121da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20131da177e4SLinus Torvalds  *
20141da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20151da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20161da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20171da177e4SLinus Torvalds  */
2018dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20191da177e4SLinus Torvalds {
20208d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2021c0ff7453SMiao Xie 	struct page *page;
20221da177e4SLinus Torvalds 
20238d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20248d90274bSOleg Nesterov 		pol = get_task_policy(current);
202552cd3b07SLee Schermerhorn 
202652cd3b07SLee Schermerhorn 	/*
202752cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
202852cd3b07SLee Schermerhorn 	 * nor system default_policy
202952cd3b07SLee Schermerhorn 	 */
203045c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2031c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2032c0ff7453SMiao Xie 	else
2033c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
203404ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
20355c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2036cc9a6c87SMel Gorman 
2037c0ff7453SMiao Xie 	return page;
20381da177e4SLinus Torvalds }
20391da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
20401da177e4SLinus Torvalds 
2041ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2042ef0855d3SOleg Nesterov {
2043ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2044ef0855d3SOleg Nesterov 
2045ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2046ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2047ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2048ef0855d3SOleg Nesterov 	return 0;
2049ef0855d3SOleg Nesterov }
2050ef0855d3SOleg Nesterov 
20514225399aSPaul Jackson /*
2052846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
20534225399aSPaul Jackson  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
20544225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
20554225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
20564225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2057708c1bbcSMiao Xie  *
2058708c1bbcSMiao Xie  * current's mempolicy may be rebound by the other task (the task that changes
2059708c1bbcSMiao Xie  * cpuset's mems), so we needn't do rebind work for current task.
20604225399aSPaul Jackson  */
20614225399aSPaul Jackson 
2062846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2063846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
20641da177e4SLinus Torvalds {
20651da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
20661da177e4SLinus Torvalds 
20671da177e4SLinus Torvalds 	if (!new)
20681da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2069708c1bbcSMiao Xie 
2070708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2071708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2072708c1bbcSMiao Xie 		task_lock(current);
2073708c1bbcSMiao Xie 		*new = *old;
2074708c1bbcSMiao Xie 		task_unlock(current);
2075708c1bbcSMiao Xie 	} else
2076708c1bbcSMiao Xie 		*new = *old;
2077708c1bbcSMiao Xie 
20784225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
20794225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2080213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
20814225399aSPaul Jackson 	}
20821da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
20831da177e4SLinus Torvalds 	return new;
20841da177e4SLinus Torvalds }
20851da177e4SLinus Torvalds 
20861da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2087fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
20881da177e4SLinus Torvalds {
20891da177e4SLinus Torvalds 	if (!a || !b)
2090fcfb4dccSKOSAKI Motohiro 		return false;
209145c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2092fcfb4dccSKOSAKI Motohiro 		return false;
209319800502SBob Liu 	if (a->flags != b->flags)
2094fcfb4dccSKOSAKI Motohiro 		return false;
209519800502SBob Liu 	if (mpol_store_user_nodemask(a))
209619800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2097fcfb4dccSKOSAKI Motohiro 			return false;
209819800502SBob Liu 
209945c4745aSLee Schermerhorn 	switch (a->mode) {
210019770b32SMel Gorman 	case MPOL_BIND:
210119770b32SMel Gorman 		/* Fall through */
21021da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2103fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21041da177e4SLinus Torvalds 	case MPOL_PREFERRED:
210575719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21061da177e4SLinus Torvalds 	default:
21071da177e4SLinus Torvalds 		BUG();
2108fcfb4dccSKOSAKI Motohiro 		return false;
21091da177e4SLinus Torvalds 	}
21101da177e4SLinus Torvalds }
21111da177e4SLinus Torvalds 
21121da177e4SLinus Torvalds /*
21131da177e4SLinus Torvalds  * Shared memory backing store policy support.
21141da177e4SLinus Torvalds  *
21151da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21161da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21174a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
21181da177e4SLinus Torvalds  * for any accesses to the tree.
21191da177e4SLinus Torvalds  */
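/*
 * tmpfs/shmem inodes are the primary users: they embed a struct
 * shared_policy and consult it via mpol_shared_policy_lookup().
 */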
21201da177e4SLinus Torvalds 
21214a8c7bb5SNathan Zimmer /*
21224a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
21234a8c7bb5SNathan Zimmer  * for reading or for writing.
21244a8c7bb5SNathan Zimmer  */
21251da177e4SLinus Torvalds static struct sp_node *
21261da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
21271da177e4SLinus Torvalds {
21281da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
21291da177e4SLinus Torvalds 
21301da177e4SLinus Torvalds 	while (n) {
21311da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 		if (start >= p->end)
21341da177e4SLinus Torvalds 			n = n->rb_right;
21351da177e4SLinus Torvalds 		else if (end <= p->start)
21361da177e4SLinus Torvalds 			n = n->rb_left;
21371da177e4SLinus Torvalds 		else
21381da177e4SLinus Torvalds 			break;
21391da177e4SLinus Torvalds 	}
21401da177e4SLinus Torvalds 	if (!n)
21411da177e4SLinus Torvalds 		return NULL;
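	/*
	 * n intersects [start, end); walk backwards to the lowest-addressed
	 * node that still intersects, so the caller sees ranges in order.
	 */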
21421da177e4SLinus Torvalds 	for (;;) {
21431da177e4SLinus Torvalds 		struct sp_node *w = NULL;
21441da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
21451da177e4SLinus Torvalds 		if (!prev)
21461da177e4SLinus Torvalds 			break;
21471da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
21481da177e4SLinus Torvalds 		if (w->end <= start)
21491da177e4SLinus Torvalds 			break;
21501da177e4SLinus Torvalds 		n = prev;
21511da177e4SLinus Torvalds 	}
21521da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
21531da177e4SLinus Torvalds }
21541da177e4SLinus Torvalds 
21554a8c7bb5SNathan Zimmer /*
21564a8c7bb5SNathan Zimmer  * Insert a new shared policy into the tree.  Caller holds sp->lock for
21574a8c7bb5SNathan Zimmer  * writing.
21584a8c7bb5SNathan Zimmer  */
21591da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
21601da177e4SLinus Torvalds {
21611da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
21621da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
21631da177e4SLinus Torvalds 	struct sp_node *nd;
21641da177e4SLinus Torvalds 
21651da177e4SLinus Torvalds 	while (*p) {
21661da177e4SLinus Torvalds 		parent = *p;
21671da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
21681da177e4SLinus Torvalds 		if (new->start < nd->start)
21691da177e4SLinus Torvalds 			p = &(*p)->rb_left;
21701da177e4SLinus Torvalds 		else if (new->end > nd->end)
21711da177e4SLinus Torvalds 			p = &(*p)->rb_right;
21721da177e4SLinus Torvalds 		else
21731da177e4SLinus Torvalds 			BUG();
21741da177e4SLinus Torvalds 	}
21751da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
21761da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2177140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
217845c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
21791da177e4SLinus Torvalds }
21801da177e4SLinus Torvalds 
21811da177e4SLinus Torvalds /* Find shared policy intersecting idx */
21821da177e4SLinus Torvalds struct mempolicy *
21831da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
21841da177e4SLinus Torvalds {
21851da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
21861da177e4SLinus Torvalds 	struct sp_node *sn;
21871da177e4SLinus Torvalds 
21881da177e4SLinus Torvalds 	if (!sp->root.rb_node)
21891da177e4SLinus Torvalds 		return NULL;
21904a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
21911da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
21921da177e4SLinus Torvalds 	if (sn) {
21931da177e4SLinus Torvalds 		mpol_get(sn->policy);
21941da177e4SLinus Torvalds 		pol = sn->policy;
21951da177e4SLinus Torvalds 	}
21964a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
21971da177e4SLinus Torvalds 	return pol;
21981da177e4SLinus Torvalds }
21991da177e4SLinus Torvalds 
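/* Drop the node's reference on its policy and free the rb-tree node. */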
220063f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
220163f74ca2SKOSAKI Motohiro {
220263f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
220363f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
220463f74ca2SKOSAKI Motohiro }
220563f74ca2SKOSAKI Motohiro 
2206771fb4d8SLee Schermerhorn /**
2207771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2208771fb4d8SLee Schermerhorn  *
2209b46e14acSFabian Frederick  * @page: page to be checked
2210b46e14acSFabian Frederick  * @vma: vm area where page mapped
2211b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2212771fb4d8SLee Schermerhorn  *
2213771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2214771fb4d8SLee Schermerhorn  * page's node id.
2215771fb4d8SLee Schermerhorn  *
2216771fb4d8SLee Schermerhorn  * Returns:
2217771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2218771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2219771fb4d8SLee Schermerhorn  *
2220771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2221771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2222771fb4d8SLee Schermerhorn  */
2223771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2224771fb4d8SLee Schermerhorn {
2225771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2226c33d6c06SMel Gorman 	struct zoneref *z;
2227771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2228771fb4d8SLee Schermerhorn 	unsigned long pgoff;
222990572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
223090572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2231771fb4d8SLee Schermerhorn 	int polnid = -1;
2232771fb4d8SLee Schermerhorn 	int ret = -1;
2233771fb4d8SLee Schermerhorn 
2234dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2235771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2236771fb4d8SLee Schermerhorn 		goto out;
2237771fb4d8SLee Schermerhorn 
2238771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2239771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2240771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2241771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
224298c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2243771fb4d8SLee Schermerhorn 		break;
2244771fb4d8SLee Schermerhorn 
2245771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2246771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2247771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2248771fb4d8SLee Schermerhorn 		else
2249771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2250771fb4d8SLee Schermerhorn 		break;
2251771fb4d8SLee Schermerhorn 
2252771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2253c33d6c06SMel Gorman 
2254771fb4d8SLee Schermerhorn 		/*
2255771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2256771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2257771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2258771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2259771fb4d8SLee Schermerhorn 		 */
2260771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2261771fb4d8SLee Schermerhorn 			goto out;
2262c33d6c06SMel Gorman 		z = first_zones_zonelist(
2263771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2264771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2265c33d6c06SMel Gorman 				&pol->v.nodes);
2266c33d6c06SMel Gorman 		polnid = z->zone->node;
2267771fb4d8SLee Schermerhorn 		break;
2268771fb4d8SLee Schermerhorn 
2269771fb4d8SLee Schermerhorn 	default:
2270771fb4d8SLee Schermerhorn 		BUG();
2271771fb4d8SLee Schermerhorn 	}
22725606e387SMel Gorman 
22735606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2274e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
227590572890SPeter Zijlstra 		polnid = thisnid;
22765606e387SMel Gorman 
227710f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2278de1c9ce6SRik van Riel 			goto out;
2279de1c9ce6SRik van Riel 	}
2280e42c8ff2SMel Gorman 
2281771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2282771fb4d8SLee Schermerhorn 		ret = polnid;
2283771fb4d8SLee Schermerhorn out:
2284771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2285771fb4d8SLee Schermerhorn 
2286771fb4d8SLee Schermerhorn 	return ret;
2287771fb4d8SLee Schermerhorn }
2288771fb4d8SLee Schermerhorn 
2289c11600e4SDavid Rientjes /*
2290c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2291c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2292c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2293c11600e4SDavid Rientjes  * policy.
2294c11600e4SDavid Rientjes  */
2295c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2296c11600e4SDavid Rientjes {
2297c11600e4SDavid Rientjes 	struct mempolicy *pol;
2298c11600e4SDavid Rientjes 
2299c11600e4SDavid Rientjes 	task_lock(task);
2300c11600e4SDavid Rientjes 	pol = task->mempolicy;
2301c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2302c11600e4SDavid Rientjes 	task_unlock(task);
2303c11600e4SDavid Rientjes 	mpol_put(pol);
2304c11600e4SDavid Rientjes }
2305c11600e4SDavid Rientjes 
23061da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23071da177e4SLinus Torvalds {
2308140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23091da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
231063f74ca2SKOSAKI Motohiro 	sp_free(n);
23111da177e4SLinus Torvalds }
23121da177e4SLinus Torvalds 
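/*
 * Fill in an already-allocated sp_node; the node takes over the caller's
 * reference on @pol.
 */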
231342288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
231442288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
231542288fe3SMel Gorman {
231642288fe3SMel Gorman 	node->start = start;
231742288fe3SMel Gorman 	node->end = end;
231842288fe3SMel Gorman 	node->policy = pol;
231942288fe3SMel Gorman }
232042288fe3SMel Gorman 
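/*
 * Allocate a shared-policy node covering [start, end) with a private,
 * MPOL_F_SHARED-marked copy of @pol.
 */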
2321dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2322dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
23231da177e4SLinus Torvalds {
2324869833f2SKOSAKI Motohiro 	struct sp_node *n;
2325869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
23261da177e4SLinus Torvalds 
2327869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
23281da177e4SLinus Torvalds 	if (!n)
23291da177e4SLinus Torvalds 		return NULL;
2330869833f2SKOSAKI Motohiro 
2331869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2332869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2333869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2334869833f2SKOSAKI Motohiro 		return NULL;
2335869833f2SKOSAKI Motohiro 	}
2336869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
233742288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2338869833f2SKOSAKI Motohiro 
23391da177e4SLinus Torvalds 	return n;
23401da177e4SLinus Torvalds }
23411da177e4SLinus Torvalds 
23421da177e4SLinus Torvalds /* Replace a policy range. */
23431da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
23441da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
23451da177e4SLinus Torvalds {
2346b22d127aSMel Gorman 	struct sp_node *n;
234742288fe3SMel Gorman 	struct sp_node *n_new = NULL;
234842288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2349b22d127aSMel Gorman 	int ret = 0;
23501da177e4SLinus Torvalds 
235142288fe3SMel Gorman restart:
23524a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
23531da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
23541da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
23551da177e4SLinus Torvalds 	while (n && n->start < end) {
23561da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
23571da177e4SLinus Torvalds 		if (n->start >= start) {
23581da177e4SLinus Torvalds 			if (n->end <= end)
23591da177e4SLinus Torvalds 				sp_delete(sp, n);
23601da177e4SLinus Torvalds 			else
23611da177e4SLinus Torvalds 				n->start = end;
23621da177e4SLinus Torvalds 		} else {
23631da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
23641da177e4SLinus Torvalds 			if (n->end > end) {
236542288fe3SMel Gorman 				if (!n_new)
236642288fe3SMel Gorman 					goto alloc_new;
236742288fe3SMel Gorman 
236842288fe3SMel Gorman 				*mpol_new = *n->policy;
236942288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
23707880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
23711da177e4SLinus Torvalds 				n->end = start;
23725ca39575SHillf Danton 				sp_insert(sp, n_new);
237342288fe3SMel Gorman 				n_new = NULL;
237442288fe3SMel Gorman 				mpol_new = NULL;
23751da177e4SLinus Torvalds 				break;
23761da177e4SLinus Torvalds 			} else
23771da177e4SLinus Torvalds 				n->end = start;
23781da177e4SLinus Torvalds 		}
23791da177e4SLinus Torvalds 		if (!next)
23801da177e4SLinus Torvalds 			break;
23811da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
23821da177e4SLinus Torvalds 	}
23831da177e4SLinus Torvalds 	if (new)
23841da177e4SLinus Torvalds 		sp_insert(sp, new);
23854a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
238642288fe3SMel Gorman 	ret = 0;
238742288fe3SMel Gorman 
238842288fe3SMel Gorman err_out:
238942288fe3SMel Gorman 	if (mpol_new)
239042288fe3SMel Gorman 		mpol_put(mpol_new);
239142288fe3SMel Gorman 	if (n_new)
239242288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
239342288fe3SMel Gorman 
2394b22d127aSMel Gorman 	return ret;
239542288fe3SMel Gorman 
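	/*
	 * Splitting an existing range needs a fresh sp_node and mempolicy.
	 * The allocations can sleep, so drop sp->lock, allocate, and retry
	 * the whole replacement from the top.
	 */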
239642288fe3SMel Gorman alloc_new:
23974a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
239842288fe3SMel Gorman 	ret = -ENOMEM;
239942288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
240042288fe3SMel Gorman 	if (!n_new)
240142288fe3SMel Gorman 		goto err_out;
240242288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
240342288fe3SMel Gorman 	if (!mpol_new)
240442288fe3SMel Gorman 		goto err_out;
240542288fe3SMel Gorman 	goto restart;
24061da177e4SLinus Torvalds }
24071da177e4SLinus Torvalds 
240871fe804bSLee Schermerhorn /**
240971fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
241071fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
241171fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
241271fe804bSLee Schermerhorn  *
241371fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
241471fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
241571fe804bSLee Schermerhorn  * This must be released on exit.
24164bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() calls and we can use GFP_KERNEL.
241771fe804bSLee Schermerhorn  */
241871fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24197339ff83SRobin Holt {
242058568d2aSMiao Xie 	int ret;
242158568d2aSMiao Xie 
242271fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
24234a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
24247339ff83SRobin Holt 
242571fe804bSLee Schermerhorn 	if (mpol) {
24267339ff83SRobin Holt 		struct vm_area_struct pvma;
242771fe804bSLee Schermerhorn 		struct mempolicy *new;
24284bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
24297339ff83SRobin Holt 
24304bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
24315c0c1654SLee Schermerhorn 			goto put_mpol;
243271fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
243371fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
243415d77835SLee Schermerhorn 		if (IS_ERR(new))
24350cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
243658568d2aSMiao Xie 
243758568d2aSMiao Xie 		task_lock(current);
24384bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
243958568d2aSMiao Xie 		task_unlock(current);
244015d77835SLee Schermerhorn 		if (ret)
24415c0c1654SLee Schermerhorn 			goto put_new;
244271fe804bSLee Schermerhorn 
244371fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
24447339ff83SRobin Holt 		memset(&pvma, 0, sizeof(struct vm_area_struct));
244571fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
244671fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
244715d77835SLee Schermerhorn 
24485c0c1654SLee Schermerhorn put_new:
244971fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
24500cae3457SDan Carpenter free_scratch:
24514bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
24525c0c1654SLee Schermerhorn put_mpol:
24535c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
24547339ff83SRobin Holt 	}
24557339ff83SRobin Holt }
24567339ff83SRobin Holt 
24571da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
24581da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
24591da177e4SLinus Torvalds {
24601da177e4SLinus Torvalds 	int err;
24611da177e4SLinus Torvalds 	struct sp_node *new = NULL;
24621da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
24631da177e4SLinus Torvalds 
2464028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
24651da177e4SLinus Torvalds 		 vma->vm_pgoff,
246645c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2467028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
246800ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
24691da177e4SLinus Torvalds 
24701da177e4SLinus Torvalds 	if (npol) {
24711da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
24721da177e4SLinus Torvalds 		if (!new)
24731da177e4SLinus Torvalds 			return -ENOMEM;
24741da177e4SLinus Torvalds 	}
24751da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
24761da177e4SLinus Torvalds 	if (err && new)
247763f74ca2SKOSAKI Motohiro 		sp_free(new);
24781da177e4SLinus Torvalds 	return err;
24791da177e4SLinus Torvalds }
24801da177e4SLinus Torvalds 
24811da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
24821da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
24831da177e4SLinus Torvalds {
24841da177e4SLinus Torvalds 	struct sp_node *n;
24851da177e4SLinus Torvalds 	struct rb_node *next;
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds 	if (!p->root.rb_node)
24881da177e4SLinus Torvalds 		return;
24894a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
24901da177e4SLinus Torvalds 	next = rb_first(&p->root);
24911da177e4SLinus Torvalds 	while (next) {
24921da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24931da177e4SLinus Torvalds 		next = rb_next(&n->nd);
249463f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
24951da177e4SLinus Torvalds 	}
24964a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
24971da177e4SLinus Torvalds }
24981da177e4SLinus Torvalds 
24991a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2500c297663cSMel Gorman static int __initdata numabalancing_override;
25011a687c2eSMel Gorman 
25021a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25031a687c2eSMel Gorman {
25041a687c2eSMel Gorman 	bool numabalancing_default = false;
25051a687c2eSMel Gorman 
25061a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25071a687c2eSMel Gorman 		numabalancing_default = true;
25081a687c2eSMel Gorman 
2509c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2510c297663cSMel Gorman 	if (numabalancing_override)
2511c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2512c297663cSMel Gorman 
2513b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2514756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2515c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25161a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25171a687c2eSMel Gorman 	}
25181a687c2eSMel Gorman }
25191a687c2eSMel Gorman 
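/*
 * Parse the "numa_balancing=" boot parameter, e.g. "numa_balancing=enable"
 * or "numa_balancing=disable" on the kernel command line.
 */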
25201a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25211a687c2eSMel Gorman {
25221a687c2eSMel Gorman 	int ret = 0;
25231a687c2eSMel Gorman 	if (!str)
25241a687c2eSMel Gorman 		goto out;
25251a687c2eSMel Gorman 
25261a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2527c297663cSMel Gorman 		numabalancing_override = 1;
25281a687c2eSMel Gorman 		ret = 1;
25291a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2530c297663cSMel Gorman 		numabalancing_override = -1;
25311a687c2eSMel Gorman 		ret = 1;
25321a687c2eSMel Gorman 	}
25331a687c2eSMel Gorman out:
25341a687c2eSMel Gorman 	if (!ret)
25354a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
25361a687c2eSMel Gorman 
25371a687c2eSMel Gorman 	return ret;
25381a687c2eSMel Gorman }
25391a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
25401a687c2eSMel Gorman #else
25411a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
25421a687c2eSMel Gorman {
25431a687c2eSMel Gorman }
25441a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
25451a687c2eSMel Gorman 
25461da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
25471da177e4SLinus Torvalds void __init numa_policy_init(void)
25481da177e4SLinus Torvalds {
2549b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2550b71636e2SPaul Mundt 	unsigned long largest = 0;
2551b71636e2SPaul Mundt 	int nid, prefer = 0;
2552b71636e2SPaul Mundt 
25531da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
25541da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
255520c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
25561da177e4SLinus Torvalds 
25571da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
25581da177e4SLinus Torvalds 				     sizeof(struct sp_node),
255920c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
25601da177e4SLinus Torvalds 
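	/*
	 * Pre-build one MPOL_PREFERRED policy per node.  MPOL_F_MOF | MPOL_F_MORON
	 * marks these as the fallback policies NUMA balancing uses to migrate
	 * pages towards the node referencing them.
	 */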
25615606e387SMel Gorman 	for_each_node(nid) {
25625606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
25635606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
25645606e387SMel Gorman 			.mode = MPOL_PREFERRED,
25655606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
25665606e387SMel Gorman 			.v = { .preferred_node = nid, },
25675606e387SMel Gorman 		};
25685606e387SMel Gorman 	}
25695606e387SMel Gorman 
2570b71636e2SPaul Mundt 	/*
2571b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2572b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); otherwise
2573b71636e2SPaul Mundt 	 * we fall back to the largest node if they're all smaller.
2574b71636e2SPaul Mundt 	 */
2575b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
257601f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2577b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
25781da177e4SLinus Torvalds 
2579b71636e2SPaul Mundt 		/* Preserve the largest node */
2580b71636e2SPaul Mundt 		if (largest < total_pages) {
2581b71636e2SPaul Mundt 			largest = total_pages;
2582b71636e2SPaul Mundt 			prefer = nid;
2583b71636e2SPaul Mundt 		}
2584b71636e2SPaul Mundt 
2585b71636e2SPaul Mundt 		/* Interleave this node? */
2586b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2587b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2588b71636e2SPaul Mundt 	}
2589b71636e2SPaul Mundt 
2590b71636e2SPaul Mundt 	/* All too small, use the largest */
2591b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2592b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2593b71636e2SPaul Mundt 
2594028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2595b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
25961a687c2eSMel Gorman 
25971a687c2eSMel Gorman 	check_numabalancing_enable();
25981da177e4SLinus Torvalds }
25991da177e4SLinus Torvalds 
26008bccd85fSChristoph Lameter /* Reset policy of current process to default */
26011da177e4SLinus Torvalds void numa_default_policy(void)
26021da177e4SLinus Torvalds {
2603028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26041da177e4SLinus Torvalds }
260568860ec1SPaul Jackson 
26064225399aSPaul Jackson /*
2607095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2608095f1fc4SLee Schermerhorn  */
2609095f1fc4SLee Schermerhorn 
2610095f1fc4SLee Schermerhorn /*
2611f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
26121a75a6c8SChristoph Lameter  */
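/* Indexed by the MPOL_* mode values; keep this table in sync with them. */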
2613345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2614345ace9cSLee Schermerhorn {
2615345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2616345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2617345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2618345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2619d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2620345ace9cSLee Schermerhorn };
26211a75a6c8SChristoph Lameter 
2622095f1fc4SLee Schermerhorn 
2623095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2624095f1fc4SLee Schermerhorn /**
2625f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2626095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
262771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2628095f1fc4SLee Schermerhorn  *
2629095f1fc4SLee Schermerhorn  * Format of input:
2630095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
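 *	e.g. "interleave:0-3", "prefer=static:1" or "bind=relative:0,2"
 *	(illustrative examples; the nodelist must be a subset of nodes with memory)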
2631095f1fc4SLee Schermerhorn  *
263271fe804bSLee Schermerhorn  * Returns 0 on success, 1 on failure.
2633095f1fc4SLee Schermerhorn  */
2634a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2635095f1fc4SLee Schermerhorn {
263671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2637b4652e84SLee Schermerhorn 	unsigned short mode;
2638f2a07f40SHugh Dickins 	unsigned short mode_flags;
263971fe804bSLee Schermerhorn 	nodemask_t nodes;
2640095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2641095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2642095f1fc4SLee Schermerhorn 	int err = 1;
2643095f1fc4SLee Schermerhorn 
2644095f1fc4SLee Schermerhorn 	if (nodelist) {
2645095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2646095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
264771fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2648095f1fc4SLee Schermerhorn 			goto out;
264901f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2650095f1fc4SLee Schermerhorn 			goto out;
265171fe804bSLee Schermerhorn 	} else
265271fe804bSLee Schermerhorn 		nodes_clear(nodes);
265371fe804bSLee Schermerhorn 
2654095f1fc4SLee Schermerhorn 	if (flags)
2655095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2656095f1fc4SLee Schermerhorn 
2657479e2802SPeter Zijlstra 	for (mode = 0; mode < MPOL_MAX; mode++) {
2658345ace9cSLee Schermerhorn 		if (!strcmp(str, policy_modes[mode])) {
2659095f1fc4SLee Schermerhorn 			break;
2660095f1fc4SLee Schermerhorn 		}
2661095f1fc4SLee Schermerhorn 	}
2662a720094dSMel Gorman 	if (mode >= MPOL_MAX)
2663095f1fc4SLee Schermerhorn 		goto out;
2664095f1fc4SLee Schermerhorn 
266571fe804bSLee Schermerhorn 	switch (mode) {
2666095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
266771fe804bSLee Schermerhorn 		/*
266871fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
266971fe804bSLee Schermerhorn 		 */
2670095f1fc4SLee Schermerhorn 		if (nodelist) {
2671095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2672095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2673095f1fc4SLee Schermerhorn 				rest++;
2674926f2ae0SKOSAKI Motohiro 			if (*rest)
2675926f2ae0SKOSAKI Motohiro 				goto out;
2676095f1fc4SLee Schermerhorn 		}
2677095f1fc4SLee Schermerhorn 		break;
2678095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2679095f1fc4SLee Schermerhorn 		/*
2680095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2681095f1fc4SLee Schermerhorn 		 */
2682095f1fc4SLee Schermerhorn 		if (!nodelist)
268301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
26843f226aa1SLee Schermerhorn 		break;
268571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
26863f226aa1SLee Schermerhorn 		/*
268771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
26883f226aa1SLee Schermerhorn 		 */
268971fe804bSLee Schermerhorn 		if (nodelist)
26903f226aa1SLee Schermerhorn 			goto out;
269171fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
26923f226aa1SLee Schermerhorn 		break;
2693413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2694413b43deSRavikiran G Thirumalai 		/*
2695413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2696413b43deSRavikiran G Thirumalai 		 */
2697413b43deSRavikiran G Thirumalai 		if (!nodelist)
2698413b43deSRavikiran G Thirumalai 			err = 0;
2699413b43deSRavikiran G Thirumalai 		goto out;
2700d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
270171fe804bSLee Schermerhorn 		/*
2702d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
270371fe804bSLee Schermerhorn 		 */
2704d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2705d69b2e63SKOSAKI Motohiro 			goto out;
2706095f1fc4SLee Schermerhorn 	}
2707095f1fc4SLee Schermerhorn 
270871fe804bSLee Schermerhorn 	mode_flags = 0;
2709095f1fc4SLee Schermerhorn 	if (flags) {
2710095f1fc4SLee Schermerhorn 		/*
2711095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2712095f1fc4SLee Schermerhorn 		 * mode flags.
2713095f1fc4SLee Schermerhorn 		 */
2714095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
271571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2716095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
271771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2718095f1fc4SLee Schermerhorn 		else
2719926f2ae0SKOSAKI Motohiro 			goto out;
2720095f1fc4SLee Schermerhorn 	}
272171fe804bSLee Schermerhorn 
272271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
272371fe804bSLee Schermerhorn 	if (IS_ERR(new))
2724926f2ae0SKOSAKI Motohiro 		goto out;
2725926f2ae0SKOSAKI Motohiro 
2726f2a07f40SHugh Dickins 	/*
2727f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2728f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2729f2a07f40SHugh Dickins 	 */
2730f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2731f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2732f2a07f40SHugh Dickins 	else if (nodelist)
2733f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2734f2a07f40SHugh Dickins 	else
2735f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2736f2a07f40SHugh Dickins 
2737f2a07f40SHugh Dickins 	/*
2738f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2739f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2740f2a07f40SHugh Dickins 	 */
2741e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2742f2a07f40SHugh Dickins 
2743926f2ae0SKOSAKI Motohiro 	err = 0;
274471fe804bSLee Schermerhorn 
2745095f1fc4SLee Schermerhorn out:
2746095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2747095f1fc4SLee Schermerhorn 	if (nodelist)
2748095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2749095f1fc4SLee Schermerhorn 	if (flags)
2750095f1fc4SLee Schermerhorn 		*--flags = '=';
275171fe804bSLee Schermerhorn 	if (!err)
275271fe804bSLee Schermerhorn 		*mpol = new;
2753095f1fc4SLee Schermerhorn 	return err;
2754095f1fc4SLee Schermerhorn }
2755095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2756095f1fc4SLee Schermerhorn 
275771fe804bSLee Schermerhorn /**
275871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
275971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
276071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
276171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
276271fe804bSLee Schermerhorn  *
2763948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2764948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2765948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
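 * Typical results look like "interleave:0-3" or "bind=static:1,3" (examples
 * only; the exact string depends on @pol).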
27661a75a6c8SChristoph Lameter  */
2767948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
27681a75a6c8SChristoph Lameter {
27691a75a6c8SChristoph Lameter 	char *p = buffer;
2770948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2771948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2772948927eeSDavid Rientjes 	unsigned short flags = 0;
27731a75a6c8SChristoph Lameter 
27748790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2775bea904d5SLee Schermerhorn 		mode = pol->mode;
2776948927eeSDavid Rientjes 		flags = pol->flags;
2777948927eeSDavid Rientjes 	}
2778bea904d5SLee Schermerhorn 
27791a75a6c8SChristoph Lameter 	switch (mode) {
27801a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
27811a75a6c8SChristoph Lameter 		break;
27821a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2783fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2784f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
278553f2556bSLee Schermerhorn 		else
2786fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
27871a75a6c8SChristoph Lameter 		break;
27881a75a6c8SChristoph Lameter 	case MPOL_BIND:
27891a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
27901a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
27911a75a6c8SChristoph Lameter 		break;
27921a75a6c8SChristoph Lameter 	default:
2793948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2794948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2795948927eeSDavid Rientjes 		return;
27961a75a6c8SChristoph Lameter 	}
27971a75a6c8SChristoph Lameter 
2798b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
27991a75a6c8SChristoph Lameter 
2800fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2801948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2802f5b087b5SDavid Rientjes 
28032291990aSLee Schermerhorn 		/*
28042291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28052291990aSLee Schermerhorn 		 */
2806f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28072291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28082291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28092291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2810f5b087b5SDavid Rientjes 	}
2811f5b087b5SDavid Rientjes 
28129e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28139e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28149e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28151a75a6c8SChristoph Lameter }
2816