xref: /openbmc/linux/mm/mempolicy.c (revision dedf2c73b80b4566dfcae8ebe9ed46a38b63a1f9)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
31da177e4SLinus Torvalds  *
41da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
58bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
61da177e4SLinus Torvalds  * Subject to the GNU Public License, version 2.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to the specified memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the node of the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
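/*
 * Illustrative userspace usage (a minimal sketch, not part of this file; it
 * assumes the set_mempolicy(2)/mbind(2) wrappers from libnuma's <numaif.h>,
 * and "buf"/"len" stand for some already existing mapping):
 *
 *	#include <numaif.h>
 *
 *	Interleave this process' future allocations across nodes 0 and 1:
 *		unsigned long nodes = (1UL << 0) | (1UL << 1);
 *		set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *
 *	Bind an existing mapping to node 0, moving its pages if necessary:
 *		unsigned long node0 = 1UL << 0;
 *		mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 */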
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always graceful with that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
8831367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
13074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1315606e387SMel Gorman {
1325606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
133f15ca78eSOleg Nesterov 	int node;
1345606e387SMel Gorman 
135f15ca78eSOleg Nesterov 	if (pol)
136f15ca78eSOleg Nesterov 		return pol;
1375606e387SMel Gorman 
138f15ca78eSOleg Nesterov 	node = numa_node_id();
1391da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1401da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
141f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
142f15ca78eSOleg Nesterov 		if (pol->mode)
143f15ca78eSOleg Nesterov 			return pol;
1441da6f0e1SJianguo Wu 	}
1455606e387SMel Gorman 
146f15ca78eSOleg Nesterov 	return &default_policy;
1475606e387SMel Gorman }
1485606e387SMel Gorman 
14937012946SDavid Rientjes static const struct mempolicy_operations {
15037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
15237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
15337012946SDavid Rientjes 
154f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155f5b087b5SDavid Rientjes {
1566d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1574c50bc01SDavid Rientjes }
1584c50bc01SDavid Rientjes 
1594c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1604c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1614c50bc01SDavid Rientjes {
1624c50bc01SDavid Rientjes 	nodemask_t tmp;
1634c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1644c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
165f5b087b5SDavid Rientjes }
166f5b087b5SDavid Rientjes 
16737012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
16837012946SDavid Rientjes {
16937012946SDavid Rientjes 	if (nodes_empty(*nodes))
17037012946SDavid Rientjes 		return -EINVAL;
17137012946SDavid Rientjes 	pol->v.nodes = *nodes;
17237012946SDavid Rientjes 	return 0;
17337012946SDavid Rientjes }
17437012946SDavid Rientjes 
17537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
17637012946SDavid Rientjes {
17737012946SDavid Rientjes 	if (!nodes)
178fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
17937012946SDavid Rientjes 	else if (nodes_empty(*nodes))
18037012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
18137012946SDavid Rientjes 	else
18237012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
18337012946SDavid Rientjes 	return 0;
18437012946SDavid Rientjes }
18537012946SDavid Rientjes 
18637012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
18737012946SDavid Rientjes {
188859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
18937012946SDavid Rientjes 		return -EINVAL;
19037012946SDavid Rientjes 	pol->v.nodes = *nodes;
19137012946SDavid Rientjes 	return 0;
19237012946SDavid Rientjes }
19337012946SDavid Rientjes 
19458568d2aSMiao Xie /*
19558568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
19658568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
19758568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
19858568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
19958568d2aSMiao Xie  *
20058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
20158568d2aSMiao Xie  * and mempolicy.  May also be called holding mmap_sem for write.
20258568d2aSMiao Xie  */
2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2044bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
20558568d2aSMiao Xie {
20658568d2aSMiao Xie 	int ret;
20758568d2aSMiao Xie 
20858568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
20958568d2aSMiao Xie 	if (pol == NULL)
21058568d2aSMiao Xie 		return 0;
21101f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2124bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
21301f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
21458568d2aSMiao Xie 
21558568d2aSMiao Xie 	VM_BUG_ON(!nodes);
21658568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
21758568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
21858568d2aSMiao Xie 	else {
21958568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2204bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
22158568d2aSMiao Xie 		else
2224bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2234bfc4495SKAMEZAWA Hiroyuki 
22458568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
22558568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
22658568d2aSMiao Xie 		else
22758568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
22858568d2aSMiao Xie 						cpuset_current_mems_allowed;
22958568d2aSMiao Xie 	}
23058568d2aSMiao Xie 
2314bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2324bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2334bfc4495SKAMEZAWA Hiroyuki 	else
2344bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
23558568d2aSMiao Xie 	return ret;
23658568d2aSMiao Xie }
23758568d2aSMiao Xie 
23858568d2aSMiao Xie /*
23958568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
24058568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
24158568d2aSMiao Xie  */
242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243028fec41SDavid Rientjes 				  nodemask_t *nodes)
2441da177e4SLinus Torvalds {
2451da177e4SLinus Torvalds 	struct mempolicy *policy;
2461da177e4SLinus Torvalds 
247028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
24800ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249140d5a49SPaul Mundt 
2503e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2513e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
25237012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
253d3a71033SLee Schermerhorn 		return NULL;
25437012946SDavid Rientjes 	}
2553e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2563e1f0645SDavid Rientjes 
2573e1f0645SDavid Rientjes 	/*
2583e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2593e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2603e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2613e1f0645SDavid Rientjes 	 */
2623e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2633e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2643e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2653e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2663e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2673e1f0645SDavid Rientjes 		}
268479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2698d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2708d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2718d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
272479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
273479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2743e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2753e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2761da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2771da177e4SLinus Torvalds 	if (!policy)
2781da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2791da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
28045c4745aSLee Schermerhorn 	policy->mode = mode;
28137012946SDavid Rientjes 	policy->flags = flags;
2823e1f0645SDavid Rientjes 
28337012946SDavid Rientjes 	return policy;
28437012946SDavid Rientjes }
28537012946SDavid Rientjes 
28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
28852cd3b07SLee Schermerhorn {
28952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
29052cd3b07SLee Schermerhorn 		return;
29152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
29252cd3b07SLee Schermerhorn }
29352cd3b07SLee Schermerhorn 
294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
29537012946SDavid Rientjes {
29637012946SDavid Rientjes }
29737012946SDavid Rientjes 
298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
2991d0d2680SDavid Rientjes {
3001d0d2680SDavid Rientjes 	nodemask_t tmp;
3011d0d2680SDavid Rientjes 
30237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
30337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
30437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
30537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3061d0d2680SDavid Rientjes 	else {
307213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
308213980c0SVlastimil Babka 								*nodes);
309213980c0SVlastimil Babka 		pol->w.cpuset_mems_allowed = tmp;
3101d0d2680SDavid Rientjes 	}
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie 	if (nodes_empty(tmp))
313708c1bbcSMiao Xie 		tmp = *nodes;
314708c1bbcSMiao Xie 
3151d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
319213980c0SVlastimil Babka 						const nodemask_t *nodes)
32037012946SDavid Rientjes {
32137012946SDavid Rientjes 	nodemask_t tmp;
32237012946SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3241d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3251d0d2680SDavid Rientjes 
326fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3271d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
328fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
329fc36b8d3SLee Schermerhorn 		} else
330fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
33137012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
33237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3331d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
334fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3351d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
33637012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
33737012946SDavid Rientjes 						   *nodes);
33837012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes 	}
3401d0d2680SDavid Rientjes }
34137012946SDavid Rientjes 
342708c1bbcSMiao Xie /*
343708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344708c1bbcSMiao Xie  *
345213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
347213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
348708c1bbcSMiao Xie  */
349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35037012946SDavid Rientjes {
35137012946SDavid Rientjes 	if (!pol)
35237012946SDavid Rientjes 		return;
353213980c0SVlastimil Babka 	if (!mpol_store_user_nodemask(pol) &&
35437012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35537012946SDavid Rientjes 		return;
356708c1bbcSMiao Xie 
357213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3581d0d2680SDavid Rientjes }
3591d0d2680SDavid Rientjes 
3601d0d2680SDavid Rientjes /*
3611d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires a task
3621d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
36358568d2aSMiao Xie  *
36458568d2aSMiao Xie  * Called with task's alloc_lock held.
3651d0d2680SDavid Rientjes  */
3661d0d2680SDavid Rientjes 
367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3681d0d2680SDavid Rientjes {
369213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3701d0d2680SDavid Rientjes }
3711d0d2680SDavid Rientjes 
3721d0d2680SDavid Rientjes /*
3731d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3741d0d2680SDavid Rientjes  *
3751d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3761d0d2680SDavid Rientjes  */
3771d0d2680SDavid Rientjes 
3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3791d0d2680SDavid Rientjes {
3801d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3811d0d2680SDavid Rientjes 
3821d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3831d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
3851d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3861d0d2680SDavid Rientjes }
3871d0d2680SDavid Rientjes 
38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38937012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39037012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39137012946SDavid Rientjes 	},
39237012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
39337012946SDavid Rientjes 		.create = mpol_new_interleave,
39437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39737012946SDavid Rientjes 		.create = mpol_new_preferred,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_BIND] = {
40137012946SDavid Rientjes 		.create = mpol_new_bind,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes };
40537012946SDavid Rientjes 
406fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
407fc301289SChristoph Lameter 				unsigned long flags);
4081a75a6c8SChristoph Lameter 
4096f4576e3SNaoya Horiguchi struct queue_pages {
4106f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4116f4576e3SNaoya Horiguchi 	unsigned long flags;
4126f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4136f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4146f4576e3SNaoya Horiguchi };
4156f4576e3SNaoya Horiguchi 
41698094945SNaoya Horiguchi /*
41788aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
41888aaa2a1SNaoya Horiguchi  *
41988aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
42088aaa2a1SNaoya Horiguchi  * in the invert of qp->nmask.
42188aaa2a1SNaoya Horiguchi  */
42288aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
42388aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
42488aaa2a1SNaoya Horiguchi {
42588aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
42688aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
42788aaa2a1SNaoya Horiguchi 
42888aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
42988aaa2a1SNaoya Horiguchi }
43088aaa2a1SNaoya Horiguchi 
431c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
433c8633798SNaoya Horiguchi {
434c8633798SNaoya Horiguchi 	int ret = 0;
435c8633798SNaoya Horiguchi 	struct page *page;
436c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
437c8633798SNaoya Horiguchi 	unsigned long flags;
438c8633798SNaoya Horiguchi 
439c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
440c8633798SNaoya Horiguchi 		ret = 1;
441c8633798SNaoya Horiguchi 		goto unlock;
442c8633798SNaoya Horiguchi 	}
443c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
444c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
445c8633798SNaoya Horiguchi 		spin_unlock(ptl);
446c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
447c8633798SNaoya Horiguchi 		goto out;
448c8633798SNaoya Horiguchi 	}
449c8633798SNaoya Horiguchi 	if (!queue_pages_required(page, qp)) {
450c8633798SNaoya Horiguchi 		ret = 1;
451c8633798SNaoya Horiguchi 		goto unlock;
452c8633798SNaoya Horiguchi 	}
453c8633798SNaoya Horiguchi 
454c8633798SNaoya Horiguchi 	ret = 1;
455c8633798SNaoya Horiguchi 	flags = qp->flags;
456c8633798SNaoya Horiguchi 	/* go to thp migration */
457c8633798SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
458c8633798SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
459c8633798SNaoya Horiguchi unlock:
460c8633798SNaoya Horiguchi 	spin_unlock(ptl);
461c8633798SNaoya Horiguchi out:
462c8633798SNaoya Horiguchi 	return ret;
463c8633798SNaoya Horiguchi }
464c8633798SNaoya Horiguchi 
46588aaa2a1SNaoya Horiguchi /*
46698094945SNaoya Horiguchi  * Scan through pages, checking whether they meet certain conditions,
46798094945SNaoya Horiguchi  * and move them to the pagelist if they do.
46898094945SNaoya Horiguchi  */
4696f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4706f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4711da177e4SLinus Torvalds {
4726f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4736f4576e3SNaoya Horiguchi 	struct page *page;
4746f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4756f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
476c8633798SNaoya Horiguchi 	int ret;
47791612e0dSHugh Dickins 	pte_t *pte;
478705e87c0SHugh Dickins 	spinlock_t *ptl;
479941150a3SHugh Dickins 
480c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
481c8633798SNaoya Horiguchi 	if (ptl) {
482c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
483248db92dSKirill A. Shutemov 		if (ret)
4846f4576e3SNaoya Horiguchi 			return 0;
485248db92dSKirill A. Shutemov 	}
48691612e0dSHugh Dickins 
487337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
488337d9abfSNaoya Horiguchi 		return 0;
48994723aafSMichal Hocko 
4906f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
4916f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
49291612e0dSHugh Dickins 		if (!pte_present(*pte))
49391612e0dSHugh Dickins 			continue;
4946aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
4956aab341eSLinus Torvalds 		if (!page)
49691612e0dSHugh Dickins 			continue;
497053837fcSNick Piggin 		/*
49862b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
49962b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
500053837fcSNick Piggin 		 */
501b79bc0a0SHugh Dickins 		if (PageReserved(page))
502f4598c8bSChristoph Lameter 			continue;
50388aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
50438e35860SChristoph Lameter 			continue;
5056f4576e3SNaoya Horiguchi 		migrate_page_add(page, qp->pagelist, flags);
5066f4576e3SNaoya Horiguchi 	}
5076f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5086f4576e3SNaoya Horiguchi 	cond_resched();
5096f4576e3SNaoya Horiguchi 	return 0;
51091612e0dSHugh Dickins }
51191612e0dSHugh Dickins 
5126f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5136f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5146f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
515e2d8cf40SNaoya Horiguchi {
516e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5176f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5186f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
519e2d8cf40SNaoya Horiguchi 	struct page *page;
520cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
521d4c54919SNaoya Horiguchi 	pte_t entry;
522e2d8cf40SNaoya Horiguchi 
5236f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5246f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
525d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
526d4c54919SNaoya Horiguchi 		goto unlock;
527d4c54919SNaoya Horiguchi 	page = pte_page(entry);
52888aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
529e2d8cf40SNaoya Horiguchi 		goto unlock;
530e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
531e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
532e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5336f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
534e2d8cf40SNaoya Horiguchi unlock:
535cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
536e2d8cf40SNaoya Horiguchi #else
537e2d8cf40SNaoya Horiguchi 	BUG();
538e2d8cf40SNaoya Horiguchi #endif
53991612e0dSHugh Dickins 	return 0;
5401da177e4SLinus Torvalds }
5411da177e4SLinus Torvalds 
5425877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
543b24f53a0SLee Schermerhorn /*
5444b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses as inaccessible.
5454b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5464b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5474b10e7d5SMel Gorman  *
5484b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5494b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5504b10e7d5SMel Gorman  * changes to the core.
551b24f53a0SLee Schermerhorn  */
5524b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5534b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
554b24f53a0SLee Schermerhorn {
5554b10e7d5SMel Gorman 	int nr_updated;
556b24f53a0SLee Schermerhorn 
5574d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
55803c5a6e1SMel Gorman 	if (nr_updated)
55903c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
560b24f53a0SLee Schermerhorn 
5614b10e7d5SMel Gorman 	return nr_updated;
562b24f53a0SLee Schermerhorn }
563b24f53a0SLee Schermerhorn #else
564b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
565b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
566b24f53a0SLee Schermerhorn {
567b24f53a0SLee Schermerhorn 	return 0;
568b24f53a0SLee Schermerhorn }
5695877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
570b24f53a0SLee Schermerhorn 
5716f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
5726f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
5731da177e4SLinus Torvalds {
5746f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5756f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5765b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
5776f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
578dc9aa5b9SChristoph Lameter 
57977bf45e7SKirill A. Shutemov 	if (!vma_migratable(vma))
58048684a65SNaoya Horiguchi 		return 1;
58148684a65SNaoya Horiguchi 
5825b952b3cSAndi Kleen 	if (endvma > end)
5835b952b3cSAndi Kleen 		endvma = end;
5845b952b3cSAndi Kleen 	if (vma->vm_start > start)
5855b952b3cSAndi Kleen 		start = vma->vm_start;
586b24f53a0SLee Schermerhorn 
587b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
588b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
589d05f0cdcSHugh Dickins 			return -EFAULT;
5906f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
591d05f0cdcSHugh Dickins 			return -EFAULT;
592b24f53a0SLee Schermerhorn 	}
593b24f53a0SLee Schermerhorn 
5946f4576e3SNaoya Horiguchi 	qp->prev = vma;
5956f4576e3SNaoya Horiguchi 
596b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
5972c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
5984355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
5994355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6004355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
601b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6026f4576e3SNaoya Horiguchi 		return 1;
603b24f53a0SLee Schermerhorn 	}
604b24f53a0SLee Schermerhorn 
6056f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
60677bf45e7SKirill A. Shutemov 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
6076f4576e3SNaoya Horiguchi 		return 0;
6086f4576e3SNaoya Horiguchi 	return 1;
6096f4576e3SNaoya Horiguchi }
610b24f53a0SLee Schermerhorn 
6116f4576e3SNaoya Horiguchi /*
6126f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6136f4576e3SNaoya Horiguchi  *
6146f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6156f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist, which
6166f4576e3SNaoya Horiguchi  * is passed via @private.
6176f4576e3SNaoya Horiguchi  */
6186f4576e3SNaoya Horiguchi static int
6196f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6206f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6216f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6226f4576e3SNaoya Horiguchi {
6236f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6246f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6256f4576e3SNaoya Horiguchi 		.flags = flags,
6266f4576e3SNaoya Horiguchi 		.nmask = nodes,
6276f4576e3SNaoya Horiguchi 		.prev = NULL,
6286f4576e3SNaoya Horiguchi 	};
6296f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6306f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6316f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6326f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6336f4576e3SNaoya Horiguchi 		.mm = mm,
6346f4576e3SNaoya Horiguchi 		.private = &qp,
6356f4576e3SNaoya Horiguchi 	};
6366f4576e3SNaoya Horiguchi 
6376f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6381da177e4SLinus Torvalds }
6391da177e4SLinus Torvalds 
640869833f2SKOSAKI Motohiro /*
641869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
642869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
643869833f2SKOSAKI Motohiro  */
644869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
645869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6468d34694cSKOSAKI Motohiro {
647869833f2SKOSAKI Motohiro 	int err;
648869833f2SKOSAKI Motohiro 	struct mempolicy *old;
649869833f2SKOSAKI Motohiro 	struct mempolicy *new;
6508d34694cSKOSAKI Motohiro 
6518d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
6528d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
6538d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
6548d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
6558d34694cSKOSAKI Motohiro 
656869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
657869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
658869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
659869833f2SKOSAKI Motohiro 
660869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
6618d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
662869833f2SKOSAKI Motohiro 		if (err)
663869833f2SKOSAKI Motohiro 			goto err_out;
6648d34694cSKOSAKI Motohiro 	}
665869833f2SKOSAKI Motohiro 
666869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
667869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
668869833f2SKOSAKI Motohiro 	mpol_put(old);
669869833f2SKOSAKI Motohiro 
670869833f2SKOSAKI Motohiro 	return 0;
671869833f2SKOSAKI Motohiro  err_out:
672869833f2SKOSAKI Motohiro 	mpol_put(new);
6738d34694cSKOSAKI Motohiro 	return err;
6748d34694cSKOSAKI Motohiro }
6758d34694cSKOSAKI Motohiro 
6761da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
6779d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
6789d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
6791da177e4SLinus Torvalds {
6801da177e4SLinus Torvalds 	struct vm_area_struct *next;
6819d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
6829d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
6839d8cebd4SKOSAKI Motohiro 	int err = 0;
684e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
6859d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
6869d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
6871da177e4SLinus Torvalds 
688097d5910SLinus Torvalds 	vma = find_vma(mm, start);
6899d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
6909d8cebd4SKOSAKI Motohiro 		return -EFAULT;
6919d8cebd4SKOSAKI Motohiro 
692097d5910SLinus Torvalds 	prev = vma->vm_prev;
693e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
694e26a5114SKOSAKI Motohiro 		prev = vma;
695e26a5114SKOSAKI Motohiro 
6969d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
6971da177e4SLinus Torvalds 		next = vma->vm_next;
6989d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
6999d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7009d8cebd4SKOSAKI Motohiro 
701e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
702e26a5114SKOSAKI Motohiro 			continue;
703e26a5114SKOSAKI Motohiro 
704e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
705e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7069d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
707e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
70819a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7099d8cebd4SKOSAKI Motohiro 		if (prev) {
7109d8cebd4SKOSAKI Motohiro 			vma = prev;
7119d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7123964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7139d8cebd4SKOSAKI Motohiro 				continue;
7143964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7153964acd0SOleg Nesterov 			goto replace;
7161da177e4SLinus Torvalds 		}
7179d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7189d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7199d8cebd4SKOSAKI Motohiro 			if (err)
7209d8cebd4SKOSAKI Motohiro 				goto out;
7219d8cebd4SKOSAKI Motohiro 		}
7229d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7239d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7249d8cebd4SKOSAKI Motohiro 			if (err)
7259d8cebd4SKOSAKI Motohiro 				goto out;
7269d8cebd4SKOSAKI Motohiro 		}
7273964acd0SOleg Nesterov  replace:
728869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7299d8cebd4SKOSAKI Motohiro 		if (err)
7309d8cebd4SKOSAKI Motohiro 			goto out;
7319d8cebd4SKOSAKI Motohiro 	}
7329d8cebd4SKOSAKI Motohiro 
7339d8cebd4SKOSAKI Motohiro  out:
7341da177e4SLinus Torvalds 	return err;
7351da177e4SLinus Torvalds }
7361da177e4SLinus Torvalds 
7371da177e4SLinus Torvalds /* Set the process memory policy */
738028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
739028fec41SDavid Rientjes 			     nodemask_t *nodes)
7401da177e4SLinus Torvalds {
74158568d2aSMiao Xie 	struct mempolicy *new, *old;
7424bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
74358568d2aSMiao Xie 	int ret;
7441da177e4SLinus Torvalds 
7454bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7464bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
747f4e53d91SLee Schermerhorn 
7484bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
7494bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
7504bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
7514bfc4495SKAMEZAWA Hiroyuki 		goto out;
7524bfc4495SKAMEZAWA Hiroyuki 	}
7532c7c3a7dSOleg Nesterov 
75458568d2aSMiao Xie 	task_lock(current);
7554bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
75658568d2aSMiao Xie 	if (ret) {
75758568d2aSMiao Xie 		task_unlock(current);
75858568d2aSMiao Xie 		mpol_put(new);
7594bfc4495SKAMEZAWA Hiroyuki 		goto out;
76058568d2aSMiao Xie 	}
76158568d2aSMiao Xie 	old = current->mempolicy;
7621da177e4SLinus Torvalds 	current->mempolicy = new;
76345816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
76445816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
76558568d2aSMiao Xie 	task_unlock(current);
76658568d2aSMiao Xie 	mpol_put(old);
7674bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
7684bfc4495SKAMEZAWA Hiroyuki out:
7694bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
7704bfc4495SKAMEZAWA Hiroyuki 	return ret;
7711da177e4SLinus Torvalds }
7721da177e4SLinus Torvalds 
773bea904d5SLee Schermerhorn /*
774bea904d5SLee Schermerhorn  * Return the nodemask of a policy, for a get_mempolicy() query
77558568d2aSMiao Xie  *
77658568d2aSMiao Xie  * Called with task's alloc_lock held
777bea904d5SLee Schermerhorn  */
778bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
7791da177e4SLinus Torvalds {
780dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
781bea904d5SLee Schermerhorn 	if (p == &default_policy)
782bea904d5SLee Schermerhorn 		return;
783bea904d5SLee Schermerhorn 
78445c4745aSLee Schermerhorn 	switch (p->mode) {
78519770b32SMel Gorman 	case MPOL_BIND:
78619770b32SMel Gorman 		/* Fall through */
7871da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
788dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
7891da177e4SLinus Torvalds 		break;
7901da177e4SLinus Torvalds 	case MPOL_PREFERRED:
791fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
792dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
79353f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
7941da177e4SLinus Torvalds 		break;
7951da177e4SLinus Torvalds 	default:
7961da177e4SLinus Torvalds 		BUG();
7971da177e4SLinus Torvalds 	}
7981da177e4SLinus Torvalds }
7991da177e4SLinus Torvalds 
8003b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
8011da177e4SLinus Torvalds {
8021da177e4SLinus Torvalds 	struct page *p;
8031da177e4SLinus Torvalds 	int err;
8041da177e4SLinus Torvalds 
8053b9aadf7SAndrea Arcangeli 	int locked = 1;
8063b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
8071da177e4SLinus Torvalds 	if (err >= 0) {
8081da177e4SLinus Torvalds 		err = page_to_nid(p);
8091da177e4SLinus Torvalds 		put_page(p);
8101da177e4SLinus Torvalds 	}
8113b9aadf7SAndrea Arcangeli 	if (locked)
8123b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
8131da177e4SLinus Torvalds 	return err;
8141da177e4SLinus Torvalds }
8151da177e4SLinus Torvalds 
8161da177e4SLinus Torvalds /* Retrieve NUMA policy */
817dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8181da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8191da177e4SLinus Torvalds {
8208bccd85fSChristoph Lameter 	int err;
8211da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8221da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8233b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
8241da177e4SLinus Torvalds 
825754af6f5SLee Schermerhorn 	if (flags &
826754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8271da177e4SLinus Torvalds 		return -EINVAL;
828754af6f5SLee Schermerhorn 
829754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
830754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
831754af6f5SLee Schermerhorn 			return -EINVAL;
832754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
83358568d2aSMiao Xie 		task_lock(current);
834754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
83558568d2aSMiao Xie 		task_unlock(current);
836754af6f5SLee Schermerhorn 		return 0;
837754af6f5SLee Schermerhorn 	}
838754af6f5SLee Schermerhorn 
8391da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
840bea904d5SLee Schermerhorn 		/*
841bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
842bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
843bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
844bea904d5SLee Schermerhorn 		 */
8451da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8461da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
8471da177e4SLinus Torvalds 		if (!vma) {
8481da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
8491da177e4SLinus Torvalds 			return -EFAULT;
8501da177e4SLinus Torvalds 		}
8511da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
8521da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
8531da177e4SLinus Torvalds 		else
8541da177e4SLinus Torvalds 			pol = vma->vm_policy;
8551da177e4SLinus Torvalds 	} else if (addr)
8561da177e4SLinus Torvalds 		return -EINVAL;
8571da177e4SLinus Torvalds 
8581da177e4SLinus Torvalds 	if (!pol)
859bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
8621da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
8633b9aadf7SAndrea Arcangeli 			/*
8643b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
8653b9aadf7SAndrea Arcangeli 			 * will drop the mmap_sem, so after calling
8663b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
8673b9aadf7SAndrea Arcangeli 			 * is stale.
8683b9aadf7SAndrea Arcangeli 			 */
8693b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
8703b9aadf7SAndrea Arcangeli 			vma = NULL;
8713b9aadf7SAndrea Arcangeli 			mpol_get(pol);
8723b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
8731da177e4SLinus Torvalds 			if (err < 0)
8741da177e4SLinus Torvalds 				goto out;
8758bccd85fSChristoph Lameter 			*policy = err;
8761da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
87745c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
87845816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
8791da177e4SLinus Torvalds 		} else {
8801da177e4SLinus Torvalds 			err = -EINVAL;
8811da177e4SLinus Torvalds 			goto out;
8821da177e4SLinus Torvalds 		}
883bea904d5SLee Schermerhorn 	} else {
884bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
885bea904d5SLee Schermerhorn 						pol->mode;
886d79df630SDavid Rientjes 		/*
887d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
888d79df630SDavid Rientjes 		 * the policy to userspace.
889d79df630SDavid Rientjes 		 */
890d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
891bea904d5SLee Schermerhorn 	}
8921da177e4SLinus Torvalds 
8931da177e4SLinus Torvalds 	err = 0;
89458568d2aSMiao Xie 	if (nmask) {
895c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
896c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
897c6b6ef8bSLee Schermerhorn 		} else {
89858568d2aSMiao Xie 			task_lock(current);
899bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
90058568d2aSMiao Xie 			task_unlock(current);
90158568d2aSMiao Xie 		}
902c6b6ef8bSLee Schermerhorn 	}
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds  out:
90552cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9061da177e4SLinus Torvalds 	if (vma)
9073b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
9083b9aadf7SAndrea Arcangeli 	if (pol_refcount)
9093b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
9101da177e4SLinus Torvalds 	return err;
9111da177e4SLinus Torvalds }
9121da177e4SLinus Torvalds 
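/*
 * Illustrative userspace query (a sketch, not part of this file; it assumes
 * the get_mempolicy(2) wrapper from libnuma's <numaif.h>, and "buf" is some
 * mapped address):
 *
 *	int mode;
 *	unsigned long nodes = 0;
 *
 *	Report the policy governing the page at "buf":
 *		get_mempolicy(&mode, &nodes, 8 * sizeof(nodes), buf, MPOL_F_ADDR);
 *
 *	With MPOL_F_ADDR | MPOL_F_NODE instead, "mode" receives the node the
 *	page at "buf" currently resides on.
 */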
913b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9148bccd85fSChristoph Lameter /*
915c8633798SNaoya Horiguchi  * page migration, thp tail pages can be passed.
9166ce3c4c0SChristoph Lameter  */
917fc301289SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
918fc301289SChristoph Lameter 				unsigned long flags)
9196ce3c4c0SChristoph Lameter {
920c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
9216ce3c4c0SChristoph Lameter 	/*
922fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9236ce3c4c0SChristoph Lameter 	 */
924c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
925c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
926c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
927c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
928c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
929c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
93062695a84SNick Piggin 		}
93162695a84SNick Piggin 	}
9326ce3c4c0SChristoph Lameter }
9336ce3c4c0SChristoph Lameter 
934a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */
935666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node)
93695a402c3SChristoph Lameter {
937e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
938e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
939e2d8cf40SNaoya Horiguchi 					node);
94094723aafSMichal Hocko 	else if (PageTransHuge(page)) {
941c8633798SNaoya Horiguchi 		struct page *thp;
942c8633798SNaoya Horiguchi 
943c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
944c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
945c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
946c8633798SNaoya Horiguchi 		if (!thp)
947c8633798SNaoya Horiguchi 			return NULL;
948c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
949c8633798SNaoya Horiguchi 		return thp;
950c8633798SNaoya Horiguchi 	} else
95196db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
952b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
95395a402c3SChristoph Lameter }
95495a402c3SChristoph Lameter 
9556ce3c4c0SChristoph Lameter /*
9567e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
9577e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
9587e2ab150SChristoph Lameter  */
959dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
960dbcb0f19SAdrian Bunk 			   int flags)
9617e2ab150SChristoph Lameter {
9627e2ab150SChristoph Lameter 	nodemask_t nmask;
9637e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
9647e2ab150SChristoph Lameter 	int err = 0;
9657e2ab150SChristoph Lameter 
9667e2ab150SChristoph Lameter 	nodes_clear(nmask);
9677e2ab150SChristoph Lameter 	node_set(source, nmask);
9687e2ab150SChristoph Lameter 
96908270807SMinchan Kim 	/*
97008270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
97108270807SMinchan Kim 	 * need migration.  Between passing in the full user address
97208270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
97308270807SMinchan Kim 	 */
97408270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
97598094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
9767e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
9777e2ab150SChristoph Lameter 
978cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
979a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
9809c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
981cf608ac1SMinchan Kim 		if (err)
982e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
983cf608ac1SMinchan Kim 	}
98495a402c3SChristoph Lameter 
9857e2ab150SChristoph Lameter 	return err;
9867e2ab150SChristoph Lameter }
9877e2ab150SChristoph Lameter 
9887e2ab150SChristoph Lameter /*
9897e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
9907e2ab150SChristoph Lameter  * layout as much as possible.
99139743889SChristoph Lameter  *
99239743889SChristoph Lameter  * Returns the number of pages that could not be moved.
99339743889SChristoph Lameter  */
9940ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
9950ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
99639743889SChristoph Lameter {
9977e2ab150SChristoph Lameter 	int busy = 0;
9980aedadf9SChristoph Lameter 	int err;
9997e2ab150SChristoph Lameter 	nodemask_t tmp;
100039743889SChristoph Lameter 
10010aedadf9SChristoph Lameter 	err = migrate_prep();
10020aedadf9SChristoph Lameter 	if (err)
10030aedadf9SChristoph Lameter 		return err;
10040aedadf9SChristoph Lameter 
100539743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1006d4984711SChristoph Lameter 
10077e2ab150SChristoph Lameter 	/*
10087e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10097e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10107e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10117e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10127e2ab150SChristoph Lameter 	 *
10137e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fall back to picking some
10147e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10157e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10167e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10177e2ab150SChristoph Lameter 	 *
10187e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10197e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10207e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10217e2ab150SChristoph Lameter 	 *
10227e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10237e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10247e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10257e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10267e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
10277e2ab150SChristoph Lameter 	 *
10287e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10297e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10307e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10317e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1032ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
10337e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10347e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10357e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
10367e2ab150SChristoph Lameter 	 */
10377e2ab150SChristoph Lameter 
10380ce72d4fSAndrew Morton 	tmp = *from;
10397e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
10407e2ab150SChristoph Lameter 		int s,d;
1041b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
10427e2ab150SChristoph Lameter 		int dest = 0;
10437e2ab150SChristoph Lameter 
10447e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
10454a5b18ccSLarry Woodman 
10464a5b18ccSLarry Woodman 			/*
10474a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
10484a5b18ccSLarry Woodman 			 * node relationship of the pages established between
10494a5b18ccSLarry Woodman 			 * threads and memory areas.
10504a5b18ccSLarry Woodman 			 *
10514a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
10524a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
10534a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
10544a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
10554a5b18ccSLarry Woodman 			 * mask.
10564a5b18ccSLarry Woodman 			 *
10574a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
10584a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
10594a5b18ccSLarry Woodman 			 */
10604a5b18ccSLarry Woodman 
10610ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
10620ce72d4fSAndrew Morton 						(node_isset(s, *to)))
10634a5b18ccSLarry Woodman 				continue;
10644a5b18ccSLarry Woodman 
10650ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
10667e2ab150SChristoph Lameter 			if (s == d)
10677e2ab150SChristoph Lameter 				continue;
10687e2ab150SChristoph Lameter 
10697e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
10707e2ab150SChristoph Lameter 			dest = d;
10717e2ab150SChristoph Lameter 
10727e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
10737e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
10747e2ab150SChristoph Lameter 				break;
10757e2ab150SChristoph Lameter 		}
1076b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
10777e2ab150SChristoph Lameter 			break;
10787e2ab150SChristoph Lameter 
10797e2ab150SChristoph Lameter 		node_clear(source, tmp);
10807e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
10817e2ab150SChristoph Lameter 		if (err > 0)
10827e2ab150SChristoph Lameter 			busy += err;
10837e2ab150SChristoph Lameter 		if (err < 0)
10847e2ab150SChristoph Lameter 			break;
108539743889SChristoph Lameter 	}
108639743889SChristoph Lameter 	up_read(&mm->mmap_sem);
10877e2ab150SChristoph Lameter 	if (err < 0)
10887e2ab150SChristoph Lameter 		return err;
10897e2ab150SChristoph Lameter 	return busy;
1090b20a3503SChristoph Lameter 
109139743889SChristoph Lameter }
109239743889SChristoph Lameter 
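/*
 * Illustrative walk-through of the pair selection above (an editorial
 * sketch, not from the original source).  Suppose from = {0,1,2} and
 * to = {1,2,3}, so node_remap() gives 0->1, 1->2, 2->3:
 *
 *	pass 1: tmp = {0,1,2}; 0->1 and 1->2 both point at nodes still in
 *		tmp, but 2->3 hits an empty destination, so <2,3> is chosen.
 *	pass 2: tmp = {0,1};   1->2 now hits an empty destination: <1,2>.
 *	pass 3: tmp = {0};     finally <0,1>.
 *
 * Pages thus move 2->3, then 1->2, then 0->1: every destination has been
 * drained of its own outgoing pages before incoming pages land on it,
 * which is the overload avoidance described in the comment above.
 */
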
10933ad33b24SLee Schermerhorn /*
10943ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1095d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
10963ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
10973ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
10983ad33b24SLee Schermerhorn  * is in virtual address order.
10993ad33b24SLee Schermerhorn  */
1100666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
110195a402c3SChristoph Lameter {
1102d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11033ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
110495a402c3SChristoph Lameter 
1105d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11063ad33b24SLee Schermerhorn 	while (vma) {
11073ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11083ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11093ad33b24SLee Schermerhorn 			break;
11103ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11113ad33b24SLee Schermerhorn 	}
11123ad33b24SLee Schermerhorn 
111311c731e8SWanpeng Li 	if (PageHuge(page)) {
1114389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1115389c8178SMichal Hocko 				vma, address);
111694723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1117c8633798SNaoya Horiguchi 		struct page *thp;
1118c8633798SNaoya Horiguchi 
1119c8633798SNaoya Horiguchi 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1120c8633798SNaoya Horiguchi 					 HPAGE_PMD_ORDER);
1121c8633798SNaoya Horiguchi 		if (!thp)
1122c8633798SNaoya Horiguchi 			return NULL;
1123c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1124c8633798SNaoya Horiguchi 		return thp;
112511c731e8SWanpeng Li 	}
112611c731e8SWanpeng Li 	/*
112711c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
112811c731e8SWanpeng Li 	 */
11290f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
11300f556856SMichal Hocko 			vma, address);
113195a402c3SChristoph Lameter }
1132b20a3503SChristoph Lameter #else
1133b20a3503SChristoph Lameter 
1134b20a3503SChristoph Lameter static void migrate_page_add(struct page *page, struct list_head *pagelist,
1135b20a3503SChristoph Lameter 				unsigned long flags)
1136b20a3503SChristoph Lameter {
1137b20a3503SChristoph Lameter }
1138b20a3503SChristoph Lameter 
11390ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11400ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1141b20a3503SChristoph Lameter {
1142b20a3503SChristoph Lameter 	return -ENOSYS;
1143b20a3503SChristoph Lameter }
114495a402c3SChristoph Lameter 
1145666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
114695a402c3SChristoph Lameter {
114795a402c3SChristoph Lameter 	return NULL;
114895a402c3SChristoph Lameter }
1149b20a3503SChristoph Lameter #endif
1150b20a3503SChristoph Lameter 
1151dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1152028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1153028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
11546ce3c4c0SChristoph Lameter {
11556ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
11566ce3c4c0SChristoph Lameter 	struct mempolicy *new;
11576ce3c4c0SChristoph Lameter 	unsigned long end;
11586ce3c4c0SChristoph Lameter 	int err;
11596ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
11606ce3c4c0SChristoph Lameter 
1161b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
11626ce3c4c0SChristoph Lameter 		return -EINVAL;
116374c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
11646ce3c4c0SChristoph Lameter 		return -EPERM;
11656ce3c4c0SChristoph Lameter 
11666ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
11676ce3c4c0SChristoph Lameter 		return -EINVAL;
11686ce3c4c0SChristoph Lameter 
11696ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
11706ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
11716ce3c4c0SChristoph Lameter 
11726ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
11736ce3c4c0SChristoph Lameter 	end = start + len;
11746ce3c4c0SChristoph Lameter 
11756ce3c4c0SChristoph Lameter 	if (end < start)
11766ce3c4c0SChristoph Lameter 		return -EINVAL;
11776ce3c4c0SChristoph Lameter 	if (end == start)
11786ce3c4c0SChristoph Lameter 		return 0;
11796ce3c4c0SChristoph Lameter 
1180028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
11816ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
11826ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
11836ce3c4c0SChristoph Lameter 
1184b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1185b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1186b24f53a0SLee Schermerhorn 
11876ce3c4c0SChristoph Lameter 	/*
11886ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
11896ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
11906ce3c4c0SChristoph Lameter 	 */
11916ce3c4c0SChristoph Lameter 	if (!new)
11926ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
11936ce3c4c0SChristoph Lameter 
1194028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1195028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
119600ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
11976ce3c4c0SChristoph Lameter 
11980aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
11990aedadf9SChristoph Lameter 
12000aedadf9SChristoph Lameter 		err = migrate_prep();
12010aedadf9SChristoph Lameter 		if (err)
1202b05ca738SKOSAKI Motohiro 			goto mpol_out;
12030aedadf9SChristoph Lameter 	}
12044bfc4495SKAMEZAWA Hiroyuki 	{
12054bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12064bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12076ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
120858568d2aSMiao Xie 			task_lock(current);
12094bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
121058568d2aSMiao Xie 			task_unlock(current);
12114bfc4495SKAMEZAWA Hiroyuki 			if (err)
121258568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12134bfc4495SKAMEZAWA Hiroyuki 		} else
12144bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12154bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12164bfc4495SKAMEZAWA Hiroyuki 	}
1217b05ca738SKOSAKI Motohiro 	if (err)
1218b05ca738SKOSAKI Motohiro 		goto mpol_out;
1219b05ca738SKOSAKI Motohiro 
1220d05f0cdcSHugh Dickins 	err = queue_pages_range(mm, start, end, nmask,
12216ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1222d05f0cdcSHugh Dickins 	if (!err)
12239d8cebd4SKOSAKI Motohiro 		err = mbind_range(mm, start, end, new);
12247e2ab150SChristoph Lameter 
1225b24f53a0SLee Schermerhorn 	if (!err) {
1226b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1227b24f53a0SLee Schermerhorn 
1228cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1229b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1230d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1231d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1232cf608ac1SMinchan Kim 			if (nr_failed)
123374060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1234cf608ac1SMinchan Kim 		}
12356ce3c4c0SChristoph Lameter 
1236b24f53a0SLee Schermerhorn 		if (nr_failed && (flags & MPOL_MF_STRICT))
12376ce3c4c0SChristoph Lameter 			err = -EIO;
1238ab8a3e14SKOSAKI Motohiro 	} else
1239b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1240b20a3503SChristoph Lameter 
12416ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1242b05ca738SKOSAKI Motohiro  mpol_out:
1243f0be3d32SLee Schermerhorn 	mpol_put(new);
12446ce3c4c0SChristoph Lameter 	return err;
12456ce3c4c0SChristoph Lameter }
12466ce3c4c0SChristoph Lameter 
124739743889SChristoph Lameter /*
12488bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
12498bccd85fSChristoph Lameter  */
12508bccd85fSChristoph Lameter 
12518bccd85fSChristoph Lameter /* Copy a node mask from user space. */
125239743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
12538bccd85fSChristoph Lameter 		     unsigned long maxnode)
12548bccd85fSChristoph Lameter {
12558bccd85fSChristoph Lameter 	unsigned long k;
125656521e7aSYisheng Xie 	unsigned long t;
12578bccd85fSChristoph Lameter 	unsigned long nlongs;
12588bccd85fSChristoph Lameter 	unsigned long endmask;
12598bccd85fSChristoph Lameter 
12608bccd85fSChristoph Lameter 	--maxnode;
12618bccd85fSChristoph Lameter 	nodes_clear(*nodes);
12628bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
12638bccd85fSChristoph Lameter 		return 0;
1264a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1265636f13c1SChris Wright 		return -EINVAL;
12668bccd85fSChristoph Lameter 
12678bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
12688bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
12698bccd85fSChristoph Lameter 		endmask = ~0UL;
12708bccd85fSChristoph Lameter 	else
12718bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
12728bccd85fSChristoph Lameter 
127356521e7aSYisheng Xie 	/*
127456521e7aSYisheng Xie 	 * When the user specified more nodes than supported, just check
127556521e7aSYisheng Xie 	 * whether the unsupported part is all zero.
127656521e7aSYisheng Xie 	 *
127756521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check
127856521e7aSYisheng Xie 	 * the bits in that area first, and then go on to check
127956521e7aSYisheng Xie 	 * the remaining bits at or above MAX_NUMNODES.
128056521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
128156521e7aSYisheng Xie 	 */
12828bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
12838bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
12848bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
12858bccd85fSChristoph Lameter 				return -EFAULT;
12868bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
12878bccd85fSChristoph Lameter 				if (t & endmask)
12888bccd85fSChristoph Lameter 					return -EINVAL;
12898bccd85fSChristoph Lameter 			} else if (t)
12908bccd85fSChristoph Lameter 				return -EINVAL;
12918bccd85fSChristoph Lameter 		}
12928bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
12938bccd85fSChristoph Lameter 		endmask = ~0UL;
12948bccd85fSChristoph Lameter 	}
12958bccd85fSChristoph Lameter 
129656521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
129756521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
129856521e7aSYisheng Xie 
129956521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
130056521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
130156521e7aSYisheng Xie 			return -EFAULT;
130256521e7aSYisheng Xie 		if (t & valid_mask)
130356521e7aSYisheng Xie 			return -EINVAL;
130456521e7aSYisheng Xie 	}
130556521e7aSYisheng Xie 
13068bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13078bccd85fSChristoph Lameter 		return -EFAULT;
13088bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13098bccd85fSChristoph Lameter 	return 0;
13108bccd85fSChristoph Lameter }
13118bccd85fSChristoph Lameter 
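/*
 * Worked example for get_nodes() (illustrative only, not from the original
 * source).  On a 64-bit kernel with MAX_NUMNODES == 1024, a caller passing
 * maxnode == 65 yields:
 *
 *	--maxnode	-> 64 usable bits
 *	nlongs		-> BITS_TO_LONGS(64) == 1
 *	endmask		-> ~0UL, since 64 % BITS_PER_LONG == 0
 *
 * so exactly one long is copied from user space and masked with endmask.
 * Had maxnode exceeded MAX_NUMNODES, the extra bits would only have been
 * scanned to verify that no unsupported node is requested.
 */
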
13128bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13138bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13148bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13158bccd85fSChristoph Lameter {
13168bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
13178bccd85fSChristoph Lameter 	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
13188bccd85fSChristoph Lameter 
13198bccd85fSChristoph Lameter 	if (copy > nbytes) {
13208bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13218bccd85fSChristoph Lameter 			return -EINVAL;
13228bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13238bccd85fSChristoph Lameter 			return -EFAULT;
13248bccd85fSChristoph Lameter 		copy = nbytes;
13258bccd85fSChristoph Lameter 	}
13268bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13278bccd85fSChristoph Lameter }
13288bccd85fSChristoph Lameter 
1329e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1330e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1331e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
13328bccd85fSChristoph Lameter {
13338bccd85fSChristoph Lameter 	nodemask_t nodes;
13348bccd85fSChristoph Lameter 	int err;
1335028fec41SDavid Rientjes 	unsigned short mode_flags;
13368bccd85fSChristoph Lameter 
1337028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1338028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1339a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1340a3b51e01SDavid Rientjes 		return -EINVAL;
13414c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
13424c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
13434c50bc01SDavid Rientjes 		return -EINVAL;
13448bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13458bccd85fSChristoph Lameter 	if (err)
13468bccd85fSChristoph Lameter 		return err;
1347028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
13488bccd85fSChristoph Lameter }
13498bccd85fSChristoph Lameter 
1350e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1351e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1352e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1353e7dc9ad6SDominik Brodowski {
1354e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1355e7dc9ad6SDominik Brodowski }
1356e7dc9ad6SDominik Brodowski 
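/*
 * Minimal user-space sketch of how the mbind() entry point above is
 * typically reached (illustrative only; assumes the mbind() wrapper from
 * libnuma's <numaif.h> and a machine with a node 0, error handling
 * omitted):
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;	// node 0 only
 *
 *	// Restrict the range to node 0, migrating already-faulted pages.
 *	mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *	      MPOL_MF_MOVE);
 */
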
13578bccd85fSChristoph Lameter /* Set the process memory policy */
1358af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1359af03c4acSDominik Brodowski 				 unsigned long maxnode)
13608bccd85fSChristoph Lameter {
13618bccd85fSChristoph Lameter 	int err;
13628bccd85fSChristoph Lameter 	nodemask_t nodes;
1363028fec41SDavid Rientjes 	unsigned short flags;
13648bccd85fSChristoph Lameter 
1365028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1366028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1367028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
13688bccd85fSChristoph Lameter 		return -EINVAL;
13694c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
13704c50bc01SDavid Rientjes 		return -EINVAL;
13718bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
13728bccd85fSChristoph Lameter 	if (err)
13738bccd85fSChristoph Lameter 		return err;
1374028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
13758bccd85fSChristoph Lameter }
13768bccd85fSChristoph Lameter 
1377af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1378af03c4acSDominik Brodowski 		unsigned long, maxnode)
1379af03c4acSDominik Brodowski {
1380af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1381af03c4acSDominik Brodowski }
1382af03c4acSDominik Brodowski 
1383b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1384b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1385b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
138639743889SChristoph Lameter {
1387596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
138839743889SChristoph Lameter 	struct task_struct *task;
138939743889SChristoph Lameter 	nodemask_t task_nodes;
139039743889SChristoph Lameter 	int err;
1391596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1392596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1393596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
139439743889SChristoph Lameter 
1395596d7cfaSKOSAKI Motohiro 	if (!scratch)
1396596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
139739743889SChristoph Lameter 
1398596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1399596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1400596d7cfaSKOSAKI Motohiro 
1401596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
140239743889SChristoph Lameter 	if (err)
1403596d7cfaSKOSAKI Motohiro 		goto out;
1404596d7cfaSKOSAKI Motohiro 
1405596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1406596d7cfaSKOSAKI Motohiro 	if (err)
1407596d7cfaSKOSAKI Motohiro 		goto out;
140839743889SChristoph Lameter 
140939743889SChristoph Lameter 	/* Find the mm_struct */
141055cfaa3cSZeng Zhaoming 	rcu_read_lock();
1411228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
141239743889SChristoph Lameter 	if (!task) {
141355cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1414596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1415596d7cfaSKOSAKI Motohiro 		goto out;
141639743889SChristoph Lameter 	}
14173268c63eSChristoph Lameter 	get_task_struct(task);
141839743889SChristoph Lameter 
1419596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
142039743889SChristoph Lameter 
142139743889SChristoph Lameter 	/*
142231367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
142331367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
142439743889SChristoph Lameter 	 */
142531367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1426c69e8d9cSDavid Howells 		rcu_read_unlock();
142739743889SChristoph Lameter 		err = -EPERM;
14283268c63eSChristoph Lameter 		goto out_put;
142939743889SChristoph Lameter 	}
1430c69e8d9cSDavid Howells 	rcu_read_unlock();
143139743889SChristoph Lameter 
143239743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
143339743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1434596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
143539743889SChristoph Lameter 		err = -EPERM;
14363268c63eSChristoph Lameter 		goto out_put;
143739743889SChristoph Lameter 	}
143839743889SChristoph Lameter 
14390486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
14400486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
14410486a38bSYisheng Xie 	if (nodes_empty(*new))
14423268c63eSChristoph Lameter 		goto out_put;
14430486a38bSYisheng Xie 
14440486a38bSYisheng Xie 	nodes_and(*new, *new, node_states[N_MEMORY]);
14450486a38bSYisheng Xie 	if (nodes_empty(*new))
14460486a38bSYisheng Xie 		goto out_put;
14473b42d28bSChristoph Lameter 
144886c3a764SDavid Quigley 	err = security_task_movememory(task);
144986c3a764SDavid Quigley 	if (err)
14503268c63eSChristoph Lameter 		goto out_put;
145186c3a764SDavid Quigley 
14523268c63eSChristoph Lameter 	mm = get_task_mm(task);
14533268c63eSChristoph Lameter 	put_task_struct(task);
1454f2a9ef88SSasha Levin 
1455f2a9ef88SSasha Levin 	if (!mm) {
1456f2a9ef88SSasha Levin 		err = -EINVAL;
1457f2a9ef88SSasha Levin 		goto out;
1458f2a9ef88SSasha Levin 	}
1459f2a9ef88SSasha Levin 
1460596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
146174c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
14623268c63eSChristoph Lameter 
146339743889SChristoph Lameter 	mmput(mm);
14643268c63eSChristoph Lameter out:
1465596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1466596d7cfaSKOSAKI Motohiro 
146739743889SChristoph Lameter 	return err;
14683268c63eSChristoph Lameter 
14693268c63eSChristoph Lameter out_put:
14703268c63eSChristoph Lameter 	put_task_struct(task);
14713268c63eSChristoph Lameter 	goto out;
14723268c63eSChristoph Lameter 
147339743889SChristoph Lameter }
147439743889SChristoph Lameter 
1475b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1476b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1477b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1478b6e9b0baSDominik Brodowski {
1479b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1480b6e9b0baSDominik Brodowski }
1481b6e9b0baSDominik Brodowski 
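/*
 * Illustrative user-space counterpart of the entry point above (a sketch,
 * not part of this file; assumes the migrate_pages() wrapper from libnuma's
 * <numaif.h> and an existing target process 'pid', error handling omitted):
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0;	// pages currently on node 0
 *	unsigned long new_nodes = 1UL << 1;	// should move to node 1
 *
 *	migrate_pages(pid, 8 * sizeof(unsigned long),
 *		      &old_nodes, &new_nodes);
 */
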
148239743889SChristoph Lameter 
14838bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1484af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1485af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1486af03c4acSDominik Brodowski 				unsigned long maxnode,
1487af03c4acSDominik Brodowski 				unsigned long addr,
1488af03c4acSDominik Brodowski 				unsigned long flags)
14898bccd85fSChristoph Lameter {
1490dbcb0f19SAdrian Bunk 	int err;
1491dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
14928bccd85fSChristoph Lameter 	nodemask_t nodes;
14938bccd85fSChristoph Lameter 
14948bccd85fSChristoph Lameter 	if (nmask != NULL && maxnode < MAX_NUMNODES)
14958bccd85fSChristoph Lameter 		return -EINVAL;
14968bccd85fSChristoph Lameter 
14978bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
14988bccd85fSChristoph Lameter 
14998bccd85fSChristoph Lameter 	if (err)
15008bccd85fSChristoph Lameter 		return err;
15018bccd85fSChristoph Lameter 
15028bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15038bccd85fSChristoph Lameter 		return -EFAULT;
15048bccd85fSChristoph Lameter 
15058bccd85fSChristoph Lameter 	if (nmask)
15068bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15078bccd85fSChristoph Lameter 
15088bccd85fSChristoph Lameter 	return err;
15098bccd85fSChristoph Lameter }
15108bccd85fSChristoph Lameter 
1511af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1512af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1513af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1514af03c4acSDominik Brodowski {
1515af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1516af03c4acSDominik Brodowski }
1517af03c4acSDominik Brodowski 
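/*
 * Illustrative user-space query matching the entry point above (a sketch,
 * not from the original source; assumes libnuma's <numaif.h> and some
 * mapped address 'addr'):
 *
 *	#include <numaif.h>
 *
 *	int mode;
 *	unsigned long nodemask;
 *
 *	// Which policy and nodemask govern the mapping containing 'addr'?
 *	get_mempolicy(&mode, &nodemask, 8 * sizeof(nodemask), addr,
 *		      MPOL_F_ADDR);
 */
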
15181da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15191da177e4SLinus Torvalds 
1520c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1521c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1522c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1523c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15241da177e4SLinus Torvalds {
15251da177e4SLinus Torvalds 	long err;
15261da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15271da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15281da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15291da177e4SLinus Torvalds 
15301da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15311da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds 	if (nmask)
15341da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
15351da177e4SLinus Torvalds 
1536af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
15371da177e4SLinus Torvalds 
15381da177e4SLinus Torvalds 	if (!err && nmask) {
15392bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
15402bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
15412bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
15421da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
15431da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
15441da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
15451da177e4SLinus Torvalds 	}
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 	return err;
15481da177e4SLinus Torvalds }
15491da177e4SLinus Torvalds 
1550c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1551c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
15521da177e4SLinus Torvalds {
15531da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15541da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
15551da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
15561da177e4SLinus Torvalds 
15571da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15581da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15591da177e4SLinus Torvalds 
15601da177e4SLinus Torvalds 	if (nmask) {
1561cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
15621da177e4SLinus Torvalds 			return -EFAULT;
1563cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1564cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1565cf01fb99SChris Salls 			return -EFAULT;
1566cf01fb99SChris Salls 	}
15671da177e4SLinus Torvalds 
1568af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
15691da177e4SLinus Torvalds }
15701da177e4SLinus Torvalds 
1571c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1572c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1573c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
15741da177e4SLinus Torvalds {
15751da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15761da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1577dfcd3c0dSAndi Kleen 	nodemask_t bm;
15781da177e4SLinus Torvalds 
15791da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
15801da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
15811da177e4SLinus Torvalds 
15821da177e4SLinus Torvalds 	if (nmask) {
1583cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
15841da177e4SLinus Torvalds 			return -EFAULT;
1585cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1586cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1587cf01fb99SChris Salls 			return -EFAULT;
1588cf01fb99SChris Salls 	}
15891da177e4SLinus Torvalds 
1590e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
15911da177e4SLinus Torvalds }
15921da177e4SLinus Torvalds 
1593b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1594b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1595b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1596b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1597b6e9b0baSDominik Brodowski {
1598b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1599b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1600b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1601b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1602b6e9b0baSDominik Brodowski 	unsigned long size;
1603b6e9b0baSDominik Brodowski 
1604b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1605b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1606b6e9b0baSDominik Brodowski 	if (old_nodes) {
1607b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1608b6e9b0baSDominik Brodowski 			return -EFAULT;
1609b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1610b6e9b0baSDominik Brodowski 		if (new_nodes)
1611b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1612b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1613b6e9b0baSDominik Brodowski 			return -EFAULT;
1614b6e9b0baSDominik Brodowski 	}
1615b6e9b0baSDominik Brodowski 	if (new_nodes) {
1616b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1617b6e9b0baSDominik Brodowski 			return -EFAULT;
1618b6e9b0baSDominik Brodowski 		if (new == NULL)
1619b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1620b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1621b6e9b0baSDominik Brodowski 			return -EFAULT;
1622b6e9b0baSDominik Brodowski 	}
1623b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1624b6e9b0baSDominik Brodowski }
1625b6e9b0baSDominik Brodowski 
1626b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
16271da177e4SLinus Torvalds 
162874d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
162974d2c3a0SOleg Nesterov 						unsigned long addr)
16301da177e4SLinus Torvalds {
16318d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
16321da177e4SLinus Torvalds 
16331da177e4SLinus Torvalds 	if (vma) {
1634480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
16358d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
163600442ad0SMel Gorman 		} else if (vma->vm_policy) {
16371da177e4SLinus Torvalds 			pol = vma->vm_policy;
163800442ad0SMel Gorman 
163900442ad0SMel Gorman 			/*
164000442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
164100442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
164200442ad0SMel Gorman 			 * count on these policies which will be dropped by
164300442ad0SMel Gorman 			 * mpol_cond_put() later
164400442ad0SMel Gorman 			 */
164500442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
164600442ad0SMel Gorman 				mpol_get(pol);
164700442ad0SMel Gorman 		}
16481da177e4SLinus Torvalds 	}
1649f15ca78eSOleg Nesterov 
165074d2c3a0SOleg Nesterov 	return pol;
165174d2c3a0SOleg Nesterov }
165274d2c3a0SOleg Nesterov 
165374d2c3a0SOleg Nesterov /*
1654dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
165574d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
165674d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
165774d2c3a0SOleg Nesterov  *
165874d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1659dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
166074d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
166174d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
166274d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
166374d2c3a0SOleg Nesterov  * extra reference for shared policies.
166474d2c3a0SOleg Nesterov  */
1665dd6eecb9SOleg Nesterov static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1666dd6eecb9SOleg Nesterov 						unsigned long addr)
166774d2c3a0SOleg Nesterov {
166874d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
166974d2c3a0SOleg Nesterov 
16708d90274bSOleg Nesterov 	if (!pol)
1671dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
16728d90274bSOleg Nesterov 
16731da177e4SLinus Torvalds 	return pol;
16741da177e4SLinus Torvalds }
16751da177e4SLinus Torvalds 
16766b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1677fc314724SMel Gorman {
16786b6482bbSOleg Nesterov 	struct mempolicy *pol;
1679f15ca78eSOleg Nesterov 
1680fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1681fc314724SMel Gorman 		bool ret = false;
1682fc314724SMel Gorman 
1683fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1684fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1685fc314724SMel Gorman 			ret = true;
1686fc314724SMel Gorman 		mpol_cond_put(pol);
1687fc314724SMel Gorman 
1688fc314724SMel Gorman 		return ret;
16898d90274bSOleg Nesterov 	}
16908d90274bSOleg Nesterov 
1691fc314724SMel Gorman 	pol = vma->vm_policy;
16928d90274bSOleg Nesterov 	if (!pol)
16936b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1694fc314724SMel Gorman 
1695fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1696fc314724SMel Gorman }
1697fc314724SMel Gorman 
1698d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1699d3eb1570SLai Jiangshan {
1700d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1701d3eb1570SLai Jiangshan 
1702d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1703d3eb1570SLai Jiangshan 
1704d3eb1570SLai Jiangshan 	/*
1705d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1706d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1707d3eb1570SLai Jiangshan 	 *
1708d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1709d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1710d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1711d3eb1570SLai Jiangshan 	 */
1712d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1713d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1714d3eb1570SLai Jiangshan 
1715d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1716d3eb1570SLai Jiangshan }
1717d3eb1570SLai Jiangshan 
171852cd3b07SLee Schermerhorn /*
171952cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
172052cd3b07SLee Schermerhorn  * page allocation
172152cd3b07SLee Schermerhorn  */
172252cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
172319770b32SMel Gorman {
172419770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
172545c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1726d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
172719770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
172819770b32SMel Gorman 		return &policy->v.nodes;
172919770b32SMel Gorman 
173019770b32SMel Gorman 	return NULL;
173119770b32SMel Gorman }
173219770b32SMel Gorman 
173304ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
173404ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
17352f5f9486SAndi Kleen 								int nd)
17361da177e4SLinus Torvalds {
17376d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
17381da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
17396d840958SMichal Hocko 	else {
174019770b32SMel Gorman 		/*
17416d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
17426d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
17436d840958SMichal Hocko 		 * requested node and not break the policy.
174419770b32SMel Gorman 		 */
17456d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
17461da177e4SLinus Torvalds 	}
17476d840958SMichal Hocko 
174804ec6264SVlastimil Babka 	return nd;
17491da177e4SLinus Torvalds }
17501da177e4SLinus Torvalds 
17511da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
17521da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
17531da177e4SLinus Torvalds {
175445816682SVlastimil Babka 	unsigned next;
17551da177e4SLinus Torvalds 	struct task_struct *me = current;
17561da177e4SLinus Torvalds 
175745816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1758f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
175945816682SVlastimil Babka 		me->il_prev = next;
176045816682SVlastimil Babka 	return next;
17611da177e4SLinus Torvalds }
17621da177e4SLinus Torvalds 
1763dc85da15SChristoph Lameter /*
1764dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1765dc85da15SChristoph Lameter  * next slab entry.
1766dc85da15SChristoph Lameter  */
17672a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1768dc85da15SChristoph Lameter {
1769e7b691b0SAndi Kleen 	struct mempolicy *policy;
17702a389610SDavid Rientjes 	int node = numa_mem_id();
1771e7b691b0SAndi Kleen 
1772e7b691b0SAndi Kleen 	if (in_interrupt())
17732a389610SDavid Rientjes 		return node;
1774e7b691b0SAndi Kleen 
1775e7b691b0SAndi Kleen 	policy = current->mempolicy;
1776fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
17772a389610SDavid Rientjes 		return node;
1778765c4507SChristoph Lameter 
1779bea904d5SLee Schermerhorn 	switch (policy->mode) {
1780bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1781fc36b8d3SLee Schermerhorn 		/*
1782fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1783fc36b8d3SLee Schermerhorn 		 */
1784bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1785bea904d5SLee Schermerhorn 
1786dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1787dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1788dc85da15SChristoph Lameter 
1789dd1a239fSMel Gorman 	case MPOL_BIND: {
1790c33d6c06SMel Gorman 		struct zoneref *z;
1791c33d6c06SMel Gorman 
1792dc85da15SChristoph Lameter 		/*
1793dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1794dc85da15SChristoph Lameter 		 * first node.
1795dc85da15SChristoph Lameter 		 */
179619770b32SMel Gorman 		struct zonelist *zonelist;
179719770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1798c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1799c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1800c33d6c06SMel Gorman 							&policy->v.nodes);
1801c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1802dd1a239fSMel Gorman 	}
1803dc85da15SChristoph Lameter 
1804dc85da15SChristoph Lameter 	default:
1805bea904d5SLee Schermerhorn 		BUG();
1806dc85da15SChristoph Lameter 	}
1807dc85da15SChristoph Lameter }
1808dc85da15SChristoph Lameter 
1809fee83b3aSAndrew Morton /*
1810fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1811fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1812fee83b3aSAndrew Morton  * number of present nodes.
1813fee83b3aSAndrew Morton  */
181498c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
18151da177e4SLinus Torvalds {
1816dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1817f5b087b5SDavid Rientjes 	unsigned target;
1818fee83b3aSAndrew Morton 	int i;
1819fee83b3aSAndrew Morton 	int nid;
18201da177e4SLinus Torvalds 
1821f5b087b5SDavid Rientjes 	if (!nnodes)
1822f5b087b5SDavid Rientjes 		return numa_node_id();
1823fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1824fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1825fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1826dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18271da177e4SLinus Torvalds 	return nid;
18281da177e4SLinus Torvalds }
18291da177e4SLinus Torvalds 
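/*
 * Worked example for offset_il_node() (illustrative only): with
 * pol->v.nodes = {0,2,5} (nnodes == 3) and n == 7, target == 7 % 3 == 1,
 * so the walk starts at node 0 and advances one step, returning node 2.
 * A given @n therefore always maps to the same node, which is what the
 * static interleaving described above relies on.
 */
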
18305da7ca86SChristoph Lameter /* Determine a node number for interleave */
18315da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
18325da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
18335da7ca86SChristoph Lameter {
18345da7ca86SChristoph Lameter 	if (vma) {
18355da7ca86SChristoph Lameter 		unsigned long off;
18365da7ca86SChristoph Lameter 
18373b98b087SNishanth Aravamudan 		/*
18383b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
18393b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
18403b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
18413b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
18423b98b087SNishanth Aravamudan 		 * a useful offset.
18433b98b087SNishanth Aravamudan 		 */
18443b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
18453b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
18465da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
184798c70baaSLaurent Dufour 		return offset_il_node(pol, off);
18485da7ca86SChristoph Lameter 	} else
18495da7ca86SChristoph Lameter 		return interleave_nodes(pol);
18505da7ca86SChristoph Lameter }
18515da7ca86SChristoph Lameter 
185200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1853480eccf9SLee Schermerhorn /*
185404ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1855b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1856b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1857b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1858b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1859b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1860480eccf9SLee Schermerhorn  *
186104ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
186252cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
186352cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
186452cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1865c0ff7453SMiao Xie  *
1866d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1867480eccf9SLee Schermerhorn  */
186804ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
186904ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
18705da7ca86SChristoph Lameter {
187104ec6264SVlastimil Babka 	int nid;
18725da7ca86SChristoph Lameter 
1873dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
187419770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
18755da7ca86SChristoph Lameter 
187652cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
187704ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
187804ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
187952cd3b07SLee Schermerhorn 	} else {
188004ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
188152cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
188252cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1883480eccf9SLee Schermerhorn 	}
188404ec6264SVlastimil Babka 	return nid;
18855da7ca86SChristoph Lameter }
188606808b08SLee Schermerhorn 
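/*
 * Typical caller pattern for huge_node() (an illustrative sketch of how the
 * hugetlb allocator uses it, not code from this file; 'gfp_mask' stands in
 * for the caller's allocation mask):
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid;
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	// ... allocate a huge page preferring 'nid', filtered by 'nodemask' ...
 *	mpol_cond_put(mpol);
 */
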
188706808b08SLee Schermerhorn /*
188806808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
188906808b08SLee Schermerhorn  *
189006808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
189106808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
189206808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
189306808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
189406808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
189506808b08SLee Schermerhorn  * of non-default mempolicy.
189606808b08SLee Schermerhorn  *
189706808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
189806808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
189906808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
190006808b08SLee Schermerhorn  *
190106808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
190206808b08SLee Schermerhorn  */
190306808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
190406808b08SLee Schermerhorn {
190506808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
190606808b08SLee Schermerhorn 	int nid;
190706808b08SLee Schermerhorn 
190806808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
190906808b08SLee Schermerhorn 		return false;
191006808b08SLee Schermerhorn 
1911c0ff7453SMiao Xie 	task_lock(current);
191206808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
191306808b08SLee Schermerhorn 	switch (mempolicy->mode) {
191406808b08SLee Schermerhorn 	case MPOL_PREFERRED:
191506808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
191606808b08SLee Schermerhorn 			nid = numa_node_id();
191706808b08SLee Schermerhorn 		else
191806808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
191906808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
192006808b08SLee Schermerhorn 		break;
192106808b08SLee Schermerhorn 
192206808b08SLee Schermerhorn 	case MPOL_BIND:
192306808b08SLee Schermerhorn 		/* Fall through */
192406808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
192506808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
192606808b08SLee Schermerhorn 		break;
192706808b08SLee Schermerhorn 
192806808b08SLee Schermerhorn 	default:
192906808b08SLee Schermerhorn 		BUG();
193006808b08SLee Schermerhorn 	}
1931c0ff7453SMiao Xie 	task_unlock(current);
193206808b08SLee Schermerhorn 
193306808b08SLee Schermerhorn 	return true;
193406808b08SLee Schermerhorn }
193500ac59adSChen, Kenneth W #endif
19365da7ca86SChristoph Lameter 
19376f48d0ebSDavid Rientjes /*
19386f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
19396f48d0ebSDavid Rientjes  *
19406f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
19416f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
19426f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
19436f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
19446f48d0ebSDavid Rientjes  *
19456f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
19466f48d0ebSDavid Rientjes  */
19476f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
19486f48d0ebSDavid Rientjes 					const nodemask_t *mask)
19496f48d0ebSDavid Rientjes {
19506f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
19516f48d0ebSDavid Rientjes 	bool ret = true;
19526f48d0ebSDavid Rientjes 
19536f48d0ebSDavid Rientjes 	if (!mask)
19546f48d0ebSDavid Rientjes 		return ret;
19556f48d0ebSDavid Rientjes 	task_lock(tsk);
19566f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
19576f48d0ebSDavid Rientjes 	if (!mempolicy)
19586f48d0ebSDavid Rientjes 		goto out;
19596f48d0ebSDavid Rientjes 
19606f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
19616f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
19626f48d0ebSDavid Rientjes 		/*
19636f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
19646f48d0ebSDavid Rientjes 		 * allocate from; they may fall back to other nodes when OOM.
19656f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
19666f48d0ebSDavid Rientjes 		 * nodes in mask.
19676f48d0ebSDavid Rientjes 		 */
19686f48d0ebSDavid Rientjes 		break;
19696f48d0ebSDavid Rientjes 	case MPOL_BIND:
19706f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
19716f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
19726f48d0ebSDavid Rientjes 		break;
19736f48d0ebSDavid Rientjes 	default:
19746f48d0ebSDavid Rientjes 		BUG();
19756f48d0ebSDavid Rientjes 	}
19766f48d0ebSDavid Rientjes out:
19776f48d0ebSDavid Rientjes 	task_unlock(tsk);
19786f48d0ebSDavid Rientjes 	return ret;
19796f48d0ebSDavid Rientjes }
19806f48d0ebSDavid Rientjes 
19811da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
19821da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
1983662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1984662f3a0bSAndi Kleen 					unsigned nid)
19851da177e4SLinus Torvalds {
19861da177e4SLinus Torvalds 	struct page *page;
19871da177e4SLinus Torvalds 
198804ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
19894518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
19904518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
19914518085eSKemi Wang 		return page;
1992de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
1993de55c8b2SAndrey Ryabinin 		preempt_disable();
1994de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
1995de55c8b2SAndrey Ryabinin 		preempt_enable();
1996de55c8b2SAndrey Ryabinin 	}
19971da177e4SLinus Torvalds 	return page;
19981da177e4SLinus Torvalds }
19991da177e4SLinus Torvalds 
20001da177e4SLinus Torvalds /**
20010bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20021da177e4SLinus Torvalds  *
20031da177e4SLinus Torvalds  * 	@gfp:
20041da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20051da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20061da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20071da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20081da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20091da177e4SLinus Torvalds  *
20100bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20111da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20121da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2013be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
2014be97a41bSVlastimil Babka  *	@hugepage: for hugepages try only the preferred node if possible
20151da177e4SLinus Torvalds  *
20161da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20171da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20181da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20191da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2020be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2021be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
20221da177e4SLinus Torvalds  */
20231da177e4SLinus Torvalds struct page *
20240bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2025be97a41bSVlastimil Babka 		unsigned long addr, int node, bool hugepage)
20261da177e4SLinus Torvalds {
2027cc9a6c87SMel Gorman 	struct mempolicy *pol;
2028c0ff7453SMiao Xie 	struct page *page;
202904ec6264SVlastimil Babka 	int preferred_nid;
2030be97a41bSVlastimil Babka 	nodemask_t *nmask;
20311da177e4SLinus Torvalds 
2032dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2033cc9a6c87SMel Gorman 
2034be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
20351da177e4SLinus Torvalds 		unsigned nid;
20365da7ca86SChristoph Lameter 
20378eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
203852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
20390bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2040be97a41bSVlastimil Babka 		goto out;
20411da177e4SLinus Torvalds 	}
20421da177e4SLinus Torvalds 
20430867a57cSVlastimil Babka 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
20440867a57cSVlastimil Babka 		int hpage_node = node;
20450867a57cSVlastimil Babka 
20460867a57cSVlastimil Babka 		/*
20470867a57cSVlastimil Babka 		 * For hugepage allocation and non-interleave policy which
20480867a57cSVlastimil Babka 		 * allows the current node (or other explicitly preferred
20490867a57cSVlastimil Babka 		 * node) we only try to allocate from the current/preferred
20500867a57cSVlastimil Babka 		 * node and don't fall back to other nodes, as the cost of
20510867a57cSVlastimil Babka 		 * remote accesses would likely offset THP benefits.
20520867a57cSVlastimil Babka 		 *
20530867a57cSVlastimil Babka 		 * If the policy is interleave, or does not allow the current
20540867a57cSVlastimil Babka 		 * node in its nodemask, we allocate the standard way.
20550867a57cSVlastimil Babka 		 */
20560867a57cSVlastimil Babka 		if (pol->mode == MPOL_PREFERRED &&
20570867a57cSVlastimil Babka 						!(pol->flags & MPOL_F_LOCAL))
20580867a57cSVlastimil Babka 			hpage_node = pol->v.preferred_node;
20590867a57cSVlastimil Babka 
20600867a57cSVlastimil Babka 		nmask = policy_nodemask(gfp, pol);
20610867a57cSVlastimil Babka 		if (!nmask || node_isset(hpage_node, *nmask)) {
20620867a57cSVlastimil Babka 			mpol_cond_put(pol);
206396db800fSVlastimil Babka 			page = __alloc_pages_node(hpage_node,
20640867a57cSVlastimil Babka 						gfp | __GFP_THISNODE, order);
20650867a57cSVlastimil Babka 			goto out;
20660867a57cSVlastimil Babka 		}
20670867a57cSVlastimil Babka 	}
20680867a57cSVlastimil Babka 
2069077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
207004ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
207104ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2072d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2073be97a41bSVlastimil Babka out:
2074077fcf11SAneesh Kumar K.V 	return page;
2075077fcf11SAneesh Kumar K.V }
2076077fcf11SAneesh Kumar K.V 
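/*
 * For reference (an illustrative note; the wrappers live in
 * include/linux/gfp.h): order-0 user allocations reach this function via
 *
 *	alloc_page_vma(gfp, vma, addr)
 *		-> alloc_pages_vma(gfp, 0, vma, addr, numa_node_id(), false)
 *
 * while the THP fault path uses
 *
 *	alloc_hugepage_vma(gfp, vma, addr, order)
 *		-> alloc_pages_vma(gfp, order, vma, addr, numa_node_id(), true)
 *
 * so the 'hugepage' special-casing above only triggers for THP.
 */
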
20771da177e4SLinus Torvalds /**
20781da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
20791da177e4SLinus Torvalds  *
20801da177e4SLinus Torvalds  *	@gfp:
20811da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
20821da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
20831da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
20841da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
20851da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
20861da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
20871da177e4SLinus Torvalds  *
20881da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
20891da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
20901da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
20911da177e4SLinus Torvalds  */
2092dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
20931da177e4SLinus Torvalds {
20948d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2095c0ff7453SMiao Xie 	struct page *page;
20961da177e4SLinus Torvalds 
20978d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
20988d90274bSOleg Nesterov 		pol = get_task_policy(current);
209952cd3b07SLee Schermerhorn 
210052cd3b07SLee Schermerhorn 	/*
210152cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
210252cd3b07SLee Schermerhorn 	 * nor system default_policy
210352cd3b07SLee Schermerhorn 	 */
210445c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2105c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2106c0ff7453SMiao Xie 	else
2107c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
210804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
21095c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2110cc9a6c87SMel Gorman 
2111c0ff7453SMiao Xie 	return page;
21121da177e4SLinus Torvalds }
21131da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
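/*
 * Illustrative sketch (not part of this file): kernel code normally reaches
 * alloc_pages_current() through the alloc_pages() wrapper on NUMA kernels.
 * The helper name demo_alloc_scratch_page() is hypothetical and only shown
 * to make the calling convention concrete.
 */
#if 0	/* example only */
static void *demo_alloc_scratch_page(void)
{
	/* Order-0 allocation; the current task's mempolicy picks the node. */
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (!page)
		return NULL;

	/* Caller is responsible for __free_pages(page, 0) later. */
	return page_address(page);
}
#endif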
21141da177e4SLinus Torvalds 
2115ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2116ef0855d3SOleg Nesterov {
2117ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2118ef0855d3SOleg Nesterov 
2119ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2120ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2121ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2122ef0855d3SOleg Nesterov 	return 0;
2123ef0855d3SOleg Nesterov }
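/*
 * Note: vma_dup_policy() above is what gives a split or copied VMA its own
 * copy of the source VMA's mempolicy (fork, mremap, VMA splitting and
 * similar paths), so both mappings can drop their policies independently.
 */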
2124ef0855d3SOleg Nesterov 
21254225399aSPaul Jackson /*
2126846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21274225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
21284225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
21294225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
21304225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2131708c1bbcSMiao Xie  *
2132708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2133708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
21344225399aSPaul Jackson  */
21354225399aSPaul Jackson 
2136846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2137846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
21381da177e4SLinus Torvalds {
21391da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
21401da177e4SLinus Torvalds 
21411da177e4SLinus Torvalds 	if (!new)
21421da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2143708c1bbcSMiao Xie 
2144708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2145708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2146708c1bbcSMiao Xie 		task_lock(current);
2147708c1bbcSMiao Xie 		*new = *old;
2148708c1bbcSMiao Xie 		task_unlock(current);
2149708c1bbcSMiao Xie 	} else
2150708c1bbcSMiao Xie 		*new = *old;
2151708c1bbcSMiao Xie 
21524225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
21534225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2154213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
21554225399aSPaul Jackson 	}
21561da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
21571da177e4SLinus Torvalds 	return new;
21581da177e4SLinus Torvalds }
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2161fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
21621da177e4SLinus Torvalds {
21631da177e4SLinus Torvalds 	if (!a || !b)
2164fcfb4dccSKOSAKI Motohiro 		return false;
216545c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2166fcfb4dccSKOSAKI Motohiro 		return false;
216719800502SBob Liu 	if (a->flags != b->flags)
2168fcfb4dccSKOSAKI Motohiro 		return false;
216919800502SBob Liu 	if (mpol_store_user_nodemask(a))
217019800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2171fcfb4dccSKOSAKI Motohiro 			return false;
217219800502SBob Liu 
217345c4745aSLee Schermerhorn 	switch (a->mode) {
217419770b32SMel Gorman 	case MPOL_BIND:
217519770b32SMel Gorman 		/* Fall through */
21761da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2177fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
21781da177e4SLinus Torvalds 	case MPOL_PREFERRED:
21798970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
21808970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
21818970a63eSYisheng Xie 			return true;
218275719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
21831da177e4SLinus Torvalds 	default:
21841da177e4SLinus Torvalds 		BUG();
2185fcfb4dccSKOSAKI Motohiro 		return false;
21861da177e4SLinus Torvalds 	}
21871da177e4SLinus Torvalds }
21881da177e4SLinus Torvalds 
21891da177e4SLinus Torvalds /*
21901da177e4SLinus Torvalds  * Shared memory backing store policy support.
21911da177e4SLinus Torvalds  *
21921da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
21931da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
21944a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
21951da177e4SLinus Torvalds  * for any accesses to the tree.
21961da177e4SLinus Torvalds  */
21971da177e4SLinus Torvalds 
21984a8c7bb5SNathan Zimmer /*
21994a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
22004a8c7bb5SNathan Zimmer  * for reading or for writing.
22014a8c7bb5SNathan Zimmer  */
22021da177e4SLinus Torvalds static struct sp_node *
22031da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
22041da177e4SLinus Torvalds {
22051da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
22061da177e4SLinus Torvalds 
22071da177e4SLinus Torvalds 	while (n) {
22081da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22091da177e4SLinus Torvalds 
22101da177e4SLinus Torvalds 		if (start >= p->end)
22111da177e4SLinus Torvalds 			n = n->rb_right;
22121da177e4SLinus Torvalds 		else if (end <= p->start)
22131da177e4SLinus Torvalds 			n = n->rb_left;
22141da177e4SLinus Torvalds 		else
22151da177e4SLinus Torvalds 			break;
22161da177e4SLinus Torvalds 	}
22171da177e4SLinus Torvalds 	if (!n)
22181da177e4SLinus Torvalds 		return NULL;
22191da177e4SLinus Torvalds 	for (;;) {
22201da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22211da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22221da177e4SLinus Torvalds 		if (!prev)
22231da177e4SLinus Torvalds 			break;
22241da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22251da177e4SLinus Torvalds 		if (w->end <= start)
22261da177e4SLinus Torvalds 			break;
22271da177e4SLinus Torvalds 		n = prev;
22281da177e4SLinus Torvalds 	}
22291da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
22301da177e4SLinus Torvalds }
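/*
 * Worked example (hypothetical ranges): with policies stored for [0,4) and
 * [6,10), sp_lookup(sp, 3, 8) first finds any intersecting node, then walks
 * rb_prev() until the predecessor no longer overlaps the range, so it
 * returns the [0,4) node, the lowest range intersecting [3,8).
 */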
22311da177e4SLinus Torvalds 
22324a8c7bb5SNathan Zimmer /*
22334a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
22344a8c7bb5SNathan Zimmer  * writing.
22354a8c7bb5SNathan Zimmer  */
22361da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
22371da177e4SLinus Torvalds {
22381da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
22391da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
22401da177e4SLinus Torvalds 	struct sp_node *nd;
22411da177e4SLinus Torvalds 
22421da177e4SLinus Torvalds 	while (*p) {
22431da177e4SLinus Torvalds 		parent = *p;
22441da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
22451da177e4SLinus Torvalds 		if (new->start < nd->start)
22461da177e4SLinus Torvalds 			p = &(*p)->rb_left;
22471da177e4SLinus Torvalds 		else if (new->end > nd->end)
22481da177e4SLinus Torvalds 			p = &(*p)->rb_right;
22491da177e4SLinus Torvalds 		else
22501da177e4SLinus Torvalds 			BUG();
22511da177e4SLinus Torvalds 	}
22521da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
22531da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2254140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
225545c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
22561da177e4SLinus Torvalds }
22571da177e4SLinus Torvalds 
22581da177e4SLinus Torvalds /* Find shared policy intersecting idx */
22591da177e4SLinus Torvalds struct mempolicy *
22601da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
22611da177e4SLinus Torvalds {
22621da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
22631da177e4SLinus Torvalds 	struct sp_node *sn;
22641da177e4SLinus Torvalds 
22651da177e4SLinus Torvalds 	if (!sp->root.rb_node)
22661da177e4SLinus Torvalds 		return NULL;
22674a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
22681da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
22691da177e4SLinus Torvalds 	if (sn) {
22701da177e4SLinus Torvalds 		mpol_get(sn->policy);
22711da177e4SLinus Torvalds 		pol = sn->policy;
22721da177e4SLinus Torvalds 	}
22734a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
22741da177e4SLinus Torvalds 	return pol;
22751da177e4SLinus Torvalds }
22761da177e4SLinus Torvalds 
227763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
227863f74ca2SKOSAKI Motohiro {
227963f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
228063f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
228163f74ca2SKOSAKI Motohiro }
228263f74ca2SKOSAKI Motohiro 
2283771fb4d8SLee Schermerhorn /**
2284771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2285771fb4d8SLee Schermerhorn  *
2286b46e14acSFabian Frederick  * @page: page to be checked
2287b46e14acSFabian Frederick  * @vma: vm area where page mapped
2288b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2289771fb4d8SLee Schermerhorn  *
2290771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2291771fb4d8SLee Schermerhorn  * page's node id.
2292771fb4d8SLee Schermerhorn  *
2293771fb4d8SLee Schermerhorn  * Returns:
2294771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2295771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2296771fb4d8SLee Schermerhorn  *
2297771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2298771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2299771fb4d8SLee Schermerhorn  */
2300771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2301771fb4d8SLee Schermerhorn {
2302771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2303c33d6c06SMel Gorman 	struct zoneref *z;
2304771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2305771fb4d8SLee Schermerhorn 	unsigned long pgoff;
230690572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
230790572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
2308771fb4d8SLee Schermerhorn 	int polnid = -1;
2309771fb4d8SLee Schermerhorn 	int ret = -1;
2310771fb4d8SLee Schermerhorn 
2311dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2312771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2313771fb4d8SLee Schermerhorn 		goto out;
2314771fb4d8SLee Schermerhorn 
2315771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2316771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2317771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2318771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
231998c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2320771fb4d8SLee Schermerhorn 		break;
2321771fb4d8SLee Schermerhorn 
2322771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2323771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2324771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2325771fb4d8SLee Schermerhorn 		else
2326771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2327771fb4d8SLee Schermerhorn 		break;
2328771fb4d8SLee Schermerhorn 
2329771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2330c33d6c06SMel Gorman 
2331771fb4d8SLee Schermerhorn 		/*
2332771fb4d8SLee Schermerhorn 		 * allows binding to multiple nodes.
2333771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2334771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2335771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2336771fb4d8SLee Schermerhorn 		 */
2337771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2338771fb4d8SLee Schermerhorn 			goto out;
2339c33d6c06SMel Gorman 		z = first_zones_zonelist(
2340771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2341771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2342c33d6c06SMel Gorman 				&pol->v.nodes);
2343c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2344771fb4d8SLee Schermerhorn 		break;
2345771fb4d8SLee Schermerhorn 
2346771fb4d8SLee Schermerhorn 	default:
2347771fb4d8SLee Schermerhorn 		BUG();
2348771fb4d8SLee Schermerhorn 	}
23495606e387SMel Gorman 
23505606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2351e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
235290572890SPeter Zijlstra 		polnid = thisnid;
23535606e387SMel Gorman 
235410f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2355de1c9ce6SRik van Riel 			goto out;
2356de1c9ce6SRik van Riel 	}
2357e42c8ff2SMel Gorman 
2358771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2359771fb4d8SLee Schermerhorn 		ret = polnid;
2360771fb4d8SLee Schermerhorn out:
2361771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2362771fb4d8SLee Schermerhorn 
2363771fb4d8SLee Schermerhorn 	return ret;
2364771fb4d8SLee Schermerhorn }
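/*
 * Illustrative sketch (an assumption, not the real NUMA fault path): how a
 * caller might consume mpol_misplaced()'s return value. The function name
 * demo_check_placement() is hypothetical.
 */
#if 0	/* example only */
static void demo_check_placement(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == -1)
		return;	/* page already sits on an acceptable node */

	/* target_nid is the node the policy would prefer for this page. */
	pr_debug("page on node %d, policy wants node %d\n",
		 page_to_nid(page), target_nid);
}
#endif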
2365771fb4d8SLee Schermerhorn 
2366c11600e4SDavid Rientjes /*
2367c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2368c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2369c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2370c11600e4SDavid Rientjes  * policy.
2371c11600e4SDavid Rientjes  */
2372c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2373c11600e4SDavid Rientjes {
2374c11600e4SDavid Rientjes 	struct mempolicy *pol;
2375c11600e4SDavid Rientjes 
2376c11600e4SDavid Rientjes 	task_lock(task);
2377c11600e4SDavid Rientjes 	pol = task->mempolicy;
2378c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2379c11600e4SDavid Rientjes 	task_unlock(task);
2380c11600e4SDavid Rientjes 	mpol_put(pol);
2381c11600e4SDavid Rientjes }
2382c11600e4SDavid Rientjes 
23831da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
23841da177e4SLinus Torvalds {
2385140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
23861da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
238763f74ca2SKOSAKI Motohiro 	sp_free(n);
23881da177e4SLinus Torvalds }
23891da177e4SLinus Torvalds 
239042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
239142288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
239242288fe3SMel Gorman {
239342288fe3SMel Gorman 	node->start = start;
239442288fe3SMel Gorman 	node->end = end;
239542288fe3SMel Gorman 	node->policy = pol;
239642288fe3SMel Gorman }
239742288fe3SMel Gorman 
2398dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2399dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
24001da177e4SLinus Torvalds {
2401869833f2SKOSAKI Motohiro 	struct sp_node *n;
2402869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24031da177e4SLinus Torvalds 
2404869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24051da177e4SLinus Torvalds 	if (!n)
24061da177e4SLinus Torvalds 		return NULL;
2407869833f2SKOSAKI Motohiro 
2408869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2409869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2410869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2411869833f2SKOSAKI Motohiro 		return NULL;
2412869833f2SKOSAKI Motohiro 	}
2413869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
241442288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2415869833f2SKOSAKI Motohiro 
24161da177e4SLinus Torvalds 	return n;
24171da177e4SLinus Torvalds }
24181da177e4SLinus Torvalds 
24191da177e4SLinus Torvalds /* Replace a policy range. */
24201da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
24211da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
24221da177e4SLinus Torvalds {
2423b22d127aSMel Gorman 	struct sp_node *n;
242442288fe3SMel Gorman 	struct sp_node *n_new = NULL;
242542288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2426b22d127aSMel Gorman 	int ret = 0;
24271da177e4SLinus Torvalds 
242842288fe3SMel Gorman restart:
24294a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
24301da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
24311da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
24321da177e4SLinus Torvalds 	while (n && n->start < end) {
24331da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
24341da177e4SLinus Torvalds 		if (n->start >= start) {
24351da177e4SLinus Torvalds 			if (n->end <= end)
24361da177e4SLinus Torvalds 				sp_delete(sp, n);
24371da177e4SLinus Torvalds 			else
24381da177e4SLinus Torvalds 				n->start = end;
24391da177e4SLinus Torvalds 		} else {
24401da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
24411da177e4SLinus Torvalds 			if (n->end > end) {
244242288fe3SMel Gorman 				if (!n_new)
244342288fe3SMel Gorman 					goto alloc_new;
244442288fe3SMel Gorman 
244542288fe3SMel Gorman 				*mpol_new = *n->policy;
244642288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
24477880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
24481da177e4SLinus Torvalds 				n->end = start;
24495ca39575SHillf Danton 				sp_insert(sp, n_new);
245042288fe3SMel Gorman 				n_new = NULL;
245142288fe3SMel Gorman 				mpol_new = NULL;
24521da177e4SLinus Torvalds 				break;
24531da177e4SLinus Torvalds 			} else
24541da177e4SLinus Torvalds 				n->end = start;
24551da177e4SLinus Torvalds 		}
24561da177e4SLinus Torvalds 		if (!next)
24571da177e4SLinus Torvalds 			break;
24581da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
24591da177e4SLinus Torvalds 	}
24601da177e4SLinus Torvalds 	if (new)
24611da177e4SLinus Torvalds 		sp_insert(sp, new);
24624a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
246342288fe3SMel Gorman 	ret = 0;
246442288fe3SMel Gorman 
246542288fe3SMel Gorman err_out:
246642288fe3SMel Gorman 	if (mpol_new)
246742288fe3SMel Gorman 		mpol_put(mpol_new);
246842288fe3SMel Gorman 	if (n_new)
246942288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
247042288fe3SMel Gorman 
2471b22d127aSMel Gorman 	return ret;
247242288fe3SMel Gorman 
247342288fe3SMel Gorman alloc_new:
24744a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
247542288fe3SMel Gorman 	ret = -ENOMEM;
247642288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
247742288fe3SMel Gorman 	if (!n_new)
247842288fe3SMel Gorman 		goto err_out;
247942288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
248042288fe3SMel Gorman 	if (!mpol_new)
248142288fe3SMel Gorman 		goto err_out;
248242288fe3SMel Gorman 	goto restart;
24831da177e4SLinus Torvalds }
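/*
 * Worked example (hypothetical ranges): replacing [2,5) while an old node
 * spans [0,10) takes the "old policy spanning whole new range" branch above:
 * the old node is trimmed to [0,2), a duplicate of its policy is inserted
 * for [5,10), and the new [2,5) node goes in between.
 */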
24841da177e4SLinus Torvalds 
248571fe804bSLee Schermerhorn /**
248671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
248771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
248871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
248971fe804bSLee Schermerhorn  *
249071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
249171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
249271fe804bSLee Schermerhorn  * This must be released on exit.
24934bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode() paths, so we can use GFP_KERNEL.
249471fe804bSLee Schermerhorn  */
249571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
24967339ff83SRobin Holt {
249758568d2aSMiao Xie 	int ret;
249858568d2aSMiao Xie 
249971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
25004a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
25017339ff83SRobin Holt 
250271fe804bSLee Schermerhorn 	if (mpol) {
25037339ff83SRobin Holt 		struct vm_area_struct pvma;
250471fe804bSLee Schermerhorn 		struct mempolicy *new;
25054bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25067339ff83SRobin Holt 
25074bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25085c0c1654SLee Schermerhorn 			goto put_mpol;
250971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
251071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
251115d77835SLee Schermerhorn 		if (IS_ERR(new))
25120cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
251358568d2aSMiao Xie 
251458568d2aSMiao Xie 		task_lock(current);
25154bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
251658568d2aSMiao Xie 		task_unlock(current);
251715d77835SLee Schermerhorn 		if (ret)
25185c0c1654SLee Schermerhorn 			goto put_new;
251971fe804bSLee Schermerhorn 
252071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
25212c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
252271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
252371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
252415d77835SLee Schermerhorn 
25255c0c1654SLee Schermerhorn put_new:
252671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
25270cae3457SDan Carpenter free_scratch:
25284bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
25295c0c1654SLee Schermerhorn put_mpol:
25305c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
25317339ff83SRobin Holt 	}
25327339ff83SRobin Holt }
25337339ff83SRobin Holt 
25341da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
25351da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
25361da177e4SLinus Torvalds {
25371da177e4SLinus Torvalds 	int err;
25381da177e4SLinus Torvalds 	struct sp_node *new = NULL;
25391da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
25401da177e4SLinus Torvalds 
2541028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
25421da177e4SLinus Torvalds 		 vma->vm_pgoff,
254345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2544028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
254500ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
25461da177e4SLinus Torvalds 
25471da177e4SLinus Torvalds 	if (npol) {
25481da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
25491da177e4SLinus Torvalds 		if (!new)
25501da177e4SLinus Torvalds 			return -ENOMEM;
25511da177e4SLinus Torvalds 	}
25521da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
25531da177e4SLinus Torvalds 	if (err && new)
255463f74ca2SKOSAKI Motohiro 		sp_free(new);
25551da177e4SLinus Torvalds 	return err;
25561da177e4SLinus Torvalds }
25571da177e4SLinus Torvalds 
25581da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
25591da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
25601da177e4SLinus Torvalds {
25611da177e4SLinus Torvalds 	struct sp_node *n;
25621da177e4SLinus Torvalds 	struct rb_node *next;
25631da177e4SLinus Torvalds 
25641da177e4SLinus Torvalds 	if (!p->root.rb_node)
25651da177e4SLinus Torvalds 		return;
25664a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
25671da177e4SLinus Torvalds 	next = rb_first(&p->root);
25681da177e4SLinus Torvalds 	while (next) {
25691da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25701da177e4SLinus Torvalds 		next = rb_next(&n->nd);
257163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
25721da177e4SLinus Torvalds 	}
25734a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
25741da177e4SLinus Torvalds }
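/*
 * Illustrative lifecycle sketch (an assumption modelled on tmpfs, not part
 * of this file): a filesystem embedding a struct shared_policy in its
 * per-inode data. All demo_* names are hypothetical.
 */
#if 0	/* example only */
struct demo_inode_info {
	struct shared_policy	policy;
	/* ... other per-inode state ... */
};

static void demo_inode_init(struct demo_inode_info *info,
			    struct mempolicy *mount_mpol)
{
	/* Consumes the reference on mount_mpol; mount_mpol may be NULL. */
	mpol_shared_policy_init(&info->policy, mount_mpol);
}

static struct mempolicy *demo_lookup(struct demo_inode_info *info,
				     pgoff_t index)
{
	/* Returns a referenced policy, or NULL to use task/system default. */
	return mpol_shared_policy_lookup(&info->policy, index);
}

static void demo_inode_destroy(struct demo_inode_info *info)
{
	mpol_free_shared_policy(&info->policy);
}
#endif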
25751da177e4SLinus Torvalds 
25761a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2577c297663cSMel Gorman static int __initdata numabalancing_override;
25781a687c2eSMel Gorman 
25791a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
25801a687c2eSMel Gorman {
25811a687c2eSMel Gorman 	bool numabalancing_default = false;
25821a687c2eSMel Gorman 
25831a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
25841a687c2eSMel Gorman 		numabalancing_default = true;
25851a687c2eSMel Gorman 
2586c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2587c297663cSMel Gorman 	if (numabalancing_override)
2588c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2589c297663cSMel Gorman 
2590b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2591756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2592c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
25931a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
25941a687c2eSMel Gorman 	}
25951a687c2eSMel Gorman }
25961a687c2eSMel Gorman 
25971a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
25981a687c2eSMel Gorman {
25991a687c2eSMel Gorman 	int ret = 0;
26001a687c2eSMel Gorman 	if (!str)
26011a687c2eSMel Gorman 		goto out;
26021a687c2eSMel Gorman 
26031a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2604c297663cSMel Gorman 		numabalancing_override = 1;
26051a687c2eSMel Gorman 		ret = 1;
26061a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2607c297663cSMel Gorman 		numabalancing_override = -1;
26081a687c2eSMel Gorman 		ret = 1;
26091a687c2eSMel Gorman 	}
26101a687c2eSMel Gorman out:
26111a687c2eSMel Gorman 	if (!ret)
26124a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
26131a687c2eSMel Gorman 
26141a687c2eSMel Gorman 	return ret;
26151a687c2eSMel Gorman }
26161a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
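/*
 * Example (boot command line): "numa_balancing=enable" or
 * "numa_balancing=disable" overrides the CONFIG_NUMA_BALANCING_DEFAULT_ENABLED
 * default that check_numabalancing_enable() would otherwise apply.
 */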
26171a687c2eSMel Gorman #else
26181a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
26191a687c2eSMel Gorman {
26201a687c2eSMel Gorman }
26211a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26221a687c2eSMel Gorman 
26231da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
26241da177e4SLinus Torvalds void __init numa_policy_init(void)
26251da177e4SLinus Torvalds {
2626b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2627b71636e2SPaul Mundt 	unsigned long largest = 0;
2628b71636e2SPaul Mundt 	int nid, prefer = 0;
2629b71636e2SPaul Mundt 
26301da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
26311da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
263220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
26331da177e4SLinus Torvalds 
26341da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
26351da177e4SLinus Torvalds 				     sizeof(struct sp_node),
263620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
26371da177e4SLinus Torvalds 
26385606e387SMel Gorman 	for_each_node(nid) {
26395606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
26405606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
26415606e387SMel Gorman 			.mode = MPOL_PREFERRED,
26425606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
26435606e387SMel Gorman 			.v = { .preferred_node = nid, },
26445606e387SMel Gorman 		};
26455606e387SMel Gorman 	}
26465606e387SMel Gorman 
2647b71636e2SPaul Mundt 	/*
2648b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2649b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB),
2650b71636e2SPaul Mundt 	 * falling back to the largest node if they're all smaller.
2651b71636e2SPaul Mundt 	 */
2652b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
265301f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2654b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
26551da177e4SLinus Torvalds 
2656b71636e2SPaul Mundt 		/* Preserve the largest node */
2657b71636e2SPaul Mundt 		if (largest < total_pages) {
2658b71636e2SPaul Mundt 			largest = total_pages;
2659b71636e2SPaul Mundt 			prefer = nid;
2660b71636e2SPaul Mundt 		}
2661b71636e2SPaul Mundt 
2662b71636e2SPaul Mundt 		/* Interleave this node? */
2663b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2664b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2665b71636e2SPaul Mundt 	}
2666b71636e2SPaul Mundt 
2667b71636e2SPaul Mundt 	/* All too small, use the largest */
2668b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2669b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2670b71636e2SPaul Mundt 
2671028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2672b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
26731a687c2eSMel Gorman 
26741a687c2eSMel Gorman 	check_numabalancing_enable();
26751da177e4SLinus Torvalds }
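/*
 * Worked example for the interleave selection above (hypothetical machine):
 * with node 0 holding 4GB and node 1 holding 8MB, only node 0 clears the
 * 16MB threshold, so boot-time interleaving covers node 0 alone; if every
 * node were under 16MB, the single largest node would be used instead.
 */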
26761da177e4SLinus Torvalds 
26778bccd85fSChristoph Lameter /* Reset policy of current process to default */
26781da177e4SLinus Torvalds void numa_default_policy(void)
26791da177e4SLinus Torvalds {
2680028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
26811da177e4SLinus Torvalds }
268268860ec1SPaul Jackson 
26834225399aSPaul Jackson /*
2684095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2685095f1fc4SLee Schermerhorn  */
2686095f1fc4SLee Schermerhorn 
2687095f1fc4SLee Schermerhorn /*
2688f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
26891a75a6c8SChristoph Lameter  */
2690345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2691345ace9cSLee Schermerhorn {
2692345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2693345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2694345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2695345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2696d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2697345ace9cSLee Schermerhorn };
26981a75a6c8SChristoph Lameter 
2699095f1fc4SLee Schermerhorn 
2700095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2701095f1fc4SLee Schermerhorn /**
2702f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2703095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
270471fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2705095f1fc4SLee Schermerhorn  *
2706095f1fc4SLee Schermerhorn  * Format of input:
2707095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2708095f1fc4SLee Schermerhorn  *
270971fe804bSLee Schermerhorn  * On success, returns 0, else 1
2710095f1fc4SLee Schermerhorn  */
2711a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2712095f1fc4SLee Schermerhorn {
271371fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2714f2a07f40SHugh Dickins 	unsigned short mode_flags;
271571fe804bSLee Schermerhorn 	nodemask_t nodes;
2716095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2717095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2718*dedf2c73Szhong jiang 	int err = 1, mode;
2719095f1fc4SLee Schermerhorn 
2720095f1fc4SLee Schermerhorn 	if (nodelist) {
2721095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2722095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
272371fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2724095f1fc4SLee Schermerhorn 			goto out;
272501f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2726095f1fc4SLee Schermerhorn 			goto out;
272771fe804bSLee Schermerhorn 	} else
272871fe804bSLee Schermerhorn 		nodes_clear(nodes);
272971fe804bSLee Schermerhorn 
2730095f1fc4SLee Schermerhorn 	if (flags)
2731095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2732095f1fc4SLee Schermerhorn 
2733*dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2734*dedf2c73Szhong jiang 	if (mode < 0)
2735095f1fc4SLee Schermerhorn 		goto out;
2736095f1fc4SLee Schermerhorn 
273771fe804bSLee Schermerhorn 	switch (mode) {
2738095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
273971fe804bSLee Schermerhorn 		/*
274071fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
274171fe804bSLee Schermerhorn 		 */
2742095f1fc4SLee Schermerhorn 		if (nodelist) {
2743095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2744095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2745095f1fc4SLee Schermerhorn 				rest++;
2746926f2ae0SKOSAKI Motohiro 			if (*rest)
2747926f2ae0SKOSAKI Motohiro 				goto out;
2748095f1fc4SLee Schermerhorn 		}
2749095f1fc4SLee Schermerhorn 		break;
2750095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2751095f1fc4SLee Schermerhorn 		/*
2752095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2753095f1fc4SLee Schermerhorn 		 */
2754095f1fc4SLee Schermerhorn 		if (!nodelist)
275501f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
27563f226aa1SLee Schermerhorn 		break;
275771fe804bSLee Schermerhorn 	case MPOL_LOCAL:
27583f226aa1SLee Schermerhorn 		/*
275971fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
27603f226aa1SLee Schermerhorn 		 */
276171fe804bSLee Schermerhorn 		if (nodelist)
27623f226aa1SLee Schermerhorn 			goto out;
276371fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
27643f226aa1SLee Schermerhorn 		break;
2765413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2766413b43deSRavikiran G Thirumalai 		/*
2767413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2768413b43deSRavikiran G Thirumalai 		 */
2769413b43deSRavikiran G Thirumalai 		if (!nodelist)
2770413b43deSRavikiran G Thirumalai 			err = 0;
2771413b43deSRavikiran G Thirumalai 		goto out;
2772d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
277371fe804bSLee Schermerhorn 		/*
2774d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
277571fe804bSLee Schermerhorn 		 */
2776d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2777d69b2e63SKOSAKI Motohiro 			goto out;
2778095f1fc4SLee Schermerhorn 	}
2779095f1fc4SLee Schermerhorn 
278071fe804bSLee Schermerhorn 	mode_flags = 0;
2781095f1fc4SLee Schermerhorn 	if (flags) {
2782095f1fc4SLee Schermerhorn 		/*
2783095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2784095f1fc4SLee Schermerhorn 		 * mode flags.
2785095f1fc4SLee Schermerhorn 		 */
2786095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
278771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2788095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
278971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2790095f1fc4SLee Schermerhorn 		else
2791926f2ae0SKOSAKI Motohiro 			goto out;
2792095f1fc4SLee Schermerhorn 	}
279371fe804bSLee Schermerhorn 
279471fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
279571fe804bSLee Schermerhorn 	if (IS_ERR(new))
2796926f2ae0SKOSAKI Motohiro 		goto out;
2797926f2ae0SKOSAKI Motohiro 
2798f2a07f40SHugh Dickins 	/*
2799f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2800f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2801f2a07f40SHugh Dickins 	 */
2802f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2803f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2804f2a07f40SHugh Dickins 	else if (nodelist)
2805f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2806f2a07f40SHugh Dickins 	else
2807f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2808f2a07f40SHugh Dickins 
2809f2a07f40SHugh Dickins 	/*
2810f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2811f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2812f2a07f40SHugh Dickins 	 */
2813e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2814f2a07f40SHugh Dickins 
2815926f2ae0SKOSAKI Motohiro 	err = 0;
281671fe804bSLee Schermerhorn 
2817095f1fc4SLee Schermerhorn out:
2818095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2819095f1fc4SLee Schermerhorn 	if (nodelist)
2820095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2821095f1fc4SLee Schermerhorn 	if (flags)
2822095f1fc4SLee Schermerhorn 		*--flags = '=';
282371fe804bSLee Schermerhorn 	if (!err)
282471fe804bSLee Schermerhorn 		*mpol = new;
2825095f1fc4SLee Schermerhorn 	return err;
2826095f1fc4SLee Schermerhorn }
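/*
 * Example strings accepted by mpol_parse_str() above (values of the tmpfs
 * "mpol=" mount option); the node numbers are hypothetical:
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED node 1, MPOL_F_STATIC_NODES
 *	"bind=relative:0,2"	MPOL_BIND to nodes 0 and 2, MPOL_F_RELATIVE_NODES
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL
 *	"default"		no mempolicy (*mpol left NULL)
 */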
2827095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2828095f1fc4SLee Schermerhorn 
282971fe804bSLee Schermerhorn /**
283071fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
283171fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
283271fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
283371fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
283471fe804bSLee Schermerhorn  *
2835948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2836948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2837948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
28381a75a6c8SChristoph Lameter  */
2839948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
28401a75a6c8SChristoph Lameter {
28411a75a6c8SChristoph Lameter 	char *p = buffer;
2842948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2843948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2844948927eeSDavid Rientjes 	unsigned short flags = 0;
28451a75a6c8SChristoph Lameter 
28468790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2847bea904d5SLee Schermerhorn 		mode = pol->mode;
2848948927eeSDavid Rientjes 		flags = pol->flags;
2849948927eeSDavid Rientjes 	}
2850bea904d5SLee Schermerhorn 
28511a75a6c8SChristoph Lameter 	switch (mode) {
28521a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
28531a75a6c8SChristoph Lameter 		break;
28541a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2855fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2856f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
285753f2556bSLee Schermerhorn 		else
2858fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
28591a75a6c8SChristoph Lameter 		break;
28601a75a6c8SChristoph Lameter 	case MPOL_BIND:
28611a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
28621a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
28631a75a6c8SChristoph Lameter 		break;
28641a75a6c8SChristoph Lameter 	default:
2865948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2866948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2867948927eeSDavid Rientjes 		return;
28681a75a6c8SChristoph Lameter 	}
28691a75a6c8SChristoph Lameter 
2870b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
28711a75a6c8SChristoph Lameter 
2872fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2873948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2874f5b087b5SDavid Rientjes 
28752291990aSLee Schermerhorn 		/*
28762291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
28772291990aSLee Schermerhorn 		 */
2878f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
28792291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
28802291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
28812291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2882f5b087b5SDavid Rientjes 	}
2883f5b087b5SDavid Rientjes 
28849e763e0fSTejun Heo 	if (!nodes_empty(nodes))
28859e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
28869e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
28871a75a6c8SChristoph Lameter }
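/*
 * Example outputs from mpol_to_str() above (node numbers hypothetical):
 * "default", "local", "prefer:1", "bind=static:0,2", "interleave:0-3".
 */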
2888