// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */

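/*
 * Illustrative sketch (not part of this file): how userspace might
 * request the policies described above via set_mempolicy(2) and
 * mbind(2) from <numaif.h>. Hypothetical snippet, error handling
 * omitted:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	// Interleave this task's future allocations across nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, sizeof(mask) * 8);
 *	// Bind one existing mapping to node 0 only, no fallback.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(addr, len, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */
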
/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_map_to_online_node - Find closest online node
 * @node: Node id to start the search
 *
 * Lookup the next closest node by distance if @node is not online.
 */
int numa_map_to_online_node(int node)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (node == NUMA_NO_NODE || node_online(node))
		return node;

	min_node = node;
	for_each_online_node(n) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_map_to_online_node);
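
/*
 * Worked example (hypothetical topology, a sketch only): if node 2 is
 * offline and node_distance(2, 0) < node_distance(2, 1), then
 * numa_map_to_online_node(2) returns 0; NUMA_NO_NODE and nodes that
 * are already online are returned unchanged.
 */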

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

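/*
 * Worked example of the MPOL_F_RELATIVE_NODES remap above (a sketch,
 * numbers chosen for illustration): with *orig = {0,5} and
 * *rel = {4,5,6} (weight 3), nodes_fold() wraps the original bits
 * modulo 3, giving tmp = {0,2}; nodes_onto() then maps bit i of tmp
 * to the i-th set bit of *rel, so *ret = {4,6}.
 */
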
static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (!nodes)
		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
	else if (nodes_empty(*nodes))
		return -EINVAL;			/* no allowed nodes */
	else
		pol->v.preferred_node = first_node(*nodes);
	return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->v.nodes = *nodes;
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
	if (pol == NULL)
		return 0;
	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);
	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
		nodes = NULL;	/* explicit local allocation */
	else {
		if (pol->flags & MPOL_F_RELATIVE_NODES)
			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
		else
			nodes_and(nsc->mask2, *nodes, nsc->mask1);

		if (mpol_store_user_nodemask(pol))
			pol->w.user_nodemask = *nodes;
		else
			pol->w.cpuset_mems_allowed =
						cpuset_current_mems_allowed;
	}

	if (nodes)
		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	else
		ret = mpol_ops[pol->mode].create(pol, NULL);
	return ret;
}

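/*
 * Typical call sequence (a sketch only; see do_set_mempolicy() below
 * for the real thing), with the caller holding task_lock() as the
 * comment above requires:
 *
 *	new = mpol_new(mode, flags, nodes);
 *	task_lock(current);
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 */
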
/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
		mode = MPOL_PREFERRED;
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->v.nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES) {
		int node = first_node(pol->w.user_nodemask);

		if (node_isset(node, *nodes)) {
			pol->v.preferred_node = node;
			pol->flags &= ~MPOL_F_LOCAL;
		} else
			pol->flags |= MPOL_F_LOCAL;
	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
		pol->v.preferred_node = first_node(tmp);
	} else if (!(pol->flags & MPOL_F_LOCAL)) {
		pol->v.preferred_node = node_remap(pol->v.preferred_node,
						   pol->w.cpuset_mems_allowed,
						   *nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol)
		return;
	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;

	mmap_write_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		mpol_rebind_policy(vma->vm_policy, new);
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_interleave,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_bind,
		.rebind = mpol_rebind_nodemask,
	},
};

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags);

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
};

/*
 * Check if the page's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check that the nid is
 * *not* in qp->nmask (i.e. the check is inverted).
 */
static inline bool queue_pages_required(struct page *page,
					struct queue_pages *qp)
{
	int nid = page_to_nid(page);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

/*
 * queue_pages_pmd() has four possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 2 - THP was split.
 * -EIO - the PMD holds a migration entry, or only MPOL_MF_STRICT was
 *        specified and an existing page was already on a node that does
 *        not follow the policy.
 */
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
	__releases(ptl)
{
	int ret = 0;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		ret = -EIO;
		goto unlock;
	}
	page = pmd_page(*pmd);
	if (is_huge_zero_page(page)) {
		spin_unlock(ptl);
		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
		ret = 2;
		goto out;
	}
	if (!queue_pages_required(page, qp))
		goto unlock;

	flags = qp->flags;
	/* go to thp migration */
	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
		if (!vma_migratable(walk->vma) ||
		    migrate_page_add(page, qp->pagelist, flags)) {
			ret = 1;
			goto unlock;
		}
	} else
		ret = -EIO;
unlock:
	spin_unlock(ptl);
out:
	return ret;
}

/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 *
 * queue_pages_pte_range() has three possible return values:
 * 0 - pages are placed on the right node or queued successfully.
 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 *        on a node that does not follow the policy.
 */
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct page *page;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	int ret;
	bool has_unmovable = false;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
		if (ret != 2)
			return ret;
	}
	/* THP was split, fall through to pte walk */

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		/*
		 * vm_normal_page() filters out zero pages, but there might
		 * still be PageReserved pages to skip, perhaps in a VDSO.
		 */
		if (PageReserved(page))
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			/* MPOL_MF_STRICT must be specified if we get here */
			if (!vma_migratable(vma)) {
				has_unmovable = true;
				break;
			}

			/*
			 * Do not abort immediately since there may be
			 * temporary off LRU pages in the range.  Still
			 * need migrate other LRU pages.
			 */
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;
		} else
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	if (has_unmovable)
		return 1;

	return addr != end ? -EIO : 0;
}

static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
	int ret = 0;
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = (qp->flags & MPOL_MF_VALID);
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry))
		goto unlock;
	page = pte_page(entry);
	if (!queue_pages_required(page, qp))
		goto unlock;

	if (flags == MPOL_MF_STRICT) {
		/*
		 * STRICT alone means only detecting misplaced pages and no
		 * need to check other vmas further.
		 */
		ret = -EIO;
		goto unlock;
	}

	if (!vma_migratable(walk->vma)) {
		/*
		 * Must be STRICT with MOVE*, otherwise .test_walk() would
		 * have stopped walking the current vma.
		 * Detect the misplaced page but allow migrating pages which
		 * have been queued.
		 */
		ret = 1;
		goto unlock;
	}

	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
	if (flags & (MPOL_MF_MOVE_ALL) ||
	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
		if (!isolate_huge_page(page, qp->pagelist) &&
			(flags & MPOL_MF_STRICT))
			/*
			 * Failed to isolate page but allow migrating pages
			 * which have been queued.
			 */
			ret = 1;
	}
unlock:
	spin_unlock(ptl);
#else
	BUG();
#endif
	return ret;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	int nr_updated;

	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
	if (nr_updated)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/* queue pages from current vma */
	if (flags & MPOL_MF_VALID)
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_pages_hugetlb,
	.pmd_entry		= queue_pages_pte_range,
	.test_walk		= queue_pages_test_walk,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is passed via @private.
 *
 * queue_pages_range() has three possible return values:
 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 *     specified.
 * 0 - pages queued successfully or no misplaced page.
 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO), or
 *         the memory range specified by nodemask and maxnode points
 *         outside your accessible address space (-EFAULT).
 */
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};

	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err;
}

/*
 * Apply policy to a single VMA
 * This must be called with the mmap_lock held for writing.
 */
static int vma_replace_policy(struct vm_area_struct *vma,
						struct mempolicy *pol)
{
	int err;
	struct mempolicy *old;
	struct mempolicy *new;

	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
		 vma->vm_ops, vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

	new = mpol_dup(pol);
	if (IS_ERR(new))
		return PTR_ERR(new);

	if (vma->vm_ops && vma->vm_ops->set_policy) {
		err = vma->vm_ops->set_policy(vma, new);
		if (err)
			goto err_out;
	}

	old = vma->vm_policy;
	vma->vm_policy = new; /* protected by mmap_lock */
	mpol_put(old);

	return 0;
 err_out:
	mpol_put(new);
	return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct mempolicy *new_pol)
{
	struct vm_area_struct *next;
	struct vm_area_struct *prev;
	struct vm_area_struct *vma;
	int err = 0;
	pgoff_t pgoff;
	unsigned long vmstart;
	unsigned long vmend;

	vma = find_vma(mm, start);
	VM_BUG_ON(!vma);

	prev = vma->vm_prev;
	if (start > vma->vm_start)
		prev = vma;

	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
		next = vma->vm_next;
		vmstart = max(start, vma->vm_start);
		vmend   = min(end, vma->vm_end);

		if (mpol_equal(vma_policy(vma), new_pol))
			continue;

		pgoff = vma->vm_pgoff +
			((vmstart - vma->vm_start) >> PAGE_SHIFT);
		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
				 vma->anon_vma, vma->vm_file, pgoff,
				 new_pol, vma->vm_userfaultfd_ctx);
		if (prev) {
			vma = prev;
			next = vma->vm_next;
			if (mpol_equal(vma_policy(vma), new_pol))
				continue;
			/* vma_merge() joined vma && vma->next, case 8 */
			goto replace;
		}
		if (vma->vm_start != vmstart) {
			err = split_vma(vma->vm_mm, vma, vmstart, 1);
			if (err)
				goto out;
		}
		if (vma->vm_end != vmend) {
			err = split_vma(vma->vm_mm, vma, vmend, 0);
			if (err)
				goto out;
		}
 replace:
		err = vma_replace_policy(vma, new_pol);
		if (err)
			goto out;
	}

 out:
	return err;
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
			     nodemask_t *nodes)
{
	struct mempolicy *new, *old;
	NODEMASK_SCRATCH(scratch);
	int ret;

	if (!scratch)
		return -ENOMEM;

	new = mpol_new(mode, flags, nodes);
	if (IS_ERR(new)) {
		ret = PTR_ERR(new);
		goto out;
	}

	task_lock(current);
	ret = mpol_set_nodemask(new, nodes, scratch);
	if (ret) {
		task_unlock(current);
		mpol_put(new);
		goto out;
	}
	old = current->mempolicy;
	current->mempolicy = new;
	if (new && new->mode == MPOL_INTERLEAVE)
		current->il_prev = MAX_NUMNODES-1;
	task_unlock(current);
	mpol_put(old);
	ret = 0;
out:
	NODEMASK_SCRATCH_FREE(scratch);
	return ret;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 *
 * Called with task's alloc_lock held
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
	nodes_clear(*nodes);
	if (p == &default_policy)
		return;

	switch (p->mode) {
	case MPOL_BIND:
	case MPOL_INTERLEAVE:
		*nodes = p->v.nodes;
		break;
	case MPOL_PREFERRED:
		if (!(p->flags & MPOL_F_LOCAL))
			node_set(p->v.preferred_node, *nodes);
		/* else return empty node mask for local allocation */
		break;
	default:
		BUG();
	}
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
	struct page *p = NULL;
	int err;

	int locked = 1;
	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
	if (err > 0) {
		err = page_to_nid(p);
		put_page(p);
	}
	if (locked)
		mmap_read_unlock(mm);
	return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
			     unsigned long addr, unsigned long flags)
{
	int err;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = NULL;
	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;

	if (flags &
		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
		return -EINVAL;

	if (flags & MPOL_F_MEMS_ALLOWED) {
		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
			return -EINVAL;
		*policy = 0;	/* just so it's initialized */
		task_lock(current);
		*nmask  = cpuset_current_mems_allowed;
		task_unlock(current);
		return 0;
	}

	if (flags & MPOL_F_ADDR) {
		/*
		 * Do NOT fall back to task policy if the
		 * vma/shared policy at addr is NULL.  We
		 * want to return MPOL_DEFAULT in this case.
		 */
		mmap_read_lock(mm);
		vma = find_vma_intersection(mm, addr, addr+1);
		if (!vma) {
			mmap_read_unlock(mm);
			return -EFAULT;
		}
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else
			pol = vma->vm_policy;
	} else if (addr)
		return -EINVAL;

	if (!pol)
		pol = &default_policy;	/* indicates default behavior */

	if (flags & MPOL_F_NODE) {
		if (flags & MPOL_F_ADDR) {
			/*
			 * Take a refcount on the mpol, lookup_node()
			 * will drop the mmap_lock, so after calling
			 * lookup_node() only "pol" remains valid, "vma"
			 * is stale.
			 */
			pol_refcount = pol;
			vma = NULL;
			mpol_get(pol);
			err = lookup_node(mm, addr);
			if (err < 0)
				goto out;
			*policy = err;
		} else if (pol == current->mempolicy &&
				pol->mode == MPOL_INTERLEAVE) {
			*policy = next_node_in(current->il_prev, pol->v.nodes);
		} else {
			err = -EINVAL;
			goto out;
		}
	} else {
		*policy = pol == &default_policy ? MPOL_DEFAULT :
						pol->mode;
		/*
		 * Internal mempolicy flags must be masked off before exposing
		 * the policy to userspace.
		 */
		*policy |= (pol->flags & MPOL_MODE_FLAGS);
	}

	err = 0;
	if (nmask) {
		if (mpol_store_user_nodemask(pol)) {
			*nmask = pol->w.user_nodemask;
		} else {
			task_lock(current);
			get_policy_nodemask(pol, nmask);
			task_unlock(current);
		}
	}

 out:
	mpol_cond_put(pol);
	if (vma)
		mmap_read_unlock(mm);
	if (pol_refcount)
		mpol_put(pol_refcount);
	return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration, thp tail pages can be passed.
 */
static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_lru(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/*
			 * Non-movable page may reach here.  And, there may be
			 * temporary off LRU pages or non-LRU movable pages.
			 * Treat them as unmovable pages since they can't be
			 * isolated, so they can't be moved at the moment.  It
			 * should return -EIO for this case too.
			 */
			return -EIO;
		}
	}

	return 0;
}

/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
			(GFP_TRANSHUGE | __GFP_THISNODE),
			HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	} else
		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						    __GFP_THISNODE, 0);
}

10896ce3c4c0SChristoph Lameter /*
10907e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10917e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10927e2ab150SChristoph Lameter  */
1093dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1094dbcb0f19SAdrian Bunk 			   int flags)
10957e2ab150SChristoph Lameter {
10967e2ab150SChristoph Lameter 	nodemask_t nmask;
10977e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10987e2ab150SChristoph Lameter 	int err = 0;
10997e2ab150SChristoph Lameter 
11007e2ab150SChristoph Lameter 	nodes_clear(nmask);
11017e2ab150SChristoph Lameter 	node_set(source, nmask);
11027e2ab150SChristoph Lameter 
110308270807SMinchan Kim 	/*
110408270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
110508270807SMinchan Kim 	 * need migration.  Between passing in the full user address
110608270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
110708270807SMinchan Kim 	 */
110808270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
110998094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
11107e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
11117e2ab150SChristoph Lameter 
1112cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1113a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
11149c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1115cf608ac1SMinchan Kim 		if (err)
1116e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1117cf608ac1SMinchan Kim 	}
111895a402c3SChristoph Lameter 
11197e2ab150SChristoph Lameter 	return err;
11207e2ab150SChristoph Lameter }
11217e2ab150SChristoph Lameter 
11227e2ab150SChristoph Lameter /*
11237e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
11247e2ab150SChristoph Lameter  * layout as much as possible.
112539743889SChristoph Lameter  *
112739743889SChristoph Lameter  * Returns the number of pages that could not be moved.
112739743889SChristoph Lameter  */
11280ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11290ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
113039743889SChristoph Lameter {
11317e2ab150SChristoph Lameter 	int busy = 0;
11320aedadf9SChristoph Lameter 	int err;
11337e2ab150SChristoph Lameter 	nodemask_t tmp;
113439743889SChristoph Lameter 
11350aedadf9SChristoph Lameter 	err = migrate_prep();
11360aedadf9SChristoph Lameter 	if (err)
11370aedadf9SChristoph Lameter 		return err;
11380aedadf9SChristoph Lameter 
1139d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1140d4984711SChristoph Lameter 
11417e2ab150SChristoph Lameter 	/*
11427e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11437e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11447e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11457e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11467e2ab150SChristoph Lameter 	 *
11477e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11487e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11497e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11507e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11517e2ab150SChristoph Lameter 	 *
11527e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11537e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11547e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11557e2ab150SChristoph Lameter 	 *
11567e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11577e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11587e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11597e2ab150SChristoph Lameter 	 * node, which would happen if we migrated incoming memory to a node
11607e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11617e2ab150SChristoph Lameter 	 *
11627e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11637e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11647e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11657e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out early with that pair.
1166ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
11677e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11687e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11697e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11707e2ab150SChristoph Lameter 	 */
11717e2ab150SChristoph Lameter 
11720ce72d4fSAndrew Morton 	tmp = *from;
11737e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11747e2ab150SChristoph Lameter 		int s, d;
1175b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11767e2ab150SChristoph Lameter 		int dest = 0;
11777e2ab150SChristoph Lameter 
11787e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11794a5b18ccSLarry Woodman 
11804a5b18ccSLarry Woodman 			/*
11814a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11824a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11834a5b18ccSLarry Woodman 			 * threads and memory areas.
11844a5b18ccSLarry Woodman 			 *
11854a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11864a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11874a5b18ccSLarry Woodman 			 * this relative node relationship.  In that case, skip
11884a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11894a5b18ccSLarry Woodman 			 * mask.
11904a5b18ccSLarry Woodman 			 *
11914a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11924a5b18ccSLarry Woodman 			 *          [0-7]  -> [3,4,5] moves only 0,1,2,6,7.
11934a5b18ccSLarry Woodman 			 */
11944a5b18ccSLarry Woodman 
11950ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11960ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11974a5b18ccSLarry Woodman 				continue;
11984a5b18ccSLarry Woodman 
11990ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
12007e2ab150SChristoph Lameter 			if (s == d)
12017e2ab150SChristoph Lameter 				continue;
12027e2ab150SChristoph Lameter 
12037e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
12047e2ab150SChristoph Lameter 			dest = d;
12057e2ab150SChristoph Lameter 
12067e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
12077e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
12087e2ab150SChristoph Lameter 				break;
12097e2ab150SChristoph Lameter 		}
1210b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
12117e2ab150SChristoph Lameter 			break;
12127e2ab150SChristoph Lameter 
12137e2ab150SChristoph Lameter 		node_clear(source, tmp);
12147e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
12157e2ab150SChristoph Lameter 		if (err > 0)
12167e2ab150SChristoph Lameter 			busy += err;
12177e2ab150SChristoph Lameter 		if (err < 0)
12187e2ab150SChristoph Lameter 			break;
121939743889SChristoph Lameter 	}
1220d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
12217e2ab150SChristoph Lameter 	if (err < 0)
12227e2ab150SChristoph Lameter 		return err;
12237e2ab150SChristoph Lameter 	return busy;
1224b20a3503SChristoph Lameter 
122539743889SChristoph Lameter }
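
/*
 * Illustrative, userspace-buildable sketch of the pair-picking loop in
 * do_migrate_pages() above.  A single unsigned long stands in for a
 * nodemask_t, and remap() mimics node_remap() for bits set in @from;
 * the node numbers reproduce the [0-7] -> [3,4,5] example from the
 * comment.  This is a toy model, not the kernel implementation.
 */
#include <stdio.h>

static int nth_set_bit(unsigned long mask, int n)
{
	int bit;

	for (bit = 0; bit < 64; bit++)
		if ((mask & (1UL << bit)) && n-- == 0)
			return bit;
	return -1;
}

/* Map @s's ordinal position in @from onto the set bits of @to. */
static int remap(int s, unsigned long from, unsigned long to)
{
	int n = __builtin_popcountl(from & ((1UL << s) - 1));

	return nth_set_bit(to, n % __builtin_popcountl(to));
}

int main(void)
{
	unsigned long from = 0xffUL;	/* nodes 0-7 */
	unsigned long to = 0x38UL;	/* nodes 3,4,5 */
	unsigned long tmp = from;

	while (tmp) {
		int s, d, source = -1, dest = 0;

		for (s = 0; s < 64; s++) {
			if (!(tmp & (1UL << s)))
				continue;
			/* unequal weights: skip sources already in @to */
			if (__builtin_popcountl(from) !=
			    __builtin_popcountl(to) && (to & (1UL << s)))
				continue;
			d = remap(s, from, to);
			if (s == d)
				continue;
			source = s;	/* node moved, memorize */
			dest = d;
			if (!(tmp & (1UL << d)))	/* empty slot? */
				break;
		}
		if (source < 0)
			break;
		tmp &= ~(1UL << source);
		printf("migrate node %d -> node %d\n", source, dest);
	}
	return 0;	/* moves 7,6,2,1,0; nodes 3,4,5 stay put */
}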
122639743889SChristoph Lameter 
12273ad33b24SLee Schermerhorn /*
12283ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1229d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
12303ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12313ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12323ad33b24SLee Schermerhorn  * is in virtual address order.
12333ad33b24SLee Schermerhorn  */
1234666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
123595a402c3SChristoph Lameter {
1236d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12373f649ab7SKees Cook 	unsigned long address;
123895a402c3SChristoph Lameter 
1239d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12403ad33b24SLee Schermerhorn 	while (vma) {
12413ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12423ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12433ad33b24SLee Schermerhorn 			break;
12443ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12453ad33b24SLee Schermerhorn 	}
12463ad33b24SLee Schermerhorn 
124711c731e8SWanpeng Li 	if (PageHuge(page)) {
1248389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1249389c8178SMichal Hocko 				vma, address);
125094723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1251c8633798SNaoya Horiguchi 		struct page *thp;
1252c8633798SNaoya Horiguchi 
125319deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
125419deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1255c8633798SNaoya Horiguchi 		if (!thp)
1256c8633798SNaoya Horiguchi 			return NULL;
1257c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1258c8633798SNaoya Horiguchi 		return thp;
125911c731e8SWanpeng Li 	}
126011c731e8SWanpeng Li 	/*
126111c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
126211c731e8SWanpeng Li 	 */
12630f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
12640f556856SMichal Hocko 			vma, address);
126595a402c3SChristoph Lameter }
1266b20a3503SChristoph Lameter #else
1267b20a3503SChristoph Lameter 
1268a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1269b20a3503SChristoph Lameter 				unsigned long flags)
1270b20a3503SChristoph Lameter {
1271a53190a4SYang Shi 	return -EIO;
1272b20a3503SChristoph Lameter }
1273b20a3503SChristoph Lameter 
12740ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12750ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1276b20a3503SChristoph Lameter {
1277b20a3503SChristoph Lameter 	return -ENOSYS;
1278b20a3503SChristoph Lameter }
127995a402c3SChristoph Lameter 
1280666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
128195a402c3SChristoph Lameter {
128295a402c3SChristoph Lameter 	return NULL;
128395a402c3SChristoph Lameter }
1284b20a3503SChristoph Lameter #endif
1285b20a3503SChristoph Lameter 
1286dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1287028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1288028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12896ce3c4c0SChristoph Lameter {
12906ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12916ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12926ce3c4c0SChristoph Lameter 	unsigned long end;
12936ce3c4c0SChristoph Lameter 	int err;
1294d8835445SYang Shi 	int ret;
12956ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12966ce3c4c0SChristoph Lameter 
1297b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12986ce3c4c0SChristoph Lameter 		return -EINVAL;
129974c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
13006ce3c4c0SChristoph Lameter 		return -EPERM;
13016ce3c4c0SChristoph Lameter 
13026ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
13036ce3c4c0SChristoph Lameter 		return -EINVAL;
13046ce3c4c0SChristoph Lameter 
13056ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
13066ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
13076ce3c4c0SChristoph Lameter 
13086ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
13096ce3c4c0SChristoph Lameter 	end = start + len;
13106ce3c4c0SChristoph Lameter 
13116ce3c4c0SChristoph Lameter 	if (end < start)
13126ce3c4c0SChristoph Lameter 		return -EINVAL;
13136ce3c4c0SChristoph Lameter 	if (end == start)
13146ce3c4c0SChristoph Lameter 		return 0;
13156ce3c4c0SChristoph Lameter 
1316028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
13176ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
13186ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
13196ce3c4c0SChristoph Lameter 
1320b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1321b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1322b24f53a0SLee Schermerhorn 
13236ce3c4c0SChristoph Lameter 	/*
13246ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operations
13256ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces are okay after all.
13266ce3c4c0SChristoph Lameter 	 */
13276ce3c4c0SChristoph Lameter 	if (!new)
13286ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13296ce3c4c0SChristoph Lameter 
1330028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1331028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
133200ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13336ce3c4c0SChristoph Lameter 
13340aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13350aedadf9SChristoph Lameter 
13360aedadf9SChristoph Lameter 		err = migrate_prep();
13370aedadf9SChristoph Lameter 		if (err)
1338b05ca738SKOSAKI Motohiro 			goto mpol_out;
13390aedadf9SChristoph Lameter 	}
13404bfc4495SKAMEZAWA Hiroyuki 	{
13414bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13424bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1343d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
134458568d2aSMiao Xie 			task_lock(current);
13454bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
134658568d2aSMiao Xie 			task_unlock(current);
13474bfc4495SKAMEZAWA Hiroyuki 			if (err)
1348d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13494bfc4495SKAMEZAWA Hiroyuki 		} else
13504bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13514bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13524bfc4495SKAMEZAWA Hiroyuki 	}
1353b05ca738SKOSAKI Motohiro 	if (err)
1354b05ca738SKOSAKI Motohiro 		goto mpol_out;
1355b05ca738SKOSAKI Motohiro 
1356d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13576ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1358d8835445SYang Shi 
1359d8835445SYang Shi 	if (ret < 0) {
1360a85dfc30SYang Shi 		err = ret;
1361d8835445SYang Shi 		goto up_out;
1362d8835445SYang Shi 	}
1363d8835445SYang Shi 
13649d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13657e2ab150SChristoph Lameter 
1366b24f53a0SLee Schermerhorn 	if (!err) {
1367b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1368b24f53a0SLee Schermerhorn 
1369cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1370b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1371d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1372d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1373cf608ac1SMinchan Kim 			if (nr_failed)
137474060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1375cf608ac1SMinchan Kim 		}
13766ce3c4c0SChristoph Lameter 
1377d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13786ce3c4c0SChristoph Lameter 			err = -EIO;
1379a85dfc30SYang Shi 	} else {
1380d8835445SYang Shi up_out:
1381a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1382a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1383a85dfc30SYang Shi 	}
1384a85dfc30SYang Shi 
1385d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1386b05ca738SKOSAKI Motohiro mpol_out:
1387f0be3d32SLee Schermerhorn 	mpol_put(new);
13886ce3c4c0SChristoph Lameter 	return err;
13896ce3c4c0SChristoph Lameter }
13906ce3c4c0SChristoph Lameter 
139139743889SChristoph Lameter /*
13928bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13938bccd85fSChristoph Lameter  */
13948bccd85fSChristoph Lameter 
13958bccd85fSChristoph Lameter /* Copy a node mask from user space. */
139639743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13978bccd85fSChristoph Lameter 		     unsigned long maxnode)
13988bccd85fSChristoph Lameter {
13998bccd85fSChristoph Lameter 	unsigned long k;
140056521e7aSYisheng Xie 	unsigned long t;
14018bccd85fSChristoph Lameter 	unsigned long nlongs;
14028bccd85fSChristoph Lameter 	unsigned long endmask;
14038bccd85fSChristoph Lameter 
14048bccd85fSChristoph Lameter 	--maxnode;
14058bccd85fSChristoph Lameter 	nodes_clear(*nodes);
14068bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
14078bccd85fSChristoph Lameter 		return 0;
1408a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1409636f13c1SChris Wright 		return -EINVAL;
14108bccd85fSChristoph Lameter 
14118bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
14128bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
14138bccd85fSChristoph Lameter 		endmask = ~0UL;
14148bccd85fSChristoph Lameter 	else
14158bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
14168bccd85fSChristoph Lameter 
141756521e7aSYisheng Xie 	/*
141856521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
141956521e7aSYisheng Xie 	 * that the unsupported part is all zero.
142056521e7aSYisheng Xie 	 *
142156521e7aSYisheng Xie 	 * If maxnode spans more longs than MAX_NUMNODES, check the bits
142256521e7aSYisheng Xie 	 * in that area first, and then go on to check the remaining bits
142356521e7aSYisheng Xie 	 * at or above MAX_NUMNODES.  Otherwise, just check the bits in
142456521e7aSYisheng Xie 	 * [MAX_NUMNODES, maxnode).
142556521e7aSYisheng Xie 	 */
14268bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
14278bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
14288bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
14298bccd85fSChristoph Lameter 				return -EFAULT;
14308bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
14318bccd85fSChristoph Lameter 				if (t & endmask)
14328bccd85fSChristoph Lameter 					return -EINVAL;
14338bccd85fSChristoph Lameter 			} else if (t)
14348bccd85fSChristoph Lameter 				return -EINVAL;
14358bccd85fSChristoph Lameter 		}
14368bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
14378bccd85fSChristoph Lameter 		endmask = ~0UL;
14388bccd85fSChristoph Lameter 	}
14398bccd85fSChristoph Lameter 
144056521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
144156521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
144256521e7aSYisheng Xie 
144356521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
144456521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
144556521e7aSYisheng Xie 			return -EFAULT;
144656521e7aSYisheng Xie 		if (t & valid_mask)
144756521e7aSYisheng Xie 			return -EINVAL;
144856521e7aSYisheng Xie 	}
144956521e7aSYisheng Xie 
14508bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
14518bccd85fSChristoph Lameter 		return -EFAULT;
14528bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
14538bccd85fSChristoph Lameter 	return 0;
14548bccd85fSChristoph Lameter }
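
/*
 * Worked example of the endmask arithmetic in get_nodes() (a standalone
 * sketch, not kernel code): for a user-supplied maxnode of 65 on a
 * 64-bit machine, the decremented value covers exactly one long and
 * endmask is ~0UL; a maxnode of 33 would instead leave only the low 32
 * bits of the last long valid.
 */
#include <stdio.h>

#define BITS_PER_LONG 64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned long maxnode = 65;	/* as passed from user space */
	unsigned long nlongs, endmask;

	--maxnode;
	nlongs = BITS_TO_LONGS(maxnode);
	if ((maxnode % BITS_PER_LONG) == 0)
		endmask = ~0UL;
	else
		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;

	/* prints nlongs=1 endmask=0xffffffffffffffff */
	printf("nlongs=%lu endmask=%#lx\n", nlongs, endmask);
	return 0;
}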
14558bccd85fSChristoph Lameter 
14568bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14578bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14588bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14598bccd85fSChristoph Lameter {
14608bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1461050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
14628bccd85fSChristoph Lameter 
14638bccd85fSChristoph Lameter 	if (copy > nbytes) {
14648bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14658bccd85fSChristoph Lameter 			return -EINVAL;
14668bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14678bccd85fSChristoph Lameter 			return -EFAULT;
14688bccd85fSChristoph Lameter 		copy = nbytes;
14698bccd85fSChristoph Lameter 	}
14708bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14718bccd85fSChristoph Lameter }
14728bccd85fSChristoph Lameter 
1473e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1474e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1475e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14768bccd85fSChristoph Lameter {
14778bccd85fSChristoph Lameter 	nodemask_t nodes;
14788bccd85fSChristoph Lameter 	int err;
1479028fec41SDavid Rientjes 	unsigned short mode_flags;
14808bccd85fSChristoph Lameter 
1481057d3389SAndrey Konovalov 	start = untagged_addr(start);
1482028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1483028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1484a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1485a3b51e01SDavid Rientjes 		return -EINVAL;
14864c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14874c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14884c50bc01SDavid Rientjes 		return -EINVAL;
14898bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14908bccd85fSChristoph Lameter 	if (err)
14918bccd85fSChristoph Lameter 		return err;
1492028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14938bccd85fSChristoph Lameter }
14948bccd85fSChristoph Lameter 
1495e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1496e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1497e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1498e7dc9ad6SDominik Brodowski {
1499e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1500e7dc9ad6SDominik Brodowski }
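
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * binding a fresh anonymous mapping to node 0 through the mbind(2)
 * wrapper declared in libnuma's <numaif.h>.  The node number and length
 * are assumptions chosen for the example.
 */
#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* the full word width comfortably covers the one node bit set */
	if (mbind(p, len, MPOL_BIND, &nodemask,
		  sizeof(nodemask) * 8, MPOL_MF_MOVE))
		perror("mbind");

	return 0;
}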
1501e7dc9ad6SDominik Brodowski 
15028bccd85fSChristoph Lameter /* Set the process memory policy */
1503af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1504af03c4acSDominik Brodowski 				 unsigned long maxnode)
15058bccd85fSChristoph Lameter {
15068bccd85fSChristoph Lameter 	int err;
15078bccd85fSChristoph Lameter 	nodemask_t nodes;
1508028fec41SDavid Rientjes 	unsigned short flags;
15098bccd85fSChristoph Lameter 
1510028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1511028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1512028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
15138bccd85fSChristoph Lameter 		return -EINVAL;
15144c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
15154c50bc01SDavid Rientjes 		return -EINVAL;
15168bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15178bccd85fSChristoph Lameter 	if (err)
15188bccd85fSChristoph Lameter 		return err;
1519028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
15208bccd85fSChristoph Lameter }
15218bccd85fSChristoph Lameter 
1522af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1523af03c4acSDominik Brodowski 		unsigned long, maxnode)
1524af03c4acSDominik Brodowski {
1525af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1526af03c4acSDominik Brodowski }
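
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * switching the calling process to interleaved allocation across nodes
 * 0 and 1 via set_mempolicy(2) from libnuma's <numaif.h>.  The node
 * numbers are assumptions chosen for the example.
 */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
			  sizeof(nodemask) * 8)) {
		perror("set_mempolicy");
		return 1;
	}
	/* new page allocations now alternate between nodes 0 and 1 */
	return 0;
}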
1527af03c4acSDominik Brodowski 
1528b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1529b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1530b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
153139743889SChristoph Lameter {
1532596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
153339743889SChristoph Lameter 	struct task_struct *task;
153439743889SChristoph Lameter 	nodemask_t task_nodes;
153539743889SChristoph Lameter 	int err;
1536596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1537596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1538596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
153939743889SChristoph Lameter 
1540596d7cfaSKOSAKI Motohiro 	if (!scratch)
1541596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
154239743889SChristoph Lameter 
1543596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1544596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1545596d7cfaSKOSAKI Motohiro 
1546596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
154739743889SChristoph Lameter 	if (err)
1548596d7cfaSKOSAKI Motohiro 		goto out;
1549596d7cfaSKOSAKI Motohiro 
1550596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1551596d7cfaSKOSAKI Motohiro 	if (err)
1552596d7cfaSKOSAKI Motohiro 		goto out;
155339743889SChristoph Lameter 
155439743889SChristoph Lameter 	/* Find the mm_struct */
155555cfaa3cSZeng Zhaoming 	rcu_read_lock();
1556228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
155739743889SChristoph Lameter 	if (!task) {
155855cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1559596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1560596d7cfaSKOSAKI Motohiro 		goto out;
156139743889SChristoph Lameter 	}
15623268c63eSChristoph Lameter 	get_task_struct(task);
156339743889SChristoph Lameter 
1564596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
156539743889SChristoph Lameter 
156639743889SChristoph Lameter 	/*
156731367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
156831367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
156939743889SChristoph Lameter 	 */
157031367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1571c69e8d9cSDavid Howells 		rcu_read_unlock();
157239743889SChristoph Lameter 		err = -EPERM;
15733268c63eSChristoph Lameter 		goto out_put;
157439743889SChristoph Lameter 	}
1575c69e8d9cSDavid Howells 	rcu_read_unlock();
157639743889SChristoph Lameter 
157739743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
157839743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1579596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
158039743889SChristoph Lameter 		err = -EPERM;
15813268c63eSChristoph Lameter 		goto out_put;
158239743889SChristoph Lameter 	}
158339743889SChristoph Lameter 
15840486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15850486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15860486a38bSYisheng Xie 	if (nodes_empty(*new))
15873268c63eSChristoph Lameter 		goto out_put;
15880486a38bSYisheng Xie 
158986c3a764SDavid Quigley 	err = security_task_movememory(task);
159086c3a764SDavid Quigley 	if (err)
15913268c63eSChristoph Lameter 		goto out_put;
159286c3a764SDavid Quigley 
15933268c63eSChristoph Lameter 	mm = get_task_mm(task);
15943268c63eSChristoph Lameter 	put_task_struct(task);
1595f2a9ef88SSasha Levin 
1596f2a9ef88SSasha Levin 	if (!mm) {
1597f2a9ef88SSasha Levin 		err = -EINVAL;
1598f2a9ef88SSasha Levin 		goto out;
1599f2a9ef88SSasha Levin 	}
1600f2a9ef88SSasha Levin 
1601596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
160274c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16033268c63eSChristoph Lameter 
160439743889SChristoph Lameter 	mmput(mm);
16053268c63eSChristoph Lameter out:
1606596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1607596d7cfaSKOSAKI Motohiro 
160839743889SChristoph Lameter 	return err;
16093268c63eSChristoph Lameter 
16103268c63eSChristoph Lameter out_put:
16113268c63eSChristoph Lameter 	put_task_struct(task);
16123268c63eSChristoph Lameter 	goto out;
16133268c63eSChristoph Lameter 
161439743889SChristoph Lameter }
161539743889SChristoph Lameter 
1616b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1617b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1618b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1619b6e9b0baSDominik Brodowski {
1620b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1621b6e9b0baSDominik Brodowski }
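
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * moving the calling process's pages from node 0 to node 1 with
 * migrate_pages(2) from libnuma's <numaif.h>.  As on the kernel side
 * above, a positive return is the number of pages left behind.
 */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long old_nodes = 1UL << 0;	/* migrate from node 0 */
	unsigned long new_nodes = 1UL << 1;	/* ... to node 1 */
	long left;

	/* a pid of 0 targets the calling process itself */
	left = migrate_pages(0, sizeof(old_nodes) * 8,
			     &old_nodes, &new_nodes);
	if (left < 0)
		perror("migrate_pages");
	else if (left > 0)
		printf("%ld pages could not be moved\n", left);

	return 0;
}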
1622b6e9b0baSDominik Brodowski 
162339743889SChristoph Lameter 
16248bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1625af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1626af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1627af03c4acSDominik Brodowski 				unsigned long maxnode,
1628af03c4acSDominik Brodowski 				unsigned long addr,
1629af03c4acSDominik Brodowski 				unsigned long flags)
16308bccd85fSChristoph Lameter {
1631dbcb0f19SAdrian Bunk 	int err;
16323f649ab7SKees Cook 	int pval;
16338bccd85fSChristoph Lameter 	nodemask_t nodes;
16348bccd85fSChristoph Lameter 
1635050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16368bccd85fSChristoph Lameter 		return -EINVAL;
16378bccd85fSChristoph Lameter 
1638*4605f057SWenchao Hao 	addr = untagged_addr(addr);
1639*4605f057SWenchao Hao 
16408bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16418bccd85fSChristoph Lameter 
16428bccd85fSChristoph Lameter 	if (err)
16438bccd85fSChristoph Lameter 		return err;
16448bccd85fSChristoph Lameter 
16458bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16468bccd85fSChristoph Lameter 		return -EFAULT;
16478bccd85fSChristoph Lameter 
16488bccd85fSChristoph Lameter 	if (nmask)
16498bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16508bccd85fSChristoph Lameter 
16518bccd85fSChristoph Lameter 	return err;
16528bccd85fSChristoph Lameter }
16538bccd85fSChristoph Lameter 
1654af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1655af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1656af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1657af03c4acSDominik Brodowski {
1658af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1659af03c4acSDominik Brodowski }
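
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * querying the policy in force at a given address with get_mempolicy(2)
 * from libnuma's <numaif.h>.  MPOL_F_ADDR selects the VMA policy for
 * the address rather than the process policy.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long nodemask = 0;
	void *p = malloc(4096);
	int mode;

	if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
			  p, MPOL_F_ADDR)) {
		perror("get_mempolicy");
		return 1;
	}
	printf("mode=%d nodemask=%#lx\n", mode, nodemask);
	free(p);
	return 0;
}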
1660af03c4acSDominik Brodowski 
16611da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
16621da177e4SLinus Torvalds 
1663c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1664c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1665c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1666c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
16671da177e4SLinus Torvalds {
16681da177e4SLinus Torvalds 	long err;
16691da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16701da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16711da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16721da177e4SLinus Torvalds 
1673050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
16741da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16751da177e4SLinus Torvalds 
16761da177e4SLinus Torvalds 	if (nmask)
16771da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16781da177e4SLinus Torvalds 
1679af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
16801da177e4SLinus Torvalds 
16811da177e4SLinus Torvalds 	if (!err && nmask) {
16822bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
16832bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
16842bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
16851da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
16861da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
16871da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
16881da177e4SLinus Torvalds 	}
16891da177e4SLinus Torvalds 
16901da177e4SLinus Torvalds 	return err;
16911da177e4SLinus Torvalds }
16921da177e4SLinus Torvalds 
1693c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1694c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16951da177e4SLinus Torvalds {
16961da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16971da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16981da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
17011da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
17021da177e4SLinus Torvalds 
17031da177e4SLinus Torvalds 	if (nmask) {
1704cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
17051da177e4SLinus Torvalds 			return -EFAULT;
1706cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1707cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1708cf01fb99SChris Salls 			return -EFAULT;
1709cf01fb99SChris Salls 	}
17101da177e4SLinus Torvalds 
1711af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
17121da177e4SLinus Torvalds }
17131da177e4SLinus Torvalds 
1714c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1715c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1716c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
17171da177e4SLinus Torvalds {
17181da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
17191da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1720dfcd3c0dSAndi Kleen 	nodemask_t bm;
17211da177e4SLinus Torvalds 
17221da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
17231da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 	if (nmask) {
1726cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
17271da177e4SLinus Torvalds 			return -EFAULT;
1728cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1729cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1730cf01fb99SChris Salls 			return -EFAULT;
1731cf01fb99SChris Salls 	}
17321da177e4SLinus Torvalds 
1733e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
17341da177e4SLinus Torvalds }
17351da177e4SLinus Torvalds 
1736b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1737b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1738b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1739b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1740b6e9b0baSDominik Brodowski {
1741b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1742b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1743b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1744b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1745b6e9b0baSDominik Brodowski 	unsigned long size;
1746b6e9b0baSDominik Brodowski 
1747b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1748b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1749b6e9b0baSDominik Brodowski 	if (old_nodes) {
1750b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1751b6e9b0baSDominik Brodowski 			return -EFAULT;
1752b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1753b6e9b0baSDominik Brodowski 		if (new_nodes)
1754b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1755b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1756b6e9b0baSDominik Brodowski 			return -EFAULT;
1757b6e9b0baSDominik Brodowski 	}
1758b6e9b0baSDominik Brodowski 	if (new_nodes) {
1759b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1760b6e9b0baSDominik Brodowski 			return -EFAULT;
1761b6e9b0baSDominik Brodowski 		if (new == NULL)
1762b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1763b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1764b6e9b0baSDominik Brodowski 			return -EFAULT;
1765b6e9b0baSDominik Brodowski 	}
1766b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1767b6e9b0baSDominik Brodowski }
1768b6e9b0baSDominik Brodowski 
1769b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
17701da177e4SLinus Torvalds 
177120ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
177220ca87f2SLi Xinhai {
177320ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
177420ca87f2SLi Xinhai 		return false;
177520ca87f2SLi Xinhai 
177620ca87f2SLi Xinhai 	/*
177720ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
177820ca87f2SLi Xinhai 	 * incurring periodic faults.
177920ca87f2SLi Xinhai 	 */
178020ca87f2SLi Xinhai 	if (vma_is_dax(vma))
178120ca87f2SLi Xinhai 		return false;
178220ca87f2SLi Xinhai 
178320ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
178420ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
178520ca87f2SLi Xinhai 		return false;
178620ca87f2SLi Xinhai 
178720ca87f2SLi Xinhai 	/*
178820ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
178920ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
179020ca87f2SLi Xinhai 	 * possible.
179120ca87f2SLi Xinhai 	 */
179220ca87f2SLi Xinhai 	if (vma->vm_file &&
179320ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
179420ca87f2SLi Xinhai 			< policy_zone)
179520ca87f2SLi Xinhai 		return false;
179620ca87f2SLi Xinhai 	return true;
179720ca87f2SLi Xinhai }
179820ca87f2SLi Xinhai 
179974d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
180074d2c3a0SOleg Nesterov 						unsigned long addr)
18011da177e4SLinus Torvalds {
18028d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
18031da177e4SLinus Torvalds 
18041da177e4SLinus Torvalds 	if (vma) {
1805480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
18068d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
180700442ad0SMel Gorman 		} else if (vma->vm_policy) {
18081da177e4SLinus Torvalds 			pol = vma->vm_policy;
180900442ad0SMel Gorman 
181000442ad0SMel Gorman 			/*
181100442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
181200442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
181300442ad0SMel Gorman 			 * count on these policies which will be dropped by
181400442ad0SMel Gorman 			 * mpol_cond_put() later
181500442ad0SMel Gorman 			 */
181600442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
181700442ad0SMel Gorman 				mpol_get(pol);
181800442ad0SMel Gorman 		}
18191da177e4SLinus Torvalds 	}
1820f15ca78eSOleg Nesterov 
182174d2c3a0SOleg Nesterov 	return pol;
182274d2c3a0SOleg Nesterov }
182374d2c3a0SOleg Nesterov 
182474d2c3a0SOleg Nesterov /*
1825dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
182674d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
182774d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
182874d2c3a0SOleg Nesterov  *
182974d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1830dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
183174d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
183274d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
183374d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
183474d2c3a0SOleg Nesterov  * extra reference for shared policies.
183574d2c3a0SOleg Nesterov  */
1836ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1837dd6eecb9SOleg Nesterov 						unsigned long addr)
183874d2c3a0SOleg Nesterov {
183974d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
184074d2c3a0SOleg Nesterov 
18418d90274bSOleg Nesterov 	if (!pol)
1842dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18438d90274bSOleg Nesterov 
18441da177e4SLinus Torvalds 	return pol;
18451da177e4SLinus Torvalds }
18461da177e4SLinus Torvalds 
18476b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1848fc314724SMel Gorman {
18496b6482bbSOleg Nesterov 	struct mempolicy *pol;
1850f15ca78eSOleg Nesterov 
1851fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1852fc314724SMel Gorman 		bool ret = false;
1853fc314724SMel Gorman 
1854fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1855fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1856fc314724SMel Gorman 			ret = true;
1857fc314724SMel Gorman 		mpol_cond_put(pol);
1858fc314724SMel Gorman 
1859fc314724SMel Gorman 		return ret;
18608d90274bSOleg Nesterov 	}
18618d90274bSOleg Nesterov 
1862fc314724SMel Gorman 	pol = vma->vm_policy;
18638d90274bSOleg Nesterov 	if (!pol)
18646b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1865fc314724SMel Gorman 
1866fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1867fc314724SMel Gorman }
1868fc314724SMel Gorman 
1869d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1870d3eb1570SLai Jiangshan {
1871d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1872d3eb1570SLai Jiangshan 
1873d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1874d3eb1570SLai Jiangshan 
1875d3eb1570SLai Jiangshan 	/*
1876d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1877d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1878d3eb1570SLai Jiangshan 	 *
1879d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1880d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1881d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1882d3eb1570SLai Jiangshan 	 */
1883d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1884d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1885d3eb1570SLai Jiangshan 
1886d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1887d3eb1570SLai Jiangshan }
1888d3eb1570SLai Jiangshan 
188952cd3b07SLee Schermerhorn /*
189052cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
189152cd3b07SLee Schermerhorn  * page allocation
189252cd3b07SLee Schermerhorn  */
18938ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
189419770b32SMel Gorman {
189519770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
189645c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1897d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
189819770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
189919770b32SMel Gorman 		return &policy->v.nodes;
190019770b32SMel Gorman 
190119770b32SMel Gorman 	return NULL;
190219770b32SMel Gorman }
190319770b32SMel Gorman 
190404ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
190504ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
19062f5f9486SAndi Kleen 								int nd)
19071da177e4SLinus Torvalds {
19086d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
19091da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
19106d840958SMichal Hocko 	else {
191119770b32SMel Gorman 		/*
19126d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
19136d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
19146d840958SMichal Hocko 		 * requested node and not break the policy.
191519770b32SMel Gorman 		 */
19166d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
19171da177e4SLinus Torvalds 	}
19186d840958SMichal Hocko 
191904ec6264SVlastimil Babka 	return nd;
19201da177e4SLinus Torvalds }
19211da177e4SLinus Torvalds 
19221da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
19231da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
19241da177e4SLinus Torvalds {
192545816682SVlastimil Babka 	unsigned next;
19261da177e4SLinus Torvalds 	struct task_struct *me = current;
19271da177e4SLinus Torvalds 
192845816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1929f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
193045816682SVlastimil Babka 		me->il_prev = next;
193145816682SVlastimil Babka 	return next;
19321da177e4SLinus Torvalds }
19331da177e4SLinus Torvalds 
1934dc85da15SChristoph Lameter /*
1935dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1936dc85da15SChristoph Lameter  * next slab entry.
1937dc85da15SChristoph Lameter  */
19382a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1939dc85da15SChristoph Lameter {
1940e7b691b0SAndi Kleen 	struct mempolicy *policy;
19412a389610SDavid Rientjes 	int node = numa_mem_id();
1942e7b691b0SAndi Kleen 
1943e7b691b0SAndi Kleen 	if (in_interrupt())
19442a389610SDavid Rientjes 		return node;
1945e7b691b0SAndi Kleen 
1946e7b691b0SAndi Kleen 	policy = current->mempolicy;
1947fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
19482a389610SDavid Rientjes 		return node;
1949765c4507SChristoph Lameter 
1950bea904d5SLee Schermerhorn 	switch (policy->mode) {
1951bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1952fc36b8d3SLee Schermerhorn 		/*
1953fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1954fc36b8d3SLee Schermerhorn 		 */
1955bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1956bea904d5SLee Schermerhorn 
1957dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1958dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1959dc85da15SChristoph Lameter 
1960dd1a239fSMel Gorman 	case MPOL_BIND: {
1961c33d6c06SMel Gorman 		struct zoneref *z;
1962c33d6c06SMel Gorman 
1963dc85da15SChristoph Lameter 		/*
1964dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1965dc85da15SChristoph Lameter 		 * first node.
1966dc85da15SChristoph Lameter 		 */
196719770b32SMel Gorman 		struct zonelist *zonelist;
196819770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1969c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1970c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1971c33d6c06SMel Gorman 							&policy->v.nodes);
1972c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1973dd1a239fSMel Gorman 	}
1974dc85da15SChristoph Lameter 
1975dc85da15SChristoph Lameter 	default:
1976bea904d5SLee Schermerhorn 		BUG();
1977dc85da15SChristoph Lameter 	}
1978dc85da15SChristoph Lameter }
1979dc85da15SChristoph Lameter 
1980fee83b3aSAndrew Morton /*
1981fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1982fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1983fee83b3aSAndrew Morton  * number of present nodes.
1984fee83b3aSAndrew Morton  */
198598c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19861da177e4SLinus Torvalds {
1987dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1988f5b087b5SDavid Rientjes 	unsigned target;
1989fee83b3aSAndrew Morton 	int i;
1990fee83b3aSAndrew Morton 	int nid;
19911da177e4SLinus Torvalds 
1992f5b087b5SDavid Rientjes 	if (!nnodes)
1993f5b087b5SDavid Rientjes 		return numa_node_id();
1994fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1995fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1996fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1997dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
19981da177e4SLinus Torvalds 	return nid;
19991da177e4SLinus Torvalds }
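
/*
 * Standalone sketch (not kernel code) of the static interleave rule in
 * offset_il_node() above: unit @n lands on the (n % nnodes)'th set
 * node.  A plain unsigned long stands in for pol->v.nodes and the node
 * numbers are assumptions for the example.
 */
#include <stdio.h>

static int nth_node(unsigned long mask, unsigned long n)
{
	int nnodes = __builtin_popcountl(mask);
	int nid = -1;

	if (!nnodes)
		return 0;	/* stand-in for numa_node_id() */
	n %= nnodes;
	do {			/* walk to the n'th set bit */
		nid += __builtin_ffsl(mask >> (nid + 1));
	} while (n--);
	return nid;
}

int main(void)
{
	unsigned long nodes = 0x2cUL;	/* nodes 2, 3 and 5 */
	unsigned long n;

	for (n = 0; n < 6; n++)		/* prints 2 3 5 2 3 5 */
		printf("offset %lu -> node %d\n", n, nth_node(nodes, n));
	return 0;
}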
20001da177e4SLinus Torvalds 
20015da7ca86SChristoph Lameter /* Determine a node number for interleave */
20025da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
20035da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
20045da7ca86SChristoph Lameter {
20055da7ca86SChristoph Lameter 	if (vma) {
20065da7ca86SChristoph Lameter 		unsigned long off;
20075da7ca86SChristoph Lameter 
20083b98b087SNishanth Aravamudan 		/*
20093b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
20103b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
20113b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
20123b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
20133b98b087SNishanth Aravamudan 		 * a useful offset.
20143b98b087SNishanth Aravamudan 		 */
20153b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
20163b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
20175da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
201898c70baaSLaurent Dufour 		return offset_il_node(pol, off);
20195da7ca86SChristoph Lameter 	} else
20205da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20215da7ca86SChristoph Lameter }
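
/*
 * Worked example of the offset computation in interleave_nid() (a
 * sketch with assumed numbers): a huge page VMA starting at 0x40000000
 * with vm_pgoff 0, faulted at 0x40400000 with a 2 MiB page shift,
 * yields interleave offset 2.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12, shift = 21;	/* 4 KiB, 2 MiB */
	unsigned long vm_pgoff = 0, vm_start = 0x40000000UL;
	unsigned long addr = 0x40400000UL;
	unsigned long off;

	off = vm_pgoff >> (shift - page_shift);	/* pgoff in huge pages */
	off += (addr - vm_start) >> shift;	/* huge pages into the VMA */

	printf("interleave offset = %lu\n", off);	/* prints 2 */
	return 0;
}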
20225da7ca86SChristoph Lameter 
202300ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2024480eccf9SLee Schermerhorn /*
202504ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
2026b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2027b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2028b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2029b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2030b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2031480eccf9SLee Schermerhorn  *
203204ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
203352cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
203452cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
203552cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
2036c0ff7453SMiao Xie  *
2037d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2038480eccf9SLee Schermerhorn  */
203904ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
204004ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20415da7ca86SChristoph Lameter {
204204ec6264SVlastimil Babka 	int nid;
20435da7ca86SChristoph Lameter 
2044dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
204519770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
20465da7ca86SChristoph Lameter 
204752cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
204804ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
204904ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
205052cd3b07SLee Schermerhorn 	} else {
205104ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
205252cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
205352cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
2054480eccf9SLee Schermerhorn 	}
205504ec6264SVlastimil Babka 	return nid;
20565da7ca86SChristoph Lameter }
205706808b08SLee Schermerhorn 
205806808b08SLee Schermerhorn /*
205906808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
206006808b08SLee Schermerhorn  *
206106808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
206206808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
206306808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
206406808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
206506808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
206606808b08SLee Schermerhorn  * of non-default mempolicy.
206706808b08SLee Schermerhorn  *
206806808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
206906808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
207006808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
207106808b08SLee Schermerhorn  *
207206808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
207306808b08SLee Schermerhorn  */
207406808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
207506808b08SLee Schermerhorn {
207606808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
207706808b08SLee Schermerhorn 	int nid;
207806808b08SLee Schermerhorn 
207906808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
208006808b08SLee Schermerhorn 		return false;
208106808b08SLee Schermerhorn 
2082c0ff7453SMiao Xie 	task_lock(current);
208306808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
208406808b08SLee Schermerhorn 	switch (mempolicy->mode) {
208506808b08SLee Schermerhorn 	case MPOL_PREFERRED:
208606808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
208706808b08SLee Schermerhorn 			nid = numa_node_id();
208806808b08SLee Schermerhorn 		else
208906808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
209006808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
209106808b08SLee Schermerhorn 		break;
209206808b08SLee Schermerhorn 
209306808b08SLee Schermerhorn 	case MPOL_BIND:
209406808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
209506808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
209606808b08SLee Schermerhorn 		break;
209706808b08SLee Schermerhorn 
209806808b08SLee Schermerhorn 	default:
209906808b08SLee Schermerhorn 		BUG();
210006808b08SLee Schermerhorn 	}
2101c0ff7453SMiao Xie 	task_unlock(current);
210206808b08SLee Schermerhorn 
210306808b08SLee Schermerhorn 	return true;
210406808b08SLee Schermerhorn }
210500ac59adSChen, Kenneth W #endif
21065da7ca86SChristoph Lameter 
21076f48d0ebSDavid Rientjes /*
21086f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
21096f48d0ebSDavid Rientjes  *
21106f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
21116f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
21126f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
21136f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
21146f48d0ebSDavid Rientjes  *
21156f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
21166f48d0ebSDavid Rientjes  */
21176f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
21186f48d0ebSDavid Rientjes 					const nodemask_t *mask)
21196f48d0ebSDavid Rientjes {
21206f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
21216f48d0ebSDavid Rientjes 	bool ret = true;
21226f48d0ebSDavid Rientjes 
21236f48d0ebSDavid Rientjes 	if (!mask)
21246f48d0ebSDavid Rientjes 		return ret;
21256f48d0ebSDavid Rientjes 	task_lock(tsk);
21266f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
21276f48d0ebSDavid Rientjes 	if (!mempolicy)
21286f48d0ebSDavid Rientjes 		goto out;
21296f48d0ebSDavid Rientjes 
21306f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
21316f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
21326f48d0ebSDavid Rientjes 		/*
21336f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
21346f48d0ebSDavid Rientjes 		 * to allocate from; they may fall back to other nodes when oom.
21356f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
21366f48d0ebSDavid Rientjes 		 * nodes in mask.
21376f48d0ebSDavid Rientjes 		 */
21386f48d0ebSDavid Rientjes 		break;
21396f48d0ebSDavid Rientjes 	case MPOL_BIND:
21406f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
21416f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
21426f48d0ebSDavid Rientjes 		break;
21436f48d0ebSDavid Rientjes 	default:
21446f48d0ebSDavid Rientjes 		BUG();
21456f48d0ebSDavid Rientjes 	}
21466f48d0ebSDavid Rientjes out:
21476f48d0ebSDavid Rientjes 	task_unlock(tsk);
21486f48d0ebSDavid Rientjes 	return ret;
21496f48d0ebSDavid Rientjes }
21506f48d0ebSDavid Rientjes 
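/*
 * Usage sketch (illustrative; example_oom_can_skip_task is a made-up
 * name): an OOM-killer style caller can use the intersection test above
 * to skip tasks whose policy excludes all of the nodes under pressure.
 */
static bool __maybe_unused example_oom_can_skip_task(struct task_struct *tsk,
						const nodemask_t *oom_nodes)
{
	/*
	 * false means tsk is bound/interleaved entirely outside the
	 * nodes in oom_nodes and cannot have allocated from them.
	 */
	return !mempolicy_nodemask_intersects(tsk, oom_nodes);
}
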
21511da177e4SLinus Torvalds /* Allocate a page under interleave policy.
21521da177e4SLinus Torvalds    Own path because it needs to do the NUMA_INTERLEAVE_HIT accounting. */
2153662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2154662f3a0bSAndi Kleen 					unsigned nid)
21551da177e4SLinus Torvalds {
21561da177e4SLinus Torvalds 	struct page *page;
21571da177e4SLinus Torvalds 
215804ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
21594518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if NUMA stats are disabled */
21604518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21614518085eSKemi Wang 		return page;
2162de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2163de55c8b2SAndrey Ryabinin 		preempt_disable();
2164de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2165de55c8b2SAndrey Ryabinin 		preempt_enable();
2166de55c8b2SAndrey Ryabinin 	}
21671da177e4SLinus Torvalds 	return page;
21681da177e4SLinus Torvalds }
21691da177e4SLinus Torvalds 
21701da177e4SLinus Torvalds /**
21710bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
21721da177e4SLinus Torvalds  *
21731da177e4SLinus Torvalds  * 	@gfp:
21741da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
21751da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
21761da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
21771da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
21781da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
21791da177e4SLinus Torvalds  *
21800bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
21811da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
21821da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2183be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
218419deb769SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
21851da177e4SLinus Torvalds  *
21861da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
21871da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
21883e4e28c5SMichel Lespinasse  *	When @vma is not NULL, the caller must read-lock the mmap_lock of the
21891da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2190be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2191be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
21921da177e4SLinus Torvalds  */
21931da177e4SLinus Torvalds struct page *
21940bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
219519deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
21961da177e4SLinus Torvalds {
2197cc9a6c87SMel Gorman 	struct mempolicy *pol;
2198c0ff7453SMiao Xie 	struct page *page;
219904ec6264SVlastimil Babka 	int preferred_nid;
2200be97a41bSVlastimil Babka 	nodemask_t *nmask;
22011da177e4SLinus Torvalds 
2202dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2203cc9a6c87SMel Gorman 
2204be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
22051da177e4SLinus Torvalds 		unsigned nid;
22065da7ca86SChristoph Lameter 
22078eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
220852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
22090bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2210be97a41bSVlastimil Babka 		goto out;
22111da177e4SLinus Torvalds 	}
22121da177e4SLinus Torvalds 
221319deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
221419deb769SDavid Rientjes 		int hpage_node = node;
221519deb769SDavid Rientjes 
221619deb769SDavid Rientjes 		/*
221719deb769SDavid Rientjes 		 * For hugepage allocation and non-interleave policy which
221819deb769SDavid Rientjes 		 * allows the current node (or other explicitly preferred
221919deb769SDavid Rientjes 		 * node) we only try to allocate from the current/preferred
222019deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
222119deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
222219deb769SDavid Rientjes 		 *
222319deb769SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
222419deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
222519deb769SDavid Rientjes 		 */
222619deb769SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
222719deb769SDavid Rientjes 			hpage_node = pol->v.preferred_node;
222819deb769SDavid Rientjes 
222919deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
223019deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
223119deb769SDavid Rientjes 			mpol_cond_put(pol);
2232cc638f32SVlastimil Babka 			/*
2233cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2234cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2235cc638f32SVlastimil Babka 			 */
223619deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2237cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
223876e654ccSDavid Rientjes 
223976e654ccSDavid Rientjes 			/*
224076e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always use
224176e654ccSDavid Rientjes 			 * synchronous compaction or the vma has been madvised
224276e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2243cc638f32SVlastimil Babka 			 * memory with both reclaim and compaction as well.
224476e654ccSDavid Rientjes 			 */
224576e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
224676e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2247cc638f32SVlastimil Babka 								gfp, order);
224876e654ccSDavid Rientjes 
224919deb769SDavid Rientjes 			goto out;
225019deb769SDavid Rientjes 		}
225119deb769SDavid Rientjes 	}
225219deb769SDavid Rientjes 
2253077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
225404ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
225504ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2256d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2257be97a41bSVlastimil Babka out:
2258077fcf11SAneesh Kumar K.V 	return page;
2259077fcf11SAneesh Kumar K.V }
226069262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
2261077fcf11SAneesh Kumar K.V 
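/*
 * Usage sketch (illustrative; example_alloc_user_page is a made-up
 * helper): a typical fault-path caller allocates a movable user page at
 * the faulting address, with the mmap_lock already held for reading.
 */
static struct page *__maybe_unused
example_alloc_user_page(struct vm_area_struct *vma, unsigned long addr)
{
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}
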
22621da177e4SLinus Torvalds /**
22631da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
22641da177e4SLinus Torvalds  *
22651da177e4SLinus Torvalds  *	@gfp:
22661da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
22671da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
22681da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
22691da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
22701da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
22711da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
22721da177e4SLinus Torvalds  *
22731da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
22741da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
22751da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
22761da177e4SLinus Torvalds  */
2277dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
22781da177e4SLinus Torvalds {
22798d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2280c0ff7453SMiao Xie 	struct page *page;
22811da177e4SLinus Torvalds 
22828d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22838d90274bSOleg Nesterov 		pol = get_task_policy(current);
228452cd3b07SLee Schermerhorn 
228552cd3b07SLee Schermerhorn 	/*
228652cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
228752cd3b07SLee Schermerhorn 	 * nor system default_policy
228852cd3b07SLee Schermerhorn 	 */
228945c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2290c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2291c0ff7453SMiao Xie 	else
2292c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
229304ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22945c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2295cc9a6c87SMel Gorman 
2296c0ff7453SMiao Xie 	return page;
22971da177e4SLinus Torvalds }
22981da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
22991da177e4SLinus Torvalds 
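/*
 * Usage sketch (illustrative; example_alloc_kernel_page is a made-up
 * helper): on NUMA kernels the generic alloc_pages() wrapper in
 * <linux/gfp.h> is believed to resolve to alloc_pages_current(), so a
 * plain allocation like this already honors the task's mempolicy.
 */
static struct page *__maybe_unused example_alloc_kernel_page(void)
{
	return alloc_pages_current(GFP_KERNEL, 0);	/* order-0 page */
}
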
2300ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2301ef0855d3SOleg Nesterov {
2302ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2303ef0855d3SOleg Nesterov 
2304ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2305ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2306ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2307ef0855d3SOleg Nesterov 	return 0;
2308ef0855d3SOleg Nesterov }
2309ef0855d3SOleg Nesterov 
23104225399aSPaul Jackson /*
2311846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23124225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
23134225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23144225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
23154225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2316708c1bbcSMiao Xie  *
2317708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2318708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
23194225399aSPaul Jackson  */
23204225399aSPaul Jackson 
2321846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2322846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
23231da177e4SLinus Torvalds {
23241da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
23251da177e4SLinus Torvalds 
23261da177e4SLinus Torvalds 	if (!new)
23271da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2328708c1bbcSMiao Xie 
2329708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2330708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2331708c1bbcSMiao Xie 		task_lock(current);
2332708c1bbcSMiao Xie 		*new = *old;
2333708c1bbcSMiao Xie 		task_unlock(current);
2334708c1bbcSMiao Xie 	} else
2335708c1bbcSMiao Xie 		*new = *old;
2336708c1bbcSMiao Xie 
23374225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
23384225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2339213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
23404225399aSPaul Jackson 	}
23411da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
23421da177e4SLinus Torvalds 	return new;
23431da177e4SLinus Torvalds }
23441da177e4SLinus Torvalds 
23451da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2346fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
23471da177e4SLinus Torvalds {
23481da177e4SLinus Torvalds 	if (!a || !b)
2349fcfb4dccSKOSAKI Motohiro 		return false;
235045c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2351fcfb4dccSKOSAKI Motohiro 		return false;
235219800502SBob Liu 	if (a->flags != b->flags)
2353fcfb4dccSKOSAKI Motohiro 		return false;
235419800502SBob Liu 	if (mpol_store_user_nodemask(a))
235519800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2356fcfb4dccSKOSAKI Motohiro 			return false;
235719800502SBob Liu 
235845c4745aSLee Schermerhorn 	switch (a->mode) {
235919770b32SMel Gorman 	case MPOL_BIND:
23601da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2361fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
23621da177e4SLinus Torvalds 	case MPOL_PREFERRED:
23638970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
23648970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
23658970a63eSYisheng Xie 			return true;
236675719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
23671da177e4SLinus Torvalds 	default:
23681da177e4SLinus Torvalds 		BUG();
2369fcfb4dccSKOSAKI Motohiro 		return false;
23701da177e4SLinus Torvalds 	}
23711da177e4SLinus Torvalds }
23721da177e4SLinus Torvalds 
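/*
 * Usage sketch (illustrative; assumes the mpol_equal() wrapper in
 * <linux/mempolicy.h> short-circuits pointer identity before falling
 * back to the slow path above).
 */
static bool __maybe_unused example_mpol_equal(struct mempolicy *a,
					      struct mempolicy *b)
{
	return a == b || __mpol_equal(a, b);	/* fast path, then slow path */
}
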
23731da177e4SLinus Torvalds /*
23741da177e4SLinus Torvalds  * Shared memory backing store policy support.
23751da177e4SLinus Torvalds  *
23761da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
23771da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
23784a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
23791da177e4SLinus Torvalds  * for any accesses to the tree.
23801da177e4SLinus Torvalds  */
23811da177e4SLinus Torvalds 
23824a8c7bb5SNathan Zimmer /*
23834a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
23844a8c7bb5SNathan Zimmer  * for reading or for writing.
23854a8c7bb5SNathan Zimmer  */
23861da177e4SLinus Torvalds static struct sp_node *
23871da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
23881da177e4SLinus Torvalds {
23891da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
23901da177e4SLinus Torvalds 
23911da177e4SLinus Torvalds 	while (n) {
23921da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
23931da177e4SLinus Torvalds 
23941da177e4SLinus Torvalds 		if (start >= p->end)
23951da177e4SLinus Torvalds 			n = n->rb_right;
23961da177e4SLinus Torvalds 		else if (end <= p->start)
23971da177e4SLinus Torvalds 			n = n->rb_left;
23981da177e4SLinus Torvalds 		else
23991da177e4SLinus Torvalds 			break;
24001da177e4SLinus Torvalds 	}
24011da177e4SLinus Torvalds 	if (!n)
24021da177e4SLinus Torvalds 		return NULL;
24031da177e4SLinus Torvalds 	for (;;) {
24041da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24051da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24061da177e4SLinus Torvalds 		if (!prev)
24071da177e4SLinus Torvalds 			break;
24081da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24091da177e4SLinus Torvalds 		if (w->end <= start)
24101da177e4SLinus Torvalds 			break;
24111da177e4SLinus Torvalds 		n = prev;
24121da177e4SLinus Torvalds 	}
24131da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24141da177e4SLinus Torvalds }
24151da177e4SLinus Torvalds 
24164a8c7bb5SNathan Zimmer /*
24174a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
24184a8c7bb5SNathan Zimmer  * writing.
24194a8c7bb5SNathan Zimmer  */
24201da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24211da177e4SLinus Torvalds {
24221da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
24231da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
24241da177e4SLinus Torvalds 	struct sp_node *nd;
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds 	while (*p) {
24271da177e4SLinus Torvalds 		parent = *p;
24281da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
24291da177e4SLinus Torvalds 		if (new->start < nd->start)
24301da177e4SLinus Torvalds 			p = &(*p)->rb_left;
24311da177e4SLinus Torvalds 		else if (new->end > nd->end)
24321da177e4SLinus Torvalds 			p = &(*p)->rb_right;
24331da177e4SLinus Torvalds 		else
24341da177e4SLinus Torvalds 			BUG();
24351da177e4SLinus Torvalds 	}
24361da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
24371da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2438140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
243945c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
24401da177e4SLinus Torvalds }
24411da177e4SLinus Torvalds 
24421da177e4SLinus Torvalds /* Find shared policy intersecting idx */
24431da177e4SLinus Torvalds struct mempolicy *
24441da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
24451da177e4SLinus Torvalds {
24461da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
24471da177e4SLinus Torvalds 	struct sp_node *sn;
24481da177e4SLinus Torvalds 
24491da177e4SLinus Torvalds 	if (!sp->root.rb_node)
24501da177e4SLinus Torvalds 		return NULL;
24514a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
24521da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
24531da177e4SLinus Torvalds 	if (sn) {
24541da177e4SLinus Torvalds 		mpol_get(sn->policy);
24551da177e4SLinus Torvalds 		pol = sn->policy;
24561da177e4SLinus Torvalds 	}
24574a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
24581da177e4SLinus Torvalds 	return pol;
24591da177e4SLinus Torvalds }
24601da177e4SLinus Torvalds 
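/*
 * Usage sketch (illustrative; example_use_shared_policy is a made-up
 * helper): a shmem-style consumer looks up the policy for a page offset
 * and must drop the reference the lookup took.
 */
static void __maybe_unused example_use_shared_policy(struct shared_policy *sp,
						     unsigned long idx)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);

	if (pol) {
		/* ... allocate using pol ... */
		mpol_put(pol);	/* drop the reference taken by the lookup */
	}
}
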
246163f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
246263f74ca2SKOSAKI Motohiro {
246363f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
246463f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
246563f74ca2SKOSAKI Motohiro }
246663f74ca2SKOSAKI Motohiro 
2467771fb4d8SLee Schermerhorn /**
2468771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2469771fb4d8SLee Schermerhorn  *
2470b46e14acSFabian Frederick  * @page: page to be checked
2471b46e14acSFabian Frederick  * @vma: vm area where page mapped
2472b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2473771fb4d8SLee Schermerhorn  *
2474771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
2475771fb4d8SLee Schermerhorn  * page's node id.
2476771fb4d8SLee Schermerhorn  *
2477771fb4d8SLee Schermerhorn  * Returns:
2478771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2479771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2480771fb4d8SLee Schermerhorn  *
2481771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2482771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2483771fb4d8SLee Schermerhorn  */
2484771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2485771fb4d8SLee Schermerhorn {
2486771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2487c33d6c06SMel Gorman 	struct zoneref *z;
2488771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2489771fb4d8SLee Schermerhorn 	unsigned long pgoff;
249090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
249190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
249298fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2493771fb4d8SLee Schermerhorn 	int ret = -1;
2494771fb4d8SLee Schermerhorn 
2495dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2496771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2497771fb4d8SLee Schermerhorn 		goto out;
2498771fb4d8SLee Schermerhorn 
2499771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2500771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2501771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2502771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
250398c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2504771fb4d8SLee Schermerhorn 		break;
2505771fb4d8SLee Schermerhorn 
2506771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2507771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2508771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2509771fb4d8SLee Schermerhorn 		else
2510771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2511771fb4d8SLee Schermerhorn 		break;
2512771fb4d8SLee Schermerhorn 
2513771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2514c33d6c06SMel Gorman 
2515771fb4d8SLee Schermerhorn 		/*
2516771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2517771fb4d8SLee Schermerhorn 		 * Use the current page's node if it is in the policy nodemask,
2518771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2519771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use current [!misplaced].
2520771fb4d8SLee Schermerhorn 		 */
2521771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2522771fb4d8SLee Schermerhorn 			goto out;
2523c33d6c06SMel Gorman 		z = first_zones_zonelist(
2524771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2525771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2526c33d6c06SMel Gorman 				&pol->v.nodes);
2527c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2528771fb4d8SLee Schermerhorn 		break;
2529771fb4d8SLee Schermerhorn 
2530771fb4d8SLee Schermerhorn 	default:
2531771fb4d8SLee Schermerhorn 		BUG();
2532771fb4d8SLee Schermerhorn 	}
25335606e387SMel Gorman 
25345606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2535e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
253690572890SPeter Zijlstra 		polnid = thisnid;
25375606e387SMel Gorman 
253810f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2539de1c9ce6SRik van Riel 			goto out;
2540de1c9ce6SRik van Riel 	}
2541e42c8ff2SMel Gorman 
2542771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2543771fb4d8SLee Schermerhorn 		ret = polnid;
2544771fb4d8SLee Schermerhorn out:
2545771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2546771fb4d8SLee Schermerhorn 
2547771fb4d8SLee Schermerhorn 	return ret;
2548771fb4d8SLee Schermerhorn }
2549771fb4d8SLee Schermerhorn 
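/*
 * Usage sketch (illustrative; example_numa_hint_fault is a made-up
 * helper, and the migration step is only mentioned as a comment): a NUMA
 * hinting fault handler compares the page's node against the policy and
 * migrates when a target node is returned; -1 means already well placed.
 */
static int __maybe_unused example_numa_hint_fault(struct page *page,
			struct vm_area_struct *vma, unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == -1)
		return 0;	/* page is already on an acceptable node */
	/* a real caller would migrate the page towards target_nid here */
	return target_nid;
}
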
2550c11600e4SDavid Rientjes /*
2551c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2552c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2553c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2554c11600e4SDavid Rientjes  * policy.
2555c11600e4SDavid Rientjes  */
2556c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2557c11600e4SDavid Rientjes {
2558c11600e4SDavid Rientjes 	struct mempolicy *pol;
2559c11600e4SDavid Rientjes 
2560c11600e4SDavid Rientjes 	task_lock(task);
2561c11600e4SDavid Rientjes 	pol = task->mempolicy;
2562c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2563c11600e4SDavid Rientjes 	task_unlock(task);
2564c11600e4SDavid Rientjes 	mpol_put(pol);
2565c11600e4SDavid Rientjes }
2566c11600e4SDavid Rientjes 
25671da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
25681da177e4SLinus Torvalds {
2569140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
25701da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
257163f74ca2SKOSAKI Motohiro 	sp_free(n);
25721da177e4SLinus Torvalds }
25731da177e4SLinus Torvalds 
257442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
257542288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
257642288fe3SMel Gorman {
257742288fe3SMel Gorman 	node->start = start;
257842288fe3SMel Gorman 	node->end = end;
257942288fe3SMel Gorman 	node->policy = pol;
258042288fe3SMel Gorman }
258142288fe3SMel Gorman 
2582dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2583dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
25841da177e4SLinus Torvalds {
2585869833f2SKOSAKI Motohiro 	struct sp_node *n;
2586869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
25871da177e4SLinus Torvalds 
2588869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
25891da177e4SLinus Torvalds 	if (!n)
25901da177e4SLinus Torvalds 		return NULL;
2591869833f2SKOSAKI Motohiro 
2592869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2593869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2594869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2595869833f2SKOSAKI Motohiro 		return NULL;
2596869833f2SKOSAKI Motohiro 	}
2597869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
259842288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2599869833f2SKOSAKI Motohiro 
26001da177e4SLinus Torvalds 	return n;
26011da177e4SLinus Torvalds }
26021da177e4SLinus Torvalds 
26031da177e4SLinus Torvalds /* Replace a policy range. */
26041da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26051da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26061da177e4SLinus Torvalds {
2607b22d127aSMel Gorman 	struct sp_node *n;
260842288fe3SMel Gorman 	struct sp_node *n_new = NULL;
260942288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2610b22d127aSMel Gorman 	int ret = 0;
26111da177e4SLinus Torvalds 
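/*
 * The walk below may find an old policy spanning the whole new range,
 * which must be split with a freshly allocated sp_node and mempolicy.
 * Allocation cannot happen under sp->lock, so in that case we jump to
 * alloc_new, allocate with the lock dropped, and restart the walk.
 */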
261242288fe3SMel Gorman restart:
26134a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26141da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
26151da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
26161da177e4SLinus Torvalds 	while (n && n->start < end) {
26171da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
26181da177e4SLinus Torvalds 		if (n->start >= start) {
26191da177e4SLinus Torvalds 			if (n->end <= end)
26201da177e4SLinus Torvalds 				sp_delete(sp, n);
26211da177e4SLinus Torvalds 			else
26221da177e4SLinus Torvalds 				n->start = end;
26231da177e4SLinus Torvalds 		} else {
26241da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
26251da177e4SLinus Torvalds 			if (n->end > end) {
262642288fe3SMel Gorman 				if (!n_new)
262742288fe3SMel Gorman 					goto alloc_new;
262842288fe3SMel Gorman 
262942288fe3SMel Gorman 				*mpol_new = *n->policy;
263042288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
26317880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
26321da177e4SLinus Torvalds 				n->end = start;
26335ca39575SHillf Danton 				sp_insert(sp, n_new);
263442288fe3SMel Gorman 				n_new = NULL;
263542288fe3SMel Gorman 				mpol_new = NULL;
26361da177e4SLinus Torvalds 				break;
26371da177e4SLinus Torvalds 			} else
26381da177e4SLinus Torvalds 				n->end = start;
26391da177e4SLinus Torvalds 		}
26401da177e4SLinus Torvalds 		if (!next)
26411da177e4SLinus Torvalds 			break;
26421da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26431da177e4SLinus Torvalds 	}
26441da177e4SLinus Torvalds 	if (new)
26451da177e4SLinus Torvalds 		sp_insert(sp, new);
26464a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
264742288fe3SMel Gorman 	ret = 0;
264842288fe3SMel Gorman 
264942288fe3SMel Gorman err_out:
265042288fe3SMel Gorman 	if (mpol_new)
265142288fe3SMel Gorman 		mpol_put(mpol_new);
265242288fe3SMel Gorman 	if (n_new)
265342288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
265442288fe3SMel Gorman 
2655b22d127aSMel Gorman 	return ret;
265642288fe3SMel Gorman 
265742288fe3SMel Gorman alloc_new:
26584a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
265942288fe3SMel Gorman 	ret = -ENOMEM;
266042288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
266142288fe3SMel Gorman 	if (!n_new)
266242288fe3SMel Gorman 		goto err_out;
266342288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
266442288fe3SMel Gorman 	if (!mpol_new)
266542288fe3SMel Gorman 		goto err_out;
266642288fe3SMel Gorman 	goto restart;
26671da177e4SLinus Torvalds }
26681da177e4SLinus Torvalds 
266971fe804bSLee Schermerhorn /**
267071fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
267171fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
267271fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
267371fe804bSLee Schermerhorn  *
267471fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
267571fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
267671fe804bSLee Schermerhorn  * This must be released on exit.
26774bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
267871fe804bSLee Schermerhorn  */
267971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
26807339ff83SRobin Holt {
268158568d2aSMiao Xie 	int ret;
268258568d2aSMiao Xie 
268371fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
26844a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
26857339ff83SRobin Holt 
268671fe804bSLee Schermerhorn 	if (mpol) {
26877339ff83SRobin Holt 		struct vm_area_struct pvma;
268871fe804bSLee Schermerhorn 		struct mempolicy *new;
26894bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
26907339ff83SRobin Holt 
26914bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
26925c0c1654SLee Schermerhorn 			goto put_mpol;
269371fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
269471fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
269515d77835SLee Schermerhorn 		if (IS_ERR(new))
26960cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
269758568d2aSMiao Xie 
269858568d2aSMiao Xie 		task_lock(current);
26994bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
270058568d2aSMiao Xie 		task_unlock(current);
270115d77835SLee Schermerhorn 		if (ret)
27025c0c1654SLee Schermerhorn 			goto put_new;
270371fe804bSLee Schermerhorn 
270471fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27052c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
270671fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
270771fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
270815d77835SLee Schermerhorn 
27095c0c1654SLee Schermerhorn put_new:
271071fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27110cae3457SDan Carpenter free_scratch:
27124bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27135c0c1654SLee Schermerhorn put_mpol:
27145c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
27157339ff83SRobin Holt 	}
27167339ff83SRobin Holt }
27177339ff83SRobin Holt 
27181da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
27191da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
27201da177e4SLinus Torvalds {
27211da177e4SLinus Torvalds 	int err;
27221da177e4SLinus Torvalds 	struct sp_node *new = NULL;
27231da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
27241da177e4SLinus Torvalds 
2725028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
27261da177e4SLinus Torvalds 		 vma->vm_pgoff,
272745c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2728028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
272900ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
27301da177e4SLinus Torvalds 
27311da177e4SLinus Torvalds 	if (npol) {
27321da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
27331da177e4SLinus Torvalds 		if (!new)
27341da177e4SLinus Torvalds 			return -ENOMEM;
27351da177e4SLinus Torvalds 	}
27361da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
27371da177e4SLinus Torvalds 	if (err && new)
273863f74ca2SKOSAKI Motohiro 		sp_free(new);
27391da177e4SLinus Torvalds 	return err;
27401da177e4SLinus Torvalds }
27411da177e4SLinus Torvalds 
27421da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
27431da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
27441da177e4SLinus Torvalds {
27451da177e4SLinus Torvalds 	struct sp_node *n;
27461da177e4SLinus Torvalds 	struct rb_node *next;
27471da177e4SLinus Torvalds 
27481da177e4SLinus Torvalds 	if (!p->root.rb_node)
27491da177e4SLinus Torvalds 		return;
27504a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
27511da177e4SLinus Torvalds 	next = rb_first(&p->root);
27521da177e4SLinus Torvalds 	while (next) {
27531da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27541da177e4SLinus Torvalds 		next = rb_next(&n->nd);
275563f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
27561da177e4SLinus Torvalds 	}
27574a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
27581da177e4SLinus Torvalds }
27591da177e4SLinus Torvalds 
27601a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2761c297663cSMel Gorman static int __initdata numabalancing_override;
27621a687c2eSMel Gorman 
27631a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
27641a687c2eSMel Gorman {
27651a687c2eSMel Gorman 	bool numabalancing_default = false;
27661a687c2eSMel Gorman 
27671a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
27681a687c2eSMel Gorman 		numabalancing_default = true;
27691a687c2eSMel Gorman 
2770c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2771c297663cSMel Gorman 	if (numabalancing_override)
2772c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2773c297663cSMel Gorman 
2774b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2775756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2776c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
27771a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
27781a687c2eSMel Gorman 	}
27791a687c2eSMel Gorman }
27801a687c2eSMel Gorman 
27811a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
27821a687c2eSMel Gorman {
27831a687c2eSMel Gorman 	int ret = 0;
27841a687c2eSMel Gorman 	if (!str)
27851a687c2eSMel Gorman 		goto out;
27861a687c2eSMel Gorman 
27871a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2788c297663cSMel Gorman 		numabalancing_override = 1;
27891a687c2eSMel Gorman 		ret = 1;
27901a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2791c297663cSMel Gorman 		numabalancing_override = -1;
27921a687c2eSMel Gorman 		ret = 1;
27931a687c2eSMel Gorman 	}
27941a687c2eSMel Gorman out:
27951a687c2eSMel Gorman 	if (!ret)
27964a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
27971a687c2eSMel Gorman 
27981a687c2eSMel Gorman 	return ret;
27991a687c2eSMel Gorman }
28001a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
28011a687c2eSMel Gorman #else
28021a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28031a687c2eSMel Gorman {
28041a687c2eSMel Gorman }
28051a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28061a687c2eSMel Gorman 
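/*
 * Illustrative note: via the __setup() hook above, automatic NUMA
 * balancing can be forced on or off from the kernel command line:
 *
 *	numa_balancing=enable
 *	numa_balancing=disable
 *
 * Anything else trips the "Unable to parse numa_balancing=" warning, and
 * with no option at all check_numabalancing_enable() applies the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default on multi-node machines.
 */
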
28071da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28081da177e4SLinus Torvalds void __init numa_policy_init(void)
28091da177e4SLinus Torvalds {
2810b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2811b71636e2SPaul Mundt 	unsigned long largest = 0;
2812b71636e2SPaul Mundt 	int nid, prefer = 0;
2813b71636e2SPaul Mundt 
28141da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
28151da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
281620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
28171da177e4SLinus Torvalds 
28181da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
28191da177e4SLinus Torvalds 				     sizeof(struct sp_node),
282020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
28211da177e4SLinus Torvalds 
28225606e387SMel Gorman 	for_each_node(nid) {
28235606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
28245606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
28255606e387SMel Gorman 			.mode = MPOL_PREFERRED,
28265606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
28275606e387SMel Gorman 			.v = { .preferred_node = nid, },
28285606e387SMel Gorman 		};
28295606e387SMel Gorman 	}
28305606e387SMel Gorman 
2831b71636e2SPaul Mundt 	/*
2832b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2833b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2834b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2835b71636e2SPaul Mundt 	 */
2836b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
283701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2838b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
28391da177e4SLinus Torvalds 
2840b71636e2SPaul Mundt 		/* Preserve the largest node */
2841b71636e2SPaul Mundt 		if (largest < total_pages) {
2842b71636e2SPaul Mundt 			largest = total_pages;
2843b71636e2SPaul Mundt 			prefer = nid;
2844b71636e2SPaul Mundt 		}
2845b71636e2SPaul Mundt 
2846b71636e2SPaul Mundt 		/* Interleave this node? */
2847b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2848b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2849b71636e2SPaul Mundt 	}
2850b71636e2SPaul Mundt 
2851b71636e2SPaul Mundt 	/* All too small, use the largest */
2852b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2853b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2854b71636e2SPaul Mundt 
2855028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2856b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
28571a687c2eSMel Gorman 
28581a687c2eSMel Gorman 	check_numabalancing_enable();
28591da177e4SLinus Torvalds }
28601da177e4SLinus Torvalds 
28618bccd85fSChristoph Lameter /* Reset policy of current process to default */
28621da177e4SLinus Torvalds void numa_default_policy(void)
28631da177e4SLinus Torvalds {
2864028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
28651da177e4SLinus Torvalds }
286668860ec1SPaul Jackson 
28674225399aSPaul Jackson /*
2868095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2869095f1fc4SLee Schermerhorn  */
2870095f1fc4SLee Schermerhorn 
2871095f1fc4SLee Schermerhorn /*
2872f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
28731a75a6c8SChristoph Lameter  */
2874345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2875345ace9cSLee Schermerhorn {
2876345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2877345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2878345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2879345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2880d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2881345ace9cSLee Schermerhorn };
28821a75a6c8SChristoph Lameter 
2883095f1fc4SLee Schermerhorn 
2884095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2885095f1fc4SLee Schermerhorn /**
2886f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2887095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
288871fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2889095f1fc4SLee Schermerhorn  *
2890095f1fc4SLee Schermerhorn  * Format of input:
2891095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2892095f1fc4SLee Schermerhorn  *
289371fe804bSLee Schermerhorn  * On success, returns 0, else 1
2894095f1fc4SLee Schermerhorn  */
2895a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2896095f1fc4SLee Schermerhorn {
289771fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2898f2a07f40SHugh Dickins 	unsigned short mode_flags;
289971fe804bSLee Schermerhorn 	nodemask_t nodes;
2900095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2901095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2902dedf2c73Szhong jiang 	int err = 1, mode;
2903095f1fc4SLee Schermerhorn 
2904c7a91bc7SDan Carpenter 	if (flags)
2905c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2906c7a91bc7SDan Carpenter 
2907095f1fc4SLee Schermerhorn 	if (nodelist) {
2908095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2909095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
291071fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2911095f1fc4SLee Schermerhorn 			goto out;
291201f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2913095f1fc4SLee Schermerhorn 			goto out;
291471fe804bSLee Schermerhorn 	} else
291571fe804bSLee Schermerhorn 		nodes_clear(nodes);
291671fe804bSLee Schermerhorn 
2917dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2918dedf2c73Szhong jiang 	if (mode < 0)
2919095f1fc4SLee Schermerhorn 		goto out;
2920095f1fc4SLee Schermerhorn 
292171fe804bSLee Schermerhorn 	switch (mode) {
2922095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
292371fe804bSLee Schermerhorn 		/*
2924aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only; later we use
2925aa9f7d51SRandy Dunlap 		 * first_node(nodes) to grab the single node, so here the
2926aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
292771fe804bSLee Schermerhorn 		 */
2928095f1fc4SLee Schermerhorn 		if (nodelist) {
2929095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2930095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2931095f1fc4SLee Schermerhorn 				rest++;
2932926f2ae0SKOSAKI Motohiro 			if (*rest)
2933926f2ae0SKOSAKI Motohiro 				goto out;
2934aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
2935aa9f7d51SRandy Dunlap 				goto out;
2936095f1fc4SLee Schermerhorn 		}
2937095f1fc4SLee Schermerhorn 		break;
2938095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2939095f1fc4SLee Schermerhorn 		/*
2940095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2941095f1fc4SLee Schermerhorn 		 */
2942095f1fc4SLee Schermerhorn 		if (!nodelist)
294301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
29443f226aa1SLee Schermerhorn 		break;
294571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
29463f226aa1SLee Schermerhorn 		/*
294771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
29483f226aa1SLee Schermerhorn 		 */
294971fe804bSLee Schermerhorn 		if (nodelist)
29503f226aa1SLee Schermerhorn 			goto out;
295171fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
29523f226aa1SLee Schermerhorn 		break;
2953413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2954413b43deSRavikiran G Thirumalai 		/*
2955413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2956413b43deSRavikiran G Thirumalai 		 */
2957413b43deSRavikiran G Thirumalai 		if (!nodelist)
2958413b43deSRavikiran G Thirumalai 			err = 0;
2959413b43deSRavikiran G Thirumalai 		goto out;
2960d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
296171fe804bSLee Schermerhorn 		/*
2962d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
296371fe804bSLee Schermerhorn 		 */
2964d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2965d69b2e63SKOSAKI Motohiro 			goto out;
2966095f1fc4SLee Schermerhorn 	}
2967095f1fc4SLee Schermerhorn 
296871fe804bSLee Schermerhorn 	mode_flags = 0;
2969095f1fc4SLee Schermerhorn 	if (flags) {
2970095f1fc4SLee Schermerhorn 		/*
2971095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2972095f1fc4SLee Schermerhorn 		 * mode flags.
2973095f1fc4SLee Schermerhorn 		 */
2974095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
297571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2976095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
297771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2978095f1fc4SLee Schermerhorn 		else
2979926f2ae0SKOSAKI Motohiro 			goto out;
2980095f1fc4SLee Schermerhorn 	}
298171fe804bSLee Schermerhorn 
298271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
298371fe804bSLee Schermerhorn 	if (IS_ERR(new))
2984926f2ae0SKOSAKI Motohiro 		goto out;
2985926f2ae0SKOSAKI Motohiro 
2986f2a07f40SHugh Dickins 	/*
2987f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2988f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2989f2a07f40SHugh Dickins 	 */
2990f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2991f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2992f2a07f40SHugh Dickins 	else if (nodelist)
2993f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2994f2a07f40SHugh Dickins 	else
2995f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2996f2a07f40SHugh Dickins 
2997f2a07f40SHugh Dickins 	/*
2998f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2999f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3000f2a07f40SHugh Dickins 	 */
3001e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3002f2a07f40SHugh Dickins 
3003926f2ae0SKOSAKI Motohiro 	err = 0;
300471fe804bSLee Schermerhorn 
3005095f1fc4SLee Schermerhorn out:
3006095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3007095f1fc4SLee Schermerhorn 	if (nodelist)
3008095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3009095f1fc4SLee Schermerhorn 	if (flags)
3010095f1fc4SLee Schermerhorn 		*--flags = '=';
301171fe804bSLee Schermerhorn 	if (!err)
301271fe804bSLee Schermerhorn 		*mpol = new;
3013095f1fc4SLee Schermerhorn 	return err;
3014095f1fc4SLee Schermerhorn }
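
/*
 * Illustrative examples of the accepted format (derived from the parser
 * above; shown for reference, not exhaustive):
 *
 *	"bind:0-3"		MPOL_BIND across nodes 0-3
 *	"interleave=static:0,2"	MPOL_INTERLEAVE with MPOL_F_STATIC_NODES
 *	"prefer:1"		MPOL_PREFERRED with preferred_node == 1
 *	"local"			MPOL_PREFERRED with MPOL_F_LOCAL set
 *	"default"		MPOL_DEFAULT; a nodelist is rejected
 */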
3015095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3016095f1fc4SLee Schermerhorn 
301771fe804bSLee Schermerhorn /**
301871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
301971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
302071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
302171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
302271fe804bSLee Schermerhorn  *
3023948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3024948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3025948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
30261a75a6c8SChristoph Lameter  */
3027948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
30281a75a6c8SChristoph Lameter {
30291a75a6c8SChristoph Lameter 	char *p = buffer;
3030948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3031948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3032948927eeSDavid Rientjes 	unsigned short flags = 0;
30331a75a6c8SChristoph Lameter 
30348790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3035bea904d5SLee Schermerhorn 		mode = pol->mode;
3036948927eeSDavid Rientjes 		flags = pol->flags;
3037948927eeSDavid Rientjes 	}
3038bea904d5SLee Schermerhorn 
30391a75a6c8SChristoph Lameter 	switch (mode) {
30401a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
30411a75a6c8SChristoph Lameter 		break;
30421a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3043fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
3044f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
304553f2556bSLee Schermerhorn 		else
3046fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
30471a75a6c8SChristoph Lameter 		break;
30481a75a6c8SChristoph Lameter 	case MPOL_BIND:
30491a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
30501a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
30511a75a6c8SChristoph Lameter 		break;
30521a75a6c8SChristoph Lameter 	default:
3053948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3054948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3055948927eeSDavid Rientjes 		return;
30561a75a6c8SChristoph Lameter 	}
30571a75a6c8SChristoph Lameter 
3058b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
30591a75a6c8SChristoph Lameter 
3060fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3061948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3062f5b087b5SDavid Rientjes 
30632291990aSLee Schermerhorn 		/*
30642291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
30652291990aSLee Schermerhorn 		 */
3066f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
30672291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
30682291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
30692291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3070f5b087b5SDavid Rientjes 	}
3071f5b087b5SDavid Rientjes 
30729e763e0fSTejun Heo 	if (!nodes_empty(nodes))
30739e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
30749e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
30751a75a6c8SChristoph Lameter }
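
/*
 * Usage sketch (illustrative; example_show_mempolicy is a made-up
 * helper): a /proc style consumer formats a policy into a small stack
 * buffer; 32+ bytes covers the longest mode and flag plus a few nodes.
 */
static void __maybe_unused example_show_mempolicy(struct mempolicy *pol)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), pol);
	pr_debug("mempolicy: %s\n", buf);
}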
3076