xref: /openbmc/linux/mm/mempolicy.c (revision 4fcbe96e4d0bc4abe22ee10573ff663b9ebbcf17)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA-based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object, or the offset into the
191da177e4SLinus Torvalds  *                mapping for anonymous memory. For the process policy a
201da177e4SLinus Torvalds  *                per-process counter is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                and proceeding to the last. It would be better if bind truly
268bccd85fSChristoph Lameter  *                restricted the allocation to the specified memory nodes.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When the process policy
451da177e4SLinus Torvalds  * is used, it is not remembered across swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use the default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * The same applies to GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has the memory mapped.
541da177e4SLinus Torvalds  */
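
/*
 * Illustrative userspace sketch (not part of this file): the policies above
 * are selected through the set_mempolicy(2) and mbind(2) syscalls.  Assuming
 * nodes 0 and 1 exist, a process could interleave its future anonymous
 * allocations and bind one existing mapping to node 0:
 *
 *	unsigned long interleave_mask = (1UL << 0) | (1UL << 1);
 *	unsigned long bind_mask = 1UL << 0;
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &interleave_mask,
 *		      8 * sizeof(interleave_mask));
 *	mbind(addr, length, MPOL_BIND, &bind_mask, 8 * sizeof(bind_mask),
 *	      MPOL_MF_STRICT);
 *
 * "addr" and "length" are placeholders for an existing mapping.
 */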
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel does not always handle that gracefully.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
71a520110eSChristoph Hellwig #include <linux/pagewalk.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
8831367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
130b2ca916cSDan Williams /**
131b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
132b2ca916cSDan Williams  * @node: Node id to start the search
133b2ca916cSDan Williams  *
134b2ca916cSDan Williams  * Look up the closest online node by distance if @node is not online.
135b2ca916cSDan Williams  */
136b2ca916cSDan Williams int numa_map_to_online_node(int node)
137b2ca916cSDan Williams {
138*4fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
139b2ca916cSDan Williams 
140*4fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
141*4fcbe96eSDan Williams 		return node;
142b2ca916cSDan Williams 
143b2ca916cSDan Williams 	min_node = node;
144b2ca916cSDan Williams 	for_each_online_node(n) {
145b2ca916cSDan Williams 		dist = node_distance(node, n);
146b2ca916cSDan Williams 		if (dist < min_dist) {
147b2ca916cSDan Williams 			min_dist = dist;
148b2ca916cSDan Williams 			min_node = n;
149b2ca916cSDan Williams 		}
150b2ca916cSDan Williams 	}
151b2ca916cSDan Williams 
152b2ca916cSDan Williams 	return min_node;
153b2ca916cSDan Williams }
154b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
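
/*
 * Illustrative usage sketch (assumption: "dev" is some struct device handed
 * to a driver): firmware may report a node that is offline or memoryless, so
 * a caller can normalize it before allocating near the device:
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */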
155b2ca916cSDan Williams 
15674d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1575606e387SMel Gorman {
1585606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
159f15ca78eSOleg Nesterov 	int node;
1605606e387SMel Gorman 
161f15ca78eSOleg Nesterov 	if (pol)
162f15ca78eSOleg Nesterov 		return pol;
1635606e387SMel Gorman 
164f15ca78eSOleg Nesterov 	node = numa_node_id();
1651da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1661da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
167f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
168f15ca78eSOleg Nesterov 		if (pol->mode)
169f15ca78eSOleg Nesterov 			return pol;
1701da6f0e1SJianguo Wu 	}
1715606e387SMel Gorman 
172f15ca78eSOleg Nesterov 	return &default_policy;
1735606e387SMel Gorman }
1745606e387SMel Gorman 
17537012946SDavid Rientjes static const struct mempolicy_operations {
17637012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
177213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
17837012946SDavid Rientjes } mpol_ops[MPOL_MAX];
17937012946SDavid Rientjes 
180f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
181f5b087b5SDavid Rientjes {
1826d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1834c50bc01SDavid Rientjes }
1844c50bc01SDavid Rientjes 
1854c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1864c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1874c50bc01SDavid Rientjes {
1884c50bc01SDavid Rientjes 	nodemask_t tmp;
1894c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1904c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
191f5b087b5SDavid Rientjes }
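
/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {4,5,6},
 * nodes_fold() folds @orig modulo nodes_weight(*rel) = 3, leaving {0,2},
 * and nodes_onto() then maps bit 0 onto the first set bit of @rel (node 4)
 * and bit 2 onto the third set bit (node 6), so *ret = {4,6}.
 */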
192f5b087b5SDavid Rientjes 
19337012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
19437012946SDavid Rientjes {
19537012946SDavid Rientjes 	if (nodes_empty(*nodes))
19637012946SDavid Rientjes 		return -EINVAL;
19737012946SDavid Rientjes 	pol->v.nodes = *nodes;
19837012946SDavid Rientjes 	return 0;
19937012946SDavid Rientjes }
20037012946SDavid Rientjes 
20137012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20237012946SDavid Rientjes {
20337012946SDavid Rientjes 	if (!nodes)
204fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
20537012946SDavid Rientjes 	else if (nodes_empty(*nodes))
20637012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
20737012946SDavid Rientjes 	else
20837012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
20937012946SDavid Rientjes 	return 0;
21037012946SDavid Rientjes }
21137012946SDavid Rientjes 
21237012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
21337012946SDavid Rientjes {
214859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
21537012946SDavid Rientjes 		return -EINVAL;
21637012946SDavid Rientjes 	pol->v.nodes = *nodes;
21737012946SDavid Rientjes 	return 0;
21837012946SDavid Rientjes }
21937012946SDavid Rientjes 
22058568d2aSMiao Xie /*
22158568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
22258568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
22358568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But, we need to
22458568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
22558568d2aSMiao Xie  *
22658568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
22758568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_sem for write.
22858568d2aSMiao Xie  */
2294bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2304bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
23158568d2aSMiao Xie {
23258568d2aSMiao Xie 	int ret;
23358568d2aSMiao Xie 
23458568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
23558568d2aSMiao Xie 	if (pol == NULL)
23658568d2aSMiao Xie 		return 0;
23701f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2384bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23901f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
24058568d2aSMiao Xie 
24158568d2aSMiao Xie 	VM_BUG_ON(!nodes);
24258568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
24358568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
24458568d2aSMiao Xie 	else {
24558568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2464bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24758568d2aSMiao Xie 		else
2484bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2494bfc4495SKAMEZAWA Hiroyuki 
25058568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
25158568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
25258568d2aSMiao Xie 		else
25358568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
25458568d2aSMiao Xie 						cpuset_current_mems_allowed;
25558568d2aSMiao Xie 	}
25658568d2aSMiao Xie 
2574bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2584bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2594bfc4495SKAMEZAWA Hiroyuki 	else
2604bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
26158568d2aSMiao Xie 	return ret;
26258568d2aSMiao Xie }
26358568d2aSMiao Xie 
26458568d2aSMiao Xie /*
26558568d2aSMiao Xie  * This function just creates a new policy, does some basic checks and simple
26658568d2aSMiao Xie  * initialization. The caller must invoke mpol_set_nodemask() to set the nodes.
26758568d2aSMiao Xie  */
268028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
269028fec41SDavid Rientjes 				  nodemask_t *nodes)
2701da177e4SLinus Torvalds {
2711da177e4SLinus Torvalds 	struct mempolicy *policy;
2721da177e4SLinus Torvalds 
273028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
27400ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
275140d5a49SPaul Mundt 
2763e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2773e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27837012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
279d3a71033SLee Schermerhorn 		return NULL;
28037012946SDavid Rientjes 	}
2813e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2823e1f0645SDavid Rientjes 
2833e1f0645SDavid Rientjes 	/*
2843e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2853e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2863e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2873e1f0645SDavid Rientjes 	 */
2883e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2893e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2903e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2913e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2923e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2933e1f0645SDavid Rientjes 		}
294479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2958d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2968d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2978d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
298479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
299479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
3003e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
3013e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
3021da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
3031da177e4SLinus Torvalds 	if (!policy)
3041da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
3051da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30645c4745aSLee Schermerhorn 	policy->mode = mode;
30737012946SDavid Rientjes 	policy->flags = flags;
3083e1f0645SDavid Rientjes 
30937012946SDavid Rientjes 	return policy;
31037012946SDavid Rientjes }
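
/*
 * Typical calling sequence (a sketch of what do_set_mempolicy() below does):
 * mpol_new() only allocates and validates, the nodemask is applied separately
 * while holding task_lock():
 *
 *	NODEMASK_SCRATCH(scratch);
 *
 *	new = mpol_new(mode, flags, nodes);
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		ret = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *	}
 *	NODEMASK_SCRATCH_FREE(scratch);
 */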
31137012946SDavid Rientjes 
31252cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
31352cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
31452cd3b07SLee Schermerhorn {
31552cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31652cd3b07SLee Schermerhorn 		return;
31752cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31852cd3b07SLee Schermerhorn }
31952cd3b07SLee Schermerhorn 
320213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
32137012946SDavid Rientjes {
32237012946SDavid Rientjes }
32337012946SDavid Rientjes 
324213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3251d0d2680SDavid Rientjes {
3261d0d2680SDavid Rientjes 	nodemask_t tmp;
3271d0d2680SDavid Rientjes 
32837012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32937012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
33037012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
33137012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3321d0d2680SDavid Rientjes 	else {
333213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
334213980c0SVlastimil Babka 								*nodes);
33529b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3361d0d2680SDavid Rientjes 	}
33737012946SDavid Rientjes 
338708c1bbcSMiao Xie 	if (nodes_empty(tmp))
339708c1bbcSMiao Xie 		tmp = *nodes;
340708c1bbcSMiao Xie 
3411d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
34237012946SDavid Rientjes }
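
/*
 * Worked example (illustrative): a policy built with nodes {0,1} while the
 * cpuset allowed {0,1,2,3}, rebound to a new mask {2,3}:
 *  - with no mode flag, nodes_remap() maps the first and second allowed
 *    nodes onto the first and second new nodes, so v.nodes becomes {2,3};
 *  - with MPOL_F_STATIC_NODES, {0,1} & {2,3} is empty, so the empty-mask
 *    fallback above uses the whole new mask {2,3} instead.
 */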
34337012946SDavid Rientjes 
34437012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
345213980c0SVlastimil Babka 						const nodemask_t *nodes)
34637012946SDavid Rientjes {
34737012946SDavid Rientjes 	nodemask_t tmp;
34837012946SDavid Rientjes 
34937012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3501d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3511d0d2680SDavid Rientjes 
352fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3531d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
354fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
355fc36b8d3SLee Schermerhorn 		} else
356fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
35737012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
35837012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3591d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
360fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3611d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
36237012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
36337012946SDavid Rientjes 						   *nodes);
36437012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3651d0d2680SDavid Rientjes 	}
3661d0d2680SDavid Rientjes }
36737012946SDavid Rientjes 
368708c1bbcSMiao Xie /*
369708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
370708c1bbcSMiao Xie  *
371213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
372213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
373213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
374708c1bbcSMiao Xie  */
375213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
37637012946SDavid Rientjes {
37737012946SDavid Rientjes 	if (!pol)
37837012946SDavid Rientjes 		return;
3792e25644eSVlastimil Babka 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
38037012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
38137012946SDavid Rientjes 		return;
382708c1bbcSMiao Xie 
383213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3841d0d2680SDavid Rientjes }
3851d0d2680SDavid Rientjes 
3861d0d2680SDavid Rientjes /*
3871d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires a task
3881d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
38958568d2aSMiao Xie  *
39058568d2aSMiao Xie  * Called with task's alloc_lock held.
3911d0d2680SDavid Rientjes  */
3921d0d2680SDavid Rientjes 
393213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3941d0d2680SDavid Rientjes {
395213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3961d0d2680SDavid Rientjes }
3971d0d2680SDavid Rientjes 
3981d0d2680SDavid Rientjes /*
3991d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
4001d0d2680SDavid Rientjes  *
4011d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
4021d0d2680SDavid Rientjes  */
4031d0d2680SDavid Rientjes 
4041d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
4051d0d2680SDavid Rientjes {
4061d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
4071d0d2680SDavid Rientjes 
4081d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
4091d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
410213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
4111d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
4121d0d2680SDavid Rientjes }
4131d0d2680SDavid Rientjes 
41437012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
41537012946SDavid Rientjes 	[MPOL_DEFAULT] = {
41637012946SDavid Rientjes 		.rebind = mpol_rebind_default,
41737012946SDavid Rientjes 	},
41837012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
41937012946SDavid Rientjes 		.create = mpol_new_interleave,
42037012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
42137012946SDavid Rientjes 	},
42237012946SDavid Rientjes 	[MPOL_PREFERRED] = {
42337012946SDavid Rientjes 		.create = mpol_new_preferred,
42437012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
42537012946SDavid Rientjes 	},
42637012946SDavid Rientjes 	[MPOL_BIND] = {
42737012946SDavid Rientjes 		.create = mpol_new_bind,
42837012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
42937012946SDavid Rientjes 	},
43037012946SDavid Rientjes };
43137012946SDavid Rientjes 
432a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
433fc301289SChristoph Lameter 				unsigned long flags);
4341a75a6c8SChristoph Lameter 
4356f4576e3SNaoya Horiguchi struct queue_pages {
4366f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4376f4576e3SNaoya Horiguchi 	unsigned long flags;
4386f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
439f18da660SLi Xinhai 	unsigned long start;
440f18da660SLi Xinhai 	unsigned long end;
441f18da660SLi Xinhai 	struct vm_area_struct *first;
4426f4576e3SNaoya Horiguchi };
4436f4576e3SNaoya Horiguchi 
44498094945SNaoya Horiguchi /*
44588aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
44688aaa2a1SNaoya Horiguchi  *
44788aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
44888aaa2a1SNaoya Horiguchi  * outside qp->nmask instead.
44988aaa2a1SNaoya Horiguchi  */
45088aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
45188aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
45288aaa2a1SNaoya Horiguchi {
45388aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
45488aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
45588aaa2a1SNaoya Horiguchi 
45688aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
45788aaa2a1SNaoya Horiguchi }
45888aaa2a1SNaoya Horiguchi 
459a7f40cfeSYang Shi /*
460d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
461d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
462d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
463d8835445SYang Shi  *     specified.
464d8835445SYang Shi  * 2 - THP was split.
465d8835445SYang Shi  * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was specified
466d8835445SYang Shi  *        and an existing page was already on a node that does not follow
467d8835445SYang Shi  *        the policy.
468a7f40cfeSYang Shi  */
469c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
470c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
471c8633798SNaoya Horiguchi {
472c8633798SNaoya Horiguchi 	int ret = 0;
473c8633798SNaoya Horiguchi 	struct page *page;
474c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
475c8633798SNaoya Horiguchi 	unsigned long flags;
476c8633798SNaoya Horiguchi 
477c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
478a7f40cfeSYang Shi 		ret = -EIO;
479c8633798SNaoya Horiguchi 		goto unlock;
480c8633798SNaoya Horiguchi 	}
481c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
482c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
483c8633798SNaoya Horiguchi 		spin_unlock(ptl);
484c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
485d8835445SYang Shi 		ret = 2;
486c8633798SNaoya Horiguchi 		goto out;
487c8633798SNaoya Horiguchi 	}
488d8835445SYang Shi 	if (!queue_pages_required(page, qp))
489c8633798SNaoya Horiguchi 		goto unlock;
490c8633798SNaoya Horiguchi 
491c8633798SNaoya Horiguchi 	flags = qp->flags;
492c8633798SNaoya Horiguchi 	/* go to thp migration */
493a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
494a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
495a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
496d8835445SYang Shi 			ret = 1;
497a7f40cfeSYang Shi 			goto unlock;
498a7f40cfeSYang Shi 		}
499a7f40cfeSYang Shi 	} else
500a7f40cfeSYang Shi 		ret = -EIO;
501c8633798SNaoya Horiguchi unlock:
502c8633798SNaoya Horiguchi 	spin_unlock(ptl);
503c8633798SNaoya Horiguchi out:
504c8633798SNaoya Horiguchi 	return ret;
505c8633798SNaoya Horiguchi }
506c8633798SNaoya Horiguchi 
50788aaa2a1SNaoya Horiguchi /*
50898094945SNaoya Horiguchi  * Scan through the pages, checking whether they satisfy the required
50998094945SNaoya Horiguchi  * conditions, and move them to the pagelist if they do.
510d8835445SYang Shi  *
511d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
512d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
513d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
514d8835445SYang Shi  *     specified.
515d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
516d8835445SYang Shi  *        on a node that does not follow the policy.
51798094945SNaoya Horiguchi  */
5186f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
5196f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5201da177e4SLinus Torvalds {
5216f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5226f4576e3SNaoya Horiguchi 	struct page *page;
5236f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5246f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
525c8633798SNaoya Horiguchi 	int ret;
526d8835445SYang Shi 	bool has_unmovable = false;
52791612e0dSHugh Dickins 	pte_t *pte;
528705e87c0SHugh Dickins 	spinlock_t *ptl;
529941150a3SHugh Dickins 
530c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
531c8633798SNaoya Horiguchi 	if (ptl) {
532c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
533d8835445SYang Shi 		if (ret != 2)
534a7f40cfeSYang Shi 			return ret;
535248db92dSKirill A. Shutemov 	}
536d8835445SYang Shi 	/* THP was split, fall through to pte walk */
53791612e0dSHugh Dickins 
538337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
539337d9abfSNaoya Horiguchi 		return 0;
54094723aafSMichal Hocko 
5416f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5426f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
54391612e0dSHugh Dickins 		if (!pte_present(*pte))
54491612e0dSHugh Dickins 			continue;
5456aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5466aab341eSLinus Torvalds 		if (!page)
54791612e0dSHugh Dickins 			continue;
548053837fcSNick Piggin 		/*
54962b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
55062b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
551053837fcSNick Piggin 		 */
552b79bc0a0SHugh Dickins 		if (PageReserved(page))
553f4598c8bSChristoph Lameter 			continue;
55488aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
55538e35860SChristoph Lameter 			continue;
556a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
557d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
558d8835445SYang Shi 			if (!vma_migratable(vma)) {
559d8835445SYang Shi 				has_unmovable = true;
560a7f40cfeSYang Shi 				break;
561d8835445SYang Shi 			}
562a53190a4SYang Shi 
563a53190a4SYang Shi 			/*
564a53190a4SYang Shi 			 * Do not abort immediately since there may be
565a53190a4SYang Shi 			 * temporary off LRU pages in the range.  Still
566a53190a4SYang Shi 			 * need migrate other LRU pages.
567a53190a4SYang Shi 			 */
568a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
569a53190a4SYang Shi 				has_unmovable = true;
570a7f40cfeSYang Shi 		} else
571a7f40cfeSYang Shi 			break;
5726f4576e3SNaoya Horiguchi 	}
5736f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5746f4576e3SNaoya Horiguchi 	cond_resched();
575d8835445SYang Shi 
576d8835445SYang Shi 	if (has_unmovable)
577d8835445SYang Shi 		return 1;
578d8835445SYang Shi 
579a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
58091612e0dSHugh Dickins }
58191612e0dSHugh Dickins 
5826f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5836f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5846f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
585e2d8cf40SNaoya Horiguchi {
586e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5876f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5886f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
589e2d8cf40SNaoya Horiguchi 	struct page *page;
590cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
591d4c54919SNaoya Horiguchi 	pte_t entry;
592e2d8cf40SNaoya Horiguchi 
5936f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5946f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
595d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
596d4c54919SNaoya Horiguchi 		goto unlock;
597d4c54919SNaoya Horiguchi 	page = pte_page(entry);
59888aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
599e2d8cf40SNaoya Horiguchi 		goto unlock;
600e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
601e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
602e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
6036f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
604e2d8cf40SNaoya Horiguchi unlock:
605cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
606e2d8cf40SNaoya Horiguchi #else
607e2d8cf40SNaoya Horiguchi 	BUG();
608e2d8cf40SNaoya Horiguchi #endif
60991612e0dSHugh Dickins 	return 0;
6101da177e4SLinus Torvalds }
6111da177e4SLinus Torvalds 
6125877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
613b24f53a0SLee Schermerhorn /*
6144b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6154b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6164b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6174b10e7d5SMel Gorman  *
6184b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6194b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6204b10e7d5SMel Gorman  * changes to the core.
621b24f53a0SLee Schermerhorn  */
6224b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6234b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
624b24f53a0SLee Schermerhorn {
6254b10e7d5SMel Gorman 	int nr_updated;
626b24f53a0SLee Schermerhorn 
6274d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
62803c5a6e1SMel Gorman 	if (nr_updated)
62903c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
630b24f53a0SLee Schermerhorn 
6314b10e7d5SMel Gorman 	return nr_updated;
632b24f53a0SLee Schermerhorn }
633b24f53a0SLee Schermerhorn #else
634b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
635b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
636b24f53a0SLee Schermerhorn {
637b24f53a0SLee Schermerhorn 	return 0;
638b24f53a0SLee Schermerhorn }
6395877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
640b24f53a0SLee Schermerhorn 
6416f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6426f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6431da177e4SLinus Torvalds {
6446f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6456f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6465b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6476f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
648dc9aa5b9SChristoph Lameter 
649a18b3ac2SLi Xinhai 	/* range check first */
650f18da660SLi Xinhai 	VM_BUG_ON((vma->vm_start > start) || (vma->vm_end < end));
651f18da660SLi Xinhai 
652f18da660SLi Xinhai 	if (!qp->first) {
653f18da660SLi Xinhai 		qp->first = vma;
654f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
655f18da660SLi Xinhai 			(qp->start < vma->vm_start))
656f18da660SLi Xinhai 			/* hole at head side of range */
657a18b3ac2SLi Xinhai 			return -EFAULT;
658a18b3ac2SLi Xinhai 	}
659f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
660f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
661f18da660SLi Xinhai 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
662f18da660SLi Xinhai 		/* hole at middle or tail of range */
663f18da660SLi Xinhai 		return -EFAULT;
664a18b3ac2SLi Xinhai 
665a7f40cfeSYang Shi 	/*
666a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
667a7f40cfeSYang Shi 	 * regardless of vma_migratable
668a7f40cfeSYang Shi 	 */
669a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
670a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
67148684a65SNaoya Horiguchi 		return 1;
67248684a65SNaoya Horiguchi 
6735b952b3cSAndi Kleen 	if (endvma > end)
6745b952b3cSAndi Kleen 		endvma = end;
675b24f53a0SLee Schermerhorn 
676b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6772c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6784355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6794355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6804355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
681b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6826f4576e3SNaoya Horiguchi 		return 1;
683b24f53a0SLee Schermerhorn 	}
684b24f53a0SLee Schermerhorn 
6856f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
686a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
6876f4576e3SNaoya Horiguchi 		return 0;
6886f4576e3SNaoya Horiguchi 	return 1;
6896f4576e3SNaoya Horiguchi }
690b24f53a0SLee Schermerhorn 
6917b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
6927b86ac33SChristoph Hellwig 	.hugetlb_entry		= queue_pages_hugetlb,
6937b86ac33SChristoph Hellwig 	.pmd_entry		= queue_pages_pte_range,
6947b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
6957b86ac33SChristoph Hellwig };
6967b86ac33SChristoph Hellwig 
6976f4576e3SNaoya Horiguchi /*
6986f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6996f4576e3SNaoya Horiguchi  *
7006f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7016f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued on the pagelist, which
702d8835445SYang Shi  * is passed via @private.
703d8835445SYang Shi  *
704d8835445SYang Shi  * queue_pages_range() has three possible return values:
705d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
706d8835445SYang Shi  *     specified.
707d8835445SYang Shi  * 0 - pages were queued successfully, or there was no misplaced page.
708a85dfc30SYang Shi  * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
709a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
710a85dfc30SYang Shi  *         your accessible address space (-EFAULT).
7116f4576e3SNaoya Horiguchi  */
7126f4576e3SNaoya Horiguchi static int
7136f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7146f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7156f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7166f4576e3SNaoya Horiguchi {
717f18da660SLi Xinhai 	int err;
7186f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7196f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7206f4576e3SNaoya Horiguchi 		.flags = flags,
7216f4576e3SNaoya Horiguchi 		.nmask = nodes,
722f18da660SLi Xinhai 		.start = start,
723f18da660SLi Xinhai 		.end = end,
724f18da660SLi Xinhai 		.first = NULL,
7256f4576e3SNaoya Horiguchi 	};
7266f4576e3SNaoya Horiguchi 
727f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
728f18da660SLi Xinhai 
729f18da660SLi Xinhai 	if (!qp.first)
730f18da660SLi Xinhai 		/* whole range in hole */
731f18da660SLi Xinhai 		err = -EFAULT;
732f18da660SLi Xinhai 
733f18da660SLi Xinhai 	return err;
7341da177e4SLinus Torvalds }
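
/*
 * Illustrative usage (a sketch of what migrate_to_node() below does): collect
 * every page on one node and hand the list to migrate_pages():
 *
 *	nodemask_t nmask;
 *	LIST_HEAD(pagelist);
 *
 *	nodes_clear(nmask);
 *	node_set(source, nmask);
 *	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *	if (!list_empty(&pagelist))
 *		err = migrate_pages(&pagelist, alloc_new_node_page, NULL,
 *				    dest, MIGRATE_SYNC, MR_SYSCALL);
 */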
7351da177e4SLinus Torvalds 
736869833f2SKOSAKI Motohiro /*
737869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
738869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
739869833f2SKOSAKI Motohiro  */
740869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
741869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7428d34694cSKOSAKI Motohiro {
743869833f2SKOSAKI Motohiro 	int err;
744869833f2SKOSAKI Motohiro 	struct mempolicy *old;
745869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7468d34694cSKOSAKI Motohiro 
7478d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7488d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7498d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7508d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7518d34694cSKOSAKI Motohiro 
752869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
753869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
754869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
755869833f2SKOSAKI Motohiro 
756869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7578d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
758869833f2SKOSAKI Motohiro 		if (err)
759869833f2SKOSAKI Motohiro 			goto err_out;
7608d34694cSKOSAKI Motohiro 	}
761869833f2SKOSAKI Motohiro 
762869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
763869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
764869833f2SKOSAKI Motohiro 	mpol_put(old);
765869833f2SKOSAKI Motohiro 
766869833f2SKOSAKI Motohiro 	return 0;
767869833f2SKOSAKI Motohiro  err_out:
768869833f2SKOSAKI Motohiro 	mpol_put(new);
7698d34694cSKOSAKI Motohiro 	return err;
7708d34694cSKOSAKI Motohiro }
7718d34694cSKOSAKI Motohiro 
7721da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7739d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7749d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7751da177e4SLinus Torvalds {
7761da177e4SLinus Torvalds 	struct vm_area_struct *next;
7779d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7789d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7799d8cebd4SKOSAKI Motohiro 	int err = 0;
780e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7819d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7829d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7831da177e4SLinus Torvalds 
784097d5910SLinus Torvalds 	vma = find_vma(mm, start);
785f18da660SLi Xinhai 	VM_BUG_ON(!vma);
7869d8cebd4SKOSAKI Motohiro 
787097d5910SLinus Torvalds 	prev = vma->vm_prev;
788e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
789e26a5114SKOSAKI Motohiro 		prev = vma;
790e26a5114SKOSAKI Motohiro 
7919d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7921da177e4SLinus Torvalds 		next = vma->vm_next;
7939d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7949d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7959d8cebd4SKOSAKI Motohiro 
796e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
797e26a5114SKOSAKI Motohiro 			continue;
798e26a5114SKOSAKI Motohiro 
799e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
800e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8019d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
802e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
80319a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
8049d8cebd4SKOSAKI Motohiro 		if (prev) {
8059d8cebd4SKOSAKI Motohiro 			vma = prev;
8069d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
8073964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
8089d8cebd4SKOSAKI Motohiro 				continue;
8093964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
8103964acd0SOleg Nesterov 			goto replace;
8111da177e4SLinus Torvalds 		}
8129d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8139d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
8149d8cebd4SKOSAKI Motohiro 			if (err)
8159d8cebd4SKOSAKI Motohiro 				goto out;
8169d8cebd4SKOSAKI Motohiro 		}
8179d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8189d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
8199d8cebd4SKOSAKI Motohiro 			if (err)
8209d8cebd4SKOSAKI Motohiro 				goto out;
8219d8cebd4SKOSAKI Motohiro 		}
8223964acd0SOleg Nesterov  replace:
823869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8249d8cebd4SKOSAKI Motohiro 		if (err)
8259d8cebd4SKOSAKI Motohiro 			goto out;
8269d8cebd4SKOSAKI Motohiro 	}
8279d8cebd4SKOSAKI Motohiro 
8289d8cebd4SKOSAKI Motohiro  out:
8291da177e4SLinus Torvalds 	return err;
8301da177e4SLinus Torvalds }
8311da177e4SLinus Torvalds 
8321da177e4SLinus Torvalds /* Set the process memory policy */
833028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
834028fec41SDavid Rientjes 			     nodemask_t *nodes)
8351da177e4SLinus Torvalds {
83658568d2aSMiao Xie 	struct mempolicy *new, *old;
8374bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
83858568d2aSMiao Xie 	int ret;
8391da177e4SLinus Torvalds 
8404bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8414bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
842f4e53d91SLee Schermerhorn 
8434bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8444bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8454bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8464bfc4495SKAMEZAWA Hiroyuki 		goto out;
8474bfc4495SKAMEZAWA Hiroyuki 	}
8482c7c3a7dSOleg Nesterov 
84958568d2aSMiao Xie 	task_lock(current);
8504bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
85158568d2aSMiao Xie 	if (ret) {
85258568d2aSMiao Xie 		task_unlock(current);
85358568d2aSMiao Xie 		mpol_put(new);
8544bfc4495SKAMEZAWA Hiroyuki 		goto out;
85558568d2aSMiao Xie 	}
85658568d2aSMiao Xie 	old = current->mempolicy;
8571da177e4SLinus Torvalds 	current->mempolicy = new;
85845816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
85945816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
86058568d2aSMiao Xie 	task_unlock(current);
86158568d2aSMiao Xie 	mpol_put(old);
8624bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8634bfc4495SKAMEZAWA Hiroyuki out:
8644bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8654bfc4495SKAMEZAWA Hiroyuki 	return ret;
8661da177e4SLinus Torvalds }
8671da177e4SLinus Torvalds 
868bea904d5SLee Schermerhorn /*
869bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
87058568d2aSMiao Xie  *
87158568d2aSMiao Xie  * Called with task's alloc_lock held
872bea904d5SLee Schermerhorn  */
873bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8741da177e4SLinus Torvalds {
875dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
876bea904d5SLee Schermerhorn 	if (p == &default_policy)
877bea904d5SLee Schermerhorn 		return;
878bea904d5SLee Schermerhorn 
87945c4745aSLee Schermerhorn 	switch (p->mode) {
88019770b32SMel Gorman 	case MPOL_BIND:
88119770b32SMel Gorman 		/* Fall through */
8821da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
883dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8841da177e4SLinus Torvalds 		break;
8851da177e4SLinus Torvalds 	case MPOL_PREFERRED:
886fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
887dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
88853f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8891da177e4SLinus Torvalds 		break;
8901da177e4SLinus Torvalds 	default:
8911da177e4SLinus Torvalds 		BUG();
8921da177e4SLinus Torvalds 	}
8931da177e4SLinus Torvalds }
8941da177e4SLinus Torvalds 
8953b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
8961da177e4SLinus Torvalds {
8971da177e4SLinus Torvalds 	struct page *p;
8981da177e4SLinus Torvalds 	int err;
8991da177e4SLinus Torvalds 
9003b9aadf7SAndrea Arcangeli 	int locked = 1;
9013b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
9021da177e4SLinus Torvalds 	if (err >= 0) {
9031da177e4SLinus Torvalds 		err = page_to_nid(p);
9041da177e4SLinus Torvalds 		put_page(p);
9051da177e4SLinus Torvalds 	}
9063b9aadf7SAndrea Arcangeli 	if (locked)
9073b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
9081da177e4SLinus Torvalds 	return err;
9091da177e4SLinus Torvalds }
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds /* Retrieve NUMA policy */
912dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9131da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9141da177e4SLinus Torvalds {
9158bccd85fSChristoph Lameter 	int err;
9161da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9171da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9183b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9191da177e4SLinus Torvalds 
920754af6f5SLee Schermerhorn 	if (flags &
921754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9221da177e4SLinus Torvalds 		return -EINVAL;
923754af6f5SLee Schermerhorn 
924754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
925754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
926754af6f5SLee Schermerhorn 			return -EINVAL;
927754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
92858568d2aSMiao Xie 		task_lock(current);
929754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
93058568d2aSMiao Xie 		task_unlock(current);
931754af6f5SLee Schermerhorn 		return 0;
932754af6f5SLee Schermerhorn 	}
933754af6f5SLee Schermerhorn 
9341da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
935bea904d5SLee Schermerhorn 		/*
936bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
937bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
938bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
939bea904d5SLee Schermerhorn 		 */
9401da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
9411da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
9421da177e4SLinus Torvalds 		if (!vma) {
9431da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
9441da177e4SLinus Torvalds 			return -EFAULT;
9451da177e4SLinus Torvalds 		}
9461da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9471da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9481da177e4SLinus Torvalds 		else
9491da177e4SLinus Torvalds 			pol = vma->vm_policy;
9501da177e4SLinus Torvalds 	} else if (addr)
9511da177e4SLinus Torvalds 		return -EINVAL;
9521da177e4SLinus Torvalds 
9531da177e4SLinus Torvalds 	if (!pol)
954bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9551da177e4SLinus Torvalds 
9561da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9571da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9583b9aadf7SAndrea Arcangeli 			/*
9593b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
9603b9aadf7SAndrea Arcangeli 			 * will drop the mmap_sem, so after calling
9613b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
9623b9aadf7SAndrea Arcangeli 			 * is stale.
9633b9aadf7SAndrea Arcangeli 			 */
9643b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9653b9aadf7SAndrea Arcangeli 			vma = NULL;
9663b9aadf7SAndrea Arcangeli 			mpol_get(pol);
9673b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9681da177e4SLinus Torvalds 			if (err < 0)
9691da177e4SLinus Torvalds 				goto out;
9708bccd85fSChristoph Lameter 			*policy = err;
9711da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
97245c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
97345816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
9741da177e4SLinus Torvalds 		} else {
9751da177e4SLinus Torvalds 			err = -EINVAL;
9761da177e4SLinus Torvalds 			goto out;
9771da177e4SLinus Torvalds 		}
978bea904d5SLee Schermerhorn 	} else {
979bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
980bea904d5SLee Schermerhorn 						pol->mode;
981d79df630SDavid Rientjes 		/*
982d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
983d79df630SDavid Rientjes 		 * the policy to userspace.
984d79df630SDavid Rientjes 		 */
985d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
986bea904d5SLee Schermerhorn 	}
9871da177e4SLinus Torvalds 
9881da177e4SLinus Torvalds 	err = 0;
98958568d2aSMiao Xie 	if (nmask) {
990c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
991c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
992c6b6ef8bSLee Schermerhorn 		} else {
99358568d2aSMiao Xie 			task_lock(current);
994bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
99558568d2aSMiao Xie 			task_unlock(current);
99658568d2aSMiao Xie 		}
997c6b6ef8bSLee Schermerhorn 	}
9981da177e4SLinus Torvalds 
9991da177e4SLinus Torvalds  out:
100052cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10011da177e4SLinus Torvalds 	if (vma)
10023b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
10033b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10043b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10051da177e4SLinus Torvalds 	return err;
10061da177e4SLinus Torvalds }
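
/*
 * Illustrative userspace sketch (not part of this file): the flag handling
 * above backs the get_mempolicy(2) syscall.  For example, querying which node
 * currently backs a given address ("addr" is a placeholder):
 *
 *	int node;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */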
10071da177e4SLinus Torvalds 
1008b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10098bccd85fSChristoph Lameter /*
1010c8633798SNaoya Horiguchi  * page migration, thp tail pages can be passed.
10116ce3c4c0SChristoph Lameter  */
1012a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1013fc301289SChristoph Lameter 				unsigned long flags)
10146ce3c4c0SChristoph Lameter {
1015c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
10166ce3c4c0SChristoph Lameter 	/*
1017fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
10186ce3c4c0SChristoph Lameter 	 */
1019c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1020c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
1021c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
1022c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
1023c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
1024c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
1025a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1026a53190a4SYang Shi 			/*
1027a53190a4SYang Shi 			 * Non-movable page may reach here.  And, there may be
1028a53190a4SYang Shi 			 * temporary off LRU pages or non-LRU movable pages.
1029a53190a4SYang Shi 			 * Treat them as unmovable pages since they can't be
1030a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1031a53190a4SYang Shi 			 * should return -EIO for this case too.
1032a53190a4SYang Shi 			 */
1033a53190a4SYang Shi 			return -EIO;
103462695a84SNick Piggin 		}
103562695a84SNick Piggin 	}
1036a53190a4SYang Shi 
1037a53190a4SYang Shi 	return 0;
10386ce3c4c0SChristoph Lameter }
10396ce3c4c0SChristoph Lameter 
1040a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */
1041666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node)
104295a402c3SChristoph Lameter {
1043e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
1044e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1045e2d8cf40SNaoya Horiguchi 					node);
104694723aafSMichal Hocko 	else if (PageTransHuge(page)) {
1047c8633798SNaoya Horiguchi 		struct page *thp;
1048c8633798SNaoya Horiguchi 
1049c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
1050c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
1051c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
1052c8633798SNaoya Horiguchi 		if (!thp)
1053c8633798SNaoya Horiguchi 			return NULL;
1054c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1055c8633798SNaoya Horiguchi 		return thp;
1056c8633798SNaoya Horiguchi 	} else
105796db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
1058b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
105995a402c3SChristoph Lameter }
106095a402c3SChristoph Lameter 
10616ce3c4c0SChristoph Lameter /*
10627e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10637e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10647e2ab150SChristoph Lameter  */
1065dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1066dbcb0f19SAdrian Bunk 			   int flags)
10677e2ab150SChristoph Lameter {
10687e2ab150SChristoph Lameter 	nodemask_t nmask;
10697e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10707e2ab150SChristoph Lameter 	int err = 0;
10717e2ab150SChristoph Lameter 
10727e2ab150SChristoph Lameter 	nodes_clear(nmask);
10737e2ab150SChristoph Lameter 	node_set(source, nmask);
10747e2ab150SChristoph Lameter 
107508270807SMinchan Kim 	/*
107608270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
107708270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107808270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
107908270807SMinchan Kim 	 */
108008270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
108198094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10827e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10837e2ab150SChristoph Lameter 
1084cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1085a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
10869c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1087cf608ac1SMinchan Kim 		if (err)
1088e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1089cf608ac1SMinchan Kim 	}
109095a402c3SChristoph Lameter 
10917e2ab150SChristoph Lameter 	return err;
10927e2ab150SChristoph Lameter }
10937e2ab150SChristoph Lameter 
10947e2ab150SChristoph Lameter /*
10957e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10967e2ab150SChristoph Lameter  * layout as much as possible.
109739743889SChristoph Lameter  *
109839743889SChristoph Lameter  * Returns the number of pages that could not be moved.
109939743889SChristoph Lameter  */
11000ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11010ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110239743889SChristoph Lameter {
11037e2ab150SChristoph Lameter 	int busy = 0;
11040aedadf9SChristoph Lameter 	int err;
11057e2ab150SChristoph Lameter 	nodemask_t tmp;
110639743889SChristoph Lameter 
11070aedadf9SChristoph Lameter 	err = migrate_prep();
11080aedadf9SChristoph Lameter 	if (err)
11090aedadf9SChristoph Lameter 		return err;
11100aedadf9SChristoph Lameter 
111139743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1112d4984711SChristoph Lameter 
11137e2ab150SChristoph Lameter 	/*
11147e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11157e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11167e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11177e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11187e2ab150SChristoph Lameter 	 *
11197e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11207e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11217e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11227e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11237e2ab150SChristoph Lameter 	 *
11247e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11257e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11267e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11277e2ab150SChristoph Lameter 	 *
11287e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11297e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11307e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11317e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11327e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11337e2ab150SChristoph Lameter 	 *
11347e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11357e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11367e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11377e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out right away with that pair.
1138ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11397e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11407e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11417e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11427e2ab150SChristoph Lameter 	 */
11437e2ab150SChristoph Lameter 
11440ce72d4fSAndrew Morton 	tmp = *from;
11457e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11467e2ab150SChristoph Lameter 		int s, d;
1147b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11487e2ab150SChristoph Lameter 		int dest = 0;
11497e2ab150SChristoph Lameter 
11507e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11514a5b18ccSLarry Woodman 
11524a5b18ccSLarry Woodman 			/*
11534a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11544a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11554a5b18ccSLarry Woodman 			 * threads and memory areas.
11564a5b18ccSLarry Woodman 			 *
11574a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal
11584a5b18ccSLarry Woodman 			 * to the number of destination nodes, we cannot preserve
11594a5b18ccSLarry Woodman 			 * this relative node relationship.  In that case, skip
11604a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11614a5b18ccSLarry Woodman 			 * mask.
11624a5b18ccSLarry Woodman 			 *
11634a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11644a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11654a5b18ccSLarry Woodman 			 */
11664a5b18ccSLarry Woodman 
11670ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11680ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11694a5b18ccSLarry Woodman 				continue;
11704a5b18ccSLarry Woodman 
11710ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11727e2ab150SChristoph Lameter 			if (s == d)
11737e2ab150SChristoph Lameter 				continue;
11747e2ab150SChristoph Lameter 
11757e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11767e2ab150SChristoph Lameter 			dest = d;
11777e2ab150SChristoph Lameter 
11787e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11797e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11807e2ab150SChristoph Lameter 				break;
11817e2ab150SChristoph Lameter 		}
1182b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11837e2ab150SChristoph Lameter 			break;
11847e2ab150SChristoph Lameter 
11857e2ab150SChristoph Lameter 		node_clear(source, tmp);
11867e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11877e2ab150SChristoph Lameter 		if (err > 0)
11887e2ab150SChristoph Lameter 			busy += err;
11897e2ab150SChristoph Lameter 		if (err < 0)
11907e2ab150SChristoph Lameter 			break;
119139743889SChristoph Lameter 	}
119239743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11937e2ab150SChristoph Lameter 	if (err < 0)
11947e2ab150SChristoph Lameter 		return err;
11957e2ab150SChristoph Lameter 	return busy;
1196b20a3503SChristoph Lameter 
119739743889SChristoph Lameter }
119839743889SChristoph Lameter 
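The node-pairing walk in do_migrate_pages() is easier to see with the [0-7] -> [3,4,5] example from the comment above. The following is a minimal, self-contained userspace sketch (illustrative only, not part of this file) that replays the same selection order with plain bitmasks; remap() is a hypothetical, simplified stand-in for the kernel's node_remap(), mapping the n-th set bit of 'from' to the n-th set bit of 'to' modulo the weight of 'to'.

#include <stdio.h>

static int weight(unsigned long m) { return __builtin_popcountl(m); }

/* simplified node_remap(): n-th set bit of 'from' -> n-th set bit of 'to' */
static int remap(int s, unsigned long from, unsigned long to)
{
	int n = weight(from & ((1UL << s) - 1)) % weight(to);

	for (int d = 0; d < 64; d++)
		if (to & (1UL << d) && n-- == 0)
			return d;
	return s;
}

int main(void)
{
	unsigned long from = 0xff;	/* nodes 0-7 */
	unsigned long to   = 0x38;	/* nodes 3,4,5 */
	unsigned long tmp  = from;

	while (tmp) {
		int source = -1, dest = 0;

		for (int s = 0; s < 64; s++) {
			if (!(tmp & (1UL << s)))
				continue;
			/* skip nodes already in 'to' when the weights differ */
			if (weight(from) != weight(to) && (to & (1UL << s)))
				continue;
			int d = remap(s, from, to);
			if (s == d)
				continue;
			source = s;	/* node moved, memorize */
			dest = d;
			if (!(tmp & (1UL << dest)))
				break;	/* dest not in remaining from nodes */
		}
		if (source < 0)
			break;
		tmp &= ~(1UL << source);
		printf("migrate node %d -> node %d\n", source, dest);
	}
	return 0;	/* prints 7->4, 6->3, 2->5, 1->4, 0->3: only 0,1,2,6,7 move */
}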
11993ad33b24SLee Schermerhorn /*
12003ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1201d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
12023ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12033ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12043ad33b24SLee Schermerhorn  * is in virtual address order.
12053ad33b24SLee Schermerhorn  */
1206666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
120795a402c3SChristoph Lameter {
1208d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12093ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
121095a402c3SChristoph Lameter 
1211d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12123ad33b24SLee Schermerhorn 	while (vma) {
12133ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12143ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12153ad33b24SLee Schermerhorn 			break;
12163ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12173ad33b24SLee Schermerhorn 	}
12183ad33b24SLee Schermerhorn 
121911c731e8SWanpeng Li 	if (PageHuge(page)) {
1220389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1221389c8178SMichal Hocko 				vma, address);
122294723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1223c8633798SNaoya Horiguchi 		struct page *thp;
1224c8633798SNaoya Horiguchi 
122519deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
122619deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1227c8633798SNaoya Horiguchi 		if (!thp)
1228c8633798SNaoya Horiguchi 			return NULL;
1229c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1230c8633798SNaoya Horiguchi 		return thp;
123111c731e8SWanpeng Li 	}
123211c731e8SWanpeng Li 	/*
123311c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
123411c731e8SWanpeng Li 	 */
12350f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
12360f556856SMichal Hocko 			vma, address);
123795a402c3SChristoph Lameter }
1238b20a3503SChristoph Lameter #else
1239b20a3503SChristoph Lameter 
1240a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1241b20a3503SChristoph Lameter 				unsigned long flags)
1242b20a3503SChristoph Lameter {
1243a53190a4SYang Shi 	return -EIO;
1244b20a3503SChristoph Lameter }
1245b20a3503SChristoph Lameter 
12460ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12470ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1248b20a3503SChristoph Lameter {
1249b20a3503SChristoph Lameter 	return -ENOSYS;
1250b20a3503SChristoph Lameter }
125195a402c3SChristoph Lameter 
1252666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
125395a402c3SChristoph Lameter {
125495a402c3SChristoph Lameter 	return NULL;
125595a402c3SChristoph Lameter }
1256b20a3503SChristoph Lameter #endif
1257b20a3503SChristoph Lameter 
1258dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1259028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1260028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12616ce3c4c0SChristoph Lameter {
12626ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12636ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12646ce3c4c0SChristoph Lameter 	unsigned long end;
12656ce3c4c0SChristoph Lameter 	int err;
1266d8835445SYang Shi 	int ret;
12676ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12686ce3c4c0SChristoph Lameter 
1269b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12706ce3c4c0SChristoph Lameter 		return -EINVAL;
127174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12726ce3c4c0SChristoph Lameter 		return -EPERM;
12736ce3c4c0SChristoph Lameter 
12746ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12756ce3c4c0SChristoph Lameter 		return -EINVAL;
12766ce3c4c0SChristoph Lameter 
12776ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12786ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12796ce3c4c0SChristoph Lameter 
12806ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12816ce3c4c0SChristoph Lameter 	end = start + len;
12826ce3c4c0SChristoph Lameter 
12836ce3c4c0SChristoph Lameter 	if (end < start)
12846ce3c4c0SChristoph Lameter 		return -EINVAL;
12856ce3c4c0SChristoph Lameter 	if (end == start)
12866ce3c4c0SChristoph Lameter 		return 0;
12876ce3c4c0SChristoph Lameter 
1288028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12896ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12906ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12916ce3c4c0SChristoph Lameter 
1292b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1293b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1294b24f53a0SLee Schermerhorn 
12956ce3c4c0SChristoph Lameter 	/*
12966ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12976ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12986ce3c4c0SChristoph Lameter 	 */
12996ce3c4c0SChristoph Lameter 	if (!new)
13006ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13016ce3c4c0SChristoph Lameter 
1302028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1303028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13056ce3c4c0SChristoph Lameter 
13060aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13070aedadf9SChristoph Lameter 
13080aedadf9SChristoph Lameter 		err = migrate_prep();
13090aedadf9SChristoph Lameter 		if (err)
1310b05ca738SKOSAKI Motohiro 			goto mpol_out;
13110aedadf9SChristoph Lameter 	}
13124bfc4495SKAMEZAWA Hiroyuki 	{
13134bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13144bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
13156ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
131658568d2aSMiao Xie 			task_lock(current);
13174bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
131858568d2aSMiao Xie 			task_unlock(current);
13194bfc4495SKAMEZAWA Hiroyuki 			if (err)
132058568d2aSMiao Xie 				up_write(&mm->mmap_sem);
13214bfc4495SKAMEZAWA Hiroyuki 		} else
13224bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13234bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13244bfc4495SKAMEZAWA Hiroyuki 	}
1325b05ca738SKOSAKI Motohiro 	if (err)
1326b05ca738SKOSAKI Motohiro 		goto mpol_out;
1327b05ca738SKOSAKI Motohiro 
1328d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13296ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1330d8835445SYang Shi 
1331d8835445SYang Shi 	if (ret < 0) {
1332a85dfc30SYang Shi 		err = ret;
1333d8835445SYang Shi 		goto up_out;
1334d8835445SYang Shi 	}
1335d8835445SYang Shi 
13369d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13377e2ab150SChristoph Lameter 
1338b24f53a0SLee Schermerhorn 	if (!err) {
1339b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1340b24f53a0SLee Schermerhorn 
1341cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1342b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1343d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1344d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1345cf608ac1SMinchan Kim 			if (nr_failed)
134674060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1347cf608ac1SMinchan Kim 		}
13486ce3c4c0SChristoph Lameter 
1349d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13506ce3c4c0SChristoph Lameter 			err = -EIO;
1351a85dfc30SYang Shi 	} else {
1352d8835445SYang Shi up_out:
1353a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1354a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1355a85dfc30SYang Shi 	}
1356a85dfc30SYang Shi 
13576ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1358b05ca738SKOSAKI Motohiro mpol_out:
1359f0be3d32SLee Schermerhorn 	mpol_put(new);
13606ce3c4c0SChristoph Lameter 	return err;
13616ce3c4c0SChristoph Lameter }
13626ce3c4c0SChristoph Lameter 
136339743889SChristoph Lameter /*
13648bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13658bccd85fSChristoph Lameter  */
13668bccd85fSChristoph Lameter 
13678bccd85fSChristoph Lameter /* Copy a node mask from user space. */
136839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13698bccd85fSChristoph Lameter 		     unsigned long maxnode)
13708bccd85fSChristoph Lameter {
13718bccd85fSChristoph Lameter 	unsigned long k;
137256521e7aSYisheng Xie 	unsigned long t;
13738bccd85fSChristoph Lameter 	unsigned long nlongs;
13748bccd85fSChristoph Lameter 	unsigned long endmask;
13758bccd85fSChristoph Lameter 
13768bccd85fSChristoph Lameter 	--maxnode;
13778bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13788bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13798bccd85fSChristoph Lameter 		return 0;
1380a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1381636f13c1SChris Wright 		return -EINVAL;
13828bccd85fSChristoph Lameter 
13838bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13848bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13858bccd85fSChristoph Lameter 		endmask = ~0UL;
13868bccd85fSChristoph Lameter 	else
13878bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13888bccd85fSChristoph Lameter 
138956521e7aSYisheng Xie 	/*
139056521e7aSYisheng Xie 	 * When the user specified more nodes than supported, just check
139156521e7aSYisheng Xie 	 * whether the unsupported part is all zero.
139256521e7aSYisheng Xie 	 *
139356521e7aSYisheng Xie 	 * If maxnode spans more longs than MAX_NUMNODES, check the bits
139456521e7aSYisheng Xie 	 * in that area first, and then go on to check the remaining bits
139556521e7aSYisheng Xie 	 * that are equal to or bigger than MAX_NUMNODES.
139656521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
139756521e7aSYisheng Xie 	 */
13988bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13998bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
14008bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
14018bccd85fSChristoph Lameter 				return -EFAULT;
14028bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
14038bccd85fSChristoph Lameter 				if (t & endmask)
14048bccd85fSChristoph Lameter 					return -EINVAL;
14058bccd85fSChristoph Lameter 			} else if (t)
14068bccd85fSChristoph Lameter 				return -EINVAL;
14078bccd85fSChristoph Lameter 		}
14088bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
14098bccd85fSChristoph Lameter 		endmask = ~0UL;
14108bccd85fSChristoph Lameter 	}
14118bccd85fSChristoph Lameter 
141256521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
141356521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
141456521e7aSYisheng Xie 
141556521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
141656521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
141756521e7aSYisheng Xie 			return -EFAULT;
141856521e7aSYisheng Xie 		if (t & valid_mask)
141956521e7aSYisheng Xie 			return -EINVAL;
142056521e7aSYisheng Xie 	}
142156521e7aSYisheng Xie 
14228bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
14238bccd85fSChristoph Lameter 		return -EFAULT;
14248bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
14258bccd85fSChristoph Lameter 	return 0;
14268bccd85fSChristoph Lameter }
14278bccd85fSChristoph Lameter 
14288bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14298bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14308bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14318bccd85fSChristoph Lameter {
14328bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1433050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
14348bccd85fSChristoph Lameter 
14358bccd85fSChristoph Lameter 	if (copy > nbytes) {
14368bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14378bccd85fSChristoph Lameter 			return -EINVAL;
14388bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14398bccd85fSChristoph Lameter 			return -EFAULT;
14408bccd85fSChristoph Lameter 		copy = nbytes;
14418bccd85fSChristoph Lameter 	}
14428bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14438bccd85fSChristoph Lameter }
14448bccd85fSChristoph Lameter 
1445e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1446e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1447e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14488bccd85fSChristoph Lameter {
14498bccd85fSChristoph Lameter 	nodemask_t nodes;
14508bccd85fSChristoph Lameter 	int err;
1451028fec41SDavid Rientjes 	unsigned short mode_flags;
14528bccd85fSChristoph Lameter 
1453057d3389SAndrey Konovalov 	start = untagged_addr(start);
1454028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1455028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1456a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1457a3b51e01SDavid Rientjes 		return -EINVAL;
14584c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14594c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14604c50bc01SDavid Rientjes 		return -EINVAL;
14618bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14628bccd85fSChristoph Lameter 	if (err)
14638bccd85fSChristoph Lameter 		return err;
1464028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14658bccd85fSChristoph Lameter }
14668bccd85fSChristoph Lameter 
1467e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1468e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1469e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1470e7dc9ad6SDominik Brodowski {
1471e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1472e7dc9ad6SDominik Brodowski }
1473e7dc9ad6SDominik Brodowski 
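For reference, a minimal userspace sketch of how this syscall is typically reached (illustrative only, not part of this file; it assumes libnuma's <numaif.h> wrapper, memory present on node 0, and linking with -lnuma):

#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	/* Restrict the mapping to node 0; move any already-faulted pages. */
	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0) {
		perror("mbind");
		return 1;
	}
	memset(buf, 0, len);	/* pages now fault in on node 0 */
	return 0;
}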
14748bccd85fSChristoph Lameter /* Set the process memory policy */
1475af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1476af03c4acSDominik Brodowski 				 unsigned long maxnode)
14778bccd85fSChristoph Lameter {
14788bccd85fSChristoph Lameter 	int err;
14798bccd85fSChristoph Lameter 	nodemask_t nodes;
1480028fec41SDavid Rientjes 	unsigned short flags;
14818bccd85fSChristoph Lameter 
1482028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1483028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1484028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14858bccd85fSChristoph Lameter 		return -EINVAL;
14864c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
14874c50bc01SDavid Rientjes 		return -EINVAL;
14888bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14898bccd85fSChristoph Lameter 	if (err)
14908bccd85fSChristoph Lameter 		return err;
1491028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14928bccd85fSChristoph Lameter }
14938bccd85fSChristoph Lameter 
1494af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1495af03c4acSDominik Brodowski 		unsigned long, maxnode)
1496af03c4acSDominik Brodowski {
1497af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1498af03c4acSDominik Brodowski }
1499af03c4acSDominik Brodowski 
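A minimal userspace sketch of the corresponding set_mempolicy() call (illustrative only; assumes <numaif.h> from libnuma, a machine with nodes 0 and 1, and linking with -lnuma):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);	/* nodes 0 and 1 */

	/* Interleave all future allocations of this task across nodes 0-1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask)) != 0) {
		perror("set_mempolicy");
		return 1;
	}
	return 0;
}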
1500b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1501b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1502b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
150339743889SChristoph Lameter {
1504596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
150539743889SChristoph Lameter 	struct task_struct *task;
150639743889SChristoph Lameter 	nodemask_t task_nodes;
150739743889SChristoph Lameter 	int err;
1508596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1509596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1510596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
151139743889SChristoph Lameter 
1512596d7cfaSKOSAKI Motohiro 	if (!scratch)
1513596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
151439743889SChristoph Lameter 
1515596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1516596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1517596d7cfaSKOSAKI Motohiro 
1518596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
151939743889SChristoph Lameter 	if (err)
1520596d7cfaSKOSAKI Motohiro 		goto out;
1521596d7cfaSKOSAKI Motohiro 
1522596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1523596d7cfaSKOSAKI Motohiro 	if (err)
1524596d7cfaSKOSAKI Motohiro 		goto out;
152539743889SChristoph Lameter 
152639743889SChristoph Lameter 	/* Find the mm_struct */
152755cfaa3cSZeng Zhaoming 	rcu_read_lock();
1528228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
152939743889SChristoph Lameter 	if (!task) {
153055cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1531596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1532596d7cfaSKOSAKI Motohiro 		goto out;
153339743889SChristoph Lameter 	}
15343268c63eSChristoph Lameter 	get_task_struct(task);
153539743889SChristoph Lameter 
1536596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
153739743889SChristoph Lameter 
153839743889SChristoph Lameter 	/*
153931367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
154031367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
154139743889SChristoph Lameter 	 */
154231367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1543c69e8d9cSDavid Howells 		rcu_read_unlock();
154439743889SChristoph Lameter 		err = -EPERM;
15453268c63eSChristoph Lameter 		goto out_put;
154639743889SChristoph Lameter 	}
1547c69e8d9cSDavid Howells 	rcu_read_unlock();
154839743889SChristoph Lameter 
154939743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
155039743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1551596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
155239743889SChristoph Lameter 		err = -EPERM;
15533268c63eSChristoph Lameter 		goto out_put;
155439743889SChristoph Lameter 	}
155539743889SChristoph Lameter 
15560486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15570486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15580486a38bSYisheng Xie 	if (nodes_empty(*new))
15593268c63eSChristoph Lameter 		goto out_put;
15600486a38bSYisheng Xie 
156186c3a764SDavid Quigley 	err = security_task_movememory(task);
156286c3a764SDavid Quigley 	if (err)
15633268c63eSChristoph Lameter 		goto out_put;
156486c3a764SDavid Quigley 
15653268c63eSChristoph Lameter 	mm = get_task_mm(task);
15663268c63eSChristoph Lameter 	put_task_struct(task);
1567f2a9ef88SSasha Levin 
1568f2a9ef88SSasha Levin 	if (!mm) {
1569f2a9ef88SSasha Levin 		err = -EINVAL;
1570f2a9ef88SSasha Levin 		goto out;
1571f2a9ef88SSasha Levin 	}
1572f2a9ef88SSasha Levin 
1573596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
157474c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15753268c63eSChristoph Lameter 
157639743889SChristoph Lameter 	mmput(mm);
15773268c63eSChristoph Lameter out:
1578596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1579596d7cfaSKOSAKI Motohiro 
158039743889SChristoph Lameter 	return err;
15813268c63eSChristoph Lameter 
15823268c63eSChristoph Lameter out_put:
15833268c63eSChristoph Lameter 	put_task_struct(task);
15843268c63eSChristoph Lameter 	goto out;
15853268c63eSChristoph Lameter 
158639743889SChristoph Lameter }
158739743889SChristoph Lameter 
1588b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1589b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1590b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1591b6e9b0baSDominik Brodowski {
1592b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1593b6e9b0baSDominik Brodowski }
1594b6e9b0baSDominik Brodowski 
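A minimal userspace sketch of the corresponding migrate_pages() call (illustrative only; assumes <numaif.h> from libnuma, memory on nodes 0 and 1, and linking with -lnuma). pid 0 means the calling process; a non-negative return value is the number of pages that could not be moved.

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long old_nodes = 1UL << 0;	/* move pages off node 0 ... */
	unsigned long new_nodes = 1UL << 1;	/* ... onto node 1 */
	long left;

	left = migrate_pages(0, 8 * sizeof(unsigned long), &old_nodes, &new_nodes);
	if (left < 0)
		perror("migrate_pages");
	else
		printf("%ld pages could not be migrated\n", left);
	return 0;
}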
159539743889SChristoph Lameter 
15968bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1597af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1598af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1599af03c4acSDominik Brodowski 				unsigned long maxnode,
1600af03c4acSDominik Brodowski 				unsigned long addr,
1601af03c4acSDominik Brodowski 				unsigned long flags)
16028bccd85fSChristoph Lameter {
1603dbcb0f19SAdrian Bunk 	int err;
1604dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
16058bccd85fSChristoph Lameter 	nodemask_t nodes;
16068bccd85fSChristoph Lameter 
1607057d3389SAndrey Konovalov 	addr = untagged_addr(addr);
1608057d3389SAndrey Konovalov 
1609050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16108bccd85fSChristoph Lameter 		return -EINVAL;
16118bccd85fSChristoph Lameter 
16128bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16138bccd85fSChristoph Lameter 
16148bccd85fSChristoph Lameter 	if (err)
16158bccd85fSChristoph Lameter 		return err;
16168bccd85fSChristoph Lameter 
16178bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16188bccd85fSChristoph Lameter 		return -EFAULT;
16198bccd85fSChristoph Lameter 
16208bccd85fSChristoph Lameter 	if (nmask)
16218bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16228bccd85fSChristoph Lameter 
16238bccd85fSChristoph Lameter 	return err;
16248bccd85fSChristoph Lameter }
16258bccd85fSChristoph Lameter 
1626af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1627af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1628af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1629af03c4acSDominik Brodowski {
1630af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1631af03c4acSDominik Brodowski }
1632af03c4acSDominik Brodowski 
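A minimal userspace sketch of the corresponding get_mempolicy() call (illustrative only; assumes <numaif.h> from libnuma, a kernel with at most 64 possible nodes so that maxnode of 64 passes the nr_node_ids check above, and linking with -lnuma):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int mode;
	unsigned long nodes = 0;
	void *p = malloc(4096);

	if (!p)
		return 1;
	/* Query the policy that governs allocations at address p. */
	if (get_mempolicy(&mode, &nodes, 8 * sizeof(nodes), p, MPOL_F_ADDR) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("mode=%d nodemask=%#lx\n", mode, nodes);
	free(p);
	return 0;
}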
16331da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
16341da177e4SLinus Torvalds 
1635c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1636c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1637c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1638c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
16391da177e4SLinus Torvalds {
16401da177e4SLinus Torvalds 	long err;
16411da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16421da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16431da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16441da177e4SLinus Torvalds 
1645050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
16461da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	if (nmask)
16491da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16501da177e4SLinus Torvalds 
1651af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
16521da177e4SLinus Torvalds 
16531da177e4SLinus Torvalds 	if (!err && nmask) {
16542bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
16552bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
16562bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
16571da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
16581da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
16591da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
16601da177e4SLinus Torvalds 	}
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 	return err;
16631da177e4SLinus Torvalds }
16641da177e4SLinus Torvalds 
1665c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1666c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16671da177e4SLinus Torvalds {
16681da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16691da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16701da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16711da177e4SLinus Torvalds 
16721da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16731da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16741da177e4SLinus Torvalds 
16751da177e4SLinus Torvalds 	if (nmask) {
1676cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
16771da177e4SLinus Torvalds 			return -EFAULT;
1678cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1679cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1680cf01fb99SChris Salls 			return -EFAULT;
1681cf01fb99SChris Salls 	}
16821da177e4SLinus Torvalds 
1683af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
16841da177e4SLinus Torvalds }
16851da177e4SLinus Torvalds 
1686c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1687c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1688c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
16891da177e4SLinus Torvalds {
16901da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16911da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1692dfcd3c0dSAndi Kleen 	nodemask_t bm;
16931da177e4SLinus Torvalds 
16941da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16951da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16961da177e4SLinus Torvalds 
16971da177e4SLinus Torvalds 	if (nmask) {
1698cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
16991da177e4SLinus Torvalds 			return -EFAULT;
1700cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1701cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1702cf01fb99SChris Salls 			return -EFAULT;
1703cf01fb99SChris Salls 	}
17041da177e4SLinus Torvalds 
1705e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
17061da177e4SLinus Torvalds }
17071da177e4SLinus Torvalds 
1708b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1709b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1710b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1711b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1712b6e9b0baSDominik Brodowski {
1713b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1714b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1715b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1716b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1717b6e9b0baSDominik Brodowski 	unsigned long size;
1718b6e9b0baSDominik Brodowski 
1719b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1720b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1721b6e9b0baSDominik Brodowski 	if (old_nodes) {
1722b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1723b6e9b0baSDominik Brodowski 			return -EFAULT;
1724b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1725b6e9b0baSDominik Brodowski 		if (new_nodes)
1726b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1727b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1728b6e9b0baSDominik Brodowski 			return -EFAULT;
1729b6e9b0baSDominik Brodowski 	}
1730b6e9b0baSDominik Brodowski 	if (new_nodes) {
1731b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1732b6e9b0baSDominik Brodowski 			return -EFAULT;
1733b6e9b0baSDominik Brodowski 		if (new == NULL)
1734b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1735b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1736b6e9b0baSDominik Brodowski 			return -EFAULT;
1737b6e9b0baSDominik Brodowski 	}
1738b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1739b6e9b0baSDominik Brodowski }
1740b6e9b0baSDominik Brodowski 
1741b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
17421da177e4SLinus Torvalds 
174374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
174474d2c3a0SOleg Nesterov 						unsigned long addr)
17451da177e4SLinus Torvalds {
17468d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17471da177e4SLinus Torvalds 
17481da177e4SLinus Torvalds 	if (vma) {
1749480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17508d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
175100442ad0SMel Gorman 		} else if (vma->vm_policy) {
17521da177e4SLinus Torvalds 			pol = vma->vm_policy;
175300442ad0SMel Gorman 
175400442ad0SMel Gorman 			/*
175500442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
175600442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
175700442ad0SMel Gorman 			 * count on these policies which will be dropped by
175800442ad0SMel Gorman 			 * mpol_cond_put() later
175900442ad0SMel Gorman 			 */
176000442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
176100442ad0SMel Gorman 				mpol_get(pol);
176200442ad0SMel Gorman 		}
17631da177e4SLinus Torvalds 	}
1764f15ca78eSOleg Nesterov 
176574d2c3a0SOleg Nesterov 	return pol;
176674d2c3a0SOleg Nesterov }
176774d2c3a0SOleg Nesterov 
176874d2c3a0SOleg Nesterov /*
1769dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
177074d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
177174d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
177274d2c3a0SOleg Nesterov  *
177374d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1774dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
177574d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
177674d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
177774d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
177874d2c3a0SOleg Nesterov  * extra reference for shared policies.
177974d2c3a0SOleg Nesterov  */
1780ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1781dd6eecb9SOleg Nesterov 						unsigned long addr)
178274d2c3a0SOleg Nesterov {
178374d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
178474d2c3a0SOleg Nesterov 
17858d90274bSOleg Nesterov 	if (!pol)
1786dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17878d90274bSOleg Nesterov 
17881da177e4SLinus Torvalds 	return pol;
17891da177e4SLinus Torvalds }
17901da177e4SLinus Torvalds 
17916b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1792fc314724SMel Gorman {
17936b6482bbSOleg Nesterov 	struct mempolicy *pol;
1794f15ca78eSOleg Nesterov 
1795fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1796fc314724SMel Gorman 		bool ret = false;
1797fc314724SMel Gorman 
1798fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1799fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1800fc314724SMel Gorman 			ret = true;
1801fc314724SMel Gorman 		mpol_cond_put(pol);
1802fc314724SMel Gorman 
1803fc314724SMel Gorman 		return ret;
18048d90274bSOleg Nesterov 	}
18058d90274bSOleg Nesterov 
1806fc314724SMel Gorman 	pol = vma->vm_policy;
18078d90274bSOleg Nesterov 	if (!pol)
18086b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1809fc314724SMel Gorman 
1810fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1811fc314724SMel Gorman }
1812fc314724SMel Gorman 
1813d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1814d3eb1570SLai Jiangshan {
1815d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1816d3eb1570SLai Jiangshan 
1817d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1818d3eb1570SLai Jiangshan 
1819d3eb1570SLai Jiangshan 	/*
1820d3eb1570SLai Jiangshan 	 * If policy->v.nodes has movable memory only,
1821d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1822d3eb1570SLai Jiangshan 	 *
1823d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1824d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1825d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1826d3eb1570SLai Jiangshan 	 */
1827d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1828d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1829d3eb1570SLai Jiangshan 
1830d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1831d3eb1570SLai Jiangshan }
1832d3eb1570SLai Jiangshan 
183352cd3b07SLee Schermerhorn /*
183452cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
183552cd3b07SLee Schermerhorn  * page allocation
183652cd3b07SLee Schermerhorn  */
183752cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
183819770b32SMel Gorman {
183919770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
184045c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1841d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
184219770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
184319770b32SMel Gorman 		return &policy->v.nodes;
184419770b32SMel Gorman 
184519770b32SMel Gorman 	return NULL;
184619770b32SMel Gorman }
184719770b32SMel Gorman 
184804ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
184904ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
18502f5f9486SAndi Kleen 								int nd)
18511da177e4SLinus Torvalds {
18526d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
18531da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
18546d840958SMichal Hocko 	else {
185519770b32SMel Gorman 		/*
18566d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18576d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18586d840958SMichal Hocko 		 * requested node and not break the policy.
185919770b32SMel Gorman 		 */
18606d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18611da177e4SLinus Torvalds 	}
18626d840958SMichal Hocko 
186304ec6264SVlastimil Babka 	return nd;
18641da177e4SLinus Torvalds }
18651da177e4SLinus Torvalds 
18661da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18671da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18681da177e4SLinus Torvalds {
186945816682SVlastimil Babka 	unsigned next;
18701da177e4SLinus Torvalds 	struct task_struct *me = current;
18711da177e4SLinus Torvalds 
187245816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1873f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
187445816682SVlastimil Babka 		me->il_prev = next;
187545816682SVlastimil Babka 	return next;
18761da177e4SLinus Torvalds }
18771da177e4SLinus Torvalds 
1878dc85da15SChristoph Lameter /*
1879dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1880dc85da15SChristoph Lameter  * next slab entry.
1881dc85da15SChristoph Lameter  */
18822a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1883dc85da15SChristoph Lameter {
1884e7b691b0SAndi Kleen 	struct mempolicy *policy;
18852a389610SDavid Rientjes 	int node = numa_mem_id();
1886e7b691b0SAndi Kleen 
1887e7b691b0SAndi Kleen 	if (in_interrupt())
18882a389610SDavid Rientjes 		return node;
1889e7b691b0SAndi Kleen 
1890e7b691b0SAndi Kleen 	policy = current->mempolicy;
1891fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
18922a389610SDavid Rientjes 		return node;
1893765c4507SChristoph Lameter 
1894bea904d5SLee Schermerhorn 	switch (policy->mode) {
1895bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1896fc36b8d3SLee Schermerhorn 		/*
1897fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1898fc36b8d3SLee Schermerhorn 		 */
1899bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1900bea904d5SLee Schermerhorn 
1901dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1902dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1903dc85da15SChristoph Lameter 
1904dd1a239fSMel Gorman 	case MPOL_BIND: {
1905c33d6c06SMel Gorman 		struct zoneref *z;
1906c33d6c06SMel Gorman 
1907dc85da15SChristoph Lameter 		/*
1908dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1909dc85da15SChristoph Lameter 		 * first node.
1910dc85da15SChristoph Lameter 		 */
191119770b32SMel Gorman 		struct zonelist *zonelist;
191219770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1913c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1914c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1915c33d6c06SMel Gorman 							&policy->v.nodes);
1916c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1917dd1a239fSMel Gorman 	}
1918dc85da15SChristoph Lameter 
1919dc85da15SChristoph Lameter 	default:
1920bea904d5SLee Schermerhorn 		BUG();
1921dc85da15SChristoph Lameter 	}
1922dc85da15SChristoph Lameter }
1923dc85da15SChristoph Lameter 
1924fee83b3aSAndrew Morton /*
1925fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1926fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1927fee83b3aSAndrew Morton  * number of present nodes.
1928fee83b3aSAndrew Morton  */
192998c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19301da177e4SLinus Torvalds {
1931dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1932f5b087b5SDavid Rientjes 	unsigned target;
1933fee83b3aSAndrew Morton 	int i;
1934fee83b3aSAndrew Morton 	int nid;
19351da177e4SLinus Torvalds 
1936f5b087b5SDavid Rientjes 	if (!nnodes)
1937f5b087b5SDavid Rientjes 		return numa_node_id();
1938fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1939fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1940fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1941dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
19421da177e4SLinus Torvalds 	return nid;
19431da177e4SLinus Torvalds }
19441da177e4SLinus Torvalds 
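As a worked example of the arithmetic above: with pol->v.nodes = {0, 2, 5} (nnodes = 3) and n = 7, target = 7 % 3 = 1, so the walk starts at node 0 and advances one step, returning node 2. A hypothetical userspace equivalent over an ordered array of node ids (a simplified sketch, not kernel code; it returns 0 rather than numa_node_id() when the set is empty):

/* simplified stand-in for offset_il_node(): nodes[] lists the set nodes in order */
static int offset_il_node_demo(const int *nodes, int nnodes, unsigned long n)
{
	return nnodes ? nodes[n % nnodes] : 0;
}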
19455da7ca86SChristoph Lameter /* Determine a node number for interleave */
19465da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19475da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19485da7ca86SChristoph Lameter {
19495da7ca86SChristoph Lameter 	if (vma) {
19505da7ca86SChristoph Lameter 		unsigned long off;
19515da7ca86SChristoph Lameter 
19523b98b087SNishanth Aravamudan 		/*
19533b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19543b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19553b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19563b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19573b98b087SNishanth Aravamudan 		 * a useful offset.
19583b98b087SNishanth Aravamudan 		 */
19593b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19603b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19615da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
196298c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19635da7ca86SChristoph Lameter 	} else
19645da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19655da7ca86SChristoph Lameter }
19665da7ca86SChristoph Lameter 
196700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1968480eccf9SLee Schermerhorn /*
196904ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1970b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1971b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1972b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1973b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1974b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1975480eccf9SLee Schermerhorn  *
197604ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
197752cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
197852cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
197952cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1980c0ff7453SMiao Xie  *
1981d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1982480eccf9SLee Schermerhorn  */
198304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
198404ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
19855da7ca86SChristoph Lameter {
198604ec6264SVlastimil Babka 	int nid;
19875da7ca86SChristoph Lameter 
1988dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
198919770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
19905da7ca86SChristoph Lameter 
199152cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
199204ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
199304ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
199452cd3b07SLee Schermerhorn 	} else {
199504ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
199652cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
199752cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1998480eccf9SLee Schermerhorn 	}
199904ec6264SVlastimil Babka 	return nid;
20005da7ca86SChristoph Lameter }
200106808b08SLee Schermerhorn 
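For illustration, the interleave branch above is what an application exercises when it combines a hugetlb mapping with an interleave policy. A hedged userspace sketch (assumes <numaif.h> from libnuma, 2MB hugetlb pages already reserved via nr_hugepages, memory on nodes 0 and 1, and linking with -lnuma):

#define _GNU_SOURCE
#include <numaif.h>
#include <sys/mman.h>
#include <string.h>

int main(void)
{
	size_t len = 4UL << 20;		/* two huge pages, assuming 2MB hugetlb size */
	unsigned long nodemask = (1UL << 0) | (1UL << 1);
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	/* interleave the huge-page faults for this mapping over nodes 0 and 1 */
	if (mbind(buf, len, MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask), 0))
		return 1;
	memset(buf, 0, len);		/* fault pages in; huge_node() picks the node */
	return 0;
}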
200206808b08SLee Schermerhorn /*
200306808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
200406808b08SLee Schermerhorn  *
200506808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
200606808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
200706808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
200806808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
200906808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
201006808b08SLee Schermerhorn  * of non-default mempolicy.
201106808b08SLee Schermerhorn  *
201206808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
201306808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
201406808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
201506808b08SLee Schermerhorn  *
201606808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
201706808b08SLee Schermerhorn  */
201806808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
201906808b08SLee Schermerhorn {
202006808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
202106808b08SLee Schermerhorn 	int nid;
202206808b08SLee Schermerhorn 
202306808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
202406808b08SLee Schermerhorn 		return false;
202506808b08SLee Schermerhorn 
2026c0ff7453SMiao Xie 	task_lock(current);
202706808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
202806808b08SLee Schermerhorn 	switch (mempolicy->mode) {
202906808b08SLee Schermerhorn 	case MPOL_PREFERRED:
203006808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
203106808b08SLee Schermerhorn 			nid = numa_node_id();
203206808b08SLee Schermerhorn 		else
203306808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
203406808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
203506808b08SLee Schermerhorn 		break;
203606808b08SLee Schermerhorn 
203706808b08SLee Schermerhorn 	case MPOL_BIND:
203806808b08SLee Schermerhorn 		/* Fall through */
203906808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
204006808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
204106808b08SLee Schermerhorn 		break;
204206808b08SLee Schermerhorn 
204306808b08SLee Schermerhorn 	default:
204406808b08SLee Schermerhorn 		BUG();
204506808b08SLee Schermerhorn 	}
2046c0ff7453SMiao Xie 	task_unlock(current);
204706808b08SLee Schermerhorn 
204806808b08SLee Schermerhorn 	return true;
204906808b08SLee Schermerhorn }
205000ac59adSChen, Kenneth W #endif
20515da7ca86SChristoph Lameter 
20526f48d0ebSDavid Rientjes /*
20536f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
20546f48d0ebSDavid Rientjes  *
20556f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
20566f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
20576f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
20586f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
20596f48d0ebSDavid Rientjes  *
20606f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20616f48d0ebSDavid Rientjes  */
20626f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
20636f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20646f48d0ebSDavid Rientjes {
20656f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20666f48d0ebSDavid Rientjes 	bool ret = true;
20676f48d0ebSDavid Rientjes 
20686f48d0ebSDavid Rientjes 	if (!mask)
20696f48d0ebSDavid Rientjes 		return ret;
20706f48d0ebSDavid Rientjes 	task_lock(tsk);
20716f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
20726f48d0ebSDavid Rientjes 	if (!mempolicy)
20736f48d0ebSDavid Rientjes 		goto out;
20746f48d0ebSDavid Rientjes 
20756f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
20766f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
20776f48d0ebSDavid Rientjes 		/*
20786f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
20796f48d0ebSDavid Rientjes 		 * to allocate from; they may fall back to other nodes when OOM.
20806f48d0ebSDavid Rientjes 		 * Thus, it's possible for tsk to have allocated memory from
20816f48d0ebSDavid Rientjes 		 * nodes in mask.
20826f48d0ebSDavid Rientjes 		 */
20836f48d0ebSDavid Rientjes 		break;
20846f48d0ebSDavid Rientjes 	case MPOL_BIND:
20856f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
20866f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
20876f48d0ebSDavid Rientjes 		break;
20886f48d0ebSDavid Rientjes 	default:
20896f48d0ebSDavid Rientjes 		BUG();
20906f48d0ebSDavid Rientjes 	}
20916f48d0ebSDavid Rientjes out:
20926f48d0ebSDavid Rientjes 	task_unlock(tsk);
20936f48d0ebSDavid Rientjes 	return ret;
20946f48d0ebSDavid Rientjes }
20956f48d0ebSDavid Rientjes 
20961da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
20971da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2098662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2099662f3a0bSAndi Kleen 					unsigned nid)
21001da177e4SLinus Torvalds {
21011da177e4SLinus Torvalds 	struct page *page;
21021da177e4SLinus Torvalds 
210304ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
21044518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21054518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21064518085eSKemi Wang 		return page;
2107de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2108de55c8b2SAndrey Ryabinin 		preempt_disable();
2109de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2110de55c8b2SAndrey Ryabinin 		preempt_enable();
2111de55c8b2SAndrey Ryabinin 	}
21121da177e4SLinus Torvalds 	return page;
21131da177e4SLinus Torvalds }
21141da177e4SLinus Torvalds 
21151da177e4SLinus Torvalds /**
21160bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
21171da177e4SLinus Torvalds  *
21181da177e4SLinus Torvalds  * 	@gfp:
21191da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
21201da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
21211da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
21221da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
21231da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
21241da177e4SLinus Torvalds  *
21250bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
21261da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
21271da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2128be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
212919deb769SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
21301da177e4SLinus Torvalds  *
21311da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
21321da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
21331da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
21341da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2135be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2136be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
21371da177e4SLinus Torvalds  */
21381da177e4SLinus Torvalds struct page *
21390bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
214019deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
21411da177e4SLinus Torvalds {
2142cc9a6c87SMel Gorman 	struct mempolicy *pol;
2143c0ff7453SMiao Xie 	struct page *page;
214404ec6264SVlastimil Babka 	int preferred_nid;
2145be97a41bSVlastimil Babka 	nodemask_t *nmask;
21461da177e4SLinus Torvalds 
2147dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2148cc9a6c87SMel Gorman 
2149be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
21501da177e4SLinus Torvalds 		unsigned nid;
21515da7ca86SChristoph Lameter 
21528eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
215352cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
21540bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2155be97a41bSVlastimil Babka 		goto out;
21561da177e4SLinus Torvalds 	}
21571da177e4SLinus Torvalds 
215819deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
215919deb769SDavid Rientjes 		int hpage_node = node;
216019deb769SDavid Rientjes 
216119deb769SDavid Rientjes 		/*
216219deb769SDavid Rientjes 		 * For hugepage allocation and a non-interleave policy that
216319deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
216419deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
216519deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
216619deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
216719deb769SDavid Rientjes 		 *
216819deb769SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
216919deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
217019deb769SDavid Rientjes 		 */
217119deb769SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
217219deb769SDavid Rientjes 			hpage_node = pol->v.preferred_node;
217319deb769SDavid Rientjes 
217419deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
217519deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
217619deb769SDavid Rientjes 			mpol_cond_put(pol);
2177cc638f32SVlastimil Babka 			/*
2178cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2179cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2180cc638f32SVlastimil Babka 			 */
218119deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2182cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
218376e654ccSDavid Rientjes 
218476e654ccSDavid Rientjes 			/*
218576e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always use
218676e654ccSDavid Rientjes 			 * synchronous compaction, or the vma has been madvised
218776e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2188cc638f32SVlastimil Babka 			 * memory with both reclaim and compaction as well.
218976e654ccSDavid Rientjes 			 */
219076e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
219176e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2192cc638f32SVlastimil Babka 								gfp, order);
219376e654ccSDavid Rientjes 
219419deb769SDavid Rientjes 			goto out;
219519deb769SDavid Rientjes 		}
219619deb769SDavid Rientjes 	}
219719deb769SDavid Rientjes 
2198077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
219904ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
220004ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2201d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2202be97a41bSVlastimil Babka out:
2203077fcf11SAneesh Kumar K.V 	return page;
2204077fcf11SAneesh Kumar K.V }
220569262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
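
/*
 * Illustrative sketch, not part of the original file: an order-0, non-THP
 * allocation for a faulting address, similar to what the alloc_page_vma()
 * wrapper in gfp.h passes down for anonymous fault paths.
 */
#if 0
static struct page *example_fault_alloc(struct vm_area_struct *vma,
					unsigned long addr)
{
	/* caller holds mmap_sem for read; policy comes from vma or task */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}
#endif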
2206077fcf11SAneesh Kumar K.V 
22071da177e4SLinus Torvalds /**
22081da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
22091da177e4SLinus Torvalds  *
22101da177e4SLinus Torvalds  *	@gfp:
22111da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
22121da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
22131da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
22141da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
22151da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
22161da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
22171da177e4SLinus Torvalds  *
22181da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
22191da177e4SLinus Torvalds  *	interrupt context, apply the current process NUMA policy.
22201da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
22211da177e4SLinus Torvalds  */
2222dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
22231da177e4SLinus Torvalds {
22248d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2225c0ff7453SMiao Xie 	struct page *page;
22261da177e4SLinus Torvalds 
22278d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22288d90274bSOleg Nesterov 		pol = get_task_policy(current);
222952cd3b07SLee Schermerhorn 
223052cd3b07SLee Schermerhorn 	/*
223152cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
223252cd3b07SLee Schermerhorn 	 * nor system default_policy
223352cd3b07SLee Schermerhorn 	 */
223445c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2235c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2236c0ff7453SMiao Xie 	else
2237c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
223804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22395c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2240cc9a6c87SMel Gorman 
2241c0ff7453SMiao Xie 	return page;
22421da177e4SLinus Torvalds }
22431da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
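
/*
 * Illustrative sketch, not part of the original file: a plain kernel
 * allocation that honours the caller's task policy; on NUMA kernels the
 * alloc_pages() wrapper resolves to alloc_pages_current().
 */
#if 0
static void *example_policy_buffer(void)
{
	struct page *page = alloc_pages_current(GFP_KERNEL | __GFP_ZERO, 0);

	return page ? page_address(page) : NULL;
}
#endif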
22441da177e4SLinus Torvalds 
2245ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2246ef0855d3SOleg Nesterov {
2247ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2248ef0855d3SOleg Nesterov 
2249ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2250ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2251ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2252ef0855d3SOleg Nesterov 	return 0;
2253ef0855d3SOleg Nesterov }
2254ef0855d3SOleg Nesterov 
22554225399aSPaul Jackson /*
2256846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22574225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
22584225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
22594225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
22604225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2261708c1bbcSMiao Xie  *
2262708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2263708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
22644225399aSPaul Jackson  */
22654225399aSPaul Jackson 
2266846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2267846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
22681da177e4SLinus Torvalds {
22691da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
22701da177e4SLinus Torvalds 
22711da177e4SLinus Torvalds 	if (!new)
22721da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2273708c1bbcSMiao Xie 
2274708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2275708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2276708c1bbcSMiao Xie 		task_lock(current);
2277708c1bbcSMiao Xie 		*new = *old;
2278708c1bbcSMiao Xie 		task_unlock(current);
2279708c1bbcSMiao Xie 	} else
2280708c1bbcSMiao Xie 		*new = *old;
2281708c1bbcSMiao Xie 
22824225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
22834225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2284213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
22854225399aSPaul Jackson 	}
22861da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
22871da177e4SLinus Torvalds 	return new;
22881da177e4SLinus Torvalds }
22891da177e4SLinus Torvalds 
22901da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2291fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
22921da177e4SLinus Torvalds {
22931da177e4SLinus Torvalds 	if (!a || !b)
2294fcfb4dccSKOSAKI Motohiro 		return false;
229545c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2296fcfb4dccSKOSAKI Motohiro 		return false;
229719800502SBob Liu 	if (a->flags != b->flags)
2298fcfb4dccSKOSAKI Motohiro 		return false;
229919800502SBob Liu 	if (mpol_store_user_nodemask(a))
230019800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2301fcfb4dccSKOSAKI Motohiro 			return false;
230219800502SBob Liu 
230345c4745aSLee Schermerhorn 	switch (a->mode) {
230419770b32SMel Gorman 	case MPOL_BIND:
230519770b32SMel Gorman 		/* Fall through */
23061da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2307fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
23081da177e4SLinus Torvalds 	case MPOL_PREFERRED:
23098970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
23108970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
23118970a63eSYisheng Xie 			return true;
231275719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
23131da177e4SLinus Torvalds 	default:
23141da177e4SLinus Torvalds 		BUG();
2315fcfb4dccSKOSAKI Motohiro 		return false;
23161da177e4SLinus Torvalds 	}
23171da177e4SLinus Torvalds }
23181da177e4SLinus Torvalds 
23191da177e4SLinus Torvalds /*
23201da177e4SLinus Torvalds  * Shared memory backing store policy support.
23211da177e4SLinus Torvalds  *
23221da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
23231da177e4SLinus Torvalds  * The policies are kept in a Red-Black tree linked from the inode.
23244a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
23251da177e4SLinus Torvalds  * for any accesses to the tree.
23261da177e4SLinus Torvalds  */
23271da177e4SLinus Torvalds 
23284a8c7bb5SNathan Zimmer /*
23294a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
23304a8c7bb5SNathan Zimmer  * reading or for writing
23314a8c7bb5SNathan Zimmer  */
23321da177e4SLinus Torvalds static struct sp_node *
23331da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
23341da177e4SLinus Torvalds {
23351da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds 	while (n) {
23381da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
23391da177e4SLinus Torvalds 
23401da177e4SLinus Torvalds 		if (start >= p->end)
23411da177e4SLinus Torvalds 			n = n->rb_right;
23421da177e4SLinus Torvalds 		else if (end <= p->start)
23431da177e4SLinus Torvalds 			n = n->rb_left;
23441da177e4SLinus Torvalds 		else
23451da177e4SLinus Torvalds 			break;
23461da177e4SLinus Torvalds 	}
23471da177e4SLinus Torvalds 	if (!n)
23481da177e4SLinus Torvalds 		return NULL;
23491da177e4SLinus Torvalds 	for (;;) {
23501da177e4SLinus Torvalds 		struct sp_node *w = NULL;
23511da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
23521da177e4SLinus Torvalds 		if (!prev)
23531da177e4SLinus Torvalds 			break;
23541da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
23551da177e4SLinus Torvalds 		if (w->end <= start)
23561da177e4SLinus Torvalds 			break;
23571da177e4SLinus Torvalds 		n = prev;
23581da177e4SLinus Torvalds 	}
23591da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
23601da177e4SLinus Torvalds }
23611da177e4SLinus Torvalds 
23624a8c7bb5SNathan Zimmer /*
23634a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
23644a8c7bb5SNathan Zimmer  * writing.
23654a8c7bb5SNathan Zimmer  */
23661da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
23671da177e4SLinus Torvalds {
23681da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
23691da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
23701da177e4SLinus Torvalds 	struct sp_node *nd;
23711da177e4SLinus Torvalds 
23721da177e4SLinus Torvalds 	while (*p) {
23731da177e4SLinus Torvalds 		parent = *p;
23741da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
23751da177e4SLinus Torvalds 		if (new->start < nd->start)
23761da177e4SLinus Torvalds 			p = &(*p)->rb_left;
23771da177e4SLinus Torvalds 		else if (new->end > nd->end)
23781da177e4SLinus Torvalds 			p = &(*p)->rb_right;
23791da177e4SLinus Torvalds 		else
23801da177e4SLinus Torvalds 			BUG();
23811da177e4SLinus Torvalds 	}
23821da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
23831da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2384140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
238545c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
23861da177e4SLinus Torvalds }
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds /* Find shared policy intersecting idx */
23891da177e4SLinus Torvalds struct mempolicy *
23901da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
23911da177e4SLinus Torvalds {
23921da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
23931da177e4SLinus Torvalds 	struct sp_node *sn;
23941da177e4SLinus Torvalds 
23951da177e4SLinus Torvalds 	if (!sp->root.rb_node)
23961da177e4SLinus Torvalds 		return NULL;
23974a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
23981da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
23991da177e4SLinus Torvalds 	if (sn) {
24001da177e4SLinus Torvalds 		mpol_get(sn->policy);
24011da177e4SLinus Torvalds 		pol = sn->policy;
24021da177e4SLinus Torvalds 	}
24034a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
24041da177e4SLinus Torvalds 	return pol;
24051da177e4SLinus Torvalds }
24061da177e4SLinus Torvalds 
240763f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
240863f74ca2SKOSAKI Motohiro {
240963f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
241063f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
241163f74ca2SKOSAKI Motohiro }
241263f74ca2SKOSAKI Motohiro 
2413771fb4d8SLee Schermerhorn /**
2414771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2415771fb4d8SLee Schermerhorn  *
2416b46e14acSFabian Frederick  * @page: page to be checked
2417b46e14acSFabian Frederick  * @vma: vm area where page mapped
2418b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2419771fb4d8SLee Schermerhorn  *
2420771fb4d8SLee Schermerhorn  * Lookup current policy node id for vma,addr and "compare to" page's
2421771fb4d8SLee Schermerhorn  * node id.
2422771fb4d8SLee Schermerhorn  *
2423771fb4d8SLee Schermerhorn  * Returns:
2424771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2425771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2426771fb4d8SLee Schermerhorn  *
2427771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2428771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2429771fb4d8SLee Schermerhorn  */
2430771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2431771fb4d8SLee Schermerhorn {
2432771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2433c33d6c06SMel Gorman 	struct zoneref *z;
2434771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2435771fb4d8SLee Schermerhorn 	unsigned long pgoff;
243690572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
243790572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
243898fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2439771fb4d8SLee Schermerhorn 	int ret = -1;
2440771fb4d8SLee Schermerhorn 
2441dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2442771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2443771fb4d8SLee Schermerhorn 		goto out;
2444771fb4d8SLee Schermerhorn 
2445771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2446771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2447771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2448771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
244998c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2450771fb4d8SLee Schermerhorn 		break;
2451771fb4d8SLee Schermerhorn 
2452771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2453771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2454771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2455771fb4d8SLee Schermerhorn 		else
2456771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2457771fb4d8SLee Schermerhorn 		break;
2458771fb4d8SLee Schermerhorn 
2459771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2460c33d6c06SMel Gorman 
2461771fb4d8SLee Schermerhorn 		/*
2462771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2463771fb4d8SLee Schermerhorn 		 * Use the current page if it is in the policy nodemask,
2464771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2465771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2466771fb4d8SLee Schermerhorn 		 */
2467771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2468771fb4d8SLee Schermerhorn 			goto out;
2469c33d6c06SMel Gorman 		z = first_zones_zonelist(
2470771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2471771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2472c33d6c06SMel Gorman 				&pol->v.nodes);
2473c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2474771fb4d8SLee Schermerhorn 		break;
2475771fb4d8SLee Schermerhorn 
2476771fb4d8SLee Schermerhorn 	default:
2477771fb4d8SLee Schermerhorn 		BUG();
2478771fb4d8SLee Schermerhorn 	}
24795606e387SMel Gorman 
24805606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2481e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
248290572890SPeter Zijlstra 		polnid = thisnid;
24835606e387SMel Gorman 
248410f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2485de1c9ce6SRik van Riel 			goto out;
2486de1c9ce6SRik van Riel 	}
2487e42c8ff2SMel Gorman 
2488771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2489771fb4d8SLee Schermerhorn 		ret = polnid;
2490771fb4d8SLee Schermerhorn out:
2491771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2492771fb4d8SLee Schermerhorn 
2493771fb4d8SLee Schermerhorn 	return ret;
2494771fb4d8SLee Schermerhorn }
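
/*
 * Illustrative sketch, not part of the original file: how a NUMA hinting
 * fault handler might act on the return value, roughly following the
 * do_numa_page() pattern in mm/memory.c.
 */
#if 0
static void example_numa_hint_fault(struct page *page,
				    struct vm_area_struct *vma,
				    unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid != -1)	/* -1 means the page is already well placed */
		migrate_misplaced_page(page, vma, target_nid);
}
#endif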
2495771fb4d8SLee Schermerhorn 
2496c11600e4SDavid Rientjes /*
2497c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2498c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2499c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2500c11600e4SDavid Rientjes  * policy.
2501c11600e4SDavid Rientjes  */
2502c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2503c11600e4SDavid Rientjes {
2504c11600e4SDavid Rientjes 	struct mempolicy *pol;
2505c11600e4SDavid Rientjes 
2506c11600e4SDavid Rientjes 	task_lock(task);
2507c11600e4SDavid Rientjes 	pol = task->mempolicy;
2508c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2509c11600e4SDavid Rientjes 	task_unlock(task);
2510c11600e4SDavid Rientjes 	mpol_put(pol);
2511c11600e4SDavid Rientjes }
2512c11600e4SDavid Rientjes 
25131da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
25141da177e4SLinus Torvalds {
2515140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
25161da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
251763f74ca2SKOSAKI Motohiro 	sp_free(n);
25181da177e4SLinus Torvalds }
25191da177e4SLinus Torvalds 
252042288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
252142288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
252242288fe3SMel Gorman {
252342288fe3SMel Gorman 	node->start = start;
252442288fe3SMel Gorman 	node->end = end;
252542288fe3SMel Gorman 	node->policy = pol;
252642288fe3SMel Gorman }
252742288fe3SMel Gorman 
2528dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2529dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
25301da177e4SLinus Torvalds {
2531869833f2SKOSAKI Motohiro 	struct sp_node *n;
2532869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
25331da177e4SLinus Torvalds 
2534869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
25351da177e4SLinus Torvalds 	if (!n)
25361da177e4SLinus Torvalds 		return NULL;
2537869833f2SKOSAKI Motohiro 
2538869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2539869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2540869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2541869833f2SKOSAKI Motohiro 		return NULL;
2542869833f2SKOSAKI Motohiro 	}
2543869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
254442288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2545869833f2SKOSAKI Motohiro 
25461da177e4SLinus Torvalds 	return n;
25471da177e4SLinus Torvalds }
25481da177e4SLinus Torvalds 
25491da177e4SLinus Torvalds /* Replace a policy range. */
25501da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
25511da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
25521da177e4SLinus Torvalds {
2553b22d127aSMel Gorman 	struct sp_node *n;
255442288fe3SMel Gorman 	struct sp_node *n_new = NULL;
255542288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2556b22d127aSMel Gorman 	int ret = 0;
25571da177e4SLinus Torvalds 
255842288fe3SMel Gorman restart:
25594a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
25601da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
25611da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
25621da177e4SLinus Torvalds 	while (n && n->start < end) {
25631da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
25641da177e4SLinus Torvalds 		if (n->start >= start) {
25651da177e4SLinus Torvalds 			if (n->end <= end)
25661da177e4SLinus Torvalds 				sp_delete(sp, n);
25671da177e4SLinus Torvalds 			else
25681da177e4SLinus Torvalds 				n->start = end;
25691da177e4SLinus Torvalds 		} else {
25701da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
25711da177e4SLinus Torvalds 			if (n->end > end) {
257242288fe3SMel Gorman 				if (!n_new)
257342288fe3SMel Gorman 					goto alloc_new;
257442288fe3SMel Gorman 
257542288fe3SMel Gorman 				*mpol_new = *n->policy;
257642288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
25777880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
25781da177e4SLinus Torvalds 				n->end = start;
25795ca39575SHillf Danton 				sp_insert(sp, n_new);
258042288fe3SMel Gorman 				n_new = NULL;
258142288fe3SMel Gorman 				mpol_new = NULL;
25821da177e4SLinus Torvalds 				break;
25831da177e4SLinus Torvalds 			} else
25841da177e4SLinus Torvalds 				n->end = start;
25851da177e4SLinus Torvalds 		}
25861da177e4SLinus Torvalds 		if (!next)
25871da177e4SLinus Torvalds 			break;
25881da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25891da177e4SLinus Torvalds 	}
25901da177e4SLinus Torvalds 	if (new)
25911da177e4SLinus Torvalds 		sp_insert(sp, new);
25924a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
259342288fe3SMel Gorman 	ret = 0;
259442288fe3SMel Gorman 
259542288fe3SMel Gorman err_out:
259642288fe3SMel Gorman 	if (mpol_new)
259742288fe3SMel Gorman 		mpol_put(mpol_new);
259842288fe3SMel Gorman 	if (n_new)
259942288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
260042288fe3SMel Gorman 
2601b22d127aSMel Gorman 	return ret;
260242288fe3SMel Gorman 
260342288fe3SMel Gorman alloc_new:
26044a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
260542288fe3SMel Gorman 	ret = -ENOMEM;
260642288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
260742288fe3SMel Gorman 	if (!n_new)
260842288fe3SMel Gorman 		goto err_out;
260942288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
261042288fe3SMel Gorman 	if (!mpol_new)
261142288fe3SMel Gorman 		goto err_out;
261242288fe3SMel Gorman 	goto restart;
26131da177e4SLinus Torvalds }
26141da177e4SLinus Torvalds 
261571fe804bSLee Schermerhorn /**
261671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
261771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
261871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
261971fe804bSLee Schermerhorn  *
262071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
262171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
262271fe804bSLee Schermerhorn  * This must be released on exit.
26234bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so GFP_KERNEL can be used.
262471fe804bSLee Schermerhorn  */
262571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
26267339ff83SRobin Holt {
262758568d2aSMiao Xie 	int ret;
262858568d2aSMiao Xie 
262971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
26304a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
26317339ff83SRobin Holt 
263271fe804bSLee Schermerhorn 	if (mpol) {
26337339ff83SRobin Holt 		struct vm_area_struct pvma;
263471fe804bSLee Schermerhorn 		struct mempolicy *new;
26354bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
26367339ff83SRobin Holt 
26374bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
26385c0c1654SLee Schermerhorn 			goto put_mpol;
263971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
264071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
264115d77835SLee Schermerhorn 		if (IS_ERR(new))
26420cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
264358568d2aSMiao Xie 
264458568d2aSMiao Xie 		task_lock(current);
26454bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
264658568d2aSMiao Xie 		task_unlock(current);
264715d77835SLee Schermerhorn 		if (ret)
26485c0c1654SLee Schermerhorn 			goto put_new;
264971fe804bSLee Schermerhorn 
265071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
26512c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
265271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
265371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
265415d77835SLee Schermerhorn 
26555c0c1654SLee Schermerhorn put_new:
265671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
26570cae3457SDan Carpenter free_scratch:
26584bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
26595c0c1654SLee Schermerhorn put_mpol:
26605c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
26617339ff83SRobin Holt 	}
26627339ff83SRobin Holt }
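
/*
 * Illustrative sketch, not part of the original file: seeding a new inode's
 * shared policy from a mount-time mempolicy, tmpfs-style.  The extra
 * mpol_get() matches the reference this function consumes.
 */
#if 0
static void example_seed_inode_policy(struct shared_policy *sp,
				      struct mempolicy *mount_mpol)
{
	mpol_get(mount_mpol);		/* ref is dropped by the callee */
	mpol_shared_policy_init(sp, mount_mpol);
}
#endif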
26637339ff83SRobin Holt 
26641da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
26651da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
26661da177e4SLinus Torvalds {
26671da177e4SLinus Torvalds 	int err;
26681da177e4SLinus Torvalds 	struct sp_node *new = NULL;
26691da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
26701da177e4SLinus Torvalds 
2671028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
26721da177e4SLinus Torvalds 		 vma->vm_pgoff,
267345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2674028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
267500ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
26761da177e4SLinus Torvalds 
26771da177e4SLinus Torvalds 	if (npol) {
26781da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
26791da177e4SLinus Torvalds 		if (!new)
26801da177e4SLinus Torvalds 			return -ENOMEM;
26811da177e4SLinus Torvalds 	}
26821da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
26831da177e4SLinus Torvalds 	if (err && new)
268463f74ca2SKOSAKI Motohiro 		sp_free(new);
26851da177e4SLinus Torvalds 	return err;
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
26891da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
26901da177e4SLinus Torvalds {
26911da177e4SLinus Torvalds 	struct sp_node *n;
26921da177e4SLinus Torvalds 	struct rb_node *next;
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds 	if (!p->root.rb_node)
26951da177e4SLinus Torvalds 		return;
26964a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
26971da177e4SLinus Torvalds 	next = rb_first(&p->root);
26981da177e4SLinus Torvalds 	while (next) {
26991da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27001da177e4SLinus Torvalds 		next = rb_next(&n->nd);
270163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
27021da177e4SLinus Torvalds 	}
27034a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
27041da177e4SLinus Torvalds }
27051da177e4SLinus Torvalds 
27061a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2707c297663cSMel Gorman static int __initdata numabalancing_override;
27081a687c2eSMel Gorman 
27091a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
27101a687c2eSMel Gorman {
27111a687c2eSMel Gorman 	bool numabalancing_default = false;
27121a687c2eSMel Gorman 
27131a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
27141a687c2eSMel Gorman 		numabalancing_default = true;
27151a687c2eSMel Gorman 
2716c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2717c297663cSMel Gorman 	if (numabalancing_override)
2718c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2719c297663cSMel Gorman 
2720b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2721756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2722c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
27231a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
27241a687c2eSMel Gorman 	}
27251a687c2eSMel Gorman }
27261a687c2eSMel Gorman 
27271a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
27281a687c2eSMel Gorman {
27291a687c2eSMel Gorman 	int ret = 0;
27301a687c2eSMel Gorman 	if (!str)
27311a687c2eSMel Gorman 		goto out;
27321a687c2eSMel Gorman 
27331a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2734c297663cSMel Gorman 		numabalancing_override = 1;
27351a687c2eSMel Gorman 		ret = 1;
27361a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2737c297663cSMel Gorman 		numabalancing_override = -1;
27381a687c2eSMel Gorman 		ret = 1;
27391a687c2eSMel Gorman 	}
27401a687c2eSMel Gorman out:
27411a687c2eSMel Gorman 	if (!ret)
27424a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
27431a687c2eSMel Gorman 
27441a687c2eSMel Gorman 	return ret;
27451a687c2eSMel Gorman }
27461a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
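
/*
 * Example (illustrative): booting with "numa_balancing=disable" forces
 * automatic NUMA balancing off, and "numa_balancing=enable" forces it on,
 * overriding the CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default applied in
 * check_numabalancing_enable() above.
 */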
27471a687c2eSMel Gorman #else
27481a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
27491a687c2eSMel Gorman {
27501a687c2eSMel Gorman }
27511a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
27521a687c2eSMel Gorman 
27531da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
27541da177e4SLinus Torvalds void __init numa_policy_init(void)
27551da177e4SLinus Torvalds {
2756b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2757b71636e2SPaul Mundt 	unsigned long largest = 0;
2758b71636e2SPaul Mundt 	int nid, prefer = 0;
2759b71636e2SPaul Mundt 
27601da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
27611da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
276220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
27631da177e4SLinus Torvalds 
27641da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
27651da177e4SLinus Torvalds 				     sizeof(struct sp_node),
276620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
27671da177e4SLinus Torvalds 
27685606e387SMel Gorman 	for_each_node(nid) {
27695606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
27705606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
27715606e387SMel Gorman 			.mode = MPOL_PREFERRED,
27725606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
27735606e387SMel Gorman 			.v = { .preferred_node = nid, },
27745606e387SMel Gorman 		};
27755606e387SMel Gorman 	}
27765606e387SMel Gorman 
2777b71636e2SPaul Mundt 	/*
2778b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2779b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2780b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2781b71636e2SPaul Mundt 	 */
2782b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
278301f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2784b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
27851da177e4SLinus Torvalds 
2786b71636e2SPaul Mundt 		/* Preserve the largest node */
2787b71636e2SPaul Mundt 		if (largest < total_pages) {
2788b71636e2SPaul Mundt 			largest = total_pages;
2789b71636e2SPaul Mundt 			prefer = nid;
2790b71636e2SPaul Mundt 		}
2791b71636e2SPaul Mundt 
2792b71636e2SPaul Mundt 		/* Interleave this node? */
2793b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2794b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2795b71636e2SPaul Mundt 	}
2796b71636e2SPaul Mundt 
2797b71636e2SPaul Mundt 	/* All too small, use the largest */
2798b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2799b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2800b71636e2SPaul Mundt 
2801028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2802b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
28031a687c2eSMel Gorman 
28041a687c2eSMel Gorman 	check_numabalancing_enable();
28051da177e4SLinus Torvalds }
28061da177e4SLinus Torvalds 
28078bccd85fSChristoph Lameter /* Reset policy of current process to default */
28081da177e4SLinus Torvalds void numa_default_policy(void)
28091da177e4SLinus Torvalds {
2810028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
28111da177e4SLinus Torvalds }
281268860ec1SPaul Jackson 
28134225399aSPaul Jackson /*
2814095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2815095f1fc4SLee Schermerhorn  */
2816095f1fc4SLee Schermerhorn 
2817095f1fc4SLee Schermerhorn /*
2818f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
28191a75a6c8SChristoph Lameter  */
2820345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2821345ace9cSLee Schermerhorn {
2822345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2823345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2824345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2825345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2826d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2827345ace9cSLee Schermerhorn };
28281a75a6c8SChristoph Lameter 
2829095f1fc4SLee Schermerhorn 
2830095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2831095f1fc4SLee Schermerhorn /**
2832f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2833095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
283471fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2835095f1fc4SLee Schermerhorn  *
2836095f1fc4SLee Schermerhorn  * Format of input:
2837095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2838095f1fc4SLee Schermerhorn  *
283971fe804bSLee Schermerhorn  * On success, returns 0, else 1
2840095f1fc4SLee Schermerhorn  */
2841a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2842095f1fc4SLee Schermerhorn {
284371fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2844f2a07f40SHugh Dickins 	unsigned short mode_flags;
284571fe804bSLee Schermerhorn 	nodemask_t nodes;
2846095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2847095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2848dedf2c73Szhong jiang 	int err = 1, mode;
2849095f1fc4SLee Schermerhorn 
2850c7a91bc7SDan Carpenter 	if (flags)
2851c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2852c7a91bc7SDan Carpenter 
2853095f1fc4SLee Schermerhorn 	if (nodelist) {
2854095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2855095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
285671fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2857095f1fc4SLee Schermerhorn 			goto out;
285801f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2859095f1fc4SLee Schermerhorn 			goto out;
286071fe804bSLee Schermerhorn 	} else
286171fe804bSLee Schermerhorn 		nodes_clear(nodes);
286271fe804bSLee Schermerhorn 
2863dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2864dedf2c73Szhong jiang 	if (mode < 0)
2865095f1fc4SLee Schermerhorn 		goto out;
2866095f1fc4SLee Schermerhorn 
286771fe804bSLee Schermerhorn 	switch (mode) {
2868095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
286971fe804bSLee Schermerhorn 		/*
287071fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
287171fe804bSLee Schermerhorn 		 */
2872095f1fc4SLee Schermerhorn 		if (nodelist) {
2873095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2874095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2875095f1fc4SLee Schermerhorn 				rest++;
2876926f2ae0SKOSAKI Motohiro 			if (*rest)
2877926f2ae0SKOSAKI Motohiro 				goto out;
2878095f1fc4SLee Schermerhorn 		}
2879095f1fc4SLee Schermerhorn 		break;
2880095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2881095f1fc4SLee Schermerhorn 		/*
2882095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2883095f1fc4SLee Schermerhorn 		 */
2884095f1fc4SLee Schermerhorn 		if (!nodelist)
288501f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
28863f226aa1SLee Schermerhorn 		break;
288771fe804bSLee Schermerhorn 	case MPOL_LOCAL:
28883f226aa1SLee Schermerhorn 		/*
288971fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
28903f226aa1SLee Schermerhorn 		 */
289171fe804bSLee Schermerhorn 		if (nodelist)
28923f226aa1SLee Schermerhorn 			goto out;
289371fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
28943f226aa1SLee Schermerhorn 		break;
2895413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2896413b43deSRavikiran G Thirumalai 		/*
2897413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2898413b43deSRavikiran G Thirumalai 		 */
2899413b43deSRavikiran G Thirumalai 		if (!nodelist)
2900413b43deSRavikiran G Thirumalai 			err = 0;
2901413b43deSRavikiran G Thirumalai 		goto out;
2902d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
290371fe804bSLee Schermerhorn 		/*
2904d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
290571fe804bSLee Schermerhorn 		 */
2906d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2907d69b2e63SKOSAKI Motohiro 			goto out;
2908095f1fc4SLee Schermerhorn 	}
2909095f1fc4SLee Schermerhorn 
291071fe804bSLee Schermerhorn 	mode_flags = 0;
2911095f1fc4SLee Schermerhorn 	if (flags) {
2912095f1fc4SLee Schermerhorn 		/*
2913095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2914095f1fc4SLee Schermerhorn 		 * mode flags.
2915095f1fc4SLee Schermerhorn 		 */
2916095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
291771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2918095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
291971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2920095f1fc4SLee Schermerhorn 		else
2921926f2ae0SKOSAKI Motohiro 			goto out;
2922095f1fc4SLee Schermerhorn 	}
292371fe804bSLee Schermerhorn 
292471fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
292571fe804bSLee Schermerhorn 	if (IS_ERR(new))
2926926f2ae0SKOSAKI Motohiro 		goto out;
2927926f2ae0SKOSAKI Motohiro 
2928f2a07f40SHugh Dickins 	/*
2929f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2930f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2931f2a07f40SHugh Dickins 	 */
2932f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2933f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2934f2a07f40SHugh Dickins 	else if (nodelist)
2935f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2936f2a07f40SHugh Dickins 	else
2937f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2938f2a07f40SHugh Dickins 
2939f2a07f40SHugh Dickins 	/*
2940f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2941f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2942f2a07f40SHugh Dickins 	 */
2943e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2944f2a07f40SHugh Dickins 
2945926f2ae0SKOSAKI Motohiro 	err = 0;
294671fe804bSLee Schermerhorn 
2947095f1fc4SLee Schermerhorn out:
2948095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2949095f1fc4SLee Schermerhorn 	if (nodelist)
2950095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2951095f1fc4SLee Schermerhorn 	if (flags)
2952095f1fc4SLee Schermerhorn 		*--flags = '=';
295371fe804bSLee Schermerhorn 	if (!err)
295471fe804bSLee Schermerhorn 		*mpol = new;
2955095f1fc4SLee Schermerhorn 	return err;
2956095f1fc4SLee Schermerhorn }
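
/*
 * Illustrative sketch, not part of the original file: parsing a tmpfs
 * "mpol=" style mount option.  The option string is only an example.
 */
#if 0
static int example_parse_mount_policy(void)
{
	char str[] = "interleave:0-3";
	struct mempolicy *mpol;

	if (mpol_parse_str(str, &mpol))
		return -EINVAL;		/* non-zero means parse failure */
	/* ... stash mpol, e.g. for a later mpol_shared_policy_init() ... */
	mpol_put(mpol);
	return 0;
}
#endif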
2957095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2958095f1fc4SLee Schermerhorn 
295971fe804bSLee Schermerhorn /**
296071fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
296171fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
296271fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
296371fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
296471fe804bSLee Schermerhorn  *
2965948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2966948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2967948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
29681a75a6c8SChristoph Lameter  */
2969948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
29701a75a6c8SChristoph Lameter {
29711a75a6c8SChristoph Lameter 	char *p = buffer;
2972948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2973948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2974948927eeSDavid Rientjes 	unsigned short flags = 0;
29751a75a6c8SChristoph Lameter 
29768790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2977bea904d5SLee Schermerhorn 		mode = pol->mode;
2978948927eeSDavid Rientjes 		flags = pol->flags;
2979948927eeSDavid Rientjes 	}
2980bea904d5SLee Schermerhorn 
29811a75a6c8SChristoph Lameter 	switch (mode) {
29821a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
29831a75a6c8SChristoph Lameter 		break;
29841a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2985fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2986f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
298753f2556bSLee Schermerhorn 		else
2988fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
29891a75a6c8SChristoph Lameter 		break;
29901a75a6c8SChristoph Lameter 	case MPOL_BIND:
29911a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
29921a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
29931a75a6c8SChristoph Lameter 		break;
29941a75a6c8SChristoph Lameter 	default:
2995948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2996948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2997948927eeSDavid Rientjes 		return;
29981a75a6c8SChristoph Lameter 	}
29991a75a6c8SChristoph Lameter 
3000b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
30011a75a6c8SChristoph Lameter 
3002fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3003948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3004f5b087b5SDavid Rientjes 
30052291990aSLee Schermerhorn 		/*
30062291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
30072291990aSLee Schermerhorn 		 */
3008f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
30092291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
30102291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
30112291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3012f5b087b5SDavid Rientjes 	}
3013f5b087b5SDavid Rientjes 
30149e763e0fSTejun Heo 	if (!nodes_empty(nodes))
30159e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
30169e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
30171a75a6c8SChristoph Lameter }
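
/*
 * Illustrative sketch, not part of the original file: formatting a policy
 * for procfs output, the way show_numa_map() does for
 * /proc/<pid>/numa_maps.
 */
#if 0
static void example_show_policy(struct seq_file *m, struct mempolicy *pol)
{
	char buffer[64];

	mpol_to_str(buffer, sizeof(buffer), pol);
	seq_printf(m, "%s", buffer);
}
#endif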
3018