xref: /openbmc/linux/mm/mempolicy.c (revision a38a59fdfa10be55d08e4530923d950e739ac6a2)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support multiple policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For the process policy a per-process
201da177e4SLinus Torvalds  *                counter is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                and proceeding to the last. It would be better if bind
268bccd85fSChristoph Lameter  *                truly restricted the allocation to the given memory nodes.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case, NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process's context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When the process
481da177e4SLinus Torvalds  * policy is used, it is not remembered across swap out/swap in.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use the default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
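/*
 * Illustrative userspace sketch (not kernel code): the policies above are
 * normally selected with the set_mempolicy(2) and mbind(2) system calls,
 * e.g. via the <numaif.h> wrappers shipped with libnuma.  A minimal,
 * hypothetical example, assuming nodes 0 and 1 exist and addr/len describe
 * an existing mapping:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	// interleave this task's future allocations across nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	unsigned long node0 = 1UL << 0;
 *	// bind the mapping [addr, addr + len) to node 0 only, moving
 *	// already-allocated pages if necessary
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), MPOL_MF_MOVE);
 */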
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger OOM much faster and the
681da177e4SLinus Torvalds    kernel is not always graceful about that.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1077c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1081da177e4SLinus Torvalds 
10962695a84SNick Piggin #include "internal.h"
11062695a84SNick Piggin 
11138e35860SChristoph Lameter /* Internal flags */
112dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11338e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
114dc9aa5b9SChristoph Lameter 
115fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
116fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1171da177e4SLinus Torvalds 
1181da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1191da177e4SLinus Torvalds    policied. */
1206267276fSChristoph Lameter enum zone_type policy_zone = 0;
1211da177e4SLinus Torvalds 
122bea904d5SLee Schermerhorn /*
123bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
124bea904d5SLee Schermerhorn  */
125e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1261da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1277858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1281da177e4SLinus Torvalds };
1291da177e4SLinus Torvalds 
1305606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1315606e387SMel Gorman 
132b2ca916cSDan Williams /**
133b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
134f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
135b2ca916cSDan Williams  *
136b2ca916cSDan Williams  * Look up the next closest node by distance if @node is not online.
137b2ca916cSDan Williams  */
138b2ca916cSDan Williams int numa_map_to_online_node(int node)
139b2ca916cSDan Williams {
1404fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
141b2ca916cSDan Williams 
1424fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1434fcbe96eSDan Williams 		return node;
144b2ca916cSDan Williams 
145b2ca916cSDan Williams 	min_node = node;
146b2ca916cSDan Williams 	for_each_online_node(n) {
147b2ca916cSDan Williams 		dist = node_distance(node, n);
148b2ca916cSDan Williams 		if (dist < min_dist) {
149b2ca916cSDan Williams 			min_dist = dist;
150b2ca916cSDan Williams 			min_node = n;
151b2ca916cSDan Williams 		}
152b2ca916cSDan Williams 	}
153b2ca916cSDan Williams 
154b2ca916cSDan Williams 	return min_node;
155b2ca916cSDan Williams }
156b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
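
/*
 * Illustrative example (not taken from this file): a caller holding a
 * possibly-offline node hint, such as a device's firmware-reported node,
 * could sanitize it before allocating.  Hypothetical sketch:
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */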
157b2ca916cSDan Williams 
15874d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1595606e387SMel Gorman {
1605606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
161f15ca78eSOleg Nesterov 	int node;
1625606e387SMel Gorman 
163f15ca78eSOleg Nesterov 	if (pol)
164f15ca78eSOleg Nesterov 		return pol;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	node = numa_node_id();
1671da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1681da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
169f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
170f15ca78eSOleg Nesterov 		if (pol->mode)
171f15ca78eSOleg Nesterov 			return pol;
1721da6f0e1SJianguo Wu 	}
1735606e387SMel Gorman 
174f15ca78eSOleg Nesterov 	return &default_policy;
1755606e387SMel Gorman }
1765606e387SMel Gorman 
17737012946SDavid Rientjes static const struct mempolicy_operations {
17837012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
179213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18037012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18137012946SDavid Rientjes 
182f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
183f5b087b5SDavid Rientjes {
1846d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1854c50bc01SDavid Rientjes }
1864c50bc01SDavid Rientjes 
1874c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1884c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1894c50bc01SDavid Rientjes {
1904c50bc01SDavid Rientjes 	nodemask_t tmp;
1914c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1924c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
193f5b087b5SDavid Rientjes }
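
/*
 * Worked example (for illustration only): with *orig = {0,2} and
 * *rel = {4,5,6}, nodes_fold() wraps orig modulo nodes_weight(*rel) == 3,
 * leaving {0,2}; nodes_onto() then maps positions 0 and 2 onto the first
 * and third set bits of *rel, so *ret = {4,6}.
 */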
194f5b087b5SDavid Rientjes 
19537012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
19637012946SDavid Rientjes {
19737012946SDavid Rientjes 	if (nodes_empty(*nodes))
19837012946SDavid Rientjes 		return -EINVAL;
199269fbe72SBen Widawsky 	pol->nodes = *nodes;
20037012946SDavid Rientjes 	return 0;
20137012946SDavid Rientjes }
20237012946SDavid Rientjes 
20337012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20437012946SDavid Rientjes {
2057858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2067858d7bcSFeng Tang 		return -EINVAL;
207269fbe72SBen Widawsky 
208269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
209269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21037012946SDavid Rientjes 	return 0;
21137012946SDavid Rientjes }
21237012946SDavid Rientjes 
213b27abaccSDave Hansen static int mpol_new_preferred_many(struct mempolicy *pol, const nodemask_t *nodes)
214b27abaccSDave Hansen {
215b27abaccSDave Hansen 	if (nodes_empty(*nodes))
216b27abaccSDave Hansen 		return -EINVAL;
217b27abaccSDave Hansen 	pol->nodes = *nodes;
218b27abaccSDave Hansen 	return 0;
219b27abaccSDave Hansen }
220b27abaccSDave Hansen 
22137012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
22237012946SDavid Rientjes {
223859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
22437012946SDavid Rientjes 		return -EINVAL;
225269fbe72SBen Widawsky 	pol->nodes = *nodes;
22637012946SDavid Rientjes 	return 0;
22737012946SDavid Rientjes }
22837012946SDavid Rientjes 
22958568d2aSMiao Xie /*
23058568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
23158568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2327858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
23358568d2aSMiao Xie  *
23458568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
235c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
23658568d2aSMiao Xie  */
2374bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2384bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
23958568d2aSMiao Xie {
24058568d2aSMiao Xie 	int ret;
24158568d2aSMiao Xie 
2427858d7bcSFeng Tang 	/*
2437858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2447858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2457858d7bcSFeng Tang 	 * constructor.
2467858d7bcSFeng Tang 	 */
2477858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
24858568d2aSMiao Xie 		return 0;
2497858d7bcSFeng Tang 
25001f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2514bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
25201f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
25358568d2aSMiao Xie 
25458568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2557858d7bcSFeng Tang 
25658568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2574bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
25858568d2aSMiao Xie 	else
2594bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2604bfc4495SKAMEZAWA Hiroyuki 
26158568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
26258568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
26358568d2aSMiao Xie 	else
2647858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
26558568d2aSMiao Xie 
2664bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
26758568d2aSMiao Xie 	return ret;
26858568d2aSMiao Xie }
26958568d2aSMiao Xie 
27058568d2aSMiao Xie /*
27158568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
27258568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set the nodes.
27358568d2aSMiao Xie  */
274028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
275028fec41SDavid Rientjes 				  nodemask_t *nodes)
2761da177e4SLinus Torvalds {
2771da177e4SLinus Torvalds 	struct mempolicy *policy;
2781da177e4SLinus Torvalds 
279028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
28000ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
281140d5a49SPaul Mundt 
2823e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2833e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
28437012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
285d3a71033SLee Schermerhorn 		return NULL;
28637012946SDavid Rientjes 	}
2873e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2883e1f0645SDavid Rientjes 
2893e1f0645SDavid Rientjes 	/*
2903e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2913e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2923e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2933e1f0645SDavid Rientjes 	 */
2943e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2953e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2963e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2973e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2983e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2997858d7bcSFeng Tang 
3007858d7bcSFeng Tang 			mode = MPOL_LOCAL;
3013e1f0645SDavid Rientjes 		}
302479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
3038d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
3048d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
3058d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
306479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
3073e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
3083e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
3091da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
3101da177e4SLinus Torvalds 	if (!policy)
3111da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
3121da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
31345c4745aSLee Schermerhorn 	policy->mode = mode;
31437012946SDavid Rientjes 	policy->flags = flags;
3153e1f0645SDavid Rientjes 
31637012946SDavid Rientjes 	return policy;
31737012946SDavid Rientjes }
31837012946SDavid Rientjes 
31952cd3b07SLee Schermerhorn /* Slow path of an mpol destructor. */
32052cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
32152cd3b07SLee Schermerhorn {
32252cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
32352cd3b07SLee Schermerhorn 		return;
32452cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
32552cd3b07SLee Schermerhorn }
32652cd3b07SLee Schermerhorn 
327213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
32837012946SDavid Rientjes {
32937012946SDavid Rientjes }
33037012946SDavid Rientjes 
331213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3321d0d2680SDavid Rientjes {
3331d0d2680SDavid Rientjes 	nodemask_t tmp;
3341d0d2680SDavid Rientjes 
33537012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
33637012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
33737012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
33837012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3391d0d2680SDavid Rientjes 	else {
340269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
341213980c0SVlastimil Babka 								*nodes);
34229b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3431d0d2680SDavid Rientjes 	}
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie 	if (nodes_empty(tmp))
346708c1bbcSMiao Xie 		tmp = *nodes;
347708c1bbcSMiao Xie 
348269fbe72SBen Widawsky 	pol->nodes = tmp;
34937012946SDavid Rientjes }
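
/*
 * Example (for illustration only): a policy created with
 * w.user_nodemask = {0,1} and rebound to new cpuset mems of {1,2} ends up
 * with pol->nodes = {1} under MPOL_F_STATIC_NODES (plain intersection) but
 * pol->nodes = {1,2} under MPOL_F_RELATIVE_NODES (remapped onto the new
 * set); with neither flag the old nodes are nodes_remap()ed from the old
 * cpuset mems_allowed to the new one.
 */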
35037012946SDavid Rientjes 
35137012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
352213980c0SVlastimil Babka 						const nodemask_t *nodes)
35337012946SDavid Rientjes {
35437012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3551d0d2680SDavid Rientjes }
35637012946SDavid Rientjes 
357708c1bbcSMiao Xie /*
358708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
359708c1bbcSMiao Xie  *
360c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
361213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
362213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
363708c1bbcSMiao Xie  */
364213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
36537012946SDavid Rientjes {
36637012946SDavid Rientjes 	if (!pol)
36737012946SDavid Rientjes 		return;
3687858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
36937012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
37037012946SDavid Rientjes 		return;
371708c1bbcSMiao Xie 
372213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3731d0d2680SDavid Rientjes }
3741d0d2680SDavid Rientjes 
3751d0d2680SDavid Rientjes /*
3761d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires the task
3771d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
37858568d2aSMiao Xie  *
37958568d2aSMiao Xie  * Called with task's alloc_lock held.
3801d0d2680SDavid Rientjes  */
3811d0d2680SDavid Rientjes 
382213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3831d0d2680SDavid Rientjes {
384213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3851d0d2680SDavid Rientjes }
3861d0d2680SDavid Rientjes 
3871d0d2680SDavid Rientjes /*
3881d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3891d0d2680SDavid Rientjes  *
390c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3911d0d2680SDavid Rientjes  */
3921d0d2680SDavid Rientjes 
3931d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3941d0d2680SDavid Rientjes {
3951d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3961d0d2680SDavid Rientjes 
397d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
3981d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
399213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
400d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
4011d0d2680SDavid Rientjes }
4021d0d2680SDavid Rientjes 
40337012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
40437012946SDavid Rientjes 	[MPOL_DEFAULT] = {
40537012946SDavid Rientjes 		.rebind = mpol_rebind_default,
40637012946SDavid Rientjes 	},
40737012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
40837012946SDavid Rientjes 		.create = mpol_new_interleave,
40937012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
41037012946SDavid Rientjes 	},
41137012946SDavid Rientjes 	[MPOL_PREFERRED] = {
41237012946SDavid Rientjes 		.create = mpol_new_preferred,
41337012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
41437012946SDavid Rientjes 	},
41537012946SDavid Rientjes 	[MPOL_BIND] = {
41637012946SDavid Rientjes 		.create = mpol_new_bind,
41737012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
41837012946SDavid Rientjes 	},
4197858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4207858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4217858d7bcSFeng Tang 	},
422b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
423b27abaccSDave Hansen 		.create = mpol_new_preferred_many,
424b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
425b27abaccSDave Hansen 	},
42637012946SDavid Rientjes };
42737012946SDavid Rientjes 
428a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
429fc301289SChristoph Lameter 				unsigned long flags);
4301a75a6c8SChristoph Lameter 
4316f4576e3SNaoya Horiguchi struct queue_pages {
4326f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4336f4576e3SNaoya Horiguchi 	unsigned long flags;
4346f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
435f18da660SLi Xinhai 	unsigned long start;
436f18da660SLi Xinhai 	unsigned long end;
437f18da660SLi Xinhai 	struct vm_area_struct *first;
4386f4576e3SNaoya Horiguchi };
4396f4576e3SNaoya Horiguchi 
44098094945SNaoya Horiguchi /*
44188aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
44288aaa2a1SNaoya Horiguchi  *
44388aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
44488aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, the check is inverted:
44588aaa2a1SNaoya Horiguchi  * the page qualifies only if its nid is *not* in qp->nmask.
44688aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
44788aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
44888aaa2a1SNaoya Horiguchi {
44988aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
45088aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
45188aaa2a1SNaoya Horiguchi 
45288aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
45388aaa2a1SNaoya Horiguchi }
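
/*
 * Example (for illustration only): with *qp->nmask = {0,1} and
 * MPOL_MF_INVERT set, a page on node 2 is "required" (it will be queued),
 * while a page on node 0 is not; without MPOL_MF_INVERT the opposite holds.
 */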
45488aaa2a1SNaoya Horiguchi 
455a7f40cfeSYang Shi /*
456d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
457e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or a
458e5947d23SYang Shi  *     special page was encountered, i.e. the huge zero page.
459d8835445SYang Shi  * 1 - an unmovable page was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
460d8835445SYang Shi  *     specified.
461d8835445SYang Shi  * 2 - THP was split.
462d8835445SYang Shi  * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
463d8835445SYang Shi  *        specified and an existing page was already on a node that does
464d8835445SYang Shi  *        not follow the policy.
465a7f40cfeSYang Shi  */
466c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
467c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
468959a7e13SJules Irenge 	__releases(ptl)
469c8633798SNaoya Horiguchi {
470c8633798SNaoya Horiguchi 	int ret = 0;
471c8633798SNaoya Horiguchi 	struct page *page;
472c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
473c8633798SNaoya Horiguchi 	unsigned long flags;
474c8633798SNaoya Horiguchi 
475c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
476a7f40cfeSYang Shi 		ret = -EIO;
477c8633798SNaoya Horiguchi 		goto unlock;
478c8633798SNaoya Horiguchi 	}
479c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
480c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
481c8633798SNaoya Horiguchi 		spin_unlock(ptl);
482e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
483c8633798SNaoya Horiguchi 		goto out;
484c8633798SNaoya Horiguchi 	}
485d8835445SYang Shi 	if (!queue_pages_required(page, qp))
486c8633798SNaoya Horiguchi 		goto unlock;
487c8633798SNaoya Horiguchi 
488c8633798SNaoya Horiguchi 	flags = qp->flags;
489c8633798SNaoya Horiguchi 	/* go to thp migration */
490a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
491a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
492a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
493d8835445SYang Shi 			ret = 1;
494a7f40cfeSYang Shi 			goto unlock;
495a7f40cfeSYang Shi 		}
496a7f40cfeSYang Shi 	} else
497a7f40cfeSYang Shi 		ret = -EIO;
498c8633798SNaoya Horiguchi unlock:
499c8633798SNaoya Horiguchi 	spin_unlock(ptl);
500c8633798SNaoya Horiguchi out:
501c8633798SNaoya Horiguchi 	return ret;
502c8633798SNaoya Horiguchi }
503c8633798SNaoya Horiguchi 
50488aaa2a1SNaoya Horiguchi /*
50598094945SNaoya Horiguchi  * Scan through the pages, checking whether they satisfy the required
50698094945SNaoya Horiguchi  * conditions, and move them to the pagelist if they do.
507d8835445SYang Shi  *
508d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
509e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or a
510e5947d23SYang Shi  *     special page was encountered, i.e. the zero page.
511d8835445SYang Shi  * 1 - an unmovable page was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
512d8835445SYang Shi  *     specified.
513d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
514d8835445SYang Shi  *        on a node that does not follow the policy.
51598094945SNaoya Horiguchi  */
5166f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
5176f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5181da177e4SLinus Torvalds {
5196f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5206f4576e3SNaoya Horiguchi 	struct page *page;
5216f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5226f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
523c8633798SNaoya Horiguchi 	int ret;
524d8835445SYang Shi 	bool has_unmovable = false;
5253f088420SShijie Luo 	pte_t *pte, *mapped_pte;
526705e87c0SHugh Dickins 	spinlock_t *ptl;
527941150a3SHugh Dickins 
528c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
529c8633798SNaoya Horiguchi 	if (ptl) {
530c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
531d8835445SYang Shi 		if (ret != 2)
532a7f40cfeSYang Shi 			return ret;
533248db92dSKirill A. Shutemov 	}
534d8835445SYang Shi 	/* THP was split, fall through to pte walk */
53591612e0dSHugh Dickins 
536337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
537337d9abfSNaoya Horiguchi 		return 0;
53894723aafSMichal Hocko 
5393f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5406f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
54191612e0dSHugh Dickins 		if (!pte_present(*pte))
54291612e0dSHugh Dickins 			continue;
5436aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5446aab341eSLinus Torvalds 		if (!page)
54591612e0dSHugh Dickins 			continue;
546053837fcSNick Piggin 		/*
54762b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
54862b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
549053837fcSNick Piggin 		 */
550b79bc0a0SHugh Dickins 		if (PageReserved(page))
551f4598c8bSChristoph Lameter 			continue;
55288aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
55338e35860SChristoph Lameter 			continue;
554a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
555d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
556d8835445SYang Shi 			if (!vma_migratable(vma)) {
557d8835445SYang Shi 				has_unmovable = true;
558a7f40cfeSYang Shi 				break;
559d8835445SYang Shi 			}
560a53190a4SYang Shi 
561a53190a4SYang Shi 			/*
562a53190a4SYang Shi 			 * Do not abort immediately since there may be
563a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  Still
564a53190a4SYang Shi 			 * need to migrate the other LRU pages.
565a53190a4SYang Shi 			 */
566a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
567a53190a4SYang Shi 				has_unmovable = true;
568a7f40cfeSYang Shi 		} else
569a7f40cfeSYang Shi 			break;
5706f4576e3SNaoya Horiguchi 	}
5713f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5726f4576e3SNaoya Horiguchi 	cond_resched();
573d8835445SYang Shi 
574d8835445SYang Shi 	if (has_unmovable)
575d8835445SYang Shi 		return 1;
576d8835445SYang Shi 
577a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
57891612e0dSHugh Dickins }
57991612e0dSHugh Dickins 
5806f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5816f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5826f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
583e2d8cf40SNaoya Horiguchi {
584dcf17635SLi Xinhai 	int ret = 0;
585e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5866f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
587dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
588e2d8cf40SNaoya Horiguchi 	struct page *page;
589cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
590d4c54919SNaoya Horiguchi 	pte_t entry;
591e2d8cf40SNaoya Horiguchi 
5926f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5936f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
594d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
595d4c54919SNaoya Horiguchi 		goto unlock;
596d4c54919SNaoya Horiguchi 	page = pte_page(entry);
59788aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
598e2d8cf40SNaoya Horiguchi 		goto unlock;
599dcf17635SLi Xinhai 
600dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
601dcf17635SLi Xinhai 		/*
602dcf17635SLi Xinhai 		 * STRICT alone means only detect misplaced pages; there is
603dcf17635SLi Xinhai 		 * no need to check the other vmas further.
604dcf17635SLi Xinhai 		 */
605dcf17635SLi Xinhai 		ret = -EIO;
606dcf17635SLi Xinhai 		goto unlock;
607dcf17635SLi Xinhai 	}
608dcf17635SLi Xinhai 
609dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
610dcf17635SLi Xinhai 		/*
611dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would
612dcf17635SLi Xinhai 		 * have stopped walking the current vma.
613dcf17635SLi Xinhai 		 * Detect the misplaced page, but still allow migrating pages
614dcf17635SLi Xinhai 		 * which have already been queued.
615dcf17635SLi Xinhai 		 */
616dcf17635SLi Xinhai 		ret = 1;
617dcf17635SLi Xinhai 		goto unlock;
618dcf17635SLi Xinhai 	}
619dcf17635SLi Xinhai 
620e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
621e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
622dcf17635SLi Xinhai 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
623dcf17635SLi Xinhai 		if (!isolate_huge_page(page, qp->pagelist) &&
624dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
625dcf17635SLi Xinhai 			/*
626dcf17635SLi Xinhai 			 * Failed to isolate the page, but still allow
627dcf17635SLi Xinhai 			 * migrating pages which have already been queued.
628dcf17635SLi Xinhai 			 */
629dcf17635SLi Xinhai 			ret = 1;
630dcf17635SLi Xinhai 	}
631e2d8cf40SNaoya Horiguchi unlock:
632cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
633e2d8cf40SNaoya Horiguchi #else
634e2d8cf40SNaoya Horiguchi 	BUG();
635e2d8cf40SNaoya Horiguchi #endif
636dcf17635SLi Xinhai 	return ret;
6371da177e4SLinus Torvalds }
6381da177e4SLinus Torvalds 
6395877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
640b24f53a0SLee Schermerhorn /*
6414b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses as inaccessible.
6424b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6434b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6444b10e7d5SMel Gorman  *
6454b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6464b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6474b10e7d5SMel Gorman  * changes to the core.
648b24f53a0SLee Schermerhorn  */
6494b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6504b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
651b24f53a0SLee Schermerhorn {
6524b10e7d5SMel Gorman 	int nr_updated;
653b24f53a0SLee Schermerhorn 
65458705444SPeter Xu 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
65503c5a6e1SMel Gorman 	if (nr_updated)
65603c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
657b24f53a0SLee Schermerhorn 
6584b10e7d5SMel Gorman 	return nr_updated;
659b24f53a0SLee Schermerhorn }
660b24f53a0SLee Schermerhorn #else
661b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
662b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
663b24f53a0SLee Schermerhorn {
664b24f53a0SLee Schermerhorn 	return 0;
665b24f53a0SLee Schermerhorn }
6665877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
667b24f53a0SLee Schermerhorn 
6686f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6696f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6701da177e4SLinus Torvalds {
6716f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6726f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6735b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6746f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
675dc9aa5b9SChristoph Lameter 
676a18b3ac2SLi Xinhai 	/* range check first */
677ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
678f18da660SLi Xinhai 
679f18da660SLi Xinhai 	if (!qp->first) {
680f18da660SLi Xinhai 		qp->first = vma;
681f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
682f18da660SLi Xinhai 			(qp->start < vma->vm_start))
683f18da660SLi Xinhai 			/* hole at head side of range */
684a18b3ac2SLi Xinhai 			return -EFAULT;
685a18b3ac2SLi Xinhai 	}
686f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
687f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
688f18da660SLi Xinhai 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
689f18da660SLi Xinhai 		/* hole at middle or tail of range */
690f18da660SLi Xinhai 		return -EFAULT;
691a18b3ac2SLi Xinhai 
692a7f40cfeSYang Shi 	/*
693a7f40cfeSYang Shi 	 * Need to check MPOL_MF_STRICT so that -EIO can be returned if
694a7f40cfeSYang Shi 	 * needed, regardless of vma_migratable
695a7f40cfeSYang Shi 	 */
696a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
697a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
69848684a65SNaoya Horiguchi 		return 1;
69948684a65SNaoya Horiguchi 
7005b952b3cSAndi Kleen 	if (endvma > end)
7015b952b3cSAndi Kleen 		endvma = end;
702b24f53a0SLee Schermerhorn 
703b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
7042c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
7053122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
7064355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
707b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
7086f4576e3SNaoya Horiguchi 		return 1;
709b24f53a0SLee Schermerhorn 	}
710b24f53a0SLee Schermerhorn 
7116f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
712a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7136f4576e3SNaoya Horiguchi 		return 0;
7146f4576e3SNaoya Horiguchi 	return 1;
7156f4576e3SNaoya Horiguchi }
716b24f53a0SLee Schermerhorn 
7177b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7187b86ac33SChristoph Hellwig 	.hugetlb_entry		= queue_pages_hugetlb,
7197b86ac33SChristoph Hellwig 	.pmd_entry		= queue_pages_pte_range,
7207b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7217b86ac33SChristoph Hellwig };
7227b86ac33SChristoph Hellwig 
7236f4576e3SNaoya Horiguchi /*
7246f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7256f4576e3SNaoya Horiguchi  *
7266f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7276f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued on the pagelist that is
728d8835445SYang Shi  * passed via @private.
729d8835445SYang Shi  *
730d8835445SYang Shi  * queue_pages_range() has three possible return values:
731d8835445SYang Shi  * 1 - an unmovable page was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
732d8835445SYang Shi  *     specified.
733d8835445SYang Shi  * 0 - pages were queued successfully, or there was no misplaced page.
734a85dfc30SYang Shi  * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
735a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
736a85dfc30SYang Shi  *         your accessible address space (-EFAULT).
7376f4576e3SNaoya Horiguchi  */
7386f4576e3SNaoya Horiguchi static int
7396f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7406f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7416f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7426f4576e3SNaoya Horiguchi {
743f18da660SLi Xinhai 	int err;
7446f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7456f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7466f4576e3SNaoya Horiguchi 		.flags = flags,
7476f4576e3SNaoya Horiguchi 		.nmask = nodes,
748f18da660SLi Xinhai 		.start = start,
749f18da660SLi Xinhai 		.end = end,
750f18da660SLi Xinhai 		.first = NULL,
7516f4576e3SNaoya Horiguchi 	};
7526f4576e3SNaoya Horiguchi 
753f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
754f18da660SLi Xinhai 
755f18da660SLi Xinhai 	if (!qp.first)
756f18da660SLi Xinhai 		/* whole range in hole */
757f18da660SLi Xinhai 		err = -EFAULT;
758f18da660SLi Xinhai 
759f18da660SLi Xinhai 	return err;
7601da177e4SLinus Torvalds }
7611da177e4SLinus Torvalds 
762869833f2SKOSAKI Motohiro /*
763869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
764c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
765869833f2SKOSAKI Motohiro  */
766869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
767869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7688d34694cSKOSAKI Motohiro {
769869833f2SKOSAKI Motohiro 	int err;
770869833f2SKOSAKI Motohiro 	struct mempolicy *old;
771869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7728d34694cSKOSAKI Motohiro 
7738d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7748d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7758d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7768d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7778d34694cSKOSAKI Motohiro 
778869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
779869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
780869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
781869833f2SKOSAKI Motohiro 
782869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7838d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
784869833f2SKOSAKI Motohiro 		if (err)
785869833f2SKOSAKI Motohiro 			goto err_out;
7868d34694cSKOSAKI Motohiro 	}
787869833f2SKOSAKI Motohiro 
788869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
789c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
790869833f2SKOSAKI Motohiro 	mpol_put(old);
791869833f2SKOSAKI Motohiro 
792869833f2SKOSAKI Motohiro 	return 0;
793869833f2SKOSAKI Motohiro  err_out:
794869833f2SKOSAKI Motohiro 	mpol_put(new);
7958d34694cSKOSAKI Motohiro 	return err;
7968d34694cSKOSAKI Motohiro }
7978d34694cSKOSAKI Motohiro 
7981da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7999d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
8009d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
8011da177e4SLinus Torvalds {
8021da177e4SLinus Torvalds 	struct vm_area_struct *next;
8039d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
8049d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
8059d8cebd4SKOSAKI Motohiro 	int err = 0;
806e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
8079d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
8089d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
8091da177e4SLinus Torvalds 
810097d5910SLinus Torvalds 	vma = find_vma(mm, start);
811f18da660SLi Xinhai 	VM_BUG_ON(!vma);
8129d8cebd4SKOSAKI Motohiro 
813097d5910SLinus Torvalds 	prev = vma->vm_prev;
814e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
815e26a5114SKOSAKI Motohiro 		prev = vma;
816e26a5114SKOSAKI Motohiro 
8179d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
8181da177e4SLinus Torvalds 		next = vma->vm_next;
8199d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
8209d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
8219d8cebd4SKOSAKI Motohiro 
822e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
823e26a5114SKOSAKI Motohiro 			continue;
824e26a5114SKOSAKI Motohiro 
825e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
826e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8279d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
828e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
82919a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
8309d8cebd4SKOSAKI Motohiro 		if (prev) {
8319d8cebd4SKOSAKI Motohiro 			vma = prev;
8329d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
8333964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
8349d8cebd4SKOSAKI Motohiro 				continue;
8353964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
8363964acd0SOleg Nesterov 			goto replace;
8371da177e4SLinus Torvalds 		}
8389d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8399d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
8409d8cebd4SKOSAKI Motohiro 			if (err)
8419d8cebd4SKOSAKI Motohiro 				goto out;
8429d8cebd4SKOSAKI Motohiro 		}
8439d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8449d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
8459d8cebd4SKOSAKI Motohiro 			if (err)
8469d8cebd4SKOSAKI Motohiro 				goto out;
8479d8cebd4SKOSAKI Motohiro 		}
8483964acd0SOleg Nesterov  replace:
849869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8509d8cebd4SKOSAKI Motohiro 		if (err)
8519d8cebd4SKOSAKI Motohiro 			goto out;
8529d8cebd4SKOSAKI Motohiro 	}
8539d8cebd4SKOSAKI Motohiro 
8549d8cebd4SKOSAKI Motohiro  out:
8551da177e4SLinus Torvalds 	return err;
8561da177e4SLinus Torvalds }
8571da177e4SLinus Torvalds 
8581da177e4SLinus Torvalds /* Set the process memory policy */
859028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
860028fec41SDavid Rientjes 			     nodemask_t *nodes)
8611da177e4SLinus Torvalds {
86258568d2aSMiao Xie 	struct mempolicy *new, *old;
8634bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
86458568d2aSMiao Xie 	int ret;
8651da177e4SLinus Torvalds 
8664bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8674bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
868f4e53d91SLee Schermerhorn 
8694bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8704bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8714bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8724bfc4495SKAMEZAWA Hiroyuki 		goto out;
8734bfc4495SKAMEZAWA Hiroyuki 	}
8742c7c3a7dSOleg Nesterov 
875bda420b9SHuang Ying 	if (flags & MPOL_F_NUMA_BALANCING) {
876bda420b9SHuang Ying 		if (new && new->mode == MPOL_BIND) {
877bda420b9SHuang Ying 			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
878bda420b9SHuang Ying 		} else {
879bda420b9SHuang Ying 			ret = -EINVAL;
880bda420b9SHuang Ying 			mpol_put(new);
881bda420b9SHuang Ying 			goto out;
882bda420b9SHuang Ying 		}
883bda420b9SHuang Ying 	}
884bda420b9SHuang Ying 
8854bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
88658568d2aSMiao Xie 	if (ret) {
88758568d2aSMiao Xie 		mpol_put(new);
8884bfc4495SKAMEZAWA Hiroyuki 		goto out;
88958568d2aSMiao Xie 	}
89078b132e9SWei Yang 	task_lock(current);
89158568d2aSMiao Xie 	old = current->mempolicy;
8921da177e4SLinus Torvalds 	current->mempolicy = new;
89345816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
89445816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
89558568d2aSMiao Xie 	task_unlock(current);
89658568d2aSMiao Xie 	mpol_put(old);
8974bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8984bfc4495SKAMEZAWA Hiroyuki out:
8994bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
9004bfc4495SKAMEZAWA Hiroyuki 	return ret;
9011da177e4SLinus Torvalds }
9021da177e4SLinus Torvalds 
903bea904d5SLee Schermerhorn /*
904bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
90558568d2aSMiao Xie  *
90658568d2aSMiao Xie  * Called with task's alloc_lock held
907bea904d5SLee Schermerhorn  */
908bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
9091da177e4SLinus Torvalds {
910dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
911bea904d5SLee Schermerhorn 	if (p == &default_policy)
912bea904d5SLee Schermerhorn 		return;
913bea904d5SLee Schermerhorn 
91445c4745aSLee Schermerhorn 	switch (p->mode) {
91519770b32SMel Gorman 	case MPOL_BIND:
9161da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
917269fbe72SBen Widawsky 	case MPOL_PREFERRED:
918b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
919269fbe72SBen Widawsky 		*nodes = p->nodes;
9201da177e4SLinus Torvalds 		break;
9217858d7bcSFeng Tang 	case MPOL_LOCAL:
9227858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9237858d7bcSFeng Tang 		break;
9241da177e4SLinus Torvalds 	default:
9251da177e4SLinus Torvalds 		BUG();
9261da177e4SLinus Torvalds 	}
9271da177e4SLinus Torvalds }
9281da177e4SLinus Torvalds 
9293b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9301da177e4SLinus Torvalds {
931ba841078SPeter Xu 	struct page *p = NULL;
9321da177e4SLinus Torvalds 	int err;
9331da177e4SLinus Torvalds 
9343b9aadf7SAndrea Arcangeli 	int locked = 1;
9353b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
9362d3a36a4SMichal Hocko 	if (err > 0) {
9371da177e4SLinus Torvalds 		err = page_to_nid(p);
9381da177e4SLinus Torvalds 		put_page(p);
9391da177e4SLinus Torvalds 	}
9403b9aadf7SAndrea Arcangeli 	if (locked)
941d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
9421da177e4SLinus Torvalds 	return err;
9431da177e4SLinus Torvalds }
9441da177e4SLinus Torvalds 
9451da177e4SLinus Torvalds /* Retrieve NUMA policy */
946dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9471da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9481da177e4SLinus Torvalds {
9498bccd85fSChristoph Lameter 	int err;
9501da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9511da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9523b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9531da177e4SLinus Torvalds 
954754af6f5SLee Schermerhorn 	if (flags &
955754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9561da177e4SLinus Torvalds 		return -EINVAL;
957754af6f5SLee Schermerhorn 
958754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
959754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
960754af6f5SLee Schermerhorn 			return -EINVAL;
961754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
96258568d2aSMiao Xie 		task_lock(current);
963754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
96458568d2aSMiao Xie 		task_unlock(current);
965754af6f5SLee Schermerhorn 		return 0;
966754af6f5SLee Schermerhorn 	}
967754af6f5SLee Schermerhorn 
9681da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
969bea904d5SLee Schermerhorn 		/*
970bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
971bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
972bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
973bea904d5SLee Schermerhorn 		 */
974d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
97533e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9761da177e4SLinus Torvalds 		if (!vma) {
977d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9781da177e4SLinus Torvalds 			return -EFAULT;
9791da177e4SLinus Torvalds 		}
9801da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9811da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9821da177e4SLinus Torvalds 		else
9831da177e4SLinus Torvalds 			pol = vma->vm_policy;
9841da177e4SLinus Torvalds 	} else if (addr)
9851da177e4SLinus Torvalds 		return -EINVAL;
9861da177e4SLinus Torvalds 
9871da177e4SLinus Torvalds 	if (!pol)
988bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9891da177e4SLinus Torvalds 
9901da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9911da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9923b9aadf7SAndrea Arcangeli 			/*
9933b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
994baf2f90bSLu Jialin 			 * will drop the mmap_lock, so after calling
9953b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
9963b9aadf7SAndrea Arcangeli 			 * is stale.
9973b9aadf7SAndrea Arcangeli 			 */
9983b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9993b9aadf7SAndrea Arcangeli 			vma = NULL;
10003b9aadf7SAndrea Arcangeli 			mpol_get(pol);
10013b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
10021da177e4SLinus Torvalds 			if (err < 0)
10031da177e4SLinus Torvalds 				goto out;
10048bccd85fSChristoph Lameter 			*policy = err;
10051da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
100645c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
1007269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
10081da177e4SLinus Torvalds 		} else {
10091da177e4SLinus Torvalds 			err = -EINVAL;
10101da177e4SLinus Torvalds 			goto out;
10111da177e4SLinus Torvalds 		}
1012bea904d5SLee Schermerhorn 	} else {
1013bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
1014bea904d5SLee Schermerhorn 						pol->mode;
1015d79df630SDavid Rientjes 		/*
1016d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
1017d79df630SDavid Rientjes 		 * the policy to userspace.
1018d79df630SDavid Rientjes 		 */
1019d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1020bea904d5SLee Schermerhorn 	}
10211da177e4SLinus Torvalds 
10221da177e4SLinus Torvalds 	err = 0;
102358568d2aSMiao Xie 	if (nmask) {
1024c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1025c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1026c6b6ef8bSLee Schermerhorn 		} else {
102758568d2aSMiao Xie 			task_lock(current);
1028bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
102958568d2aSMiao Xie 			task_unlock(current);
103058568d2aSMiao Xie 		}
1031c6b6ef8bSLee Schermerhorn 	}
10321da177e4SLinus Torvalds 
10331da177e4SLinus Torvalds  out:
103452cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10351da177e4SLinus Torvalds 	if (vma)
1036d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10373b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10383b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10391da177e4SLinus Torvalds 	return err;
10401da177e4SLinus Torvalds }
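
/*
 * Illustrative userspace sketch (not kernel code): the MPOL_F_NODE |
 * MPOL_F_ADDR combination handled above asks which node currently backs a
 * given address rather than returning the policy mode.  Hypothetical use:
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *	// on success, "node" holds the node id of the page backing addr
 */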
10411da177e4SLinus Torvalds 
1042b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10438bccd85fSChristoph Lameter /*
1044c8633798SNaoya Horiguchi  * page migration, thp tail pages can be passed.
10456ce3c4c0SChristoph Lameter  */
1046a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1047fc301289SChristoph Lameter 				unsigned long flags)
10486ce3c4c0SChristoph Lameter {
1049c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
10506ce3c4c0SChristoph Lameter 	/*
1051fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
10526ce3c4c0SChristoph Lameter 	 */
1053c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1054c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
1055c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
1056c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
10579de4f22aSHuang Ying 				NR_ISOLATED_ANON + page_is_file_lru(head),
10586c357848SMatthew Wilcox (Oracle) 				thp_nr_pages(head));
1059a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1060a53190a4SYang Shi 			/*
1061a53190a4SYang Shi 			 * A non-movable page may reach here.  Also, there may be
1062a53190a4SYang Shi 			 * temporarily off-LRU pages or non-LRU movable pages.
1063a53190a4SYang Shi 			 * Treat them as unmovable pages since they can't be
1064a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1065a53190a4SYang Shi 			 * should return -EIO for this case too.
1066a53190a4SYang Shi 			 */
1067a53190a4SYang Shi 			return -EIO;
106862695a84SNick Piggin 		}
106962695a84SNick Piggin 	}
1070a53190a4SYang Shi 
1071a53190a4SYang Shi 	return 0;
10726ce3c4c0SChristoph Lameter }
10736ce3c4c0SChristoph Lameter 
10746ce3c4c0SChristoph Lameter /*
10757e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10767e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10777e2ab150SChristoph Lameter  */
1078dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1079dbcb0f19SAdrian Bunk 			   int flags)
10807e2ab150SChristoph Lameter {
10817e2ab150SChristoph Lameter 	nodemask_t nmask;
10827e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10837e2ab150SChristoph Lameter 	int err = 0;
1084a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1085a0976311SJoonsoo Kim 		.nid = dest,
1086a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1087a0976311SJoonsoo Kim 	};
10887e2ab150SChristoph Lameter 
10897e2ab150SChristoph Lameter 	nodes_clear(nmask);
10907e2ab150SChristoph Lameter 	node_set(source, nmask);
10917e2ab150SChristoph Lameter 
109208270807SMinchan Kim 	/*
109308270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
109408270807SMinchan Kim 	 * need migration.  Between passing in the full user address
109508270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
109608270807SMinchan Kim 	 */
109708270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
109898094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10997e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
11007e2ab150SChristoph Lameter 
1101cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1102a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
11035ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1104cf608ac1SMinchan Kim 		if (err)
1105e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1106cf608ac1SMinchan Kim 	}
110795a402c3SChristoph Lameter 
11087e2ab150SChristoph Lameter 	return err;
11097e2ab150SChristoph Lameter }
11107e2ab150SChristoph Lameter 
11117e2ab150SChristoph Lameter /*
11127e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
11137e2ab150SChristoph Lameter  * layout as much as possible.
111439743889SChristoph Lameter  *
111539743889SChristoph Lameter  * Returns the number of pages that could not be moved.
111639743889SChristoph Lameter  */
11170ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11180ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
111939743889SChristoph Lameter {
11207e2ab150SChristoph Lameter 	int busy = 0;
1121f555befdSJan Stancek 	int err = 0;
11227e2ab150SChristoph Lameter 	nodemask_t tmp;
112339743889SChristoph Lameter 
1124361a2a22SMinchan Kim 	lru_cache_disable();
11250aedadf9SChristoph Lameter 
1126d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1127d4984711SChristoph Lameter 
11287e2ab150SChristoph Lameter 	/*
11297e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11307e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11317e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11327e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11337e2ab150SChristoph Lameter 	 *
11347e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11357e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11367e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11377e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need to move.
11387e2ab150SChristoph Lameter 	 *
11397e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11407e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11417e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11427e2ab150SChristoph Lameter 	 *
11437e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11447e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11457e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11467e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11477e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
11487e2ab150SChristoph Lameter 	 *
11497e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11507e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11517e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11527e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1153ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
11547e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11557e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11567e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11577e2ab150SChristoph Lameter 	 */
11587e2ab150SChristoph Lameter 
11590ce72d4fSAndrew Morton 	tmp = *from;
11607e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11617e2ab150SChristoph Lameter 		int s, d;
1162b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11637e2ab150SChristoph Lameter 		int dest = 0;
11647e2ab150SChristoph Lameter 
11657e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11664a5b18ccSLarry Woodman 
11674a5b18ccSLarry Woodman 			/*
11684a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11694a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11704a5b18ccSLarry Woodman 			 * threads and memory areas.
11714a5b18ccSLarry Woodman 			 *
11724a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11734a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11744a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11754a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11764a5b18ccSLarry Woodman 			 * mask.
11774a5b18ccSLarry Woodman 			 *
11784a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11794a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11804a5b18ccSLarry Woodman 			 */
11814a5b18ccSLarry Woodman 
11820ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11830ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11844a5b18ccSLarry Woodman 				continue;
11854a5b18ccSLarry Woodman 
11860ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11877e2ab150SChristoph Lameter 			if (s == d)
11887e2ab150SChristoph Lameter 				continue;
11897e2ab150SChristoph Lameter 
11907e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11917e2ab150SChristoph Lameter 			dest = d;
11927e2ab150SChristoph Lameter 
11937e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11947e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11957e2ab150SChristoph Lameter 				break;
11967e2ab150SChristoph Lameter 		}
1197b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11987e2ab150SChristoph Lameter 			break;
11997e2ab150SChristoph Lameter 
12007e2ab150SChristoph Lameter 		node_clear(source, tmp);
12017e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
12027e2ab150SChristoph Lameter 		if (err > 0)
12037e2ab150SChristoph Lameter 			busy += err;
12047e2ab150SChristoph Lameter 		if (err < 0)
12057e2ab150SChristoph Lameter 			break;
120639743889SChristoph Lameter 	}
1207d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1208d479960eSMinchan Kim 
1209361a2a22SMinchan Kim 	lru_cache_enable();
12107e2ab150SChristoph Lameter 	if (err < 0)
12117e2ab150SChristoph Lameter 		return err;
12127e2ab150SChristoph Lameter 	return busy;
1213b20a3503SChristoph Lameter 
121439743889SChristoph Lameter }
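
/*
 * Worked example: suppose do_migrate_pages() is called with
 * from = {0,1} and to = {2,3}.  The weights match, so no source is
 * skipped:
 *
 *	tmp = {0,1}
 *	pass 1: s = 0, d = node_remap(0, {0,1}, {2,3}) = 2;
 *	        2 is not set in tmp, so break with <0,2>;
 *	        migrate_to_node(mm, 0, 2, flags); tmp = {1}
 *	pass 2: s = 1, d = 3; 3 is not set in tmp, so break with <1,3>;
 *	        migrate_to_node(mm, 1, 3, flags); tmp = {}
 *
 * Two calls move node 0's pages to node 2 and node 1's pages to
 * node 3, preserving the relative layout described above.
 */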
121539743889SChristoph Lameter 
12163ad33b24SLee Schermerhorn /*
12173ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1218d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
12193ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12203ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12213ad33b24SLee Schermerhorn  * is in virtual address order.
12223ad33b24SLee Schermerhorn  */
1223666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
122495a402c3SChristoph Lameter {
1225d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12263f649ab7SKees Cook 	unsigned long address;
122795a402c3SChristoph Lameter 
1228d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12293ad33b24SLee Schermerhorn 	while (vma) {
12303ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12313ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12323ad33b24SLee Schermerhorn 			break;
12333ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12343ad33b24SLee Schermerhorn 	}
12353ad33b24SLee Schermerhorn 
123611c731e8SWanpeng Li 	if (PageHuge(page)) {
1237389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1238389c8178SMichal Hocko 				vma, address);
123994723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1240c8633798SNaoya Horiguchi 		struct page *thp;
1241c8633798SNaoya Horiguchi 
124219deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
124319deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1244c8633798SNaoya Horiguchi 		if (!thp)
1245c8633798SNaoya Horiguchi 			return NULL;
1246c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1247c8633798SNaoya Horiguchi 		return thp;
124811c731e8SWanpeng Li 	}
124911c731e8SWanpeng Li 	/*
125011c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
125111c731e8SWanpeng Li 	 */
12520f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
12530f556856SMichal Hocko 			vma, address);
125495a402c3SChristoph Lameter }
1255b20a3503SChristoph Lameter #else
1256b20a3503SChristoph Lameter 
1257a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1258b20a3503SChristoph Lameter 				unsigned long flags)
1259b20a3503SChristoph Lameter {
1260a53190a4SYang Shi 	return -EIO;
1261b20a3503SChristoph Lameter }
1262b20a3503SChristoph Lameter 
12630ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12640ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1265b20a3503SChristoph Lameter {
1266b20a3503SChristoph Lameter 	return -ENOSYS;
1267b20a3503SChristoph Lameter }
126895a402c3SChristoph Lameter 
1269666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
127095a402c3SChristoph Lameter {
127195a402c3SChristoph Lameter 	return NULL;
127295a402c3SChristoph Lameter }
1273b20a3503SChristoph Lameter #endif
1274b20a3503SChristoph Lameter 
1275dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1276028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1277028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12786ce3c4c0SChristoph Lameter {
12796ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12806ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12816ce3c4c0SChristoph Lameter 	unsigned long end;
12826ce3c4c0SChristoph Lameter 	int err;
1283d8835445SYang Shi 	int ret;
12846ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12856ce3c4c0SChristoph Lameter 
1286b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12876ce3c4c0SChristoph Lameter 		return -EINVAL;
128874c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12896ce3c4c0SChristoph Lameter 		return -EPERM;
12906ce3c4c0SChristoph Lameter 
12916ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12926ce3c4c0SChristoph Lameter 		return -EINVAL;
12936ce3c4c0SChristoph Lameter 
12946ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12956ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12966ce3c4c0SChristoph Lameter 
12976ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12986ce3c4c0SChristoph Lameter 	end = start + len;
12996ce3c4c0SChristoph Lameter 
13006ce3c4c0SChristoph Lameter 	if (end < start)
13016ce3c4c0SChristoph Lameter 		return -EINVAL;
13026ce3c4c0SChristoph Lameter 	if (end == start)
13036ce3c4c0SChristoph Lameter 		return 0;
13046ce3c4c0SChristoph Lameter 
1305028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
13066ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
13076ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
13086ce3c4c0SChristoph Lameter 
1309b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1310b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1311b24f53a0SLee Schermerhorn 
13126ce3c4c0SChristoph Lameter 	/*
13136ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
13146ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
13156ce3c4c0SChristoph Lameter 	 */
13166ce3c4c0SChristoph Lameter 	if (!new)
13176ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13186ce3c4c0SChristoph Lameter 
1319028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1320028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
132100ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13226ce3c4c0SChristoph Lameter 
13230aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13240aedadf9SChristoph Lameter 
1325361a2a22SMinchan Kim 		lru_cache_disable();
13260aedadf9SChristoph Lameter 	}
13274bfc4495SKAMEZAWA Hiroyuki 	{
13284bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13294bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1330d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13314bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13324bfc4495SKAMEZAWA Hiroyuki 			if (err)
1333d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13344bfc4495SKAMEZAWA Hiroyuki 		} else
13354bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13364bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13374bfc4495SKAMEZAWA Hiroyuki 	}
1338b05ca738SKOSAKI Motohiro 	if (err)
1339b05ca738SKOSAKI Motohiro 		goto mpol_out;
1340b05ca738SKOSAKI Motohiro 
1341d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13426ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1343d8835445SYang Shi 
1344d8835445SYang Shi 	if (ret < 0) {
1345a85dfc30SYang Shi 		err = ret;
1346d8835445SYang Shi 		goto up_out;
1347d8835445SYang Shi 	}
1348d8835445SYang Shi 
13499d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13507e2ab150SChristoph Lameter 
1351b24f53a0SLee Schermerhorn 	if (!err) {
1352b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1353b24f53a0SLee Schermerhorn 
1354cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1355b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1356d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
13575ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1358cf608ac1SMinchan Kim 			if (nr_failed)
135974060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1360cf608ac1SMinchan Kim 		}
13616ce3c4c0SChristoph Lameter 
1362d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13636ce3c4c0SChristoph Lameter 			err = -EIO;
1364a85dfc30SYang Shi 	} else {
1365d8835445SYang Shi up_out:
1366a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1367a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1368a85dfc30SYang Shi 	}
1369a85dfc30SYang Shi 
1370d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1371b05ca738SKOSAKI Motohiro mpol_out:
1372f0be3d32SLee Schermerhorn 	mpol_put(new);
1373d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1374361a2a22SMinchan Kim 		lru_cache_enable();
13756ce3c4c0SChristoph Lameter 	return err;
13766ce3c4c0SChristoph Lameter }
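
/*
 * Illustrative userspace sketch, assuming libnuma's <numaif.h> wrapper
 * around the mbind(2) syscall and a machine with at least two nodes:
 * bind an anonymous mapping to nodes 0-1 and migrate any pages already
 * placed elsewhere.  This ends up in do_mbind() above:
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	size_t len = 1 << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */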
13776ce3c4c0SChristoph Lameter 
137839743889SChristoph Lameter /*
13798bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13808bccd85fSChristoph Lameter  */
13818bccd85fSChristoph Lameter 
13828bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138339743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13848bccd85fSChristoph Lameter 		     unsigned long maxnode)
13858bccd85fSChristoph Lameter {
13868bccd85fSChristoph Lameter 	unsigned long k;
138756521e7aSYisheng Xie 	unsigned long t;
13888bccd85fSChristoph Lameter 	unsigned long nlongs;
13898bccd85fSChristoph Lameter 	unsigned long endmask;
13908bccd85fSChristoph Lameter 
13918bccd85fSChristoph Lameter 	--maxnode;
13928bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13938bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13948bccd85fSChristoph Lameter 		return 0;
1395a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1396636f13c1SChris Wright 		return -EINVAL;
13978bccd85fSChristoph Lameter 
13988bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13998bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
14008bccd85fSChristoph Lameter 		endmask = ~0UL;
14018bccd85fSChristoph Lameter 	else
14028bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
14038bccd85fSChristoph Lameter 
140456521e7aSYisheng Xie 	/*
140556521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
140656521e7aSYisheng Xie 	 * that the unsupported part is all zero.
140756521e7aSYisheng Xie 	 *
140856521e7aSYisheng Xie 	 * If maxnode has more longs than MAX_NUMNODES, check the bits
140956521e7aSYisheng Xie 	 * in that area first, and then go on to check the remaining
141056521e7aSYisheng Xie 	 * bits, which are at or above MAX_NUMNODES.
141156521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
141256521e7aSYisheng Xie 	 */
14138bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
14148bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
14158bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
14168bccd85fSChristoph Lameter 				return -EFAULT;
14178bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
14188bccd85fSChristoph Lameter 				if (t & endmask)
14198bccd85fSChristoph Lameter 					return -EINVAL;
14208bccd85fSChristoph Lameter 			} else if (t)
14218bccd85fSChristoph Lameter 				return -EINVAL;
14228bccd85fSChristoph Lameter 		}
14238bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
14248bccd85fSChristoph Lameter 		endmask = ~0UL;
14258bccd85fSChristoph Lameter 	}
14268bccd85fSChristoph Lameter 
142756521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
142856521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
142956521e7aSYisheng Xie 
143056521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
143156521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
143256521e7aSYisheng Xie 			return -EFAULT;
143356521e7aSYisheng Xie 		if (t & valid_mask)
143456521e7aSYisheng Xie 			return -EINVAL;
143556521e7aSYisheng Xie 	}
143656521e7aSYisheng Xie 
14378bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
14388bccd85fSChristoph Lameter 		return -EFAULT;
14398bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
14408bccd85fSChristoph Lameter 	return 0;
14418bccd85fSChristoph Lameter }
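
/*
 * Worked example, assuming a 64-bit kernel: a userspace value of
 * maxnode == 17 becomes 16 after the --maxnode above, so nlongs == 1
 * and endmask == (1UL << 16) - 1: only the low 16 bits of the first
 * long are honored.  A value of maxnode == 1025 becomes 1024, giving
 * nlongs == 16 and endmask == ~0UL, i.e. sixteen full longs are copied
 * (subject to the MAX_NUMNODES checks above).
 */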
14428bccd85fSChristoph Lameter 
14438bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14448bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14458bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14468bccd85fSChristoph Lameter {
14478bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1448050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
14498bccd85fSChristoph Lameter 
14508bccd85fSChristoph Lameter 	if (copy > nbytes) {
14518bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14528bccd85fSChristoph Lameter 			return -EINVAL;
14538bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14548bccd85fSChristoph Lameter 			return -EFAULT;
14558bccd85fSChristoph Lameter 		copy = nbytes;
14568bccd85fSChristoph Lameter 	}
14578bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14588bccd85fSChristoph Lameter }
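
/*
 * Worked example, assuming a 64-bit kernel with nr_node_ids == 1024:
 * nbytes is BITS_TO_LONGS(1024) * sizeof(long) == 128.  If userspace
 * passes maxnode == 4096, copy is ALIGN(4095, 64) / 8 == 512, so bytes
 * 128..511 of the user buffer are cleared and only the first 128 bytes
 * receive mask data.
 */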
14598bccd85fSChristoph Lameter 
146095837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
146195837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
146295837924SFeng Tang {
146395837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
146495837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1465b27abaccSDave Hansen 
1466*a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >= MPOL_MAX)
146795837924SFeng Tang 		return -EINVAL;
146895837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
146995837924SFeng Tang 		return -EINVAL;
147095837924SFeng Tang 
147195837924SFeng Tang 	return 0;
147295837924SFeng Tang }
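
/*
 * Illustration: the syscall ABI packs the optional mode flags into the
 * high bits of the mode argument.  For example, a userspace mode of
 * (MPOL_INTERLEAVE | MPOL_F_STATIC_NODES) is split here into
 * *mode == MPOL_INTERLEAVE and *flags == MPOL_F_STATIC_NODES, while
 * combining MPOL_F_STATIC_NODES with MPOL_F_RELATIVE_NODES is rejected
 * with -EINVAL.
 */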
147395837924SFeng Tang 
1474e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1475e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1476e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14778bccd85fSChristoph Lameter {
1478028fec41SDavid Rientjes 	unsigned short mode_flags;
147995837924SFeng Tang 	nodemask_t nodes;
148095837924SFeng Tang 	int lmode = mode;
148195837924SFeng Tang 	int err;
14828bccd85fSChristoph Lameter 
1483057d3389SAndrey Konovalov 	start = untagged_addr(start);
148495837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
148595837924SFeng Tang 	if (err)
148695837924SFeng Tang 		return err;
148795837924SFeng Tang 
14888bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14898bccd85fSChristoph Lameter 	if (err)
14908bccd85fSChristoph Lameter 		return err;
149195837924SFeng Tang 
149295837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14938bccd85fSChristoph Lameter }
14948bccd85fSChristoph Lameter 
1495e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1496e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1497e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1498e7dc9ad6SDominik Brodowski {
1499e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1500e7dc9ad6SDominik Brodowski }
1501e7dc9ad6SDominik Brodowski 
15028bccd85fSChristoph Lameter /* Set the process memory policy */
1503af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1504af03c4acSDominik Brodowski 				 unsigned long maxnode)
15058bccd85fSChristoph Lameter {
150695837924SFeng Tang 	unsigned short mode_flags;
15078bccd85fSChristoph Lameter 	nodemask_t nodes;
150895837924SFeng Tang 	int lmode = mode;
150995837924SFeng Tang 	int err;
15108bccd85fSChristoph Lameter 
151195837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
151295837924SFeng Tang 	if (err)
151395837924SFeng Tang 		return err;
151495837924SFeng Tang 
15158bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15168bccd85fSChristoph Lameter 	if (err)
15178bccd85fSChristoph Lameter 		return err;
151895837924SFeng Tang 
151995837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15208bccd85fSChristoph Lameter }
15218bccd85fSChristoph Lameter 
1522af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1523af03c4acSDominik Brodowski 		unsigned long, maxnode)
1524af03c4acSDominik Brodowski {
1525af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1526af03c4acSDominik Brodowski }
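
/*
 * Illustrative userspace sketch, assuming libnuma's <numaif.h> wrapper
 * for set_mempolicy(2) and a system with at least four allowed nodes:
 * set the calling task's policy to interleave over nodes 0-3.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = 0xfUL;		// nodes 0,1,2,3
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask)))
 *		perror("set_mempolicy");
 */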
1527af03c4acSDominik Brodowski 
1528b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1529b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1530b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
153139743889SChristoph Lameter {
1532596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
153339743889SChristoph Lameter 	struct task_struct *task;
153439743889SChristoph Lameter 	nodemask_t task_nodes;
153539743889SChristoph Lameter 	int err;
1536596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1537596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1538596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
153939743889SChristoph Lameter 
1540596d7cfaSKOSAKI Motohiro 	if (!scratch)
1541596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
154239743889SChristoph Lameter 
1543596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1544596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1545596d7cfaSKOSAKI Motohiro 
1546596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
154739743889SChristoph Lameter 	if (err)
1548596d7cfaSKOSAKI Motohiro 		goto out;
1549596d7cfaSKOSAKI Motohiro 
1550596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1551596d7cfaSKOSAKI Motohiro 	if (err)
1552596d7cfaSKOSAKI Motohiro 		goto out;
155339743889SChristoph Lameter 
155439743889SChristoph Lameter 	/* Find the mm_struct */
155555cfaa3cSZeng Zhaoming 	rcu_read_lock();
1556228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
155739743889SChristoph Lameter 	if (!task) {
155855cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1559596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1560596d7cfaSKOSAKI Motohiro 		goto out;
156139743889SChristoph Lameter 	}
15623268c63eSChristoph Lameter 	get_task_struct(task);
156339743889SChristoph Lameter 
1564596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
156539743889SChristoph Lameter 
156639743889SChristoph Lameter 	/*
156731367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
156831367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
156939743889SChristoph Lameter 	 */
157031367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1571c69e8d9cSDavid Howells 		rcu_read_unlock();
157239743889SChristoph Lameter 		err = -EPERM;
15733268c63eSChristoph Lameter 		goto out_put;
157439743889SChristoph Lameter 	}
1575c69e8d9cSDavid Howells 	rcu_read_unlock();
157639743889SChristoph Lameter 
157739743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
157839743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1579596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
158039743889SChristoph Lameter 		err = -EPERM;
15813268c63eSChristoph Lameter 		goto out_put;
158239743889SChristoph Lameter 	}
158339743889SChristoph Lameter 
15840486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15850486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15860486a38bSYisheng Xie 	if (nodes_empty(*new))
15873268c63eSChristoph Lameter 		goto out_put;
15880486a38bSYisheng Xie 
158986c3a764SDavid Quigley 	err = security_task_movememory(task);
159086c3a764SDavid Quigley 	if (err)
15913268c63eSChristoph Lameter 		goto out_put;
159286c3a764SDavid Quigley 
15933268c63eSChristoph Lameter 	mm = get_task_mm(task);
15943268c63eSChristoph Lameter 	put_task_struct(task);
1595f2a9ef88SSasha Levin 
1596f2a9ef88SSasha Levin 	if (!mm) {
1597f2a9ef88SSasha Levin 		err = -EINVAL;
1598f2a9ef88SSasha Levin 		goto out;
1599f2a9ef88SSasha Levin 	}
1600f2a9ef88SSasha Levin 
1601596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
160274c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16033268c63eSChristoph Lameter 
160439743889SChristoph Lameter 	mmput(mm);
16053268c63eSChristoph Lameter out:
1606596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1607596d7cfaSKOSAKI Motohiro 
160839743889SChristoph Lameter 	return err;
16093268c63eSChristoph Lameter 
16103268c63eSChristoph Lameter out_put:
16113268c63eSChristoph Lameter 	put_task_struct(task);
16123268c63eSChristoph Lameter 	goto out;
16133268c63eSChristoph Lameter 
161439743889SChristoph Lameter }
161539743889SChristoph Lameter 
1616b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1617b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1618b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1619b6e9b0baSDominik Brodowski {
1620b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1621b6e9b0baSDominik Brodowski }
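
/*
 * Illustrative userspace sketch, assuming libnuma's <numaif.h> wrapper
 * for migrate_pages(2): move all of a target process's pages from
 * node 0 to node 1, subject to the ptrace and cpuset checks performed
 * in kernel_migrate_pages() above.
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *
 *	long left = migrate_pages(pid, 8 * sizeof(unsigned long),
 *				  &old_nodes, &new_nodes);
 *	// left > 0: pages that could not be moved; left < 0: error
 */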
1622b6e9b0baSDominik Brodowski 
162339743889SChristoph Lameter 
16248bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1625af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1626af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1627af03c4acSDominik Brodowski 				unsigned long maxnode,
1628af03c4acSDominik Brodowski 				unsigned long addr,
1629af03c4acSDominik Brodowski 				unsigned long flags)
16308bccd85fSChristoph Lameter {
1631dbcb0f19SAdrian Bunk 	int err;
16323f649ab7SKees Cook 	int pval;
16338bccd85fSChristoph Lameter 	nodemask_t nodes;
16348bccd85fSChristoph Lameter 
1635050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16368bccd85fSChristoph Lameter 		return -EINVAL;
16378bccd85fSChristoph Lameter 
16384605f057SWenchao Hao 	addr = untagged_addr(addr);
16394605f057SWenchao Hao 
16408bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16418bccd85fSChristoph Lameter 
16428bccd85fSChristoph Lameter 	if (err)
16438bccd85fSChristoph Lameter 		return err;
16448bccd85fSChristoph Lameter 
16458bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16468bccd85fSChristoph Lameter 		return -EFAULT;
16478bccd85fSChristoph Lameter 
16488bccd85fSChristoph Lameter 	if (nmask)
16498bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16508bccd85fSChristoph Lameter 
16518bccd85fSChristoph Lameter 	return err;
16528bccd85fSChristoph Lameter }
16538bccd85fSChristoph Lameter 
1654af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1655af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1656af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1657af03c4acSDominik Brodowski {
1658af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1659af03c4acSDominik Brodowski }
1660af03c4acSDominik Brodowski 
16611da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
16621da177e4SLinus Torvalds 
1663c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1664c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1665c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1666c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
16671da177e4SLinus Torvalds {
16681da177e4SLinus Torvalds 	long err;
16691da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16701da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16711da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16721da177e4SLinus Torvalds 
1673050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
16741da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16751da177e4SLinus Torvalds 
16761da177e4SLinus Torvalds 	if (nmask)
16771da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16781da177e4SLinus Torvalds 
1679af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
16801da177e4SLinus Torvalds 
16811da177e4SLinus Torvalds 	if (!err && nmask) {
16822bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
16832bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
16842bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
16851da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
16861da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
16871da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
16881da177e4SLinus Torvalds 	}
16891da177e4SLinus Torvalds 
16901da177e4SLinus Torvalds 	return err;
16911da177e4SLinus Torvalds }
16921da177e4SLinus Torvalds 
1693c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1694c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16951da177e4SLinus Torvalds {
16961da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16971da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16981da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
17011da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
17021da177e4SLinus Torvalds 
17031da177e4SLinus Torvalds 	if (nmask) {
1704cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
17051da177e4SLinus Torvalds 			return -EFAULT;
1706cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1707cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1708cf01fb99SChris Salls 			return -EFAULT;
1709cf01fb99SChris Salls 	}
17101da177e4SLinus Torvalds 
1711af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
17121da177e4SLinus Torvalds }
17131da177e4SLinus Torvalds 
1714c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1715c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1716c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
17171da177e4SLinus Torvalds {
17181da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
17191da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1720dfcd3c0dSAndi Kleen 	nodemask_t bm;
17211da177e4SLinus Torvalds 
17221da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
17231da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 	if (nmask) {
1726cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
17271da177e4SLinus Torvalds 			return -EFAULT;
1728cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1729cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1730cf01fb99SChris Salls 			return -EFAULT;
1731cf01fb99SChris Salls 	}
17321da177e4SLinus Torvalds 
1733e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
17341da177e4SLinus Torvalds }
17351da177e4SLinus Torvalds 
1736b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1737b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1738b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1739b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1740b6e9b0baSDominik Brodowski {
1741b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1742b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1743b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1744b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1745b6e9b0baSDominik Brodowski 	unsigned long size;
1746b6e9b0baSDominik Brodowski 
1747b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1748b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1749b6e9b0baSDominik Brodowski 	if (old_nodes) {
1750b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1751b6e9b0baSDominik Brodowski 			return -EFAULT;
1752b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1753b6e9b0baSDominik Brodowski 		if (new_nodes)
1754b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1755b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1756b6e9b0baSDominik Brodowski 			return -EFAULT;
1757b6e9b0baSDominik Brodowski 	}
1758b6e9b0baSDominik Brodowski 	if (new_nodes) {
1759b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1760b6e9b0baSDominik Brodowski 			return -EFAULT;
1761b6e9b0baSDominik Brodowski 		if (new == NULL)
1762b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1763b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1764b6e9b0baSDominik Brodowski 			return -EFAULT;
1765b6e9b0baSDominik Brodowski 	}
1766b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1767b6e9b0baSDominik Brodowski }
1768b6e9b0baSDominik Brodowski 
1769b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
17701da177e4SLinus Torvalds 
177120ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
177220ca87f2SLi Xinhai {
177320ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
177420ca87f2SLi Xinhai 		return false;
177520ca87f2SLi Xinhai 
177620ca87f2SLi Xinhai 	/*
177720ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
177820ca87f2SLi Xinhai 	 * incurring periodic faults.
177920ca87f2SLi Xinhai 	 */
178020ca87f2SLi Xinhai 	if (vma_is_dax(vma))
178120ca87f2SLi Xinhai 		return false;
178220ca87f2SLi Xinhai 
178320ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
178420ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
178520ca87f2SLi Xinhai 		return false;
178620ca87f2SLi Xinhai 
178720ca87f2SLi Xinhai 	/*
178820ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
178920ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
179020ca87f2SLi Xinhai 	 * possible.
179120ca87f2SLi Xinhai 	 */
179220ca87f2SLi Xinhai 	if (vma->vm_file &&
179320ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
179420ca87f2SLi Xinhai 			< policy_zone)
179520ca87f2SLi Xinhai 		return false;
179620ca87f2SLi Xinhai 	return true;
179720ca87f2SLi Xinhai }
179820ca87f2SLi Xinhai 
179974d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
180074d2c3a0SOleg Nesterov 						unsigned long addr)
18011da177e4SLinus Torvalds {
18028d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
18031da177e4SLinus Torvalds 
18041da177e4SLinus Torvalds 	if (vma) {
1805480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
18068d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
180700442ad0SMel Gorman 		} else if (vma->vm_policy) {
18081da177e4SLinus Torvalds 			pol = vma->vm_policy;
180900442ad0SMel Gorman 
181000442ad0SMel Gorman 			/*
181100442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
181200442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
181300442ad0SMel Gorman 			 * count on these policies which will be dropped by
181400442ad0SMel Gorman 			 * mpol_cond_put() later
181500442ad0SMel Gorman 			 */
181600442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
181700442ad0SMel Gorman 				mpol_get(pol);
181800442ad0SMel Gorman 		}
18191da177e4SLinus Torvalds 	}
1820f15ca78eSOleg Nesterov 
182174d2c3a0SOleg Nesterov 	return pol;
182274d2c3a0SOleg Nesterov }
182374d2c3a0SOleg Nesterov 
182474d2c3a0SOleg Nesterov /*
1825dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
182674d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
182774d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
182874d2c3a0SOleg Nesterov  *
182974d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1830dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
183174d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
183274d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
183374d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
183474d2c3a0SOleg Nesterov  * extra reference for shared policies.
183574d2c3a0SOleg Nesterov  */
1836ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1837dd6eecb9SOleg Nesterov 						unsigned long addr)
183874d2c3a0SOleg Nesterov {
183974d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
184074d2c3a0SOleg Nesterov 
18418d90274bSOleg Nesterov 	if (!pol)
1842dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18438d90274bSOleg Nesterov 
18441da177e4SLinus Torvalds 	return pol;
18451da177e4SLinus Torvalds }
18461da177e4SLinus Torvalds 
18476b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1848fc314724SMel Gorman {
18496b6482bbSOleg Nesterov 	struct mempolicy *pol;
1850f15ca78eSOleg Nesterov 
1851fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1852fc314724SMel Gorman 		bool ret = false;
1853fc314724SMel Gorman 
1854fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1855fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1856fc314724SMel Gorman 			ret = true;
1857fc314724SMel Gorman 		mpol_cond_put(pol);
1858fc314724SMel Gorman 
1859fc314724SMel Gorman 		return ret;
18608d90274bSOleg Nesterov 	}
18618d90274bSOleg Nesterov 
1862fc314724SMel Gorman 	pol = vma->vm_policy;
18638d90274bSOleg Nesterov 	if (!pol)
18646b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1865fc314724SMel Gorman 
1866fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1867fc314724SMel Gorman }
1868fc314724SMel Gorman 
1869d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1870d3eb1570SLai Jiangshan {
1871d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1872d3eb1570SLai Jiangshan 
1873d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1874d3eb1570SLai Jiangshan 
1875d3eb1570SLai Jiangshan 	/*
1876269fbe72SBen Widawsky 	 * if policy->nodes has movable memory only,
1877d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1878d3eb1570SLai Jiangshan 	 *
1879269fbe72SBen Widawsky 	 * policy->nodes intersects with node_states[N_MEMORY], so if
1880f0953a1bSIngo Molnar 	 * the following test fails, it implies
1881269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1882d3eb1570SLai Jiangshan 	 */
1883269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1884d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1885d3eb1570SLai Jiangshan 
1886d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1887d3eb1570SLai Jiangshan }
1888d3eb1570SLai Jiangshan 
188952cd3b07SLee Schermerhorn /*
189052cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
189152cd3b07SLee Schermerhorn  * page allocation
189252cd3b07SLee Schermerhorn  */
18938ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
189419770b32SMel Gorman {
1895b27abaccSDave Hansen 	int mode = policy->mode;
1896b27abaccSDave Hansen 
189719770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1898b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1899d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1900269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1901269fbe72SBen Widawsky 		return &policy->nodes;
190219770b32SMel Gorman 
1903b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1904b27abaccSDave Hansen 		return &policy->nodes;
1905b27abaccSDave Hansen 
190619770b32SMel Gorman 	return NULL;
190719770b32SMel Gorman }
190819770b32SMel Gorman 
1909b27abaccSDave Hansen /*
1910b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1911b27abaccSDave Hansen  * the given id for all other policies.
1912b27abaccSDave Hansen  *
1913b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1914b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1915b27abaccSDave Hansen  */
1916f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
19171da177e4SLinus Torvalds {
19187858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1919269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
19207858d7bcSFeng Tang 	} else {
192119770b32SMel Gorman 		/*
19226d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
19236d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
19246d840958SMichal Hocko 		 * requested node and not break the policy.
192519770b32SMel Gorman 		 */
19266d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
19271da177e4SLinus Torvalds 	}
19286d840958SMichal Hocko 
192904ec6264SVlastimil Babka 	return nd;
19301da177e4SLinus Torvalds }
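
/*
 * Example: for MPOL_PREFERRED with nodes == {3}, this returns 3 no
 * matter which node id the caller passed in; for every other mode the
 * caller's nd is returned unchanged and policy_nodemask() supplies any
 * node restriction.
 */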
19311da177e4SLinus Torvalds 
19321da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
19331da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
19341da177e4SLinus Torvalds {
193545816682SVlastimil Babka 	unsigned next;
19361da177e4SLinus Torvalds 	struct task_struct *me = current;
19371da177e4SLinus Torvalds 
1938269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1939f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
194045816682SVlastimil Babka 		me->il_prev = next;
194145816682SVlastimil Babka 	return next;
19421da177e4SLinus Torvalds }
19431da177e4SLinus Torvalds 
1944dc85da15SChristoph Lameter /*
1945dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1946dc85da15SChristoph Lameter  * next slab entry.
1947dc85da15SChristoph Lameter  */
19482a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1949dc85da15SChristoph Lameter {
1950e7b691b0SAndi Kleen 	struct mempolicy *policy;
19512a389610SDavid Rientjes 	int node = numa_mem_id();
1952e7b691b0SAndi Kleen 
1953e7b691b0SAndi Kleen 	if (in_interrupt())
19542a389610SDavid Rientjes 		return node;
1955e7b691b0SAndi Kleen 
1956e7b691b0SAndi Kleen 	policy = current->mempolicy;
19577858d7bcSFeng Tang 	if (!policy)
19582a389610SDavid Rientjes 		return node;
1959765c4507SChristoph Lameter 
1960bea904d5SLee Schermerhorn 	switch (policy->mode) {
1961bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1962269fbe72SBen Widawsky 		return first_node(policy->nodes);
1963bea904d5SLee Schermerhorn 
1964dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1965dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1966dc85da15SChristoph Lameter 
1967b27abaccSDave Hansen 	case MPOL_BIND:
1968b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1969b27abaccSDave Hansen 	{
1970c33d6c06SMel Gorman 		struct zoneref *z;
1971c33d6c06SMel Gorman 
1972dc85da15SChristoph Lameter 		/*
1973dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1974dc85da15SChristoph Lameter 		 * first node.
1975dc85da15SChristoph Lameter 		 */
197619770b32SMel Gorman 		struct zonelist *zonelist;
197719770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1978c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1979c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1980269fbe72SBen Widawsky 							&policy->nodes);
1981c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1982dd1a239fSMel Gorman 	}
19837858d7bcSFeng Tang 	case MPOL_LOCAL:
19847858d7bcSFeng Tang 		return node;
1985dc85da15SChristoph Lameter 
1986dc85da15SChristoph Lameter 	default:
1987bea904d5SLee Schermerhorn 		BUG();
1988dc85da15SChristoph Lameter 	}
1989dc85da15SChristoph Lameter }
1990dc85da15SChristoph Lameter 
1991fee83b3aSAndrew Morton /*
1992fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1993269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1994fee83b3aSAndrew Morton  * number of present nodes.
1995fee83b3aSAndrew Morton  */
199698c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19971da177e4SLinus Torvalds {
1998269fbe72SBen Widawsky 	unsigned nnodes = nodes_weight(pol->nodes);
1999f5b087b5SDavid Rientjes 	unsigned target;
2000fee83b3aSAndrew Morton 	int i;
2001fee83b3aSAndrew Morton 	int nid;
20021da177e4SLinus Torvalds 
2003f5b087b5SDavid Rientjes 	if (!nnodes)
2004f5b087b5SDavid Rientjes 		return numa_node_id();
2005fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
2006269fbe72SBen Widawsky 	nid = first_node(pol->nodes);
2007fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
2008269fbe72SBen Widawsky 		nid = next_node(nid, pol->nodes);
20091da177e4SLinus Torvalds 	return nid;
20101da177e4SLinus Torvalds }
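
/*
 * Worked example: with pol->nodes == {0,2,5} and n == 7, nnodes == 3
 * and target == 7 % 3 == 1, so the walk starts at node 0 and advances
 * once, returning node 2.  Offsets 0, 3, 6, ... map to node 0,
 * offsets 1, 4, 7, ... to node 2, and offsets 2, 5, 8, ... to node 5.
 */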
20111da177e4SLinus Torvalds 
20125da7ca86SChristoph Lameter /* Determine a node number for interleave */
20135da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
20145da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
20155da7ca86SChristoph Lameter {
20165da7ca86SChristoph Lameter 	if (vma) {
20175da7ca86SChristoph Lameter 		unsigned long off;
20185da7ca86SChristoph Lameter 
20193b98b087SNishanth Aravamudan 		/*
20203b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
20213b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
20223b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
20233b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
20243b98b087SNishanth Aravamudan 		 * a useful offset.
20253b98b087SNishanth Aravamudan 		 */
20263b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
20273b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
20285da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
202998c70baaSLaurent Dufour 		return offset_il_node(pol, off);
20305da7ca86SChristoph Lameter 	} else
20315da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20325da7ca86SChristoph Lameter }
20335da7ca86SChristoph Lameter 
203400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2035480eccf9SLee Schermerhorn /*
203604ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2037b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2038b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2039b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2040b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2041b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2042480eccf9SLee Schermerhorn  *
204304ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
204452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2045b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2046b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2047c0ff7453SMiao Xie  *
2048d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2049480eccf9SLee Schermerhorn  */
205004ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
205104ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20525da7ca86SChristoph Lameter {
205304ec6264SVlastimil Babka 	int nid;
2054b27abaccSDave Hansen 	int mode;
20555da7ca86SChristoph Lameter 
2056dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2057b27abaccSDave Hansen 	*nodemask = NULL;
2058b27abaccSDave Hansen 	mode = (*mpol)->mode;
20595da7ca86SChristoph Lameter 
2060b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
206104ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
206204ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
206352cd3b07SLee Schermerhorn 	} else {
206404ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2065b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2066269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2067480eccf9SLee Schermerhorn 	}
206804ec6264SVlastimil Babka 	return nid;
20695da7ca86SChristoph Lameter }
207006808b08SLee Schermerhorn 
207106808b08SLee Schermerhorn /*
207206808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
207306808b08SLee Schermerhorn  *
207406808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
207506808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
207606808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
207706808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
207806808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
207906808b08SLee Schermerhorn  * of non-default mempolicy.
208006808b08SLee Schermerhorn  *
208106808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
208206808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
208306808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
208406808b08SLee Schermerhorn  *
208506808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
208606808b08SLee Schermerhorn  */
208706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
208806808b08SLee Schermerhorn {
208906808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
209006808b08SLee Schermerhorn 
209106808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
209206808b08SLee Schermerhorn 		return false;
209306808b08SLee Schermerhorn 
2094c0ff7453SMiao Xie 	task_lock(current);
209506808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
209606808b08SLee Schermerhorn 	switch (mempolicy->mode) {
209706808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2098b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
209906808b08SLee Schermerhorn 	case MPOL_BIND:
210006808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2101269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
210206808b08SLee Schermerhorn 		break;
210306808b08SLee Schermerhorn 
21047858d7bcSFeng Tang 	case MPOL_LOCAL:
2105269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
21067858d7bcSFeng Tang 		break;
21077858d7bcSFeng Tang 
210806808b08SLee Schermerhorn 	default:
210906808b08SLee Schermerhorn 		BUG();
211006808b08SLee Schermerhorn 	}
2111c0ff7453SMiao Xie 	task_unlock(current);
211206808b08SLee Schermerhorn 
211306808b08SLee Schermerhorn 	return true;
211406808b08SLee Schermerhorn }
211500ac59adSChen, Kenneth W #endif
21165da7ca86SChristoph Lameter 
21176f48d0ebSDavid Rientjes /*
2118b26e517aSFeng Tang  * mempolicy_in_oom_domain
21196f48d0ebSDavid Rientjes  *
2120b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2121b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2122b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2123b26e517aSFeng Tang  * memory allocated from all nodes in system.
21246f48d0ebSDavid Rientjes  *
21256f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
21266f48d0ebSDavid Rientjes  */
2127b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
21286f48d0ebSDavid Rientjes 					const nodemask_t *mask)
21296f48d0ebSDavid Rientjes {
21306f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
21316f48d0ebSDavid Rientjes 	bool ret = true;
21326f48d0ebSDavid Rientjes 
21336f48d0ebSDavid Rientjes 	if (!mask)
21346f48d0ebSDavid Rientjes 		return ret;
2135b26e517aSFeng Tang 
21366f48d0ebSDavid Rientjes 	task_lock(tsk);
21376f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2138b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2139269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
21406f48d0ebSDavid Rientjes 	task_unlock(tsk);
2141b26e517aSFeng Tang 
21426f48d0ebSDavid Rientjes 	return ret;
21436f48d0ebSDavid Rientjes }
21446f48d0ebSDavid Rientjes 
21451da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21461da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2147662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2148662f3a0bSAndi Kleen 					unsigned nid)
21491da177e4SLinus Torvalds {
21501da177e4SLinus Torvalds 	struct page *page;
21511da177e4SLinus Torvalds 
215284172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21534518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21544518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21554518085eSKemi Wang 		return page;
2156de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2157de55c8b2SAndrey Ryabinin 		preempt_disable();
2158f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2159de55c8b2SAndrey Ryabinin 		preempt_enable();
2160de55c8b2SAndrey Ryabinin 	}
21611da177e4SLinus Torvalds 	return page;
21621da177e4SLinus Torvalds }
21631da177e4SLinus Torvalds 
21644c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21654c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21664c54d949SFeng Tang {
21674c54d949SFeng Tang 	struct page *page;
21684c54d949SFeng Tang 	gfp_t preferred_gfp;
21694c54d949SFeng Tang 
21704c54d949SFeng Tang 	/*
21714c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
21724c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
21734c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21744c54d949SFeng Tang 	 * nodes in the system.
21754c54d949SFeng Tang 	 */
21764c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21774c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21784c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21794c54d949SFeng Tang 	if (!page)
21804c54d949SFeng Tang 		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
21814c54d949SFeng Tang 
21824c54d949SFeng Tang 	return page;
21834c54d949SFeng Tang }
21844c54d949SFeng Tang 
21851da177e4SLinus Torvalds /**
21860bbbc0b3SAndrea Arcangeli  * alloc_pages_vma - Allocate a page for a VMA.
2187eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
21880bbbc0b3SAndrea Arcangeli  * @order: Order of the GFP allocation.
21891da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2190eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2191be97a41bSVlastimil Babka  * @node: Which node to prefer for allocation (modulo policy).
2192eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21931da177e4SLinus Torvalds  *
2194eb350739SMatthew Wilcox (Oracle)  * Allocate a page for a specific address in @vma, using the appropriate
2195eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2196eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2197eb350739SMatthew Wilcox (Oracle)  * used for all allocations for pages that will be mapped into user space.
2198eb350739SMatthew Wilcox (Oracle)  *
2199eb350739SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22001da177e4SLinus Torvalds  */
2201eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
220219deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
22031da177e4SLinus Torvalds {
2204cc9a6c87SMel Gorman 	struct mempolicy *pol;
2205c0ff7453SMiao Xie 	struct page *page;
220604ec6264SVlastimil Babka 	int preferred_nid;
2207be97a41bSVlastimil Babka 	nodemask_t *nmask;
22081da177e4SLinus Torvalds 
2209dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2210cc9a6c87SMel Gorman 
2211be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
22121da177e4SLinus Torvalds 		unsigned nid;
22135da7ca86SChristoph Lameter 
22148eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
221552cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
22160bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2217be97a41bSVlastimil Babka 		goto out;
22181da177e4SLinus Torvalds 	}
22191da177e4SLinus Torvalds 
22204c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
22214c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
22224c54d949SFeng Tang 		mpol_cond_put(pol);
22234c54d949SFeng Tang 		goto out;
22244c54d949SFeng Tang 	}
22254c54d949SFeng Tang 
222619deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
222719deb769SDavid Rientjes 		int hpage_node = node;
222819deb769SDavid Rientjes 
222919deb769SDavid Rientjes 		/*
223019deb769SDavid Rientjes 		 * For hugepage allocations with a non-interleave policy that
223119deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
223219deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
223319deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
223419deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
223519deb769SDavid Rientjes 		 *
2236b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
223719deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
223819deb769SDavid Rientjes 		 */
22397858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2240269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
224119deb769SDavid Rientjes 
224219deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
224319deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
224419deb769SDavid Rientjes 			mpol_cond_put(pol);
2245cc638f32SVlastimil Babka 			/*
2246cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2247cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2248cc638f32SVlastimil Babka 			 */
224919deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2250cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
225176e654ccSDavid Rientjes 
225276e654ccSDavid Rientjes 			/*
225376e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
225476e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
225576e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2256cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
225776e654ccSDavid Rientjes 			 */
225876e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
225976e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2260cc638f32SVlastimil Babka 								gfp, order);
226176e654ccSDavid Rientjes 
226219deb769SDavid Rientjes 			goto out;
226319deb769SDavid Rientjes 		}
226419deb769SDavid Rientjes 	}
226519deb769SDavid Rientjes 
2266077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
226704ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
226884172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2269d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2270be97a41bSVlastimil Babka out:
2271077fcf11SAneesh Kumar K.V 	return page;
2272077fcf11SAneesh Kumar K.V }
227369262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
2274077fcf11SAneesh Kumar K.V 
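/*
 * Illustrative sketch of a caller (an assumption; the anonymous fault path
 * reaches this via the alloc_page_vma() and related wrappers): allocate one
 * movable highmem page for a faulting user address. The caller must already
 * hold the mmap_lock of vma->vm_mm.
 *
 *	static struct page *alloc_fault_page(struct vm_area_struct *vma,
 *					     unsigned long addr)
 *	{
 *		return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *				       numa_node_id(), false);
 *	}
 */
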
22751da177e4SLinus Torvalds /**
2276d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22776421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22786421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22791da177e4SLinus Torvalds  *
22806421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22816421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
22826421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22836421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22841da177e4SLinus Torvalds  *
22856421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22866421ec76SMatthew Wilcox (Oracle)  * flags are used.
22876421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22881da177e4SLinus Torvalds  */
2289d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22901da177e4SLinus Torvalds {
22918d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2292c0ff7453SMiao Xie 	struct page *page;
22931da177e4SLinus Torvalds 
22948d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22958d90274bSOleg Nesterov 		pol = get_task_policy(current);
229652cd3b07SLee Schermerhorn 
229752cd3b07SLee Schermerhorn 	/*
229852cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
229952cd3b07SLee Schermerhorn 	 * nor system default_policy
230052cd3b07SLee Schermerhorn 	 */
230145c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2302c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
23034c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
23044c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
23054c54d949SFeng Tang 				numa_node_id(), pol);
2306c0ff7453SMiao Xie 	else
230784172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
230804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
23095c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2310cc9a6c87SMel Gorman 
2311c0ff7453SMiao Xie 	return page;
23121da177e4SLinus Torvalds }
2313d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
23141da177e4SLinus Torvalds 
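/*
 * Illustrative sketch (example only, not used by this file): allocate an
 * order-2 block (four contiguous pages), use it, and free it again.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *
 *		memset(addr, 0, 4 * PAGE_SIZE);		// 1 << 2 pages
 *		__free_pages(page, 2);
 *	}
 */
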
2315ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2316ef0855d3SOleg Nesterov {
2317ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2318ef0855d3SOleg Nesterov 
2319ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2320ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2321ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2322ef0855d3SOleg Nesterov 	return 0;
2323ef0855d3SOleg Nesterov }
2324ef0855d3SOleg Nesterov 
23254225399aSPaul Jackson /*
2326846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23274225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
23284225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23294225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
23304225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2331708c1bbcSMiao Xie  *
2332708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2333708c1bbcSMiao Xie  * the cpuset's mems), so we need not do the rebind work for the current task.
23344225399aSPaul Jackson  */
23354225399aSPaul Jackson 
2336846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2337846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
23381da177e4SLinus Torvalds {
23391da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
23401da177e4SLinus Torvalds 
23411da177e4SLinus Torvalds 	if (!new)
23421da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2343708c1bbcSMiao Xie 
2344708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2345708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2346708c1bbcSMiao Xie 		task_lock(current);
2347708c1bbcSMiao Xie 		*new = *old;
2348708c1bbcSMiao Xie 		task_unlock(current);
2349708c1bbcSMiao Xie 	} else
2350708c1bbcSMiao Xie 		*new = *old;
2351708c1bbcSMiao Xie 
23524225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
23534225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2354213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
23554225399aSPaul Jackson 	}
23561da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
23571da177e4SLinus Torvalds 	return new;
23581da177e4SLinus Torvalds }
23591da177e4SLinus Torvalds 
23601da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2361fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
23621da177e4SLinus Torvalds {
23631da177e4SLinus Torvalds 	if (!a || !b)
2364fcfb4dccSKOSAKI Motohiro 		return false;
236545c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2366fcfb4dccSKOSAKI Motohiro 		return false;
236719800502SBob Liu 	if (a->flags != b->flags)
2368fcfb4dccSKOSAKI Motohiro 		return false;
236919800502SBob Liu 	if (mpol_store_user_nodemask(a))
237019800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2371fcfb4dccSKOSAKI Motohiro 			return false;
237219800502SBob Liu 
237345c4745aSLee Schermerhorn 	switch (a->mode) {
237419770b32SMel Gorman 	case MPOL_BIND:
23751da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
23761da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2377b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2378269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
23797858d7bcSFeng Tang 	case MPOL_LOCAL:
23807858d7bcSFeng Tang 		return true;
23811da177e4SLinus Torvalds 	default:
23821da177e4SLinus Torvalds 		BUG();
2383fcfb4dccSKOSAKI Motohiro 		return false;
23841da177e4SLinus Torvalds 	}
23851da177e4SLinus Torvalds }
23861da177e4SLinus Torvalds 
23871da177e4SLinus Torvalds /*
23881da177e4SLinus Torvalds  * Shared memory backing store policy support.
23891da177e4SLinus Torvalds  *
23901da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
23911da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
23924a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
23931da177e4SLinus Torvalds  * for any accesses to the tree.
23941da177e4SLinus Torvalds  */
23951da177e4SLinus Torvalds 
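/*
 * Illustrative sketch of how a shared-memory filesystem uses this store
 * (an assumption; shmem_set_policy()/shmem_get_policy() in mm/shmem.c are
 * the real callers, and "info->policy" below stands for the per-inode
 * struct shared_policy):
 *
 *	// mbind() on a mapping of the file: remember the range's policy
 *	mpol_set_shared_policy(&info->policy, vma, new_mpol);
 *
 *	// page fault / swap-in: look up the policy covering one page index
 *	struct mempolicy *pol =
 *		mpol_shared_policy_lookup(&info->policy, index);
 */
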
23964a8c7bb5SNathan Zimmer /*
23974a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
23984a8c7bb5SNathan Zimmer  * reading or for writing
23994a8c7bb5SNathan Zimmer  */
24001da177e4SLinus Torvalds static struct sp_node *
24011da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24021da177e4SLinus Torvalds {
24031da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24041da177e4SLinus Torvalds 
24051da177e4SLinus Torvalds 	while (n) {
24061da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24071da177e4SLinus Torvalds 
24081da177e4SLinus Torvalds 		if (start >= p->end)
24091da177e4SLinus Torvalds 			n = n->rb_right;
24101da177e4SLinus Torvalds 		else if (end <= p->start)
24111da177e4SLinus Torvalds 			n = n->rb_left;
24121da177e4SLinus Torvalds 		else
24131da177e4SLinus Torvalds 			break;
24141da177e4SLinus Torvalds 	}
24151da177e4SLinus Torvalds 	if (!n)
24161da177e4SLinus Torvalds 		return NULL;
24171da177e4SLinus Torvalds 	for (;;) {
24181da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24191da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24201da177e4SLinus Torvalds 		if (!prev)
24211da177e4SLinus Torvalds 			break;
24221da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24231da177e4SLinus Torvalds 		if (w->end <= start)
24241da177e4SLinus Torvalds 			break;
24251da177e4SLinus Torvalds 		n = prev;
24261da177e4SLinus Torvalds 	}
24271da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24281da177e4SLinus Torvalds }
24291da177e4SLinus Torvalds 
24304a8c7bb5SNathan Zimmer /*
24314a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
24324a8c7bb5SNathan Zimmer  * writing.
24334a8c7bb5SNathan Zimmer  */
24341da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24351da177e4SLinus Torvalds {
24361da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
24371da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
24381da177e4SLinus Torvalds 	struct sp_node *nd;
24391da177e4SLinus Torvalds 
24401da177e4SLinus Torvalds 	while (*p) {
24411da177e4SLinus Torvalds 		parent = *p;
24421da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
24431da177e4SLinus Torvalds 		if (new->start < nd->start)
24441da177e4SLinus Torvalds 			p = &(*p)->rb_left;
24451da177e4SLinus Torvalds 		else if (new->end > nd->end)
24461da177e4SLinus Torvalds 			p = &(*p)->rb_right;
24471da177e4SLinus Torvalds 		else
24481da177e4SLinus Torvalds 			BUG();
24491da177e4SLinus Torvalds 	}
24501da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
24511da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2452140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
245345c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
24541da177e4SLinus Torvalds }
24551da177e4SLinus Torvalds 
24561da177e4SLinus Torvalds /* Find shared policy intersecting idx */
24571da177e4SLinus Torvalds struct mempolicy *
24581da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
24591da177e4SLinus Torvalds {
24601da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
24611da177e4SLinus Torvalds 	struct sp_node *sn;
24621da177e4SLinus Torvalds 
24631da177e4SLinus Torvalds 	if (!sp->root.rb_node)
24641da177e4SLinus Torvalds 		return NULL;
24654a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
24661da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
24671da177e4SLinus Torvalds 	if (sn) {
24681da177e4SLinus Torvalds 		mpol_get(sn->policy);
24691da177e4SLinus Torvalds 		pol = sn->policy;
24701da177e4SLinus Torvalds 	}
24714a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
24721da177e4SLinus Torvalds 	return pol;
24731da177e4SLinus Torvalds }
24741da177e4SLinus Torvalds 
247563f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
247663f74ca2SKOSAKI Motohiro {
247763f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
247863f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
247963f74ca2SKOSAKI Motohiro }
248063f74ca2SKOSAKI Motohiro 
2481771fb4d8SLee Schermerhorn /**
2482771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2483771fb4d8SLee Schermerhorn  *
2484b46e14acSFabian Frederick  * @page: page to be checked
2485b46e14acSFabian Frederick  * @vma: vm area where page mapped
2486b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2487771fb4d8SLee Schermerhorn  *
2488771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and "compare to" the page's
24895f076944SMatthew Wilcox (Oracle)  * node id.  Policy determination "mimics" alloc_page_vma().
2490771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
24915f076944SMatthew Wilcox (Oracle)  *
2492062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2493062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2494771fb4d8SLee Schermerhorn  */
2495771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2496771fb4d8SLee Schermerhorn {
2497771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2498c33d6c06SMel Gorman 	struct zoneref *z;
2499771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2500771fb4d8SLee Schermerhorn 	unsigned long pgoff;
250190572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
250290572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
250398fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2504062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2505771fb4d8SLee Schermerhorn 
2506dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2507771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2508771fb4d8SLee Schermerhorn 		goto out;
2509771fb4d8SLee Schermerhorn 
2510771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2511771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2512771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2513771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
251498c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2515771fb4d8SLee Schermerhorn 		break;
2516771fb4d8SLee Schermerhorn 
2517771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2518b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2519b27abaccSDave Hansen 			goto out;
2520269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2521771fb4d8SLee Schermerhorn 		break;
2522771fb4d8SLee Schermerhorn 
25237858d7bcSFeng Tang 	case MPOL_LOCAL:
25247858d7bcSFeng Tang 		polnid = numa_node_id();
25257858d7bcSFeng Tang 		break;
25267858d7bcSFeng Tang 
2527771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2528bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2529bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2530269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2531bda420b9SHuang Ying 				break;
2532bda420b9SHuang Ying 			goto out;
2533bda420b9SHuang Ying 		}
2534b27abaccSDave Hansen 		fallthrough;
2535c33d6c06SMel Gorman 
2536b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2537771fb4d8SLee Schermerhorn 		/*
2538771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2539771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2540771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2541771fb4d8SLee Schermerhorn 		 */
2542269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2543771fb4d8SLee Schermerhorn 			goto out;
2544c33d6c06SMel Gorman 		z = first_zones_zonelist(
2545771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2546771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2547269fbe72SBen Widawsky 				&pol->nodes);
2548c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2549771fb4d8SLee Schermerhorn 		break;
2550771fb4d8SLee Schermerhorn 
2551771fb4d8SLee Schermerhorn 	default:
2552771fb4d8SLee Schermerhorn 		BUG();
2553771fb4d8SLee Schermerhorn 	}
25545606e387SMel Gorman 
25555606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2556e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
255790572890SPeter Zijlstra 		polnid = thisnid;
25585606e387SMel Gorman 
255910f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2560de1c9ce6SRik van Riel 			goto out;
2561de1c9ce6SRik van Riel 	}
2562e42c8ff2SMel Gorman 
2563771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2564771fb4d8SLee Schermerhorn 		ret = polnid;
2565771fb4d8SLee Schermerhorn out:
2566771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2567771fb4d8SLee Schermerhorn 
2568771fb4d8SLee Schermerhorn 	return ret;
2569771fb4d8SLee Schermerhorn }
2570771fb4d8SLee Schermerhorn 
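/*
 * Illustrative sketch of the NUMA hinting fault path (an assumption; see
 * do_numa_page() in mm/memory.c for the real caller):
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid == NUMA_NO_NODE)
 *		return;					// already well placed
 *	migrate_misplaced_page(page, vma, target_nid);	// try to move it
 */
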
2571c11600e4SDavid Rientjes /*
2572c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2573c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2574c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2575c11600e4SDavid Rientjes  * policy.
2576c11600e4SDavid Rientjes  */
2577c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2578c11600e4SDavid Rientjes {
2579c11600e4SDavid Rientjes 	struct mempolicy *pol;
2580c11600e4SDavid Rientjes 
2581c11600e4SDavid Rientjes 	task_lock(task);
2582c11600e4SDavid Rientjes 	pol = task->mempolicy;
2583c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2584c11600e4SDavid Rientjes 	task_unlock(task);
2585c11600e4SDavid Rientjes 	mpol_put(pol);
2586c11600e4SDavid Rientjes }
2587c11600e4SDavid Rientjes 
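/*
 * Illustrative sketch (an assumption; the task-exit path is the real
 * caller): the task's policy is detached before the final mpol_put(), so
 * allocations done while freeing it cannot see a stale pointer.
 *
 *	// late in task exit, after the mm has been torn down
 *	mpol_put_task_policy(current);	// current->mempolicy is NULL afterwards
 */
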
25881da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
25891da177e4SLinus Torvalds {
2590140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
25911da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
259263f74ca2SKOSAKI Motohiro 	sp_free(n);
25931da177e4SLinus Torvalds }
25941da177e4SLinus Torvalds 
259542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
259642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
259742288fe3SMel Gorman {
259842288fe3SMel Gorman 	node->start = start;
259942288fe3SMel Gorman 	node->end = end;
260042288fe3SMel Gorman 	node->policy = pol;
260142288fe3SMel Gorman }
260242288fe3SMel Gorman 
2603dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2604dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26051da177e4SLinus Torvalds {
2606869833f2SKOSAKI Motohiro 	struct sp_node *n;
2607869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26081da177e4SLinus Torvalds 
2609869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26101da177e4SLinus Torvalds 	if (!n)
26111da177e4SLinus Torvalds 		return NULL;
2612869833f2SKOSAKI Motohiro 
2613869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2614869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2615869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2616869833f2SKOSAKI Motohiro 		return NULL;
2617869833f2SKOSAKI Motohiro 	}
2618869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
261942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2620869833f2SKOSAKI Motohiro 
26211da177e4SLinus Torvalds 	return n;
26221da177e4SLinus Torvalds }
26231da177e4SLinus Torvalds 
26241da177e4SLinus Torvalds /* Replace a policy range. */
26251da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26261da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26271da177e4SLinus Torvalds {
2628b22d127aSMel Gorman 	struct sp_node *n;
262942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
263042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2631b22d127aSMel Gorman 	int ret = 0;
26321da177e4SLinus Torvalds 
263342288fe3SMel Gorman restart:
26344a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26351da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
26361da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
26371da177e4SLinus Torvalds 	while (n && n->start < end) {
26381da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
26391da177e4SLinus Torvalds 		if (n->start >= start) {
26401da177e4SLinus Torvalds 			if (n->end <= end)
26411da177e4SLinus Torvalds 				sp_delete(sp, n);
26421da177e4SLinus Torvalds 			else
26431da177e4SLinus Torvalds 				n->start = end;
26441da177e4SLinus Torvalds 		} else {
26451da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
26461da177e4SLinus Torvalds 			if (n->end > end) {
264742288fe3SMel Gorman 				if (!n_new)
264842288fe3SMel Gorman 					goto alloc_new;
264942288fe3SMel Gorman 
265042288fe3SMel Gorman 				*mpol_new = *n->policy;
265142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
26527880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
26531da177e4SLinus Torvalds 				n->end = start;
26545ca39575SHillf Danton 				sp_insert(sp, n_new);
265542288fe3SMel Gorman 				n_new = NULL;
265642288fe3SMel Gorman 				mpol_new = NULL;
26571da177e4SLinus Torvalds 				break;
26581da177e4SLinus Torvalds 			} else
26591da177e4SLinus Torvalds 				n->end = start;
26601da177e4SLinus Torvalds 		}
26611da177e4SLinus Torvalds 		if (!next)
26621da177e4SLinus Torvalds 			break;
26631da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26641da177e4SLinus Torvalds 	}
26651da177e4SLinus Torvalds 	if (new)
26661da177e4SLinus Torvalds 		sp_insert(sp, new);
26674a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
266842288fe3SMel Gorman 	ret = 0;
266942288fe3SMel Gorman 
267042288fe3SMel Gorman err_out:
267142288fe3SMel Gorman 	if (mpol_new)
267242288fe3SMel Gorman 		mpol_put(mpol_new);
267342288fe3SMel Gorman 	if (n_new)
267442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
267542288fe3SMel Gorman 
2676b22d127aSMel Gorman 	return ret;
267742288fe3SMel Gorman 
267842288fe3SMel Gorman alloc_new:
26794a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
268042288fe3SMel Gorman 	ret = -ENOMEM;
268142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
268242288fe3SMel Gorman 	if (!n_new)
268342288fe3SMel Gorman 		goto err_out;
268442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
268542288fe3SMel Gorman 	if (!mpol_new)
268642288fe3SMel Gorman 		goto err_out;
268742288fe3SMel Gorman 	goto restart;
26881da177e4SLinus Torvalds }
26891da177e4SLinus Torvalds 
269071fe804bSLee Schermerhorn /**
269171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
269271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
269371fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
269471fe804bSLee Schermerhorn  *
269571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
269671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
269771fe804bSLee Schermerhorn  * This must be released on exit.
26984bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode(), so we can use GFP_KERNEL.
269971fe804bSLee Schermerhorn  */
270071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27017339ff83SRobin Holt {
270258568d2aSMiao Xie 	int ret;
270358568d2aSMiao Xie 
270471fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27054a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27067339ff83SRobin Holt 
270771fe804bSLee Schermerhorn 	if (mpol) {
27087339ff83SRobin Holt 		struct vm_area_struct pvma;
270971fe804bSLee Schermerhorn 		struct mempolicy *new;
27104bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27117339ff83SRobin Holt 
27124bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27135c0c1654SLee Schermerhorn 			goto put_mpol;
271471fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
271571fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
271615d77835SLee Schermerhorn 		if (IS_ERR(new))
27170cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
271858568d2aSMiao Xie 
271958568d2aSMiao Xie 		task_lock(current);
27204bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
272158568d2aSMiao Xie 		task_unlock(current);
272215d77835SLee Schermerhorn 		if (ret)
27235c0c1654SLee Schermerhorn 			goto put_new;
272471fe804bSLee Schermerhorn 
272571fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27262c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
272771fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
272871fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
272915d77835SLee Schermerhorn 
27305c0c1654SLee Schermerhorn put_new:
273171fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27320cae3457SDan Carpenter free_scratch:
27334bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27345c0c1654SLee Schermerhorn put_mpol:
27355c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
27367339ff83SRobin Holt 	}
27377339ff83SRobin Holt }
27387339ff83SRobin Holt 
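/*
 * Illustrative sketch (an assumption; shmem_get_inode() in mm/shmem.c is the
 * real caller, and "sbinfo"/"info" are placeholder names): each new tmpfs
 * inode takes over a reference on the mount's "mpol=" mempolicy, which this
 * function then drops.
 *
 *	mpol_get(sbinfo->mpol);		// reference handed to the inode
 *	mpol_shared_policy_init(&info->policy, sbinfo->mpol);
 */
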
27391da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
27401da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
27411da177e4SLinus Torvalds {
27421da177e4SLinus Torvalds 	int err;
27431da177e4SLinus Torvalds 	struct sp_node *new = NULL;
27441da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
27451da177e4SLinus Torvalds 
2746028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
27471da177e4SLinus Torvalds 		 vma->vm_pgoff,
274845c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2749028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2750269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
27511da177e4SLinus Torvalds 
27521da177e4SLinus Torvalds 	if (npol) {
27531da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
27541da177e4SLinus Torvalds 		if (!new)
27551da177e4SLinus Torvalds 			return -ENOMEM;
27561da177e4SLinus Torvalds 	}
27571da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
27581da177e4SLinus Torvalds 	if (err && new)
275963f74ca2SKOSAKI Motohiro 		sp_free(new);
27601da177e4SLinus Torvalds 	return err;
27611da177e4SLinus Torvalds }
27621da177e4SLinus Torvalds 
27631da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
27641da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
27651da177e4SLinus Torvalds {
27661da177e4SLinus Torvalds 	struct sp_node *n;
27671da177e4SLinus Torvalds 	struct rb_node *next;
27681da177e4SLinus Torvalds 
27691da177e4SLinus Torvalds 	if (!p->root.rb_node)
27701da177e4SLinus Torvalds 		return;
27714a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
27721da177e4SLinus Torvalds 	next = rb_first(&p->root);
27731da177e4SLinus Torvalds 	while (next) {
27741da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27751da177e4SLinus Torvalds 		next = rb_next(&n->nd);
277663f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
27771da177e4SLinus Torvalds 	}
27784a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
27791da177e4SLinus Torvalds }
27801da177e4SLinus Torvalds 
27811a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2782c297663cSMel Gorman static int __initdata numabalancing_override;
27831a687c2eSMel Gorman 
27841a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
27851a687c2eSMel Gorman {
27861a687c2eSMel Gorman 	bool numabalancing_default = false;
27871a687c2eSMel Gorman 
27881a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
27891a687c2eSMel Gorman 		numabalancing_default = true;
27901a687c2eSMel Gorman 
2791c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2792c297663cSMel Gorman 	if (numabalancing_override)
2793c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2794c297663cSMel Gorman 
2795b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2796756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2797c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
27981a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
27991a687c2eSMel Gorman 	}
28001a687c2eSMel Gorman }
28011a687c2eSMel Gorman 
28021a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28031a687c2eSMel Gorman {
28041a687c2eSMel Gorman 	int ret = 0;
28051a687c2eSMel Gorman 	if (!str)
28061a687c2eSMel Gorman 		goto out;
28071a687c2eSMel Gorman 
28081a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2809c297663cSMel Gorman 		numabalancing_override = 1;
28101a687c2eSMel Gorman 		ret = 1;
28111a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2812c297663cSMel Gorman 		numabalancing_override = -1;
28131a687c2eSMel Gorman 		ret = 1;
28141a687c2eSMel Gorman 	}
28151a687c2eSMel Gorman out:
28161a687c2eSMel Gorman 	if (!ret)
28174a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28181a687c2eSMel Gorman 
28191a687c2eSMel Gorman 	return ret;
28201a687c2eSMel Gorman }
28211a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
28221a687c2eSMel Gorman #else
28231a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28241a687c2eSMel Gorman {
28251a687c2eSMel Gorman }
28261a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28271a687c2eSMel Gorman 
28281da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28291da177e4SLinus Torvalds void __init numa_policy_init(void)
28301da177e4SLinus Torvalds {
2831b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2832b71636e2SPaul Mundt 	unsigned long largest = 0;
2833b71636e2SPaul Mundt 	int nid, prefer = 0;
2834b71636e2SPaul Mundt 
28351da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
28361da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
283720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
28381da177e4SLinus Torvalds 
28391da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
28401da177e4SLinus Torvalds 				     sizeof(struct sp_node),
284120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
28421da177e4SLinus Torvalds 
28435606e387SMel Gorman 	for_each_node(nid) {
28445606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
28455606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
28465606e387SMel Gorman 			.mode = MPOL_PREFERRED,
28475606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2848269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
28495606e387SMel Gorman 		};
28505606e387SMel Gorman 	}
28515606e387SMel Gorman 
2852b71636e2SPaul Mundt 	/*
2853b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2854b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2855b71636e2SPaul Mundt 	 * back to the largest node if they're all smaller.
2856b71636e2SPaul Mundt 	 */
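	/*
	 * Worked example (assuming 4KiB pages): a node joins the boot-time
	 * interleave set when total_pages << PAGE_SHIFT >= 16 << 20, i.e.
	 * when it has at least (16 << 20) / (1 << 12) = 4096 present pages.
	 */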
2857b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
285801f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2859b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
28601da177e4SLinus Torvalds 
2861b71636e2SPaul Mundt 		/* Preserve the largest node */
2862b71636e2SPaul Mundt 		if (largest < total_pages) {
2863b71636e2SPaul Mundt 			largest = total_pages;
2864b71636e2SPaul Mundt 			prefer = nid;
2865b71636e2SPaul Mundt 		}
2866b71636e2SPaul Mundt 
2867b71636e2SPaul Mundt 		/* Interleave this node? */
2868b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2869b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2870b71636e2SPaul Mundt 	}
2871b71636e2SPaul Mundt 
2872b71636e2SPaul Mundt 	/* All too small, use the largest */
2873b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2874b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2875b71636e2SPaul Mundt 
2876028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2877b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
28781a687c2eSMel Gorman 
28791a687c2eSMel Gorman 	check_numabalancing_enable();
28801da177e4SLinus Torvalds }
28811da177e4SLinus Torvalds 
28828bccd85fSChristoph Lameter /* Reset policy of current process to default */
28831da177e4SLinus Torvalds void numa_default_policy(void)
28841da177e4SLinus Torvalds {
2885028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
28861da177e4SLinus Torvalds }
288768860ec1SPaul Jackson 
28884225399aSPaul Jackson /*
2889095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2890095f1fc4SLee Schermerhorn  */
2891095f1fc4SLee Schermerhorn 
2892345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2893345ace9cSLee Schermerhorn {
2894345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2895345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2896345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2897345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2898d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2899b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2900345ace9cSLee Schermerhorn };
29011a75a6c8SChristoph Lameter 
2902095f1fc4SLee Schermerhorn 
2903095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2904095f1fc4SLee Schermerhorn /**
2905f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2906095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
290771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2908095f1fc4SLee Schermerhorn  *
2909095f1fc4SLee Schermerhorn  * Format of input:
2910095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2911095f1fc4SLee Schermerhorn  *
291271fe804bSLee Schermerhorn  * On success, returns 0, else 1
2913095f1fc4SLee Schermerhorn  */
2914a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2915095f1fc4SLee Schermerhorn {
291671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2917f2a07f40SHugh Dickins 	unsigned short mode_flags;
291871fe804bSLee Schermerhorn 	nodemask_t nodes;
2919095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2920095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2921dedf2c73Szhong jiang 	int err = 1, mode;
2922095f1fc4SLee Schermerhorn 
2923c7a91bc7SDan Carpenter 	if (flags)
2924c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2925c7a91bc7SDan Carpenter 
2926095f1fc4SLee Schermerhorn 	if (nodelist) {
2927095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2928095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
292971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2930095f1fc4SLee Schermerhorn 			goto out;
293101f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2932095f1fc4SLee Schermerhorn 			goto out;
293371fe804bSLee Schermerhorn 	} else
293471fe804bSLee Schermerhorn 		nodes_clear(nodes);
293571fe804bSLee Schermerhorn 
2936dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2937dedf2c73Szhong jiang 	if (mode < 0)
2938095f1fc4SLee Schermerhorn 		goto out;
2939095f1fc4SLee Schermerhorn 
294071fe804bSLee Schermerhorn 	switch (mode) {
2941095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
294271fe804bSLee Schermerhorn 		/*
2943aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only, although later
2944aa9f7d51SRandy Dunlap 		 * we use first_node(nodes) to grab a single node, so here
2945aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
294671fe804bSLee Schermerhorn 		 */
2947095f1fc4SLee Schermerhorn 		if (nodelist) {
2948095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2949095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2950095f1fc4SLee Schermerhorn 				rest++;
2951926f2ae0SKOSAKI Motohiro 			if (*rest)
2952926f2ae0SKOSAKI Motohiro 				goto out;
2953aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
2954aa9f7d51SRandy Dunlap 				goto out;
2955095f1fc4SLee Schermerhorn 		}
2956095f1fc4SLee Schermerhorn 		break;
2957095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2958095f1fc4SLee Schermerhorn 		/*
2959095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2960095f1fc4SLee Schermerhorn 		 */
2961095f1fc4SLee Schermerhorn 		if (!nodelist)
296201f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
29633f226aa1SLee Schermerhorn 		break;
296471fe804bSLee Schermerhorn 	case MPOL_LOCAL:
29653f226aa1SLee Schermerhorn 		/*
296671fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
29673f226aa1SLee Schermerhorn 		 */
296871fe804bSLee Schermerhorn 		if (nodelist)
29693f226aa1SLee Schermerhorn 			goto out;
29703f226aa1SLee Schermerhorn 		break;
2971413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2972413b43deSRavikiran G Thirumalai 		/*
2973413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2974413b43deSRavikiran G Thirumalai 		 */
2975413b43deSRavikiran G Thirumalai 		if (!nodelist)
2976413b43deSRavikiran G Thirumalai 			err = 0;
2977413b43deSRavikiran G Thirumalai 		goto out;
2978b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2979d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
298071fe804bSLee Schermerhorn 		/*
2981d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
298271fe804bSLee Schermerhorn 		 */
2983d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2984d69b2e63SKOSAKI Motohiro 			goto out;
2985095f1fc4SLee Schermerhorn 	}
2986095f1fc4SLee Schermerhorn 
298771fe804bSLee Schermerhorn 	mode_flags = 0;
2988095f1fc4SLee Schermerhorn 	if (flags) {
2989095f1fc4SLee Schermerhorn 		/*
2990095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2991095f1fc4SLee Schermerhorn 		 * mode flags.
2992095f1fc4SLee Schermerhorn 		 */
2993095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
299471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2995095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
299671fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2997095f1fc4SLee Schermerhorn 		else
2998926f2ae0SKOSAKI Motohiro 			goto out;
2999095f1fc4SLee Schermerhorn 	}
300071fe804bSLee Schermerhorn 
300171fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
300271fe804bSLee Schermerhorn 	if (IS_ERR(new))
3003926f2ae0SKOSAKI Motohiro 		goto out;
3004926f2ae0SKOSAKI Motohiro 
3005f2a07f40SHugh Dickins 	/*
3006f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3007f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3008f2a07f40SHugh Dickins 	 */
3009269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3010269fbe72SBen Widawsky 		new->nodes = nodes;
3011269fbe72SBen Widawsky 	} else if (nodelist) {
3012269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3013269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3014269fbe72SBen Widawsky 	} else {
30157858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3016269fbe72SBen Widawsky 	}
3017f2a07f40SHugh Dickins 
3018f2a07f40SHugh Dickins 	/*
3019f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3020f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3021f2a07f40SHugh Dickins 	 */
3022e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3023f2a07f40SHugh Dickins 
3024926f2ae0SKOSAKI Motohiro 	err = 0;
302571fe804bSLee Schermerhorn 
3026095f1fc4SLee Schermerhorn out:
3027095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3028095f1fc4SLee Schermerhorn 	if (nodelist)
3029095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3030095f1fc4SLee Schermerhorn 	if (flags)
3031095f1fc4SLee Schermerhorn 		*--flags = '=';
303271fe804bSLee Schermerhorn 	if (!err)
303371fe804bSLee Schermerhorn 		*mpol = new;
3034095f1fc4SLee Schermerhorn 	return err;
3035095f1fc4SLee Schermerhorn }
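
/*
 * Illustrative examples of strings accepted above (tmpfs "mpol=" mount
 * option); the node numbers are arbitrary:
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE over nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED, MPOL_F_STATIC_NODES, node 1
 *	"bind=relative:0,2"	MPOL_BIND, MPOL_F_RELATIVE_NODES, nodes 0 and 2
 *	"local"			MPOL_LOCAL, no nodelist allowed
 *
 *	struct mempolicy *mpol;
 *	char str[] = "interleave:0-3";
 *
 *	if (!mpol_parse_str(str, &mpol))	// 0 means success
 *		mpol_put(mpol);			// drop the reference when done
 */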
3036095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3037095f1fc4SLee Schermerhorn 
303871fe804bSLee Schermerhorn /**
303971fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
304071fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
304171fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
304271fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
304371fe804bSLee Schermerhorn  *
3044948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3045948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3046948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
30471a75a6c8SChristoph Lameter  */
3048948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
30491a75a6c8SChristoph Lameter {
30501a75a6c8SChristoph Lameter 	char *p = buffer;
3051948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3052948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3053948927eeSDavid Rientjes 	unsigned short flags = 0;
30541a75a6c8SChristoph Lameter 
30558790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3056bea904d5SLee Schermerhorn 		mode = pol->mode;
3057948927eeSDavid Rientjes 		flags = pol->flags;
3058948927eeSDavid Rientjes 	}
3059bea904d5SLee Schermerhorn 
30601a75a6c8SChristoph Lameter 	switch (mode) {
30611a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
30627858d7bcSFeng Tang 	case MPOL_LOCAL:
30631a75a6c8SChristoph Lameter 		break;
30641a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3065b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
30661a75a6c8SChristoph Lameter 	case MPOL_BIND:
30671a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3068269fbe72SBen Widawsky 		nodes = pol->nodes;
30691a75a6c8SChristoph Lameter 		break;
30701a75a6c8SChristoph Lameter 	default:
3071948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3072948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3073948927eeSDavid Rientjes 		return;
30741a75a6c8SChristoph Lameter 	}
30751a75a6c8SChristoph Lameter 
3076b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
30771a75a6c8SChristoph Lameter 
3078fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3079948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3080f5b087b5SDavid Rientjes 
30812291990aSLee Schermerhorn 		/*
30822291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
30832291990aSLee Schermerhorn 		 */
3084f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
30852291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
30862291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
30872291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3088f5b087b5SDavid Rientjes 	}
3089f5b087b5SDavid Rientjes 
30909e763e0fSTejun Heo 	if (!nodes_empty(nodes))
30919e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
30929e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
30931a75a6c8SChristoph Lameter }
309420b51af1SHuang Ying 
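/*
 * Illustrative sketch (an assumption; show_numa_map() in fs/proc/task_mmu.c
 * does essentially this for /proc/<pid>/numa_maps):
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	seq_printf(m, "%s", buffer);	// e.g. "interleave:0-3" or "default"
 */
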
309520b51af1SHuang Ying bool numa_demotion_enabled = false;
309620b51af1SHuang Ying 
309720b51af1SHuang Ying #ifdef CONFIG_SYSFS
309820b51af1SHuang Ying static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
309920b51af1SHuang Ying 					  struct kobj_attribute *attr, char *buf)
310020b51af1SHuang Ying {
310120b51af1SHuang Ying 	return sysfs_emit(buf, "%s\n",
310220b51af1SHuang Ying 			  numa_demotion_enabled ? "true" : "false");
310320b51af1SHuang Ying }
310420b51af1SHuang Ying 
310520b51af1SHuang Ying static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
310620b51af1SHuang Ying 					   struct kobj_attribute *attr,
310720b51af1SHuang Ying 					   const char *buf, size_t count)
310820b51af1SHuang Ying {
310920b51af1SHuang Ying 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
311020b51af1SHuang Ying 		numa_demotion_enabled = true;
311120b51af1SHuang Ying 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
311220b51af1SHuang Ying 		numa_demotion_enabled = false;
311320b51af1SHuang Ying 	else
311420b51af1SHuang Ying 		return -EINVAL;
311520b51af1SHuang Ying 
311620b51af1SHuang Ying 	return count;
311720b51af1SHuang Ying }
311820b51af1SHuang Ying 
311920b51af1SHuang Ying static struct kobj_attribute numa_demotion_enabled_attr =
312020b51af1SHuang Ying 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
312120b51af1SHuang Ying 	       numa_demotion_enabled_store);
312220b51af1SHuang Ying 
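/*
 * Illustrative usage from userspace (path assumed from mm_kobj plus the
 * "numa" kobject created below):
 *
 *	# echo true > /sys/kernel/mm/numa/demotion_enabled
 *	# cat /sys/kernel/mm/numa/demotion_enabled
 *	true
 */
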
312320b51af1SHuang Ying static struct attribute *numa_attrs[] = {
312420b51af1SHuang Ying 	&numa_demotion_enabled_attr.attr,
312520b51af1SHuang Ying 	NULL,
312620b51af1SHuang Ying };
312720b51af1SHuang Ying 
312820b51af1SHuang Ying static const struct attribute_group numa_attr_group = {
312920b51af1SHuang Ying 	.attrs = numa_attrs,
313020b51af1SHuang Ying };
313120b51af1SHuang Ying 
313220b51af1SHuang Ying static int __init numa_init_sysfs(void)
313320b51af1SHuang Ying {
313420b51af1SHuang Ying 	int err;
313520b51af1SHuang Ying 	struct kobject *numa_kobj;
313620b51af1SHuang Ying 
313720b51af1SHuang Ying 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
313820b51af1SHuang Ying 	if (!numa_kobj) {
313920b51af1SHuang Ying 		pr_err("failed to create numa kobject\n");
314020b51af1SHuang Ying 		return -ENOMEM;
314120b51af1SHuang Ying 	}
314220b51af1SHuang Ying 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
314320b51af1SHuang Ying 	if (err) {
314420b51af1SHuang Ying 		pr_err("failed to register numa group\n");
314520b51af1SHuang Ying 		goto delete_obj;
314620b51af1SHuang Ying 	}
314720b51af1SHuang Ying 	return 0;
314820b51af1SHuang Ying 
314920b51af1SHuang Ying delete_obj:
315020b51af1SHuang Ying 	kobject_put(numa_kobj);
315120b51af1SHuang Ying 	return err;
315220b51af1SHuang Ying }
315320b51af1SHuang Ying subsys_initcall(numa_init_sysfs);
315420b51af1SHuang Ying #endif
3155