// SPDX-License-Identifier: GPL-2.0-only
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 *
 * NUMA policy allows the user to give hints in which node(s) memory should
 * be allocated.
 *
 * Support the following policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a per-process
 *                counter is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case NUMA_NO_NODE here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * preferred many Try a set of nodes first before normal fallback. This is
 *                similar to preferred without the special case.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *		  in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non-interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
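
/*
 * Illustrative userspace sketch (not part of this kernel file): the modes
 * above are requested through the set_mempolicy(2) and mbind(2) syscalls.
 * A minimal, hedged example; error handling is elided and the nodemask
 * value assumes a machine with (at least) nodes 0 and 1.
 *
 *	#include <numaif.h>		// libnuma syscall wrappers
 *	#include <sys/mman.h>
 *
 *	unsigned long mask = 0x3;	// nodes 0 and 1
 *	size_t len = 1 << 20;
 *
 *	// Process policy: interleave this task's future allocations:
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// VMA policy: bind one mapping, overriding the process policy:
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(p, len, MPOL_BIND, &mask, 8 * sizeof(mask), 0);
 */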

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>
#include <linux/swapops.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT       (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
#define MPOL_MF_WRLOCK       (MPOL_MF_INTERNAL << 2)	/* Write-lock walked vmas */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1), /* never free it */
	.mode = MPOL_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];

/**
 * numa_nearest_node - Find nearest node by state
 * @node: Node id to start the search
 * @state: State to filter the search
 *
 * Lookup the closest node by distance if @node is not in @state.
 *
 * Return: this @node if it is in @state, otherwise the closest node by distance
 */
int numa_nearest_node(int node, unsigned int state)
{
	int min_dist = INT_MAX, dist, n, min_node;

	if (state >= NR_NODE_STATES)
		return -EINVAL;

	if (node == NUMA_NO_NODE || node_state(node, state))
		return node;

	min_node = node;
	for_each_node_state(n, state) {
		dist = node_distance(node, n);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = n;
		}
	}

	return min_node;
}
EXPORT_SYMBOL_GPL(numa_nearest_node);
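
/*
 * Hedged usage sketch (not from this file): pick the closest node that
 * actually has memory, e.g. to place a per-device buffer.  "dev" is a
 * hypothetical struct device pointer and "size" a caller-chosen length.
 *
 *	int nid = numa_nearest_node(dev_to_node(dev), N_MEMORY);
 *	void *buf = kmalloc_node(size, GFP_KERNEL, nid);
 */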

struct mempolicy *get_task_policy(struct task_struct *p)
{
	struct mempolicy *pol = p->mempolicy;
	int node;

	if (pol)
		return pol;

	node = numa_node_id();
	if (node != NUMA_NO_NODE) {
		pol = &preferred_node_policy[node];
		/* preferred_node_policy is not initialised early in boot */
		if (pol->mode)
			return pol;
	}

	return &default_policy;
}

static const struct mempolicy_operations {
	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
	return pol->flags & MPOL_MODE_FLAGS;
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
				   const nodemask_t *rel)
{
	nodemask_t tmp;
	nodes_fold(tmp, *orig, nodes_weight(*rel));
	nodes_onto(*ret, tmp, *rel);
}

static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;
	pol->nodes = *nodes;
	return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
	if (nodes_empty(*nodes))
		return -EINVAL;

	nodes_clear(pol->nodes);
	node_set(first_node(*nodes), pol->nodes);
	return 0;
}

/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_lock for write.
 */
static int mpol_set_nodemask(struct mempolicy *pol,
		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
{
	int ret;

	/*
	 * Default (pol==NULL) resp. local memory policies are not a
	 * subject of any remapping. They also do not need any special
	 * constructor.
	 */
	if (!pol || pol->mode == MPOL_LOCAL)
		return 0;

	/* Check N_MEMORY */
	nodes_and(nsc->mask1,
		  cpuset_current_mems_allowed, node_states[N_MEMORY]);

	VM_BUG_ON(!nodes);

	if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
	else
		nodes_and(nsc->mask2, *nodes, nsc->mask1);

	if (mpol_store_user_nodemask(pol))
		pol->w.user_nodemask = *nodes;
	else
		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;

	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
	return ret;
}

/*
 * This function just creates a new policy, does some checks and simple
 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
				  nodemask_t *nodes)
{
	struct mempolicy *policy;

	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);

	if (mode == MPOL_DEFAULT) {
		if (nodes && !nodes_empty(*nodes))
			return ERR_PTR(-EINVAL);
		return NULL;
	}
	VM_BUG_ON(!nodes);

	/*
	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
	 * All other modes require a valid pointer to a non-empty nodemask.
	 */
	if (mode == MPOL_PREFERRED) {
		if (nodes_empty(*nodes)) {
			if (((flags & MPOL_F_STATIC_NODES) ||
			     (flags & MPOL_F_RELATIVE_NODES)))
				return ERR_PTR(-EINVAL);

			mode = MPOL_LOCAL;
		}
	} else if (mode == MPOL_LOCAL) {
		if (!nodes_empty(*nodes) ||
		    (flags & MPOL_F_STATIC_NODES) ||
		    (flags & MPOL_F_RELATIVE_NODES))
			return ERR_PTR(-EINVAL);
	} else if (nodes_empty(*nodes))
		return ERR_PTR(-EINVAL);
	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
	if (!policy)
		return ERR_PTR(-ENOMEM);
	atomic_set(&policy->refcnt, 1);
	policy->mode = mode;
	policy->flags = flags;
	policy->home_node = NUMA_NO_NODE;

	return policy;
}

/* Slow path of a mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
	if (!atomic_dec_and_test(&p->refcnt))
		return;
	kmem_cache_free(policy_cache, p);
}
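
/*
 * Typical construction/teardown sequence, mirroring do_set_mempolicy()
 * later in this file (a sketch only; "scratch" is a NODEMASK_SCRATCH
 * allocation and mode/flags/nodes are assumed already validated):
 *
 *	struct mempolicy *new = mpol_new(mode, flags, nodes);
 *	if (!IS_ERR(new)) {
 *		task_lock(current);
 *		err = mpol_set_nodemask(new, nodes, scratch);
 *		task_unlock(current);
 *		...
 *		mpol_put(new);	// drop the reference from mpol_new()
 *	}
 */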

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
	nodemask_t tmp;

	if (pol->flags & MPOL_F_STATIC_NODES)
		nodes_and(tmp, pol->w.user_nodemask, *nodes);
	else if (pol->flags & MPOL_F_RELATIVE_NODES)
		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
	else {
		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
								*nodes);
		pol->w.cpuset_mems_allowed = *nodes;
	}

	if (nodes_empty(tmp))
		tmp = *nodes;

	pol->nodes = tmp;
}

static void mpol_rebind_preferred(struct mempolicy *pol,
						const nodemask_t *nodes)
{
	pol->w.cpuset_mems_allowed = *nodes;
}

/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 * policies are protected by task->mems_allowed_seq to prevent a premature
 * OOM/allocation failure due to parallel nodemask modification.
 */
static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
	if (!pol || pol->mode == MPOL_LOCAL)
		return;
	if (!mpol_store_user_nodemask(pol) &&
	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
		return;

	mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 *
 * Called with task's alloc_lock held.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
	mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		vma_start_write(vma);
		mpol_rebind_policy(vma->vm_policy, new);
	}
	mmap_write_unlock(mm);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
	[MPOL_DEFAULT] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_INTERLEAVE] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_PREFERRED] = {
		.create = mpol_new_preferred,
		.rebind = mpol_rebind_preferred,
	},
	[MPOL_BIND] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_nodemask,
	},
	[MPOL_LOCAL] = {
		.rebind = mpol_rebind_default,
	},
	[MPOL_PREFERRED_MANY] = {
		.create = mpol_new_nodemask,
		.rebind = mpol_rebind_preferred,
	},
};

static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
				unsigned long flags);

static bool strictly_unmovable(unsigned long flags)
{
	/*
	 * STRICT without MOVE flags lets do_mbind() fail immediately with -EIO
	 * if any misplaced page is found.
	 */
	return (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ==
			 MPOL_MF_STRICT;
}

struct queue_pages {
	struct list_head *pagelist;
	unsigned long flags;
	nodemask_t *nmask;
	unsigned long start;
	unsigned long end;
	struct vm_area_struct *first;
	struct folio *large;		/* note last large folio encountered */
	long nr_failed;			/* could not be isolated at this time */
};

/*
 * Check if the folio's nid is in qp->nmask.
 *
 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 * in the invert of qp->nmask.
 */
static inline bool queue_folio_required(struct folio *folio,
					struct queue_pages *qp)
{
	int nid = folio_nid(folio);
	unsigned long flags = qp->flags;

	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
}

static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
{
	struct folio *folio;
	struct queue_pages *qp = walk->private;

	if (unlikely(is_pmd_migration_entry(*pmd))) {
		qp->nr_failed++;
		return;
	}
	folio = pfn_folio(pmd_pfn(*pmd));
	if (is_huge_zero_page(&folio->page)) {
		walk->action = ACTION_CONTINUE;
		return;
	}
	if (!queue_folio_required(folio, qp))
		return;
	if (!(qp->flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
	    !vma_migratable(walk->vma) ||
	    !migrate_folio_add(folio, qp->pagelist, qp->flags))
		qp->nr_failed++;
}

/*
 * Scan through folios, checking if they satisfy the required conditions,
 * moving them from LRU to local pagelist for migration if they do (or not).
 *
 * queue_folios_pte_range() has two possible return values:
 * 0 - continue walking to scan for more, even if an existing folio on the
 *     wrong node could not be isolated and queued for migration.
 * -EIO - only MPOL_MF_STRICT was specified, without MPOL_MF_MOVE or ..._ALL,
 *        and an existing folio was on a node that does not follow the policy.
 */
static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct folio *folio;
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	pte_t *pte, *mapped_pte;
	pte_t ptent;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		queue_folios_pmd(pmd, walk);
		spin_unlock(ptl);
		goto out;
	}

	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);
		if (pte_none(ptent))
			continue;
		if (!pte_present(ptent)) {
			if (is_migration_entry(pte_to_swp_entry(ptent)))
				qp->nr_failed++;
			continue;
		}
		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;
		/*
		 * vm_normal_folio() filters out zero pages, but there might
		 * still be reserved folios to skip, perhaps in a VDSO.
		 */
		if (folio_test_reserved(folio))
			continue;
		if (!queue_folio_required(folio, qp))
			continue;
		if (folio_test_large(folio)) {
			/*
			 * A large folio can only be isolated from LRU once,
			 * but may be mapped by many PTEs (and Copy-On-Write may
			 * intersperse PTEs of other, order 0, folios).  This is
			 * a common case, so don't mistake it for failure (but
			 * there can be other cases of multi-mapped pages which
			 * this quick check does not help to filter out - and a
			 * search of the pagelist might grow to be prohibitive).
			 *
			 * migrate_pages(&pagelist) returns nr_failed folios, so
			 * check "large" now so that queue_pages_range() returns
			 * a comparable nr_failed folios.  This does imply that
			 * if folio could not be isolated for some racy reason
			 * at its first PTE, later PTEs will not give it another
			 * chance of isolation; but keeps the accounting simple.
			 */
			if (folio == qp->large)
				continue;
			qp->large = folio;
		}
		if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
		    !vma_migratable(vma) ||
		    !migrate_folio_add(folio, qp->pagelist, flags)) {
			qp->nr_failed++;
			if (strictly_unmovable(flags))
				break;
		}
	}
	pte_unmap_unlock(mapped_pte, ptl);
	cond_resched();
out:
	if (qp->nr_failed && strictly_unmovable(flags))
		return -EIO;
	return 0;
}

static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
			       unsigned long addr, unsigned long end,
			       struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	struct queue_pages *qp = walk->private;
	unsigned long flags = qp->flags;
	struct folio *folio;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);
	if (!pte_present(entry)) {
		if (unlikely(is_hugetlb_entry_migration(entry)))
			qp->nr_failed++;
		goto unlock;
	}
	folio = pfn_folio(pte_pfn(entry));
	if (!queue_folio_required(folio, qp))
		goto unlock;
	if (!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ||
	    !vma_migratable(walk->vma)) {
		qp->nr_failed++;
		goto unlock;
	}
	/*
	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
	 * Choosing not to migrate a shared folio is not counted as a failure.
	 *
	 * To check if the folio is shared, ideally we want to make sure
	 * every page is mapped to the same process. Doing that is very
	 * expensive, so check the estimated sharers of the folio instead.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) ||
	    (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte)))
		if (!isolate_hugetlb(folio, qp->pagelist))
			qp->nr_failed++;
unlock:
	spin_unlock(ptl);
	if (qp->nr_failed && strictly_unmovable(flags))
		return -EIO;
#endif
	return 0;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	struct mmu_gather tlb;
	long nr_updated;

	tlb_gather_mmu(&tlb, vma->vm_mm);

	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
	if (nr_updated > 0)
		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

	tlb_finish_mmu(&tlb);

	return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
			unsigned long addr, unsigned long end)
{
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

static int queue_pages_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct vm_area_struct *next, *vma = walk->vma;
	struct queue_pages *qp = walk->private;
	unsigned long endvma = vma->vm_end;
	unsigned long flags = qp->flags;

	/* range check first */
	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);

	if (!qp->first) {
		qp->first = vma;
		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
			(qp->start < vma->vm_start))
			/* hole at head side of range */
			return -EFAULT;
	}
	next = find_vma(vma->vm_mm, vma->vm_end);
	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
		((vma->vm_end < qp->end) &&
		(!next || vma->vm_end < next->vm_start)))
		/* hole at middle or tail of range */
		return -EFAULT;

	/*
	 * Need to check MPOL_MF_STRICT to return -EIO if possible
	 * regardless of vma_migratable
	 */
	if (!vma_migratable(vma) &&
	    !(flags & MPOL_MF_STRICT))
		return 1;

	if (endvma > end)
		endvma = end;

	if (flags & MPOL_MF_LAZY) {
		/* Similar to task_numa_work, skip inaccessible VMAs */
		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
			!(vma->vm_flags & VM_MIXEDMAP))
			change_prot_numa(vma, start, endvma);
		return 1;
	}

	/*
	 * Check page nodes, and queue pages to move, in the current vma.
	 * But if no moving, and no strict checking, the scan can be skipped.
	 */
	if (flags & (MPOL_MF_STRICT | MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
		return 0;
	return 1;
}

static const struct mm_walk_ops queue_pages_walk_ops = {
	.hugetlb_entry		= queue_folios_hugetlb,
	.pmd_entry		= queue_folios_pte_range,
	.test_walk		= queue_pages_test_walk,
	.walk_lock		= PGWALK_RDLOCK,
};

static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = {
	.hugetlb_entry		= queue_folios_hugetlb,
	.pmd_entry		= queue_folios_pte_range,
	.test_walk		= queue_pages_test_walk,
	.walk_lock		= PGWALK_WRLOCK,
};

/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are not on the required set of @nodes,
 * and migration is allowed, they are isolated and queued to @pagelist.
 *
 * queue_pages_range() may return:
 * 0 - all pages already on the right node, or successfully queued for moving
 *     (or neither strict checking nor moving requested: only range checking).
 * >0 - this number of misplaced folios could not be queued for moving
 *      (a hugetlbfs page or a transparent huge page being counted as 1).
 * -EIO - a misplaced page found, when MPOL_MF_STRICT specified without MOVEs.
 * -EFAULT - a hole in the memory range, when MPOL_MF_DISCONTIG_OK unspecified.
 */
static long
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
		nodemask_t *nodes, unsigned long flags,
		struct list_head *pagelist)
{
	int err;
	struct queue_pages qp = {
		.pagelist = pagelist,
		.flags = flags,
		.nmask = nodes,
		.start = start,
		.end = end,
		.first = NULL,
	};
	const struct mm_walk_ops *ops = (flags & MPOL_MF_WRLOCK) ?
			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;

	err = walk_page_range(mm, start, end, ops, &qp);

	if (!qp.first)
		/* whole range in hole */
		err = -EFAULT;

	return err ? : qp.nr_failed;
}
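
/*
 * Hedged caller sketch, mirroring migrate_to_node() below: isolate the
 * misplaced pages over a range, then hand the list to migrate_pages().
 * "mtc" stands in for a caller-chosen struct migration_target_control.
 *
 *	LIST_HEAD(pagelist);
 *	long nr_failed;
 *
 *	nr_failed = queue_pages_range(mm, start, end, &nmask,
 *			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *	if (!list_empty(&pagelist))
 *		migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			      (unsigned long)&mtc, MIGRATE_SYNC,
 *			      MR_SYSCALL, NULL);
 */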
7611da177e4SLinus Torvalds 
762869833f2SKOSAKI Motohiro /*
763869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
764c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
765869833f2SKOSAKI Motohiro  */
vma_replace_policy(struct vm_area_struct * vma,struct mempolicy * pol)766869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
767869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7688d34694cSKOSAKI Motohiro {
769869833f2SKOSAKI Motohiro 	int err;
770869833f2SKOSAKI Motohiro 	struct mempolicy *old;
771869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7728d34694cSKOSAKI Motohiro 
7736c21e066SJann Horn 	vma_assert_write_locked(vma);
7746c21e066SJann Horn 
7758d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7768d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7778d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7788d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7798d34694cSKOSAKI Motohiro 
780869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
781869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
782869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
783869833f2SKOSAKI Motohiro 
784869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7858d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
786869833f2SKOSAKI Motohiro 		if (err)
787869833f2SKOSAKI Motohiro 			goto err_out;
7888d34694cSKOSAKI Motohiro 	}
789869833f2SKOSAKI Motohiro 
790869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
791c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
792869833f2SKOSAKI Motohiro 	mpol_put(old);
793869833f2SKOSAKI Motohiro 
794869833f2SKOSAKI Motohiro 	return 0;
795869833f2SKOSAKI Motohiro  err_out:
796869833f2SKOSAKI Motohiro 	mpol_put(new);
7978d34694cSKOSAKI Motohiro 	return err;
7988d34694cSKOSAKI Motohiro }
7998d34694cSKOSAKI Motohiro 
800f4e9e0e6SLiam R. Howlett /* Split or merge the VMA (if required) and apply the new policy */
mbind_range(struct vma_iterator * vmi,struct vm_area_struct * vma,struct vm_area_struct ** prev,unsigned long start,unsigned long end,struct mempolicy * new_pol)801f4e9e0e6SLiam R. Howlett static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
802f4e9e0e6SLiam R. Howlett 		struct vm_area_struct **prev, unsigned long start,
8039d8cebd4SKOSAKI Motohiro 		unsigned long end, struct mempolicy *new_pol)
8041da177e4SLinus Torvalds {
805f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *merged;
806f4e9e0e6SLiam R. Howlett 	unsigned long vmstart, vmend;
807e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
808f4e9e0e6SLiam R. Howlett 	int err;
8091da177e4SLinus Torvalds 
810f4e9e0e6SLiam R. Howlett 	vmend = min(end, vma->vm_end);
811f4e9e0e6SLiam R. Howlett 	if (start > vma->vm_start) {
812f4e9e0e6SLiam R. Howlett 		*prev = vma;
813f4e9e0e6SLiam R. Howlett 		vmstart = start;
814f4e9e0e6SLiam R. Howlett 	} else {
815f4e9e0e6SLiam R. Howlett 		vmstart = vma->vm_start;
816f4e9e0e6SLiam R. Howlett 	}
8179d8cebd4SKOSAKI Motohiro 
81800ca0f2eSLorenzo Stoakes 	if (mpol_equal(vma_policy(vma), new_pol)) {
81900ca0f2eSLorenzo Stoakes 		*prev = vma;
820f4e9e0e6SLiam R. Howlett 		return 0;
82100ca0f2eSLorenzo Stoakes 	}
822e26a5114SKOSAKI Motohiro 
823f4e9e0e6SLiam R. Howlett 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
824f4e9e0e6SLiam R. Howlett 	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
825f4e9e0e6SLiam R. Howlett 			 vma->anon_vma, vma->vm_file, pgoff, new_pol,
826f4e9e0e6SLiam R. Howlett 			 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
827f4e9e0e6SLiam R. Howlett 	if (merged) {
828f4e9e0e6SLiam R. Howlett 		*prev = merged;
829f4e9e0e6SLiam R. Howlett 		return vma_replace_policy(merged, new_pol);
8301da177e4SLinus Torvalds 	}
831f4e9e0e6SLiam R. Howlett 
8329d8cebd4SKOSAKI Motohiro 	if (vma->vm_start != vmstart) {
833f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmstart, 1);
8349d8cebd4SKOSAKI Motohiro 		if (err)
8351da177e4SLinus Torvalds 			return err;
8361da177e4SLinus Torvalds 	}
8371da177e4SLinus Torvalds 
838f4e9e0e6SLiam R. Howlett 	if (vma->vm_end != vmend) {
839f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmend, 0);
840f4e9e0e6SLiam R. Howlett 		if (err)
841f4e9e0e6SLiam R. Howlett 			return err;
842f4e9e0e6SLiam R. Howlett 	}
843f4e9e0e6SLiam R. Howlett 
844f4e9e0e6SLiam R. Howlett 	*prev = vma;
845f4e9e0e6SLiam R. Howlett 	return vma_replace_policy(vma, new_pol);
846f4e9e0e6SLiam R. Howlett }
847f4e9e0e6SLiam R. Howlett 
8481da177e4SLinus Torvalds /* Set the process memory policy */
do_set_mempolicy(unsigned short mode,unsigned short flags,nodemask_t * nodes)849028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
850028fec41SDavid Rientjes 			     nodemask_t *nodes)
8511da177e4SLinus Torvalds {
85258568d2aSMiao Xie 	struct mempolicy *new, *old;
8534bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
85458568d2aSMiao Xie 	int ret;
8551da177e4SLinus Torvalds 
8564bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8574bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
858f4e53d91SLee Schermerhorn 
8594bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8604bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8614bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8624bfc4495SKAMEZAWA Hiroyuki 		goto out;
8634bfc4495SKAMEZAWA Hiroyuki 	}
8642c7c3a7dSOleg Nesterov 
86512c1dc8eSAbel Wu 	task_lock(current);
8664bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
86758568d2aSMiao Xie 	if (ret) {
86812c1dc8eSAbel Wu 		task_unlock(current);
86958568d2aSMiao Xie 		mpol_put(new);
8704bfc4495SKAMEZAWA Hiroyuki 		goto out;
87158568d2aSMiao Xie 	}
87212c1dc8eSAbel Wu 
87358568d2aSMiao Xie 	old = current->mempolicy;
8741da177e4SLinus Torvalds 	current->mempolicy = new;
87545816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87645816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
87758568d2aSMiao Xie 	task_unlock(current);
87858568d2aSMiao Xie 	mpol_put(old);
8794bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8804bfc4495SKAMEZAWA Hiroyuki out:
8814bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8824bfc4495SKAMEZAWA Hiroyuki 	return ret;
8831da177e4SLinus Torvalds }
8841da177e4SLinus Torvalds 
885bea904d5SLee Schermerhorn /*
886bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
88758568d2aSMiao Xie  *
88858568d2aSMiao Xie  * Called with task's alloc_lock held
889bea904d5SLee Schermerhorn  */
get_policy_nodemask(struct mempolicy * p,nodemask_t * nodes)890bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8911da177e4SLinus Torvalds {
892dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
893bea904d5SLee Schermerhorn 	if (p == &default_policy)
894bea904d5SLee Schermerhorn 		return;
895bea904d5SLee Schermerhorn 
89645c4745aSLee Schermerhorn 	switch (p->mode) {
89719770b32SMel Gorman 	case MPOL_BIND:
8981da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
899269fbe72SBen Widawsky 	case MPOL_PREFERRED:
900b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
901269fbe72SBen Widawsky 		*nodes = p->nodes;
9021da177e4SLinus Torvalds 		break;
9037858d7bcSFeng Tang 	case MPOL_LOCAL:
9047858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9057858d7bcSFeng Tang 		break;
9061da177e4SLinus Torvalds 	default:
9071da177e4SLinus Torvalds 		BUG();
9081da177e4SLinus Torvalds 	}
9091da177e4SLinus Torvalds }
9101da177e4SLinus Torvalds 
lookup_node(struct mm_struct * mm,unsigned long addr)9113b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9121da177e4SLinus Torvalds {
913ba841078SPeter Xu 	struct page *p = NULL;
914f728b9c4SJohn Hubbard 	int ret;
9151da177e4SLinus Torvalds 
916f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
917f728b9c4SJohn Hubbard 	if (ret > 0) {
918f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9191da177e4SLinus Torvalds 		put_page(p);
9201da177e4SLinus Torvalds 	}
921f728b9c4SJohn Hubbard 	return ret;
9221da177e4SLinus Torvalds }
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds /* Retrieve NUMA policy */
do_get_mempolicy(int * policy,nodemask_t * nmask,unsigned long addr,unsigned long flags)925dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9261da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9271da177e4SLinus Torvalds {
9288bccd85fSChristoph Lameter 	int err;
9291da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9301da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9313b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9321da177e4SLinus Torvalds 
933754af6f5SLee Schermerhorn 	if (flags &
934754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9351da177e4SLinus Torvalds 		return -EINVAL;
936754af6f5SLee Schermerhorn 
937754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
938754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
939754af6f5SLee Schermerhorn 			return -EINVAL;
940754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
94158568d2aSMiao Xie 		task_lock(current);
942754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
94358568d2aSMiao Xie 		task_unlock(current);
944754af6f5SLee Schermerhorn 		return 0;
945754af6f5SLee Schermerhorn 	}
946754af6f5SLee Schermerhorn 
9471da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
948bea904d5SLee Schermerhorn 		/*
949bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
950bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
951bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
952bea904d5SLee Schermerhorn 		 */
953d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
95433e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9551da177e4SLinus Torvalds 		if (!vma) {
956d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9571da177e4SLinus Torvalds 			return -EFAULT;
9581da177e4SLinus Torvalds 		}
9591da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9601da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9611da177e4SLinus Torvalds 		else
9621da177e4SLinus Torvalds 			pol = vma->vm_policy;
9631da177e4SLinus Torvalds 	} else if (addr)
9641da177e4SLinus Torvalds 		return -EINVAL;
9651da177e4SLinus Torvalds 
9661da177e4SLinus Torvalds 	if (!pol)
967bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9681da177e4SLinus Torvalds 
9691da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9701da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9713b9aadf7SAndrea Arcangeli 			/*
972f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
973f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
974f728b9c4SJohn Hubbard 			 * valid, "vma" is stale.
9753b9aadf7SAndrea Arcangeli 			 */
9763b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9773b9aadf7SAndrea Arcangeli 			vma = NULL;
9783b9aadf7SAndrea Arcangeli 			mpol_get(pol);
979f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9803b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9811da177e4SLinus Torvalds 			if (err < 0)
9821da177e4SLinus Torvalds 				goto out;
9838bccd85fSChristoph Lameter 			*policy = err;
9841da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
98545c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
986269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9871da177e4SLinus Torvalds 		} else {
9881da177e4SLinus Torvalds 			err = -EINVAL;
9891da177e4SLinus Torvalds 			goto out;
9901da177e4SLinus Torvalds 		}
991bea904d5SLee Schermerhorn 	} else {
992bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
993bea904d5SLee Schermerhorn 						pol->mode;
994d79df630SDavid Rientjes 		/*
995d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
996d79df630SDavid Rientjes 		 * the policy to userspace.
997d79df630SDavid Rientjes 		 */
998d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
999bea904d5SLee Schermerhorn 	}
10001da177e4SLinus Torvalds 
10011da177e4SLinus Torvalds 	err = 0;
100258568d2aSMiao Xie 	if (nmask) {
1003c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1004c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1005c6b6ef8bSLee Schermerhorn 		} else {
100658568d2aSMiao Xie 			task_lock(current);
1007bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
100858568d2aSMiao Xie 			task_unlock(current);
100958568d2aSMiao Xie 		}
1010c6b6ef8bSLee Schermerhorn 	}
10111da177e4SLinus Torvalds 
10121da177e4SLinus Torvalds  out:
101352cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10141da177e4SLinus Torvalds 	if (vma)
1015d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10163b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10173b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10181da177e4SLinus Torvalds 	return err;
10191da177e4SLinus Torvalds }
10201da177e4SLinus Torvalds 
1021b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
migrate_folio_add(struct folio * folio,struct list_head * foliolist,unsigned long flags)1022cc424890SHugh Dickins static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1023fc301289SChristoph Lameter 				unsigned long flags)
10246ce3c4c0SChristoph Lameter {
10256ce3c4c0SChristoph Lameter 	/*
1026cc424890SHugh Dickins 	 * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio.
1027cc424890SHugh Dickins 	 * Choosing not to migrate a shared folio is not counted as a failure.
10284a64981dSVishal Moola (Oracle) 	 *
10294a64981dSVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
10304a64981dSVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
1031cc424890SHugh Dickins 	 * expensive, so check the estimated sharers of the folio instead.
10326ce3c4c0SChristoph Lameter 	 */
10334a64981dSVishal Moola (Oracle) 	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1034be2d5756SBaolin Wang 		if (folio_isolate_lru(folio)) {
10354a64981dSVishal Moola (Oracle) 			list_add_tail(&folio->lru, foliolist);
10364a64981dSVishal Moola (Oracle) 			node_stat_mod_folio(folio,
10374a64981dSVishal Moola (Oracle) 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
10384a64981dSVishal Moola (Oracle) 				folio_nr_pages(folio));
1039cc424890SHugh Dickins 		} else {
1040a53190a4SYang Shi 			/*
10414a64981dSVishal Moola (Oracle) 			 * A non-movable folio may reach here.  There may also be
10424a64981dSVishal Moola (Oracle) 			 * folios that are temporarily off the LRU, or non-LRU
10434a64981dSVishal Moola (Oracle) 			 * movable folios.  Treat them all as unmovable, since they
1044cc424890SHugh Dickins 			 * can't be isolated and so can't be moved at the moment.
1045a53190a4SYang Shi 			 */
1046cc424890SHugh Dickins 			return false;
104762695a84SNick Piggin 		}
104862695a84SNick Piggin 	}
1049cc424890SHugh Dickins 	return true;
10506ce3c4c0SChristoph Lameter }
10516ce3c4c0SChristoph Lameter 
10526ce3c4c0SChristoph Lameter /*
10537e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10547e2ab150SChristoph Lameter  * Returns an error or the number of pages not migrated.
10557e2ab150SChristoph Lameter  */
1056cc424890SHugh Dickins static long migrate_to_node(struct mm_struct *mm, int source, int dest,
1057dbcb0f19SAdrian Bunk 			    int flags)
10587e2ab150SChristoph Lameter {
10597e2ab150SChristoph Lameter 	nodemask_t nmask;
106066850be5SLiam R. Howlett 	struct vm_area_struct *vma;
10617e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
1062cc424890SHugh Dickins 	long nr_failed;
1063cc424890SHugh Dickins 	long err = 0;
1064a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1065a0976311SJoonsoo Kim 		.nid = dest,
1066a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1067a0976311SJoonsoo Kim 	};
10687e2ab150SChristoph Lameter 
10697e2ab150SChristoph Lameter 	nodes_clear(nmask);
10707e2ab150SChristoph Lameter 	node_set(source, nmask);
10717e2ab150SChristoph Lameter 
107208270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1073cc424890SHugh Dickins 	vma = find_vma(mm, 0);
1074*a13b2b9bSDavid Hildenbrand 	if (unlikely(!vma)) {
1075*a13b2b9bSDavid Hildenbrand 		mmap_read_unlock(mm);
1076*a13b2b9bSDavid Hildenbrand 		return 0;
1077*a13b2b9bSDavid Hildenbrand 	}
1078cc424890SHugh Dickins 
1079cc424890SHugh Dickins 	/*
1080cc424890SHugh Dickins 	 * This does not migrate the range, but isolates all pages that
1081cc424890SHugh Dickins 	 * need migration.  Between passing in the full user address
1082cc424890SHugh Dickins 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail,
1083cc424890SHugh Dickins 	 * but passes back the count of pages which could not be isolated.
1084cc424890SHugh Dickins 	 */
1085cc424890SHugh Dickins 	nr_failed = queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
1086cc424890SHugh Dickins 				      flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10877e2ab150SChristoph Lameter 
1088cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1089a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10905ac95884SYang Shi 			(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1091cf608ac1SMinchan Kim 		if (err)
1092e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1093cf608ac1SMinchan Kim 	}
109495a402c3SChristoph Lameter 
1095cc424890SHugh Dickins 	if (err >= 0)
1096cc424890SHugh Dickins 		err += nr_failed;
10977e2ab150SChristoph Lameter 	return err;
10987e2ab150SChristoph Lameter }
10997e2ab150SChristoph Lameter 
11007e2ab150SChristoph Lameter /*
11017e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
11027e2ab150SChristoph Lameter  * layout as much as possible.
110339743889SChristoph Lameter  *
110539743889SChristoph Lameter  * Returns the number of pages that could not be moved.
110539743889SChristoph Lameter  */
11060ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11070ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110839743889SChristoph Lameter {
1109cc424890SHugh Dickins 	long nr_failed = 0;
1110cc424890SHugh Dickins 	long err = 0;
11117e2ab150SChristoph Lameter 	nodemask_t tmp;
111239743889SChristoph Lameter 
1113361a2a22SMinchan Kim 	lru_cache_disable();
11140aedadf9SChristoph Lameter 
1115d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1116d4984711SChristoph Lameter 
11177e2ab150SChristoph Lameter 	/*
11187e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11197e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11207e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11217e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11227e2ab150SChristoph Lameter 	 *
11237e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11247e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11257e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11267e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11277e2ab150SChristoph Lameter 	 *
11287e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11297e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11307e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11317e2ab150SChristoph Lameter 	 *
11327e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11337e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11347e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11357e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11367e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
11377e2ab150SChristoph Lameter 	 *
11387e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11397e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11407e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11417e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1142ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
11437e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11447e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11457e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11467e2ab150SChristoph Lameter 	 */
11477e2ab150SChristoph Lameter 
11480ce72d4fSAndrew Morton 	tmp = *from;
11497e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11507e2ab150SChristoph Lameter 		int s, d;
1151b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11527e2ab150SChristoph Lameter 		int dest = 0;
11537e2ab150SChristoph Lameter 
11547e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11554a5b18ccSLarry Woodman 
11564a5b18ccSLarry Woodman 			/*
11574a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11584a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11594a5b18ccSLarry Woodman 			 * threads and memory areas.
11604a5b18ccSLarry Woodman 			 *
11614a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11624a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11634a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11644a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11654a5b18ccSLarry Woodman 			 * mask.
11664a5b18ccSLarry Woodman 			 *
11674a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11684a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11694a5b18ccSLarry Woodman 			 */
11704a5b18ccSLarry Woodman 
11710ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11720ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11734a5b18ccSLarry Woodman 				continue;
11744a5b18ccSLarry Woodman 
11750ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11767e2ab150SChristoph Lameter 			if (s == d)
11777e2ab150SChristoph Lameter 				continue;
11787e2ab150SChristoph Lameter 
11797e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11807e2ab150SChristoph Lameter 			dest = d;
11817e2ab150SChristoph Lameter 
11827e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11837e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11847e2ab150SChristoph Lameter 				break;
11857e2ab150SChristoph Lameter 		}
1186b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11877e2ab150SChristoph Lameter 			break;
11887e2ab150SChristoph Lameter 
11897e2ab150SChristoph Lameter 		node_clear(source, tmp);
11907e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11917e2ab150SChristoph Lameter 		if (err > 0)
1192cc424890SHugh Dickins 			nr_failed += err;
11937e2ab150SChristoph Lameter 		if (err < 0)
11947e2ab150SChristoph Lameter 			break;
119539743889SChristoph Lameter 	}
1196d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1197d479960eSMinchan Kim 
1198361a2a22SMinchan Kim 	lru_cache_enable();
11997e2ab150SChristoph Lameter 	if (err < 0)
12007e2ab150SChristoph Lameter 		return err;
1201cc424890SHugh Dickins 	return (nr_failed < INT_MAX) ? nr_failed : INT_MAX;
120239743889SChristoph Lameter }
120339743889SChristoph Lameter 
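/*
 * Illustration of do_migrate_pages() (editorial sketch, expanding the
 * example in the comment above): with from = {0,1,2} and to = {3,4,5},
 * node_remap() yields the pairs <0,3>, <1,4>, <2,5> and all three
 * source nodes are drained.  With from = {0-7} and to = {3,4,5},
 * nodes 3, 4 and 5 are skipped as sources because they are already in
 * the destination mask, so only pages on 0, 1, 2, 6 and 7 move.
 */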
12043ad33b24SLee Schermerhorn /*
12053ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1206d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
12073ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12083ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12093ad33b24SLee Schermerhorn  * is in virtual address order.
12103ad33b24SLee Schermerhorn  */
12114e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
121295a402c3SChristoph Lameter {
1213d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12143f649ab7SKees Cook 	unsigned long address;
121566850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, current->mm, start);
1216ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
121795a402c3SChristoph Lameter 
121866850be5SLiam R. Howlett 	for_each_vma(vmi, vma) {
12194e096ae1SMatthew Wilcox (Oracle) 		address = page_address_in_vma(&src->page, vma);
12203ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12213ad33b24SLee Schermerhorn 			break;
12223ad33b24SLee Schermerhorn 	}
12233ad33b24SLee Schermerhorn 
1224d0ce0e47SSidhartha Kumar 	if (folio_test_hugetlb(src)) {
12254e096ae1SMatthew Wilcox (Oracle) 		return alloc_hugetlb_folio_vma(folio_hstate(src),
1226389c8178SMichal Hocko 				vma, address);
1227d0ce0e47SSidhartha Kumar 	}
1228c8633798SNaoya Horiguchi 
1229ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1230ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1231ec4858e0SMatthew Wilcox (Oracle) 
123211c731e8SWanpeng Li 	/*
1233ec4858e0SMatthew Wilcox (Oracle) 	 * if !vma, vma_alloc_folio() will use task or system default policy
123411c731e8SWanpeng Li 	 */
12354e096ae1SMatthew Wilcox (Oracle) 	return vma_alloc_folio(gfp, folio_order(src), vma, address,
1236ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
123795a402c3SChristoph Lameter }
1238b20a3503SChristoph Lameter #else
1239b20a3503SChristoph Lameter 
1240cc424890SHugh Dickins static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1241b20a3503SChristoph Lameter 				unsigned long flags)
1242b20a3503SChristoph Lameter {
1243cc424890SHugh Dickins 	return false;
1244b20a3503SChristoph Lameter }
1245b20a3503SChristoph Lameter 
12460ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12470ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1248b20a3503SChristoph Lameter {
1249b20a3503SChristoph Lameter 	return -ENOSYS;
1250b20a3503SChristoph Lameter }
125195a402c3SChristoph Lameter 
12524e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
125395a402c3SChristoph Lameter {
125495a402c3SChristoph Lameter 	return NULL;
125595a402c3SChristoph Lameter }
1256b20a3503SChristoph Lameter #endif
1257b20a3503SChristoph Lameter 
1258dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1259028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1260028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12616ce3c4c0SChristoph Lameter {
12626ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
1263f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1264f4e9e0e6SLiam R. Howlett 	struct vma_iterator vmi;
12656ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12666ce3c4c0SChristoph Lameter 	unsigned long end;
1267cc424890SHugh Dickins 	long err;
1268cc424890SHugh Dickins 	long nr_failed;
12696ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12706ce3c4c0SChristoph Lameter 
1271b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12726ce3c4c0SChristoph Lameter 		return -EINVAL;
127374c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12746ce3c4c0SChristoph Lameter 		return -EPERM;
12756ce3c4c0SChristoph Lameter 
12766ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12776ce3c4c0SChristoph Lameter 		return -EINVAL;
12786ce3c4c0SChristoph Lameter 
12796ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12806ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12816ce3c4c0SChristoph Lameter 
1282aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
12836ce3c4c0SChristoph Lameter 	end = start + len;
12846ce3c4c0SChristoph Lameter 
12856ce3c4c0SChristoph Lameter 	if (end < start)
12866ce3c4c0SChristoph Lameter 		return -EINVAL;
12876ce3c4c0SChristoph Lameter 	if (end == start)
12886ce3c4c0SChristoph Lameter 		return 0;
12896ce3c4c0SChristoph Lameter 
1290028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12916ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12926ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12936ce3c4c0SChristoph Lameter 
1294b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1295b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1296b24f53a0SLee Schermerhorn 
12976ce3c4c0SChristoph Lameter 	/*
12986ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operations
12996ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces are okay after all.
13006ce3c4c0SChristoph Lameter 	 */
13016ce3c4c0SChristoph Lameter 	if (!new)
13026ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13036ce3c4c0SChristoph Lameter 
1304028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1305028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130600ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13076ce3c4c0SChristoph Lameter 
1308cc424890SHugh Dickins 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1309361a2a22SMinchan Kim 		lru_cache_disable();
13104bfc4495SKAMEZAWA Hiroyuki 	{
13114bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13124bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1313d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13144bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13154bfc4495SKAMEZAWA Hiroyuki 			if (err)
1316d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13174bfc4495SKAMEZAWA Hiroyuki 		} else
13184bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13194bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13204bfc4495SKAMEZAWA Hiroyuki 	}
1321b05ca738SKOSAKI Motohiro 	if (err)
1322b05ca738SKOSAKI Motohiro 		goto mpol_out;
1323b05ca738SKOSAKI Motohiro 
13246c21e066SJann Horn 	/*
1325cc424890SHugh Dickins 	 * Lock the VMAs before scanning for pages to migrate,
1326cc424890SHugh Dickins 	 * to ensure we don't miss a concurrently inserted page.
13276c21e066SJann Horn 	 */
1328cc424890SHugh Dickins 	nr_failed = queue_pages_range(mm, start, end, nmask,
1329cc424890SHugh Dickins 			flags | MPOL_MF_INVERT | MPOL_MF_WRLOCK, &pagelist);
1330d8835445SYang Shi 
1331cc424890SHugh Dickins 	if (nr_failed < 0) {
1332cc424890SHugh Dickins 		err = nr_failed;
1333cc424890SHugh Dickins 	} else {
1334f4e9e0e6SLiam R. Howlett 		vma_iter_init(&vmi, mm, start);
1335f4e9e0e6SLiam R. Howlett 		prev = vma_prev(&vmi);
1336f4e9e0e6SLiam R. Howlett 		for_each_vma_range(vmi, vma, end) {
1337f4e9e0e6SLiam R. Howlett 			err = mbind_range(&vmi, vma, &prev, start, end, new);
1338f4e9e0e6SLiam R. Howlett 			if (err)
1339f4e9e0e6SLiam R. Howlett 				break;
1340f4e9e0e6SLiam R. Howlett 		}
1341cc424890SHugh Dickins 	}
13427e2ab150SChristoph Lameter 
1343b24f53a0SLee Schermerhorn 	if (!err) {
1344cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1345b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1346cc424890SHugh Dickins 			nr_failed |= migrate_pages(&pagelist, new_folio, NULL,
13475ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1348cc424890SHugh Dickins 		}
1349cc424890SHugh Dickins 		if (nr_failed && (flags & MPOL_MF_STRICT))
1350cc424890SHugh Dickins 			err = -EIO;
1351cf608ac1SMinchan Kim 	}
13526ce3c4c0SChristoph Lameter 
1353a85dfc30SYang Shi 	if (!list_empty(&pagelist))
1354a85dfc30SYang Shi 		putback_movable_pages(&pagelist);
1355a85dfc30SYang Shi 
1356d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1357b05ca738SKOSAKI Motohiro mpol_out:
1358f0be3d32SLee Schermerhorn 	mpol_put(new);
1359d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1360361a2a22SMinchan Kim 		lru_cache_enable();
13616ce3c4c0SChristoph Lameter 	return err;
13626ce3c4c0SChristoph Lameter }
13636ce3c4c0SChristoph Lameter 
136439743889SChristoph Lameter /*
13658bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13668bccd85fSChristoph Lameter  */
1367e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1368e130242dSArnd Bergmann 		      unsigned long maxnode)
1369e130242dSArnd Bergmann {
1370e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1371e130242dSArnd Bergmann 	int ret;
1372e130242dSArnd Bergmann 
1373e130242dSArnd Bergmann 	if (in_compat_syscall())
1374e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1375e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1376e130242dSArnd Bergmann 					maxnode);
1377e130242dSArnd Bergmann 	else
1378e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1379e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1380e130242dSArnd Bergmann 
1381e130242dSArnd Bergmann 	if (ret)
1382e130242dSArnd Bergmann 		return -EFAULT;
1383e130242dSArnd Bergmann 
1384e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1385e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1386e130242dSArnd Bergmann 
1387e130242dSArnd Bergmann 	return 0;
1388e130242dSArnd Bergmann }
13898bccd85fSChristoph Lameter 
13908bccd85fSChristoph Lameter /* Copy a node mask from user space. */
139139743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13928bccd85fSChristoph Lameter 		     unsigned long maxnode)
13938bccd85fSChristoph Lameter {
13948bccd85fSChristoph Lameter 	--maxnode;
13958bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13968bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13978bccd85fSChristoph Lameter 		return 0;
1398a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1399636f13c1SChris Wright 		return -EINVAL;
14008bccd85fSChristoph Lameter 
140156521e7aSYisheng Xie 	/*
140256521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1403e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1404e130242dSArnd Bergmann 	 * starting at the end.
140556521e7aSYisheng Xie 	 */
1406e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1407e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1408e130242dSArnd Bergmann 		unsigned long t;
14098bccd85fSChristoph Lameter 
1410000eca5dSTianyu Li 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
141156521e7aSYisheng Xie 			return -EFAULT;
1412e130242dSArnd Bergmann 
1413e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1414e130242dSArnd Bergmann 			maxnode -= bits;
1415e130242dSArnd Bergmann 		} else {
1416e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1417e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1418e130242dSArnd Bergmann 		}
1419e130242dSArnd Bergmann 		if (t)
142056521e7aSYisheng Xie 			return -EINVAL;
142156521e7aSYisheng Xie 	}
142256521e7aSYisheng Xie 
1423e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14248bccd85fSChristoph Lameter }
14258bccd85fSChristoph Lameter 
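/*
 * Worked example of the trimming loop above (editorial sketch): with
 * MAX_NUMNODES = 64 and a user-supplied maxnode of 1024, the words
 * covering bits 960-1023 down to 64-127 are fetched one at a time and
 * must all be zero (-EINVAL otherwise); in a word straddling the
 * limit, only the bits at or above MAX_NUMNODES are rejected.  Only
 * then are the low MAX_NUMNODES bits copied into *nodes.
 */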
14268bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14278bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14288bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14298bccd85fSChristoph Lameter {
14308bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1431050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1432e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1433e130242dSArnd Bergmann 
1434e130242dSArnd Bergmann 	if (compat)
1435e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14368bccd85fSChristoph Lameter 
14378bccd85fSChristoph Lameter 	if (copy > nbytes) {
14388bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14398bccd85fSChristoph Lameter 			return -EINVAL;
14408bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14418bccd85fSChristoph Lameter 			return -EFAULT;
14428bccd85fSChristoph Lameter 		copy = nbytes;
1443e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14448bccd85fSChristoph Lameter 	}
1445e130242dSArnd Bergmann 
1446e130242dSArnd Bergmann 	if (compat)
1447e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1448e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1449e130242dSArnd Bergmann 
14508bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14518bccd85fSChristoph Lameter }
14528bccd85fSChristoph Lameter 
145395837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
145495837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
145595837924SFeng Tang {
145695837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
145795837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1458b27abaccSDave Hansen 
1459a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >= MPOL_MAX)
146095837924SFeng Tang 		return -EINVAL;
146195837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
146295837924SFeng Tang 		return -EINVAL;
14636d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14646d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14656d2aec9eSEric Dumazet 			return -EINVAL;
14666d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14676d2aec9eSEric Dumazet 	}
146895837924SFeng Tang 	return 0;
146995837924SFeng Tang }
147095837924SFeng Tang 
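/*
 * Examples of how the checks above play out (editorial sketch):
 *
 *	MPOL_BIND | MPOL_F_NUMA_BALANCING           accepted; MPOL_F_MOF
 *	                                            and MPOL_F_MORON set
 *	MPOL_INTERLEAVE | MPOL_F_NUMA_BALANCING     -EINVAL
 *	MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES -EINVAL (exclusive)
 */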
1471e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1472e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1473e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14748bccd85fSChristoph Lameter {
1475028fec41SDavid Rientjes 	unsigned short mode_flags;
147695837924SFeng Tang 	nodemask_t nodes;
147795837924SFeng Tang 	int lmode = mode;
147895837924SFeng Tang 	int err;
14798bccd85fSChristoph Lameter 
1480057d3389SAndrey Konovalov 	start = untagged_addr(start);
148195837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
148295837924SFeng Tang 	if (err)
148395837924SFeng Tang 		return err;
148495837924SFeng Tang 
14858bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14868bccd85fSChristoph Lameter 	if (err)
14878bccd85fSChristoph Lameter 		return err;
148895837924SFeng Tang 
148995837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14908bccd85fSChristoph Lameter }
14918bccd85fSChristoph Lameter 
1492c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1493c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1494c6018b4bSAneesh Kumar K.V {
1495c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1496f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1497e976936cSMichal Hocko 	struct mempolicy *new, *old;
1498c6018b4bSAneesh Kumar K.V 	unsigned long end;
1499c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
150066850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
1501c6018b4bSAneesh Kumar K.V 
1502c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1503c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1504c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1505c6018b4bSAneesh Kumar K.V 	/*
1506c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extensions, if any.
1507c6018b4bSAneesh Kumar K.V 	 */
1508c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1509c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1510c6018b4bSAneesh Kumar K.V 
1511c6018b4bSAneesh Kumar K.V 	/*
1512c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1513c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1514c6018b4bSAneesh Kumar K.V 	 */
1515c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1516c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1517c6018b4bSAneesh Kumar K.V 
1518aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
1519c6018b4bSAneesh Kumar K.V 	end = start + len;
1520c6018b4bSAneesh Kumar K.V 
1521c6018b4bSAneesh Kumar K.V 	if (end < start)
1522c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1523c6018b4bSAneesh Kumar K.V 	if (end == start)
1524c6018b4bSAneesh Kumar K.V 		return 0;
1525c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
1526f4e9e0e6SLiam R. Howlett 	prev = vma_prev(&vmi);
152766850be5SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1528c6018b4bSAneesh Kumar K.V 		/*
1529c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1530c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, we return an error.  We don't reset
1531c6018b4bSAneesh Kumar K.V 		 * the home node for vmas we already updated before.
1532c6018b4bSAneesh Kumar K.V 		 */
1533e976936cSMichal Hocko 		old = vma_policy(vma);
153451f62537SLiam R. Howlett 		if (!old) {
153551f62537SLiam R. Howlett 			prev = vma;
1536e976936cSMichal Hocko 			continue;
153751f62537SLiam R. Howlett 		}
1538e976936cSMichal Hocko 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1539c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1540c6018b4bSAneesh Kumar K.V 			break;
1541c6018b4bSAneesh Kumar K.V 		}
1542e976936cSMichal Hocko 		new = mpol_dup(old);
1543e976936cSMichal Hocko 		if (IS_ERR(new)) {
1544e976936cSMichal Hocko 			err = PTR_ERR(new);
1545e976936cSMichal Hocko 			break;
1546e976936cSMichal Hocko 		}
1547c6018b4bSAneesh Kumar K.V 
15486c21e066SJann Horn 		vma_start_write(vma);
1549c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1550f4e9e0e6SLiam R. Howlett 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1551c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1552c6018b4bSAneesh Kumar K.V 		if (err)
1553c6018b4bSAneesh Kumar K.V 			break;
1554c6018b4bSAneesh Kumar K.V 	}
1555c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1556c6018b4bSAneesh Kumar K.V 	return err;
1557c6018b4bSAneesh Kumar K.V }
1558c6018b4bSAneesh Kumar K.V 
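/*
 * Minimal userspace sketch of the syscall above (editorial example;
 * assumes a libc that defines __NR_set_mempolicy_home_node, as there
 * is no dedicated glibc wrapper):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// make node 2 the home node of an MPOL_BIND/_PREFERRED_MANY range
 *	if (syscall(__NR_set_mempolicy_home_node, addr, len, 2, 0))
 *		perror("set_mempolicy_home_node");
 */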
1559e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1560e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1561e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1562e7dc9ad6SDominik Brodowski {
1563e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1564e7dc9ad6SDominik Brodowski }
1565e7dc9ad6SDominik Brodowski 
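/*
 * Minimal userspace sketch of the syscall above (editorial example;
 * assumes <numaif.h> from numactl's libnuma):
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 2);	// nodes 0 and 2
 *	if (mbind(addr, len, MPOL_BIND, &mask, sizeof(mask) * 8,
 *		  MPOL_MF_MOVE))
 *		perror("mbind");
 */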
15668bccd85fSChristoph Lameter /* Set the process memory policy */
1567af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1568af03c4acSDominik Brodowski 				 unsigned long maxnode)
15698bccd85fSChristoph Lameter {
157095837924SFeng Tang 	unsigned short mode_flags;
15718bccd85fSChristoph Lameter 	nodemask_t nodes;
157295837924SFeng Tang 	int lmode = mode;
157395837924SFeng Tang 	int err;
15748bccd85fSChristoph Lameter 
157595837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
157695837924SFeng Tang 	if (err)
157795837924SFeng Tang 		return err;
157895837924SFeng Tang 
15798bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15808bccd85fSChristoph Lameter 	if (err)
15818bccd85fSChristoph Lameter 		return err;
158295837924SFeng Tang 
158395837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15848bccd85fSChristoph Lameter }
15858bccd85fSChristoph Lameter 
1586af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1587af03c4acSDominik Brodowski 		unsigned long, maxnode)
1588af03c4acSDominik Brodowski {
1589af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1590af03c4acSDominik Brodowski }
1591af03c4acSDominik Brodowski 
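/*
 * Minimal userspace sketch of the syscall above (editorial example;
 * assumes <numaif.h> from numactl's libnuma):
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask = 1UL << 1;			// prefer node 1
 *	if (set_mempolicy(MPOL_PREFERRED, &mask, sizeof(mask) * 8))
 *		perror("set_mempolicy");
 */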
1592b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1593b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1594b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
159539743889SChristoph Lameter {
1596596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
159739743889SChristoph Lameter 	struct task_struct *task;
159839743889SChristoph Lameter 	nodemask_t task_nodes;
159939743889SChristoph Lameter 	int err;
1600596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1601596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1602596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
160339743889SChristoph Lameter 
1604596d7cfaSKOSAKI Motohiro 	if (!scratch)
1605596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
160639743889SChristoph Lameter 
1607596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1608596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1609596d7cfaSKOSAKI Motohiro 
1610596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
161139743889SChristoph Lameter 	if (err)
1612596d7cfaSKOSAKI Motohiro 		goto out;
1613596d7cfaSKOSAKI Motohiro 
1614596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1615596d7cfaSKOSAKI Motohiro 	if (err)
1616596d7cfaSKOSAKI Motohiro 		goto out;
161739743889SChristoph Lameter 
161839743889SChristoph Lameter 	/* Find the mm_struct */
161955cfaa3cSZeng Zhaoming 	rcu_read_lock();
1620228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
162139743889SChristoph Lameter 	if (!task) {
162255cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1623596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1624596d7cfaSKOSAKI Motohiro 		goto out;
162539743889SChristoph Lameter 	}
16263268c63eSChristoph Lameter 	get_task_struct(task);
162739743889SChristoph Lameter 
1628596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
162939743889SChristoph Lameter 
163039743889SChristoph Lameter 	/*
163131367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
163231367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
163339743889SChristoph Lameter 	 */
163431367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1635c69e8d9cSDavid Howells 		rcu_read_unlock();
163639743889SChristoph Lameter 		err = -EPERM;
16373268c63eSChristoph Lameter 		goto out_put;
163839743889SChristoph Lameter 	}
1639c69e8d9cSDavid Howells 	rcu_read_unlock();
164039743889SChristoph Lameter 
164139743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
164239743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1643596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
164439743889SChristoph Lameter 		err = -EPERM;
16453268c63eSChristoph Lameter 		goto out_put;
164639743889SChristoph Lameter 	}
164739743889SChristoph Lameter 
16480486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16490486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16500486a38bSYisheng Xie 	if (nodes_empty(*new))
16513268c63eSChristoph Lameter 		goto out_put;
16520486a38bSYisheng Xie 
165386c3a764SDavid Quigley 	err = security_task_movememory(task);
165486c3a764SDavid Quigley 	if (err)
16553268c63eSChristoph Lameter 		goto out_put;
165686c3a764SDavid Quigley 
16573268c63eSChristoph Lameter 	mm = get_task_mm(task);
16583268c63eSChristoph Lameter 	put_task_struct(task);
1659f2a9ef88SSasha Levin 
1660f2a9ef88SSasha Levin 	if (!mm) {
1661f2a9ef88SSasha Levin 		err = -EINVAL;
1662f2a9ef88SSasha Levin 		goto out;
1663f2a9ef88SSasha Levin 	}
1664f2a9ef88SSasha Levin 
1665596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
166674c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16673268c63eSChristoph Lameter 
166839743889SChristoph Lameter 	mmput(mm);
16693268c63eSChristoph Lameter out:
1670596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1671596d7cfaSKOSAKI Motohiro 
167239743889SChristoph Lameter 	return err;
16733268c63eSChristoph Lameter 
16743268c63eSChristoph Lameter out_put:
16753268c63eSChristoph Lameter 	put_task_struct(task);
16763268c63eSChristoph Lameter 	goto out;
16773268c63eSChristoph Lameter 
167839743889SChristoph Lameter }
167939743889SChristoph Lameter 
1680b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1681b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1682b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1683b6e9b0baSDominik Brodowski {
1684b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1685b6e9b0baSDominik Brodowski }
1686b6e9b0baSDominik Brodowski 
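/*
 * Minimal userspace sketch of the syscall above (editorial example;
 * assumes <numaif.h> from numactl's libnuma):
 *
 *	#include <numaif.h>
 *
 *	unsigned long src = 1UL << 0, dst = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(src) * 8, &src, &dst);
 *	// left > 0: pages that could not be moved; -1: error in errno
 */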
168739743889SChristoph Lameter 
16888bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1689af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1690af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1691af03c4acSDominik Brodowski 				unsigned long maxnode,
1692af03c4acSDominik Brodowski 				unsigned long addr,
1693af03c4acSDominik Brodowski 				unsigned long flags)
16948bccd85fSChristoph Lameter {
1695dbcb0f19SAdrian Bunk 	int err;
16963f649ab7SKees Cook 	int pval;
16978bccd85fSChristoph Lameter 	nodemask_t nodes;
16988bccd85fSChristoph Lameter 
1699050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
17008bccd85fSChristoph Lameter 		return -EINVAL;
17018bccd85fSChristoph Lameter 
17024605f057SWenchao Hao 	addr = untagged_addr(addr);
17034605f057SWenchao Hao 
17048bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
17058bccd85fSChristoph Lameter 
17068bccd85fSChristoph Lameter 	if (err)
17078bccd85fSChristoph Lameter 		return err;
17088bccd85fSChristoph Lameter 
17098bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
17108bccd85fSChristoph Lameter 		return -EFAULT;
17118bccd85fSChristoph Lameter 
17128bccd85fSChristoph Lameter 	if (nmask)
17138bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
17148bccd85fSChristoph Lameter 
17158bccd85fSChristoph Lameter 	return err;
17168bccd85fSChristoph Lameter }
17178bccd85fSChristoph Lameter 
1718af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1719af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1720af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1721af03c4acSDominik Brodowski {
1722af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1723af03c4acSDominik Brodowski }
1724af03c4acSDominik Brodowski 
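/*
 * Minimal userspace sketch of the syscall above (editorial example;
 * assumes <numaif.h> from numactl's libnuma):
 *
 *	#include <numaif.h>
 *
 *	int mode;
 *	unsigned long mask;
 *	if (!get_mempolicy(&mode, &mask, sizeof(mask) * 8, addr,
 *			   MPOL_F_ADDR))
 *		printf("mode=%d nodes=0x%lx\n", mode, mask);
 */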
172520ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
172620ca87f2SLi Xinhai {
172720ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
172820ca87f2SLi Xinhai 		return false;
172920ca87f2SLi Xinhai 
173020ca87f2SLi Xinhai 	/*
173120ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
173220ca87f2SLi Xinhai 	 * incurring periodic faults.
173320ca87f2SLi Xinhai 	 */
173420ca87f2SLi Xinhai 	if (vma_is_dax(vma))
173520ca87f2SLi Xinhai 		return false;
173620ca87f2SLi Xinhai 
173720ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
173820ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
173920ca87f2SLi Xinhai 		return false;
174020ca87f2SLi Xinhai 
174120ca87f2SLi Xinhai 	/*
174220ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
174320ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
174420ca87f2SLi Xinhai 	 * possible.
174520ca87f2SLi Xinhai 	 */
174620ca87f2SLi Xinhai 	if (vma->vm_file &&
174720ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
174820ca87f2SLi Xinhai 			< policy_zone)
174920ca87f2SLi Xinhai 		return false;
175020ca87f2SLi Xinhai 	return true;
175120ca87f2SLi Xinhai }
175220ca87f2SLi Xinhai 
175374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
175474d2c3a0SOleg Nesterov 						unsigned long addr)
17551da177e4SLinus Torvalds {
17568d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17571da177e4SLinus Torvalds 
17581da177e4SLinus Torvalds 	if (vma) {
1759480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17608d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
176100442ad0SMel Gorman 		} else if (vma->vm_policy) {
17621da177e4SLinus Torvalds 			pol = vma->vm_policy;
176300442ad0SMel Gorman 
176400442ad0SMel Gorman 			/*
176500442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
176600442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
176700442ad0SMel Gorman 			 * count on these policies which will be dropped by
176800442ad0SMel Gorman 			 * mpol_cond_put() later
176900442ad0SMel Gorman 			 * mpol_cond_put() later.
177000442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
177100442ad0SMel Gorman 				mpol_get(pol);
177200442ad0SMel Gorman 		}
17731da177e4SLinus Torvalds 	}
1774f15ca78eSOleg Nesterov 
177574d2c3a0SOleg Nesterov 	return pol;
177674d2c3a0SOleg Nesterov }
177774d2c3a0SOleg Nesterov 
177874d2c3a0SOleg Nesterov /*
1779dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
178074d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
178174d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
178274d2c3a0SOleg Nesterov  *
178374d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1784dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
178574d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
178674d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
178774d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
178874d2c3a0SOleg Nesterov  * extra reference for shared policies.
178974d2c3a0SOleg Nesterov  */
1790ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1791dd6eecb9SOleg Nesterov 						unsigned long addr)
179274d2c3a0SOleg Nesterov {
179374d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
179474d2c3a0SOleg Nesterov 
17958d90274bSOleg Nesterov 	if (!pol)
1796dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17978d90274bSOleg Nesterov 
17981da177e4SLinus Torvalds 	return pol;
17991da177e4SLinus Torvalds }
18001da177e4SLinus Torvalds 
18016b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1802fc314724SMel Gorman {
18036b6482bbSOleg Nesterov 	struct mempolicy *pol;
1804f15ca78eSOleg Nesterov 
1805fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1806fc314724SMel Gorman 		bool ret = false;
1807fc314724SMel Gorman 
1808fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1809fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1810fc314724SMel Gorman 			ret = true;
1811fc314724SMel Gorman 		mpol_cond_put(pol);
1812fc314724SMel Gorman 
1813fc314724SMel Gorman 		return ret;
18148d90274bSOleg Nesterov 	}
18158d90274bSOleg Nesterov 
1816fc314724SMel Gorman 	pol = vma->vm_policy;
18178d90274bSOleg Nesterov 	if (!pol)
18186b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1819fc314724SMel Gorman 
1820fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1821fc314724SMel Gorman }
1822fc314724SMel Gorman 
1823d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1824d3eb1570SLai Jiangshan {
1825d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1826d3eb1570SLai Jiangshan 
1827d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1828d3eb1570SLai Jiangshan 
1829d3eb1570SLai Jiangshan 	/*
1830269fbe72SBen Widawsky 	 * if policy->nodes has movable memory only,
1831d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1832d3eb1570SLai Jiangshan 	 *
1833269fbe72SBen Widawsky 	 * policy->nodes is intersected with node_states[N_MEMORY],
1834f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1835269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1836d3eb1570SLai Jiangshan 	 */
1837269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1838d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1839d3eb1570SLai Jiangshan 
1840d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1841d3eb1570SLai Jiangshan }
1842d3eb1570SLai Jiangshan 
184352cd3b07SLee Schermerhorn /*
184452cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
184552cd3b07SLee Schermerhorn  * page allocation
184652cd3b07SLee Schermerhorn  */
18478ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
184819770b32SMel Gorman {
1849b27abaccSDave Hansen 	int mode = policy->mode;
1850b27abaccSDave Hansen 
185119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1852b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1853d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1854269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1855269fbe72SBen Widawsky 		return &policy->nodes;
185619770b32SMel Gorman 
1857b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1858b27abaccSDave Hansen 		return &policy->nodes;
1859b27abaccSDave Hansen 
186019770b32SMel Gorman 	return NULL;
186119770b32SMel Gorman }
186219770b32SMel Gorman 
1863b27abaccSDave Hansen /*
1864b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1865b27abaccSDave Hansen  * the given id for all other policies.
1866b27abaccSDave Hansen  *
1867b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1868b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1869b27abaccSDave Hansen  */
1870f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18711da177e4SLinus Torvalds {
18727858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1873269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18747858d7bcSFeng Tang 	} else {
187519770b32SMel Gorman 		/*
18766d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18776d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18786d840958SMichal Hocko 		 * requested node and not break the policy.
187919770b32SMel Gorman 		 */
18806d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18811da177e4SLinus Torvalds 	}
18826d840958SMichal Hocko 
1883c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1884c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1885c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1886c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1887c6018b4bSAneesh Kumar K.V 
188804ec6264SVlastimil Babka 	return nd;
18891da177e4SLinus Torvalds }
18901da177e4SLinus Torvalds 
18911da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18921da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18931da177e4SLinus Torvalds {
189445816682SVlastimil Babka 	unsigned next;
18951da177e4SLinus Torvalds 	struct task_struct *me = current;
18961da177e4SLinus Torvalds 
1897269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1898f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
189945816682SVlastimil Babka 		me->il_prev = next;
190045816682SVlastimil Babka 	return next;
19011da177e4SLinus Torvalds }
19021da177e4SLinus Torvalds 
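/*
 * Illustration (editorial sketch): with policy->nodes = {0,3,5} and
 * il_prev = 3, next_node_in() returns 5; the next call wraps around
 * and returns 0, then 3, and so on, round-robin over the set.
 */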
1903dc85da15SChristoph Lameter /*
1904dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1905dc85da15SChristoph Lameter  * Depending on the memory policy, provide a node from which to allocate the
1906dc85da15SChristoph Lameter  */
19072a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1908dc85da15SChristoph Lameter {
1909e7b691b0SAndi Kleen 	struct mempolicy *policy;
19102a389610SDavid Rientjes 	int node = numa_mem_id();
1911e7b691b0SAndi Kleen 
191238b031ddSVasily Averin 	if (!in_task())
19132a389610SDavid Rientjes 		return node;
1914e7b691b0SAndi Kleen 
1915e7b691b0SAndi Kleen 	policy = current->mempolicy;
19167858d7bcSFeng Tang 	if (!policy)
19172a389610SDavid Rientjes 		return node;
1918765c4507SChristoph Lameter 
1919bea904d5SLee Schermerhorn 	switch (policy->mode) {
1920bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1921269fbe72SBen Widawsky 		return first_node(policy->nodes);
1922bea904d5SLee Schermerhorn 
1923dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1924dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1925dc85da15SChristoph Lameter 
1926b27abaccSDave Hansen 	case MPOL_BIND:
1927b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1928b27abaccSDave Hansen 	{
1929c33d6c06SMel Gorman 		struct zoneref *z;
1930c33d6c06SMel Gorman 
1931dc85da15SChristoph Lameter 		/*
1932dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1933dc85da15SChristoph Lameter 		 * first node.
1934dc85da15SChristoph Lameter 		 */
193519770b32SMel Gorman 		struct zonelist *zonelist;
193619770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1937c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1938c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1939269fbe72SBen Widawsky 							&policy->nodes);
1940c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1941dd1a239fSMel Gorman 	}
19427858d7bcSFeng Tang 	case MPOL_LOCAL:
19437858d7bcSFeng Tang 		return node;
1944dc85da15SChristoph Lameter 
1945dc85da15SChristoph Lameter 	default:
1946bea904d5SLee Schermerhorn 		BUG();
1947dc85da15SChristoph Lameter 	}
1948dc85da15SChristoph Lameter }
1949dc85da15SChristoph Lameter 
1950fee83b3aSAndrew Morton /*
1951fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1952269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1953fee83b3aSAndrew Morton  * number of present nodes.
1954fee83b3aSAndrew Morton  */
195598c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19561da177e4SLinus Torvalds {
1957276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1958276aeee1Syanghui 	unsigned int target, nnodes;
1959fee83b3aSAndrew Morton 	int i;
1960fee83b3aSAndrew Morton 	int nid;
1961276aeee1Syanghui 	/*
1962276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1963276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1964276aeee1Syanghui 	 *
1965276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1966276aeee1Syanghui 	 * by other threads. So we put pol->nodes in a local stack.
1967276aeee1Syanghui 	 */
1968276aeee1Syanghui 	barrier();
19691da177e4SLinus Torvalds 
1970276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1971f5b087b5SDavid Rientjes 	if (!nnodes)
1972f5b087b5SDavid Rientjes 		return numa_node_id();
1973fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1974276aeee1Syanghui 	nid = first_node(nodemask);
1975fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1976276aeee1Syanghui 		nid = next_node(nid, nodemask);
19771da177e4SLinus Torvalds 	return nid;
19781da177e4SLinus Torvalds }
19791da177e4SLinus Torvalds 
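/*
 * Worked example (editorial sketch): with pol->nodes = {0,2,5}
 * (nnodes = 3) and n = 7, target = 7 % 3 = 1, so we start at
 * first_node() = 0 and step once with next_node(), returning node 2.
 */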
19805da7ca86SChristoph Lameter /* Determine a node number for interleave */
19815da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19825da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19835da7ca86SChristoph Lameter {
19845da7ca86SChristoph Lameter 	if (vma) {
19855da7ca86SChristoph Lameter 		unsigned long off;
19865da7ca86SChristoph Lameter 
19873b98b087SNishanth Aravamudan 		/*
19883b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19893b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19903b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19913b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19923b98b087SNishanth Aravamudan 		 * a useful offset.
19933b98b087SNishanth Aravamudan 		 */
19943b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19953b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19965da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
199798c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19985da7ca86SChristoph Lameter 	} else
19995da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20005da7ca86SChristoph Lameter }
20015da7ca86SChristoph Lameter 
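/*
 * Worked example (editorial sketch): for a 2MB huge page (shift = 21)
 * with PAGE_SHIFT = 12, off = (vma->vm_pgoff >> 9) +
 * ((addr - vma->vm_start) >> 21), i.e. both terms are counted in
 * whole huge pages before indexing into the interleave set.
 */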
200200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2003480eccf9SLee Schermerhorn /*
200404ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
2005b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2006b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2007b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2008b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2010480eccf9SLee Schermerhorn  *
201104ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
201252cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2013b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2014b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2015c0ff7453SMiao Xie  *
2016d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2017480eccf9SLee Schermerhorn  */
201804ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
201904ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20205da7ca86SChristoph Lameter {
202104ec6264SVlastimil Babka 	int nid;
2022b27abaccSDave Hansen 	int mode;
20235da7ca86SChristoph Lameter 
2024dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2025b27abaccSDave Hansen 	*nodemask = NULL;
2026b27abaccSDave Hansen 	mode = (*mpol)->mode;
20275da7ca86SChristoph Lameter 
2028b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
202904ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
203004ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
203152cd3b07SLee Schermerhorn 	} else {
203204ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2033b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2034269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2035480eccf9SLee Schermerhorn 	}
203604ec6264SVlastimil Babka 	return nid;
20375da7ca86SChristoph Lameter }
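/*
 * A minimal usage sketch (illustrative; gfp and the allocation step are
 * placeholders, not code from this file), following the locking rule in
 * the comment above:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	unsigned int cookie;
 *	int nid;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
 *		// ... allocate a huge page from nid, filtered by nodemask ...
 *		mpol_cond_put(mpol);	// drop the conditional reference
 *	} while (read_mems_allowed_retry(cookie));
 */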
203806808b08SLee Schermerhorn 
203906808b08SLee Schermerhorn /*
204006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
204106808b08SLee Schermerhorn  *
204206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
204306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
204406808b08SLee Schermerhorn  * for 'bind', 'interleave', 'preferred' or 'preferred (many)' policy
204506808b08SLee Schermerhorn  * into the argument nodemask, or initialize the argument nodemask to
204606808b08SLee Schermerhorn  * the single local node for 'local' policy, and return 'true' to
204706808b08SLee Schermerhorn  * indicate presence of a non-default mempolicy.
204806808b08SLee Schermerhorn  *
204906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
205006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
205106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
205206808b08SLee Schermerhorn  *
205306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
205406808b08SLee Schermerhorn  */
205506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
205606808b08SLee Schermerhorn {
205706808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
205806808b08SLee Schermerhorn 
205906808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
206006808b08SLee Schermerhorn 		return false;
206106808b08SLee Schermerhorn 
2062c0ff7453SMiao Xie 	task_lock(current);
206306808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
206406808b08SLee Schermerhorn 	switch (mempolicy->mode) {
206506808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2066b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
206706808b08SLee Schermerhorn 	case MPOL_BIND:
206806808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2069269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
207006808b08SLee Schermerhorn 		break;
207106808b08SLee Schermerhorn 
20727858d7bcSFeng Tang 	case MPOL_LOCAL:
2073269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20747858d7bcSFeng Tang 		break;
20757858d7bcSFeng Tang 
207606808b08SLee Schermerhorn 	default:
207706808b08SLee Schermerhorn 		BUG();
207806808b08SLee Schermerhorn 	}
2079c0ff7453SMiao Xie 	task_unlock(current);
208006808b08SLee Schermerhorn 
208106808b08SLee Schermerhorn 	return true;
208206808b08SLee Schermerhorn }
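/*
 * A minimal usage sketch (illustrative): a handler that wants to honor
 * the writing task's mempolicy might use it like this.  NODEMASK_ALLOC
 * and NODEMASK_FREE are the stack-avoidance helpers from nodemask.h;
 * the rest is hypothetical.
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 *
 *	if (nodes_allowed && init_nodemask_of_mempolicy(nodes_allowed)) {
 *		// ... restrict the operation to *nodes_allowed ...
 *	} else {
 *		// allocation failed or default policy: use all memory nodes
 *	}
 *	NODEMASK_FREE(nodes_allowed);
 */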
208300ac59adSChen, Kenneth W #endif
20845da7ca86SChristoph Lameter 
20856f48d0ebSDavid Rientjes /*
2086b26e517aSFeng Tang  * mempolicy_in_oom_domain
20876f48d0ebSDavid Rientjes  *
2088b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2089b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2090b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2091b26e517aSFeng Tang  * memory allocated from all nodes in the system.
20926f48d0ebSDavid Rientjes  *
20936f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20946f48d0ebSDavid Rientjes  */
2095b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20966f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20976f48d0ebSDavid Rientjes {
20986f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20996f48d0ebSDavid Rientjes 	bool ret = true;
21006f48d0ebSDavid Rientjes 
21016f48d0ebSDavid Rientjes 	if (!mask)
21026f48d0ebSDavid Rientjes 		return ret;
2103b26e517aSFeng Tang 
21046f48d0ebSDavid Rientjes 	task_lock(tsk);
21056f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2106b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2107269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
21086f48d0ebSDavid Rientjes 	task_unlock(tsk);
2109b26e517aSFeng Tang 
21106f48d0ebSDavid Rientjes 	return ret;
21116f48d0ebSDavid Rientjes }
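/*
 * A minimal usage sketch (illustrative; the iteration and the oc
 * structure are placeholders): an OOM victim scan can skip tasks whose
 * 'bind' policy cannot intersect the constrained nodes.
 *
 *	if (!mempolicy_in_oom_domain(tsk, oc->nodemask))
 *		continue;	// tsk cannot hold memory on these nodes
 */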
21126f48d0ebSDavid Rientjes 
21131da177e4SLinus Torvalds /* Allocate a page under an interleave policy. This has its own path
21141da177e4SLinus Torvalds    because it needs to do special accounting. */
2115662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2116662f3a0bSAndi Kleen 					unsigned nid)
21171da177e4SLinus Torvalds {
21181da177e4SLinus Torvalds 	struct page *page;
21191da177e4SLinus Torvalds 
212084172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21214518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21224518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21234518085eSKemi Wang 		return page;
2124de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2125de55c8b2SAndrey Ryabinin 		preempt_disable();
2126f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2127de55c8b2SAndrey Ryabinin 		preempt_enable();
2128de55c8b2SAndrey Ryabinin 	}
21291da177e4SLinus Torvalds 	return page;
21301da177e4SLinus Torvalds }
21311da177e4SLinus Torvalds 
21324c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21334c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21344c54d949SFeng Tang {
21354c54d949SFeng Tang 	struct page *page;
21364c54d949SFeng Tang 	gfp_t preferred_gfp;
21374c54d949SFeng Tang 
21384c54d949SFeng Tang 	/*
21394c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
21404c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
21414c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21424c54d949SFeng Tang 	 * nodes in system.
21434c54d949SFeng Tang 	 */
21444c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21454c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21464c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21474c54d949SFeng Tang 	if (!page)
2148c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21494c54d949SFeng Tang 
21504c54d949SFeng Tang 	return page;
21514c54d949SFeng Tang }
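/*
 * Worked example of the first-pass gfp transformation above, assuming a
 * caller passed GFP_KERNEL (which includes __GFP_DIRECT_RECLAIM): the
 * first attempt keeps the preferred nodemask but cannot direct-reclaim
 * and stays quiet on failure, so it falls through cheaply to the
 * unrestricted second pass.
 *
 *	preferred_gfp  = GFP_KERNEL | __GFP_NOWARN;
 *	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
 *	// pass 1: &pol->nodes only, may fail fast
 *	// pass 2: original GFP_KERNEL, any node
 */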
21524c54d949SFeng Tang 
21531da177e4SLinus Torvalds /**
2154adf88aa8SMatthew Wilcox (Oracle)  * vma_alloc_folio - Allocate a folio for a VMA.
2155eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2156adf88aa8SMatthew Wilcox (Oracle)  * @order: Order of the folio.
21571da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2158eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2159eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21601da177e4SLinus Torvalds  *
2161adf88aa8SMatthew Wilcox (Oracle)  * Allocate a folio for a specific address in @vma, using the appropriate
2162eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2163eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2164adf88aa8SMatthew Wilcox (Oracle)  * used for all allocations for folios that will be mapped into user space.
2165eb350739SMatthew Wilcox (Oracle)  *
2166adf88aa8SMatthew Wilcox (Oracle)  * Return: The folio on success or NULL if allocation fails.
21671da177e4SLinus Torvalds  */
2168adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2169be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21701da177e4SLinus Torvalds {
2171cc9a6c87SMel Gorman 	struct mempolicy *pol;
2172be1a13ebSMichal Hocko 	int node = numa_node_id();
2173adf88aa8SMatthew Wilcox (Oracle) 	struct folio *folio;
217404ec6264SVlastimil Babka 	int preferred_nid;
2175be97a41bSVlastimil Babka 	nodemask_t *nmask;
21761da177e4SLinus Torvalds 
2177dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2178cc9a6c87SMel Gorman 
2179be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
2180adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
21811da177e4SLinus Torvalds 		unsigned nid;
21825da7ca86SChristoph Lameter 
21838eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
218452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
2185adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21860bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2187bc899023SHugh Dickins 		return page_rmappable_folio(page);
21881da177e4SLinus Torvalds 	}
21891da177e4SLinus Torvalds 
21904c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2191adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
2192adf88aa8SMatthew Wilcox (Oracle) 
2193c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
2194adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21954c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21964c54d949SFeng Tang 		mpol_cond_put(pol);
2197bc899023SHugh Dickins 		return page_rmappable_folio(page);
21984c54d949SFeng Tang 	}
21994c54d949SFeng Tang 
220019deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
220119deb769SDavid Rientjes 		int hpage_node = node;
220219deb769SDavid Rientjes 
220319deb769SDavid Rientjes 		/*
220419deb769SDavid Rientjes 		 * For hugepage allocation and non-interleave policy which
220519deb769SDavid Rientjes 		 * allows the current node (or other explicitly preferred
220619deb769SDavid Rientjes 		 * node) we only try to allocate from the current/preferred
220719deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
220819deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
220919deb769SDavid Rientjes 		 *
2210b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
221119deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
221219deb769SDavid Rientjes 		 */
22137858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2214269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
221519deb769SDavid Rientjes 
221619deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
221719deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
221819deb769SDavid Rientjes 			mpol_cond_put(pol);
2219cc638f32SVlastimil Babka 			/*
2220cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2221cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2222cc638f32SVlastimil Babka 			 */
2223adf88aa8SMatthew Wilcox (Oracle) 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2224adf88aa8SMatthew Wilcox (Oracle) 					__GFP_NORETRY, order, hpage_node);
222576e654ccSDavid Rientjes 
222676e654ccSDavid Rientjes 			/*
222776e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
222876e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
222976e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2230cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
223176e654ccSDavid Rientjes 			 */
2232adf88aa8SMatthew Wilcox (Oracle) 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2233adf88aa8SMatthew Wilcox (Oracle) 				folio = __folio_alloc(gfp, order, hpage_node,
2234adf88aa8SMatthew Wilcox (Oracle) 						      nmask);
223576e654ccSDavid Rientjes 
223619deb769SDavid Rientjes 			goto out;
223719deb769SDavid Rientjes 		}
223819deb769SDavid Rientjes 	}
223919deb769SDavid Rientjes 
2240077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
224104ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2242adf88aa8SMatthew Wilcox (Oracle) 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2243d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2244be97a41bSVlastimil Babka out:
2245f584b680SMatthew Wilcox (Oracle) 	return folio;
2246f584b680SMatthew Wilcox (Oracle) }
2247adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio);
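/*
 * A minimal usage sketch (illustrative; the fault-path plumbing is
 * elided and hypothetical): allocating an order-0 folio for an
 * anonymous mapping.
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 *	// ... charge, zero and map the folio ...
 */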
2248f584b680SMatthew Wilcox (Oracle) 
22491da177e4SLinus Torvalds /**
2250d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22516421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22526421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22531da177e4SLinus Torvalds  *
22546421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22556421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (eg an order-3 allocation will be aligned
22566421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22576421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22581da177e4SLinus Torvalds  *
22596421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22606421ec76SMatthew Wilcox (Oracle)  * flags are used.
22616421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22621da177e4SLinus Torvalds  */
2263d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22641da177e4SLinus Torvalds {
22658d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2266c0ff7453SMiao Xie 	struct page *page;
22671da177e4SLinus Torvalds 
22688d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22698d90274bSOleg Nesterov 		pol = get_task_policy(current);
227052cd3b07SLee Schermerhorn 
227152cd3b07SLee Schermerhorn 	/*
227252cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
227352cd3b07SLee Schermerhorn 	 * nor system default_policy
227452cd3b07SLee Schermerhorn 	 */
227545c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2276c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22774c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22784c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2279c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2280c0ff7453SMiao Xie 	else
228184172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
228204ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22835c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2284cc9a6c87SMel Gorman 
2285c0ff7453SMiao Xie 	return page;
22861da177e4SLinus Torvalds }
2287d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
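/*
 * A minimal usage sketch (illustrative): an order-2 (four page)
 * allocation and release, with the task's policy applied.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	// ... use page_address(page) for 4 * PAGE_SIZE bytes ...
 *	__free_pages(page, 2);
 */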
22881da177e4SLinus Torvalds 
2289cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2290cc09cb13SMatthew Wilcox (Oracle) {
2291bc899023SHugh Dickins 	return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order));
2292cc09cb13SMatthew Wilcox (Oracle) }
2293cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
2294cc09cb13SMatthew Wilcox (Oracle) 
2295c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2296c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2297c00b6b96SChen Wandun 		struct page **page_array)
2298c00b6b96SChen Wandun {
2299c00b6b96SChen Wandun 	int nodes;
2300c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2301c00b6b96SChen Wandun 	int delta;
2302c00b6b96SChen Wandun 	int i;
2303c00b6b96SChen Wandun 	unsigned long nr_allocated;
2304c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2305c00b6b96SChen Wandun 
2306c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2307c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2308c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2309c00b6b96SChen Wandun 
2310c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2311c00b6b96SChen Wandun 		if (delta) {
2312c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2313c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2314c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2315c00b6b96SChen Wandun 					page_array);
2316c00b6b96SChen Wandun 			delta--;
2317c00b6b96SChen Wandun 		} else {
2318c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2319c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2320c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2321c00b6b96SChen Wandun 		}
2322c00b6b96SChen Wandun 
2323c00b6b96SChen Wandun 		page_array += nr_allocated;
2324c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2325c00b6b96SChen Wandun 	}
2326c00b6b96SChen Wandun 
2327c00b6b96SChen Wandun 	return total_allocated;
2328c00b6b96SChen Wandun }
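/*
 * Worked example of the distribution above: with nr_pages == 10 and a
 * 3-node interleave mask, nr_pages_per_node == 3 and delta == 1, so the
 * first interleave target gets 4 pages and the other two get 3 each,
 * keeping the round-robin as even as possible.
 */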
2329c00b6b96SChen Wandun 
2330c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2331c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2332c00b6b96SChen Wandun 		struct page **page_array)
2333c00b6b96SChen Wandun {
2334c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2335c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2336c00b6b96SChen Wandun 
2337c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2338c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2339c00b6b96SChen Wandun 
2340c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2341c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2342c00b6b96SChen Wandun 
2343c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2344c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2345c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2346c00b6b96SChen Wandun 				page_array + nr_allocated);
2347c00b6b96SChen Wandun 	return nr_allocated;
2348c00b6b96SChen Wandun }
2349c00b6b96SChen Wandun 
2350c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy need to be considered at the
2351c00b6b96SChen Wandun  * same time in some situations, such as vmalloc.
2352c00b6b96SChen Wandun  *
2353c00b6b96SChen Wandun  * This can accelerate memory allocation, especially for interleaved
2354c00b6b96SChen Wandun  * allocations.
2355c00b6b96SChen Wandun  */
2356c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2357c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2358c00b6b96SChen Wandun {
2359c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2360c00b6b96SChen Wandun 
2361c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2362c00b6b96SChen Wandun 		pol = get_task_policy(current);
2363c00b6b96SChen Wandun 
2364c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2365c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2366c00b6b96SChen Wandun 							 nr_pages, page_array);
2367c00b6b96SChen Wandun 
2368c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2369c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2370c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2371c00b6b96SChen Wandun 
2372c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2373c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2374c00b6b96SChen Wandun 				  page_array);
2375c00b6b96SChen Wandun }
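/*
 * A minimal usage sketch (illustrative; the sizing of pages[] and the
 * fallback are hypothetical): a vmalloc-style caller needing many
 * order-0 pages at once.
 *
 *	unsigned long got;
 *
 *	got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr_pages, pages);
 *	if (got < nr_pages)
 *		// ... allocate the remainder one page at a time ...
 */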
2376c00b6b96SChen Wandun 
2377ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2378ef0855d3SOleg Nesterov {
2379ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2380ef0855d3SOleg Nesterov 
2381ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2382ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2383ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2384ef0855d3SOleg Nesterov 	return 0;
2385ef0855d3SOleg Nesterov }
2386ef0855d3SOleg Nesterov 
23874225399aSPaul Jackson /*
2388846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23894225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
23904225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23914225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
23924225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2393708c1bbcSMiao Xie  *
2394708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2395708c1bbcSMiao Xie  * the cpuset's mems), so we need not do the rebind work for the current task.
23964225399aSPaul Jackson  */
23974225399aSPaul Jackson 
2398846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2399846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24001da177e4SLinus Torvalds {
24011da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24021da177e4SLinus Torvalds 
24031da177e4SLinus Torvalds 	if (!new)
24041da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2405708c1bbcSMiao Xie 
2406708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2407708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2408708c1bbcSMiao Xie 		task_lock(current);
2409708c1bbcSMiao Xie 		*new = *old;
2410708c1bbcSMiao Xie 		task_unlock(current);
2411708c1bbcSMiao Xie 	} else
2412708c1bbcSMiao Xie 		*new = *old;
2413708c1bbcSMiao Xie 
24144225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24154225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2416213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24174225399aSPaul Jackson 	}
24181da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24191da177e4SLinus Torvalds 	return new;
24201da177e4SLinus Torvalds }
24211da177e4SLinus Torvalds 
24221da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2423fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24241da177e4SLinus Torvalds {
24251da177e4SLinus Torvalds 	if (!a || !b)
2426fcfb4dccSKOSAKI Motohiro 		return false;
242745c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2428fcfb4dccSKOSAKI Motohiro 		return false;
242919800502SBob Liu 	if (a->flags != b->flags)
2430fcfb4dccSKOSAKI Motohiro 		return false;
2431c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2432c6018b4bSAneesh Kumar K.V 		return false;
243319800502SBob Liu 	if (mpol_store_user_nodemask(a))
243419800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2435fcfb4dccSKOSAKI Motohiro 			return false;
243619800502SBob Liu 
243745c4745aSLee Schermerhorn 	switch (a->mode) {
243819770b32SMel Gorman 	case MPOL_BIND:
24391da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24401da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2441b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2442269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24437858d7bcSFeng Tang 	case MPOL_LOCAL:
24447858d7bcSFeng Tang 		return true;
24451da177e4SLinus Torvalds 	default:
24461da177e4SLinus Torvalds 		BUG();
2447fcfb4dccSKOSAKI Motohiro 		return false;
24481da177e4SLinus Torvalds 	}
24491da177e4SLinus Torvalds }
24501da177e4SLinus Torvalds 
24511da177e4SLinus Torvalds /*
24521da177e4SLinus Torvalds  * Shared memory backing store policy support.
24531da177e4SLinus Torvalds  *
24541da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24551da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
24564a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24571da177e4SLinus Torvalds  * for any accesses to the tree.
24581da177e4SLinus Torvalds  */
24591da177e4SLinus Torvalds 
24604a8c7bb5SNathan Zimmer /*
24614a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
24624a8c7bb5SNathan Zimmer  * reading or for writing
24634a8c7bb5SNathan Zimmer  */
24641da177e4SLinus Torvalds static struct sp_node *
24651da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24661da177e4SLinus Torvalds {
24671da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24681da177e4SLinus Torvalds 
24691da177e4SLinus Torvalds 	while (n) {
24701da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24711da177e4SLinus Torvalds 
24721da177e4SLinus Torvalds 		if (start >= p->end)
24731da177e4SLinus Torvalds 			n = n->rb_right;
24741da177e4SLinus Torvalds 		else if (end <= p->start)
24751da177e4SLinus Torvalds 			n = n->rb_left;
24761da177e4SLinus Torvalds 		else
24771da177e4SLinus Torvalds 			break;
24781da177e4SLinus Torvalds 	}
24791da177e4SLinus Torvalds 	if (!n)
24801da177e4SLinus Torvalds 		return NULL;
24811da177e4SLinus Torvalds 	for (;;) {
24821da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24831da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24841da177e4SLinus Torvalds 		if (!prev)
24851da177e4SLinus Torvalds 			break;
24861da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24871da177e4SLinus Torvalds 		if (w->end <= start)
24881da177e4SLinus Torvalds 			break;
24891da177e4SLinus Torvalds 		n = prev;
24901da177e4SLinus Torvalds 	}
24911da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24921da177e4SLinus Torvalds }
24931da177e4SLinus Torvalds 
24944a8c7bb5SNathan Zimmer /*
24954a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
24964a8c7bb5SNathan Zimmer  * writing.
24974a8c7bb5SNathan Zimmer  */
24981da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24991da177e4SLinus Torvalds {
25001da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
25011da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
25021da177e4SLinus Torvalds 	struct sp_node *nd;
25031da177e4SLinus Torvalds 
25041da177e4SLinus Torvalds 	while (*p) {
25051da177e4SLinus Torvalds 		parent = *p;
25061da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25071da177e4SLinus Torvalds 		if (new->start < nd->start)
25081da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25091da177e4SLinus Torvalds 		else if (new->end > nd->end)
25101da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25111da177e4SLinus Torvalds 		else
25121da177e4SLinus Torvalds 			BUG();
25131da177e4SLinus Torvalds 	}
25141da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25151da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2516140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
251745c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25181da177e4SLinus Torvalds }
25191da177e4SLinus Torvalds 
25201da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25211da177e4SLinus Torvalds struct mempolicy *
25221da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25231da177e4SLinus Torvalds {
25241da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25251da177e4SLinus Torvalds 	struct sp_node *sn;
25261da177e4SLinus Torvalds 
25271da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25281da177e4SLinus Torvalds 		return NULL;
25294a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25301da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25311da177e4SLinus Torvalds 	if (sn) {
25321da177e4SLinus Torvalds 		mpol_get(sn->policy);
25331da177e4SLinus Torvalds 		pol = sn->policy;
25341da177e4SLinus Torvalds 	}
25354a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25361da177e4SLinus Torvalds 	return pol;
25371da177e4SLinus Torvalds }
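/*
 * A minimal usage sketch (illustrative; info->policy stands for the
 * shared_policy embedded in a shmem-style inode): a ->get_policy()
 * implementation looks up by file page index.
 *
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	// pol (if not NULL) carries a reference; mpol_put() when done
 */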
25381da177e4SLinus Torvalds 
253963f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
254063f74ca2SKOSAKI Motohiro {
254163f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
254263f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
254363f74ca2SKOSAKI Motohiro }
254463f74ca2SKOSAKI Motohiro 
2545771fb4d8SLee Schermerhorn /**
2546771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2547771fb4d8SLee Schermerhorn  *
2548b46e14acSFabian Frederick  * @page: page to be checked
2549b46e14acSFabian Frederick  * @vma: vm area where page mapped
2550b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2551771fb4d8SLee Schermerhorn  *
2552771fb4d8SLee Schermerhorn  * Lookup the current policy node id for @vma, @addr and "compare" it to the page's
25535f076944SMatthew Wilcox (Oracle)  * node id.  Policy determination "mimics" alloc_page_vma().
2554771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25555f076944SMatthew Wilcox (Oracle)  *
2556062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2557062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2558771fb4d8SLee Schermerhorn  */
2559771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2560771fb4d8SLee Schermerhorn {
2561771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2562c33d6c06SMel Gorman 	struct zoneref *z;
2563771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2564771fb4d8SLee Schermerhorn 	unsigned long pgoff;
256590572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
256690572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
256798fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2568062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2569771fb4d8SLee Schermerhorn 
2570dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2571771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2572771fb4d8SLee Schermerhorn 		goto out;
2573771fb4d8SLee Schermerhorn 
2574771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2575771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2576771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2577771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
257898c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2579771fb4d8SLee Schermerhorn 		break;
2580771fb4d8SLee Schermerhorn 
2581771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2582b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2583b27abaccSDave Hansen 			goto out;
2584269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2585771fb4d8SLee Schermerhorn 		break;
2586771fb4d8SLee Schermerhorn 
25877858d7bcSFeng Tang 	case MPOL_LOCAL:
25887858d7bcSFeng Tang 		polnid = numa_node_id();
25897858d7bcSFeng Tang 		break;
25907858d7bcSFeng Tang 
2591771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2592bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2593bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2594269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2595bda420b9SHuang Ying 				break;
2596bda420b9SHuang Ying 			goto out;
2597bda420b9SHuang Ying 		}
2598b27abaccSDave Hansen 		fallthrough;
2599c33d6c06SMel Gorman 
2600b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2601771fb4d8SLee Schermerhorn 		/*
2602771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2603771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2604771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2605771fb4d8SLee Schermerhorn 		 */
2606269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2607771fb4d8SLee Schermerhorn 			goto out;
2608c33d6c06SMel Gorman 		z = first_zones_zonelist(
2609771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2610771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2611269fbe72SBen Widawsky 				&pol->nodes);
2612c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2613771fb4d8SLee Schermerhorn 		break;
2614771fb4d8SLee Schermerhorn 
2615771fb4d8SLee Schermerhorn 	default:
2616771fb4d8SLee Schermerhorn 		BUG();
2617771fb4d8SLee Schermerhorn 	}
26185606e387SMel Gorman 
26195606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2620e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
262190572890SPeter Zijlstra 		polnid = thisnid;
26225606e387SMel Gorman 
262310f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2624de1c9ce6SRik van Riel 			goto out;
2625de1c9ce6SRik van Riel 	}
2626e42c8ff2SMel Gorman 
2627771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2628771fb4d8SLee Schermerhorn 		ret = polnid;
2629771fb4d8SLee Schermerhorn out:
2630771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2631771fb4d8SLee Schermerhorn 
2632771fb4d8SLee Schermerhorn 	return ret;
2633771fb4d8SLee Schermerhorn }
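/*
 * A minimal usage sketch (illustrative): a NUMA hinting fault handler
 * consumes the return value roughly like this.
 *
 *	int target_nid = mpol_misplaced(page, vma, addr);
 *
 *	if (target_nid == NUMA_NO_NODE)
 *		;	// page is already on an acceptable node
 *	else
 *		;	// try to migrate the page to target_nid
 */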
2634771fb4d8SLee Schermerhorn 
2635c11600e4SDavid Rientjes /*
2636c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2637c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2638c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2639c11600e4SDavid Rientjes  * policy.
2640c11600e4SDavid Rientjes  */
2641c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2642c11600e4SDavid Rientjes {
2643c11600e4SDavid Rientjes 	struct mempolicy *pol;
2644c11600e4SDavid Rientjes 
2645c11600e4SDavid Rientjes 	task_lock(task);
2646c11600e4SDavid Rientjes 	pol = task->mempolicy;
2647c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2648c11600e4SDavid Rientjes 	task_unlock(task);
2649c11600e4SDavid Rientjes 	mpol_put(pol);
2650c11600e4SDavid Rientjes }
2651c11600e4SDavid Rientjes 
26521da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26531da177e4SLinus Torvalds {
2654140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26551da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
265663f74ca2SKOSAKI Motohiro 	sp_free(n);
26571da177e4SLinus Torvalds }
26581da177e4SLinus Torvalds 
265942288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
266042288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
266142288fe3SMel Gorman {
266242288fe3SMel Gorman 	node->start = start;
266342288fe3SMel Gorman 	node->end = end;
266442288fe3SMel Gorman 	node->policy = pol;
266542288fe3SMel Gorman }
266642288fe3SMel Gorman 
2667dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2668dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26691da177e4SLinus Torvalds {
2670869833f2SKOSAKI Motohiro 	struct sp_node *n;
2671869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26721da177e4SLinus Torvalds 
2673869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26741da177e4SLinus Torvalds 	if (!n)
26751da177e4SLinus Torvalds 		return NULL;
2676869833f2SKOSAKI Motohiro 
2677869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2678869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2679869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2680869833f2SKOSAKI Motohiro 		return NULL;
2681869833f2SKOSAKI Motohiro 	}
2682869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
268342288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2684869833f2SKOSAKI Motohiro 
26851da177e4SLinus Torvalds 	return n;
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds /* Replace a policy range. */
26891da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26901da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26911da177e4SLinus Torvalds {
2692b22d127aSMel Gorman 	struct sp_node *n;
269342288fe3SMel Gorman 	struct sp_node *n_new = NULL;
269442288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2695b22d127aSMel Gorman 	int ret = 0;
26961da177e4SLinus Torvalds 
269742288fe3SMel Gorman restart:
26984a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26991da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
27001da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
27011da177e4SLinus Torvalds 	while (n && n->start < end) {
27021da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
27031da177e4SLinus Torvalds 		if (n->start >= start) {
27041da177e4SLinus Torvalds 			if (n->end <= end)
27051da177e4SLinus Torvalds 				sp_delete(sp, n);
27061da177e4SLinus Torvalds 			else
27071da177e4SLinus Torvalds 				n->start = end;
27081da177e4SLinus Torvalds 		} else {
27091da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27101da177e4SLinus Torvalds 			if (n->end > end) {
271142288fe3SMel Gorman 				if (!n_new)
271242288fe3SMel Gorman 					goto alloc_new;
271342288fe3SMel Gorman 
271442288fe3SMel Gorman 				*mpol_new = *n->policy;
271542288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27167880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27171da177e4SLinus Torvalds 				n->end = start;
27185ca39575SHillf Danton 				sp_insert(sp, n_new);
271942288fe3SMel Gorman 				n_new = NULL;
272042288fe3SMel Gorman 				mpol_new = NULL;
27211da177e4SLinus Torvalds 				break;
27221da177e4SLinus Torvalds 			} else
27231da177e4SLinus Torvalds 				n->end = start;
27241da177e4SLinus Torvalds 		}
27251da177e4SLinus Torvalds 		if (!next)
27261da177e4SLinus Torvalds 			break;
27271da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27281da177e4SLinus Torvalds 	}
27291da177e4SLinus Torvalds 	if (new)
27301da177e4SLinus Torvalds 		sp_insert(sp, new);
27314a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
273242288fe3SMel Gorman 	ret = 0;
273342288fe3SMel Gorman 
273442288fe3SMel Gorman err_out:
273542288fe3SMel Gorman 	if (mpol_new)
273642288fe3SMel Gorman 		mpol_put(mpol_new);
273742288fe3SMel Gorman 	if (n_new)
273842288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
273942288fe3SMel Gorman 
2740b22d127aSMel Gorman 	return ret;
274142288fe3SMel Gorman 
274242288fe3SMel Gorman alloc_new:
27434a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
274442288fe3SMel Gorman 	ret = -ENOMEM;
274542288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
274642288fe3SMel Gorman 	if (!n_new)
274742288fe3SMel Gorman 		goto err_out;
274842288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
274942288fe3SMel Gorman 	if (!mpol_new)
275042288fe3SMel Gorman 		goto err_out;
27514ad09955SMiaohe Lin 	atomic_set(&mpol_new->refcnt, 1);
275242288fe3SMel Gorman 	goto restart;
27531da177e4SLinus Torvalds }
27541da177e4SLinus Torvalds 
275571fe804bSLee Schermerhorn /**
275671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
275771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
275871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
275971fe804bSLee Schermerhorn  *
276071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
276171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
276271fe804bSLee Schermerhorn  * This must be released on exit.
27634bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() calls and we can use GFP_KERNEL.
276471fe804bSLee Schermerhorn  */
276571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27667339ff83SRobin Holt {
276758568d2aSMiao Xie 	int ret;
276858568d2aSMiao Xie 
276971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27704a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27717339ff83SRobin Holt 
277271fe804bSLee Schermerhorn 	if (mpol) {
27737339ff83SRobin Holt 		struct vm_area_struct pvma;
277471fe804bSLee Schermerhorn 		struct mempolicy *new;
27754bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27767339ff83SRobin Holt 
27774bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27785c0c1654SLee Schermerhorn 			goto put_mpol;
277971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
278071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
278115d77835SLee Schermerhorn 		if (IS_ERR(new))
27820cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
278358568d2aSMiao Xie 
278458568d2aSMiao Xie 		task_lock(current);
27854bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
278658568d2aSMiao Xie 		task_unlock(current);
278715d77835SLee Schermerhorn 		if (ret)
27885c0c1654SLee Schermerhorn 			goto put_new;
278971fe804bSLee Schermerhorn 
279071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27912c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
279271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
279371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
279415d77835SLee Schermerhorn 
27955c0c1654SLee Schermerhorn put_new:
279671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27970cae3457SDan Carpenter free_scratch:
27984bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27995c0c1654SLee Schermerhorn put_mpol:
28005c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
28017339ff83SRobin Holt 	}
28027339ff83SRobin Holt }
28037339ff83SRobin Holt 
28041da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
28051da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28061da177e4SLinus Torvalds {
28071da177e4SLinus Torvalds 	int err;
28081da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28091da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28101da177e4SLinus Torvalds 
2811028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28121da177e4SLinus Torvalds 		 vma->vm_pgoff,
281345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2814028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2815269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28161da177e4SLinus Torvalds 
28171da177e4SLinus Torvalds 	if (npol) {
28181da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28191da177e4SLinus Torvalds 		if (!new)
28201da177e4SLinus Torvalds 			return -ENOMEM;
28211da177e4SLinus Torvalds 	}
28221da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28231da177e4SLinus Torvalds 	if (err && new)
282463f74ca2SKOSAKI Motohiro 		sp_free(new);
28251da177e4SLinus Torvalds 	return err;
28261da177e4SLinus Torvalds }
28271da177e4SLinus Torvalds 
28281da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28291da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28301da177e4SLinus Torvalds {
28311da177e4SLinus Torvalds 	struct sp_node *n;
28321da177e4SLinus Torvalds 	struct rb_node *next;
28331da177e4SLinus Torvalds 
28341da177e4SLinus Torvalds 	if (!p->root.rb_node)
28351da177e4SLinus Torvalds 		return;
28364a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28371da177e4SLinus Torvalds 	next = rb_first(&p->root);
28381da177e4SLinus Torvalds 	while (next) {
28391da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28401da177e4SLinus Torvalds 		next = rb_next(&n->nd);
284163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28421da177e4SLinus Torvalds 	}
28434a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28441da177e4SLinus Torvalds }
28451da177e4SLinus Torvalds 
28461a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2847c297663cSMel Gorman static int __initdata numabalancing_override;
28481a687c2eSMel Gorman 
28491a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28501a687c2eSMel Gorman {
28511a687c2eSMel Gorman 	bool numabalancing_default = false;
28521a687c2eSMel Gorman 
28531a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28541a687c2eSMel Gorman 		numabalancing_default = true;
28551a687c2eSMel Gorman 
2856c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2857c297663cSMel Gorman 	if (numabalancing_override)
2858c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2859c297663cSMel Gorman 
2860b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2861756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2862c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28631a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28641a687c2eSMel Gorman 	}
28651a687c2eSMel Gorman }
28661a687c2eSMel Gorman 
28671a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28681a687c2eSMel Gorman {
28691a687c2eSMel Gorman 	int ret = 0;
28701a687c2eSMel Gorman 	if (!str)
28711a687c2eSMel Gorman 		goto out;
28721a687c2eSMel Gorman 
28731a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2874c297663cSMel Gorman 		numabalancing_override = 1;
28751a687c2eSMel Gorman 		ret = 1;
28761a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2877c297663cSMel Gorman 		numabalancing_override = -1;
28781a687c2eSMel Gorman 		ret = 1;
28791a687c2eSMel Gorman 	}
28801a687c2eSMel Gorman out:
28811a687c2eSMel Gorman 	if (!ret)
28824a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28831a687c2eSMel Gorman 
28841a687c2eSMel Gorman 	return ret;
28851a687c2eSMel Gorman }
28861a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
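/*
 * Boot command line examples for the parser above:
 *
 *	numa_balancing=enable	-> numabalancing_override = 1
 *	numa_balancing=disable	-> numabalancing_override = -1
 *
 * Either setting takes precedence over the Kconfig default in
 * check_numabalancing_enable().
 */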
28871a687c2eSMel Gorman #else
28881a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28891a687c2eSMel Gorman {
28901a687c2eSMel Gorman }
28911a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28921a687c2eSMel Gorman 
28931da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28941da177e4SLinus Torvalds void __init numa_policy_init(void)
28951da177e4SLinus Torvalds {
2896b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2897b71636e2SPaul Mundt 	unsigned long largest = 0;
2898b71636e2SPaul Mundt 	int nid, prefer = 0;
2899b71636e2SPaul Mundt 
29001da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
29011da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
290220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
29031da177e4SLinus Torvalds 
29041da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
29051da177e4SLinus Torvalds 				     sizeof(struct sp_node),
290620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29071da177e4SLinus Torvalds 
29085606e387SMel Gorman 	for_each_node(nid) {
29095606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29105606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29115606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29125606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2913269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29145606e387SMel Gorman 		};
29155606e387SMel Gorman 	}
29165606e387SMel Gorman 
2917b71636e2SPaul Mundt 	/*
2918b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2919b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2920b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2921b71636e2SPaul Mundt 	 */
2922b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
292301f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2924b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29251da177e4SLinus Torvalds 
2926b71636e2SPaul Mundt 		/* Preserve the largest node */
2927b71636e2SPaul Mundt 		if (largest < total_pages) {
2928b71636e2SPaul Mundt 			largest = total_pages;
2929b71636e2SPaul Mundt 			prefer = nid;
2930b71636e2SPaul Mundt 		}
2931b71636e2SPaul Mundt 
2932b71636e2SPaul Mundt 		/* Interleave this node? */
2933b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2934b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2935b71636e2SPaul Mundt 	}
2936b71636e2SPaul Mundt 
2937b71636e2SPaul Mundt 	/* All too small, use the largest */
2938b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2939b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2940b71636e2SPaul Mundt 
2941028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2942b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29431a687c2eSMel Gorman 
29441a687c2eSMel Gorman 	check_numabalancing_enable();
29451da177e4SLinus Torvalds }
29461da177e4SLinus Torvalds 
29478bccd85fSChristoph Lameter /* Reset policy of current process to default */
29481da177e4SLinus Torvalds void numa_default_policy(void)
29491da177e4SLinus Torvalds {
2950028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29511da177e4SLinus Torvalds }
295268860ec1SPaul Jackson 
29534225399aSPaul Jackson /*
2954095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2955095f1fc4SLee Schermerhorn  */
2956095f1fc4SLee Schermerhorn 
2957345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2958345ace9cSLee Schermerhorn {
2959345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2960345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2961345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2962345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2963d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2964b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2965345ace9cSLee Schermerhorn };
29661a75a6c8SChristoph Lameter 
2967095f1fc4SLee Schermerhorn 
2968095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2969095f1fc4SLee Schermerhorn /**
2970f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2971095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
297271fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2973095f1fc4SLee Schermerhorn  *
2974095f1fc4SLee Schermerhorn  * Format of input:
2975095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2976095f1fc4SLee Schermerhorn  *
2977dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2978095f1fc4SLee Schermerhorn  */
2979a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2980095f1fc4SLee Schermerhorn {
298171fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2982f2a07f40SHugh Dickins 	unsigned short mode_flags;
298371fe804bSLee Schermerhorn 	nodemask_t nodes;
2984095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2985095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2986dedf2c73Szhong jiang 	int err = 1, mode;
2987095f1fc4SLee Schermerhorn 
2988c7a91bc7SDan Carpenter 	if (flags)
2989c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2990c7a91bc7SDan Carpenter 
2991095f1fc4SLee Schermerhorn 	if (nodelist) {
2992095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2993095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
299471fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2995095f1fc4SLee Schermerhorn 			goto out;
299601f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2997095f1fc4SLee Schermerhorn 			goto out;
299871fe804bSLee Schermerhorn 	} else
299971fe804bSLee Schermerhorn 		nodes_clear(nodes);
300071fe804bSLee Schermerhorn 
3001dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
3002dedf2c73Szhong jiang 	if (mode < 0)
3003095f1fc4SLee Schermerhorn 		goto out;
3004095f1fc4SLee Schermerhorn 
300571fe804bSLee Schermerhorn 	switch (mode) {
3006095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
300771fe804bSLee Schermerhorn 		/*
3008aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only; later we use
3009aa9f7d51SRandy Dunlap 		 * first_node(nodes) to grab the single node, so the
3010aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty here.
301171fe804bSLee Schermerhorn 		 */
3012095f1fc4SLee Schermerhorn 		if (nodelist) {
3013095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3014095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3015095f1fc4SLee Schermerhorn 				rest++;
3016926f2ae0SKOSAKI Motohiro 			if (*rest)
3017926f2ae0SKOSAKI Motohiro 				goto out;
3018aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3019aa9f7d51SRandy Dunlap 				goto out;
3020095f1fc4SLee Schermerhorn 		}
3021095f1fc4SLee Schermerhorn 		break;
3022095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3023095f1fc4SLee Schermerhorn 		/*
3024095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3025095f1fc4SLee Schermerhorn 		 */
3026095f1fc4SLee Schermerhorn 		if (!nodelist)
302701f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30283f226aa1SLee Schermerhorn 		break;
302971fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30303f226aa1SLee Schermerhorn 		/*
303171fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30323f226aa1SLee Schermerhorn 		 */
303371fe804bSLee Schermerhorn 		if (nodelist)
30343f226aa1SLee Schermerhorn 			goto out;
30353f226aa1SLee Schermerhorn 		break;
3036413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3037413b43deSRavikiran G Thirumalai 		/*
3038413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3039413b43deSRavikiran G Thirumalai 		 */
3040413b43deSRavikiran G Thirumalai 		if (!nodelist)
3041413b43deSRavikiran G Thirumalai 			err = 0;
3042413b43deSRavikiran G Thirumalai 		goto out;
3043b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3044d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
304571fe804bSLee Schermerhorn 		/*
3046d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
304771fe804bSLee Schermerhorn 		 */
3048d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3049d69b2e63SKOSAKI Motohiro 			goto out;
3050095f1fc4SLee Schermerhorn 	}
3051095f1fc4SLee Schermerhorn 
305271fe804bSLee Schermerhorn 	mode_flags = 0;
3053095f1fc4SLee Schermerhorn 	if (flags) {
3054095f1fc4SLee Schermerhorn 		/*
3055095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3056095f1fc4SLee Schermerhorn 		 * mode flags.
3057095f1fc4SLee Schermerhorn 		 */
3058095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
305971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3060095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
306171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3062095f1fc4SLee Schermerhorn 		else
3063926f2ae0SKOSAKI Motohiro 			goto out;
3064095f1fc4SLee Schermerhorn 	}
306571fe804bSLee Schermerhorn 
306671fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
306771fe804bSLee Schermerhorn 	if (IS_ERR(new))
3068926f2ae0SKOSAKI Motohiro 		goto out;
3069926f2ae0SKOSAKI Motohiro 
3070f2a07f40SHugh Dickins 	/*
3071f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3072f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3073f2a07f40SHugh Dickins 	 */
3074269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3075269fbe72SBen Widawsky 		new->nodes = nodes;
3076269fbe72SBen Widawsky 	} else if (nodelist) {
3077269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3078269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3079269fbe72SBen Widawsky 	} else {
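		/* A bare "prefer" with no nodelist means allocate locally */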
30807858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3081269fbe72SBen Widawsky 	}
3082f2a07f40SHugh Dickins 
3083f2a07f40SHugh Dickins 	/*
3084f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3085f2a07f40SHugh Dickins 	 * the mempolicy in a specific cpuset context at a later time.
3086f2a07f40SHugh Dickins 	 */
3087e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3088f2a07f40SHugh Dickins 
3089926f2ae0SKOSAKI Motohiro 	err = 0;
309071fe804bSLee Schermerhorn 
3091095f1fc4SLee Schermerhorn out:
3092095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3093095f1fc4SLee Schermerhorn 	if (nodelist)
3094095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3095095f1fc4SLee Schermerhorn 	if (flags)
3096095f1fc4SLee Schermerhorn 		*--flags = '=';
309771fe804bSLee Schermerhorn 	if (!err)
309871fe804bSLee Schermerhorn 		*mpol = new;
3099095f1fc4SLee Schermerhorn 	return err;
3100095f1fc4SLee Schermerhorn }
3101095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
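
/*
 * Illustrative (hypothetical) caller, kept under "#if 0" so it is not
 * compiled: parse a tmpfs-style policy string and drop the reference
 * when done.  mpol_parse_str() modifies @str in place and restores it
 * on error so the caller can still print the original string.
 */
#if 0
static int mpol_parse_example(void)
{
	struct mempolicy *mpol;
	char str[] = "interleave:0-3";	/* must be writable */

	if (mpol_parse_str(str, &mpol))
		return -EINVAL;
	/* ... hand mpol to e.g. a shmem superblock ... */
	mpol_put(mpol);			/* release mpol_new()'s reference */
	return 0;
}
#endif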
3102095f1fc4SLee Schermerhorn 
310371fe804bSLee Schermerhorn /**
310471fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
310571fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
310671fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
310771fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
310871fe804bSLee Schermerhorn  *
3109948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3110ab14e199STvrtko Ursulin  * Recommend a @maxlen of at least 51 for the longest mode, "weighted
3111ab14e199STvrtko Ursulin  * interleave", plus the longest flags, "relative|balancing", and to
3112ab14e199STvrtko Ursulin  * display at least a few node ids.
31131a75a6c8SChristoph Lameter  */
3114948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31151a75a6c8SChristoph Lameter {
31161a75a6c8SChristoph Lameter 	char *p = buffer;
3117948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3118948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3119948927eeSDavid Rientjes 	unsigned short flags = 0;
31201a75a6c8SChristoph Lameter 
3121ab14e199STvrtko Ursulin 	if (pol &&
3122ab14e199STvrtko Ursulin 	    pol != &default_policy &&
3123ab14e199STvrtko Ursulin 	    !(pol >= &preferred_node_policy[0] &&
3124ab14e199STvrtko Ursulin 	      pol <= &preferred_node_policy[ARRAY_SIZE(preferred_node_policy) - 1])) {
3125bea904d5SLee Schermerhorn 		mode = pol->mode;
3126948927eeSDavid Rientjes 		flags = pol->flags;
3127948927eeSDavid Rientjes 	}
3128bea904d5SLee Schermerhorn 
31291a75a6c8SChristoph Lameter 	switch (mode) {
31301a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31317858d7bcSFeng Tang 	case MPOL_LOCAL:
31321a75a6c8SChristoph Lameter 		break;
31331a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3134b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31351a75a6c8SChristoph Lameter 	case MPOL_BIND:
31361a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3137269fbe72SBen Widawsky 		nodes = pol->nodes;
31381a75a6c8SChristoph Lameter 		break;
31391a75a6c8SChristoph Lameter 	default:
3140948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3141948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3142948927eeSDavid Rientjes 		return;
31431a75a6c8SChristoph Lameter 	}
31441a75a6c8SChristoph Lameter 
3145b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31461a75a6c8SChristoph Lameter 
3147fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3148948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3149f5b087b5SDavid Rientjes 
31502291990aSLee Schermerhorn 		/*
3151ab14e199STvrtko Ursulin 		 * Static and relative are mutually exclusive.
31522291990aSLee Schermerhorn 		 */
3153f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31542291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31552291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31562291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3157ab14e199STvrtko Ursulin 
3158ab14e199STvrtko Ursulin 		if (flags & MPOL_F_NUMA_BALANCING) {
3159ab14e199STvrtko Ursulin 			if (!is_power_of_2(flags & MPOL_MODE_FLAGS))
3160ab14e199STvrtko Ursulin 				p += snprintf(p, buffer + maxlen - p, "|");
3161ab14e199STvrtko Ursulin 			p += snprintf(p, buffer + maxlen - p, "balancing");
3162ab14e199STvrtko Ursulin 		}
3163f5b087b5SDavid Rientjes 	}
3164f5b087b5SDavid Rientjes 
31659e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31669e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31679e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31681a75a6c8SChristoph Lameter }
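
/*
 * Illustrative (hypothetical) caller, not compiled: format a policy
 * into a stack buffer.  64 bytes comfortably exceeds the 51-byte
 * minimum recommended in the kerneldoc above; typical output looks
 * like "interleave:0-3" or "bind=static|balancing:1,3".
 */
#if 0
static void mpol_show_example(struct mempolicy *pol)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), pol);
	pr_info("mempolicy: %s\n", buf);
}
#endif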
3169