xref: /openbmc/linux/mm/mempolicy.c (revision 7780d04046a2288ab85d88bedacc60fa4fad9971)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
481da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
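
/*
 * Illustrative sketch, not part of the kernel: how userspace selects the
 * policies described above through the set_mempolicy(2) and mbind(2)
 * syscalls.  The nodemask values, and the addr/len of an existing mapping,
 * are assumptions for the example; error handling is omitted.
 *
 *	#include <numaif.h>
 *
 *	unsigned long mask01 = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *	unsigned long mask0  = 1UL << 0;		// node 0 only
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &mask01, sizeof(mask01) * 8 + 1);
 *
 *	// VMA policy: bind one mapping to node 0, failing hard on misses.
 *	mbind(addr, len, MPOL_BIND, &mask0, sizeof(mask0) * 8 + 1,
 *	      MPOL_MF_STRICT);
 */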
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
681da177e4SLinus Torvalds    kernel is not always grateful with that.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1074a18419fSNadav Amit #include <asm/tlb.h>
1087c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1091da177e4SLinus Torvalds 
11062695a84SNick Piggin #include "internal.h"
11162695a84SNick Piggin 
11238e35860SChristoph Lameter /* Internal flags */
113dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11438e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115dc9aa5b9SChristoph Lameter 
116fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
117fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1201da177e4SLinus Torvalds    policied. */
1216267276fSChristoph Lameter enum zone_type policy_zone = 0;
1221da177e4SLinus Torvalds 
123bea904d5SLee Schermerhorn /*
124bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
125bea904d5SLee Schermerhorn  */
126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1271da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1287858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1291da177e4SLinus Torvalds };
1301da177e4SLinus Torvalds 
1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1325606e387SMel Gorman 
133b2ca916cSDan Williams /**
134b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
135f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
136b2ca916cSDan Williams  *
137b2ca916cSDan Williams  * Look up the closest online node by distance if @node is not online.
138dad5b023SRandy Dunlap  *
139dad5b023SRandy Dunlap  * Return: this @node if it is online, otherwise the closest node by distance
140b2ca916cSDan Williams  */
141b2ca916cSDan Williams int numa_map_to_online_node(int node)
142b2ca916cSDan Williams {
1434fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
144b2ca916cSDan Williams 
1454fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1464fcbe96eSDan Williams 		return node;
147b2ca916cSDan Williams 
148b2ca916cSDan Williams 	min_node = node;
149b2ca916cSDan Williams 	for_each_online_node(n) {
150b2ca916cSDan Williams 		dist = node_distance(node, n);
151b2ca916cSDan Williams 		if (dist < min_dist) {
152b2ca916cSDan Williams 			min_dist = dist;
153b2ca916cSDan Williams 			min_node = n;
154b2ca916cSDan Williams 		}
155b2ca916cSDan Williams 	}
156b2ca916cSDan Williams 
157b2ca916cSDan Williams 	return min_node;
158b2ca916cSDan Williams }
159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
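
/*
 * Example, illustrative only: with nodes 0 and 1 online and node 2 offline,
 * numa_map_to_online_node(2) returns whichever online node has the smallest
 * node_distance() to node 2.  An online node (or NUMA_NO_NODE) is returned
 * unchanged.
 */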
160b2ca916cSDan Williams 
16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1625606e387SMel Gorman {
1635606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
164f15ca78eSOleg Nesterov 	int node;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	if (pol)
167f15ca78eSOleg Nesterov 		return pol;
1685606e387SMel Gorman 
169f15ca78eSOleg Nesterov 	node = numa_node_id();
1701da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1711da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
172f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
173f15ca78eSOleg Nesterov 		if (pol->mode)
174f15ca78eSOleg Nesterov 			return pol;
1751da6f0e1SJianguo Wu 	}
1765606e387SMel Gorman 
177f15ca78eSOleg Nesterov 	return &default_policy;
1785606e387SMel Gorman }
1795606e387SMel Gorman 
18037012946SDavid Rientjes static const struct mempolicy_operations {
18137012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18337012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18437012946SDavid Rientjes 
185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186f5b087b5SDavid Rientjes {
1876d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1884c50bc01SDavid Rientjes }
1894c50bc01SDavid Rientjes 
1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1914c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1924c50bc01SDavid Rientjes {
1934c50bc01SDavid Rientjes 	nodemask_t tmp;
1944c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1954c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
196f5b087b5SDavid Rientjes }
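
/*
 * Worked example, illustrative only: with *orig = {0,3} and *rel = {4,6}
 * (weight 2), nodes_fold() wraps orig modulo 2 into tmp = {0,1}, and
 * nodes_onto() maps bit i of tmp onto the i-th set bit of *rel, giving
 * *ret = {4,6}.  This is the remapping behind MPOL_F_RELATIVE_NODES.
 */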
197f5b087b5SDavid Rientjes 
198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19937012946SDavid Rientjes {
20037012946SDavid Rientjes 	if (nodes_empty(*nodes))
20137012946SDavid Rientjes 		return -EINVAL;
202269fbe72SBen Widawsky 	pol->nodes = *nodes;
20337012946SDavid Rientjes 	return 0;
20437012946SDavid Rientjes }
20537012946SDavid Rientjes 
20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20737012946SDavid Rientjes {
2087858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2097858d7bcSFeng Tang 		return -EINVAL;
210269fbe72SBen Widawsky 
211269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
212269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21337012946SDavid Rientjes 	return 0;
21437012946SDavid Rientjes }
21537012946SDavid Rientjes 
21658568d2aSMiao Xie /*
21758568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21858568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2197858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
22058568d2aSMiao Xie  *
22158568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
222c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22358568d2aSMiao Xie  */
2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2254bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22658568d2aSMiao Xie {
22758568d2aSMiao Xie 	int ret;
22858568d2aSMiao Xie 
2297858d7bcSFeng Tang 	/*
2307858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2317858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2327858d7bcSFeng Tang 	 * constructor.
2337858d7bcSFeng Tang 	 */
2347858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23558568d2aSMiao Xie 		return 0;
2367858d7bcSFeng Tang 
23701f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2384bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23901f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
24058568d2aSMiao Xie 
24158568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2427858d7bcSFeng Tang 
24358568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2444bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24558568d2aSMiao Xie 	else
2464bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2474bfc4495SKAMEZAWA Hiroyuki 
24858568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24958568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
25058568d2aSMiao Xie 	else
2517858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
25258568d2aSMiao Xie 
2534bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25458568d2aSMiao Xie 	return ret;
25558568d2aSMiao Xie }
25658568d2aSMiao Xie 
25758568d2aSMiao Xie /*
25858568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25958568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
26058568d2aSMiao Xie  */
261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262028fec41SDavid Rientjes 				  nodemask_t *nodes)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds 	struct mempolicy *policy;
2651da177e4SLinus Torvalds 
266028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26700ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268140d5a49SPaul Mundt 
2693e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2703e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27137012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
272d3a71033SLee Schermerhorn 		return NULL;
27337012946SDavid Rientjes 	}
2743e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2753e1f0645SDavid Rientjes 
2763e1f0645SDavid Rientjes 	/*
2773e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2783e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2793e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2803e1f0645SDavid Rientjes 	 */
2813e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2823e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2833e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2843e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2853e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2867858d7bcSFeng Tang 
2877858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2883e1f0645SDavid Rientjes 		}
289479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2908d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2918d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2928d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
293479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2943e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2953e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2961da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2971da177e4SLinus Torvalds 	if (!policy)
2981da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2991da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30045c4745aSLee Schermerhorn 	policy->mode = mode;
30137012946SDavid Rientjes 	policy->flags = flags;
302c6018b4bSAneesh Kumar K.V 	policy->home_node = NUMA_NO_NODE;
3033e1f0645SDavid Rientjes 
30437012946SDavid Rientjes 	return policy;
30537012946SDavid Rientjes }
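
/*
 * Examples of the checks above, illustrative only: mpol_new(MPOL_PREFERRED,
 * 0, &empty_mask) quietly degrades to MPOL_LOCAL; the same call with
 * MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES set fails with -EINVAL, as
 * does MPOL_LOCAL with a non-empty nodemask or MPOL_BIND with an empty one.
 */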
30637012946SDavid Rientjes 
30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30952cd3b07SLee Schermerhorn {
31052cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31152cd3b07SLee Schermerhorn 		return;
31252cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31352cd3b07SLee Schermerhorn }
31452cd3b07SLee Schermerhorn 
315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31637012946SDavid Rientjes {
31737012946SDavid Rientjes }
31837012946SDavid Rientjes 
319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3201d0d2680SDavid Rientjes {
3211d0d2680SDavid Rientjes 	nodemask_t tmp;
3221d0d2680SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3271d0d2680SDavid Rientjes 	else {
328269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329213980c0SVlastimil Babka 								*nodes);
33029b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3311d0d2680SDavid Rientjes 	}
33237012946SDavid Rientjes 
333708c1bbcSMiao Xie 	if (nodes_empty(tmp))
334708c1bbcSMiao Xie 		tmp = *nodes;
335708c1bbcSMiao Xie 
336269fbe72SBen Widawsky 	pol->nodes = tmp;
33737012946SDavid Rientjes }
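
/*
 * Rebind example, illustrative only: a policy created over nodes {0,1} with
 * MPOL_F_STATIC_NODES keeps w.user_nodemask = {0,1}; if the cpuset later
 * allows only {1,2}, the nodes_and() above yields pol->nodes = {1}.  With
 * no user flag the nodes are remapped positionally, so pol->nodes = {0,1}
 * moving from mems_allowed {0,1} to {2,3} becomes {2,3}.
 */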
33837012946SDavid Rientjes 
33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
340213980c0SVlastimil Babka 						const nodemask_t *nodes)
34137012946SDavid Rientjes {
34237012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3431d0d2680SDavid Rientjes }
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie /*
346708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347708c1bbcSMiao Xie  *
348c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
350213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
351708c1bbcSMiao Xie  */
352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35337012946SDavid Rientjes {
354018160adSWang Cheng 	if (!pol || pol->mode == MPOL_LOCAL)
35537012946SDavid Rientjes 		return;
3567858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35737012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35837012946SDavid Rientjes 		return;
359708c1bbcSMiao Xie 
360213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3611d0d2680SDavid Rientjes }
3621d0d2680SDavid Rientjes 
3631d0d2680SDavid Rientjes /*
3641d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires a task
3651d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
36658568d2aSMiao Xie  *
36758568d2aSMiao Xie  * Called with task's alloc_lock held.
3681d0d2680SDavid Rientjes  */
3691d0d2680SDavid Rientjes 
370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3711d0d2680SDavid Rientjes {
372213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3731d0d2680SDavid Rientjes }
3741d0d2680SDavid Rientjes 
3751d0d2680SDavid Rientjes /*
3761d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3771d0d2680SDavid Rientjes  *
378c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3791d0d2680SDavid Rientjes  */
3801d0d2680SDavid Rientjes 
3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3821d0d2680SDavid Rientjes {
3831d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
38466850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
3851d0d2680SDavid Rientjes 
386d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
38766850be5SLiam R. Howlett 	for_each_vma(vmi, vma)
388213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
389d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3901d0d2680SDavid Rientjes }
3911d0d2680SDavid Rientjes 
39237012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
39337012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39437012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
397be897d48SFeng Tang 		.create = mpol_new_nodemask,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_PREFERRED] = {
40137012946SDavid Rientjes 		.create = mpol_new_preferred,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes 	[MPOL_BIND] = {
405be897d48SFeng Tang 		.create = mpol_new_nodemask,
40637012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40737012946SDavid Rientjes 	},
4087858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4097858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4107858d7bcSFeng Tang 	},
411b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
412be897d48SFeng Tang 		.create = mpol_new_nodemask,
413b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
414b27abaccSDave Hansen 	},
41537012946SDavid Rientjes };
41637012946SDavid Rientjes 
4174a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
418fc301289SChristoph Lameter 				unsigned long flags);
4191a75a6c8SChristoph Lameter 
4206f4576e3SNaoya Horiguchi struct queue_pages {
4216f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4226f4576e3SNaoya Horiguchi 	unsigned long flags;
4236f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
424f18da660SLi Xinhai 	unsigned long start;
425f18da660SLi Xinhai 	unsigned long end;
426f18da660SLi Xinhai 	struct vm_area_struct *first;
4276f4576e3SNaoya Horiguchi };
4286f4576e3SNaoya Horiguchi 
42998094945SNaoya Horiguchi /*
430d451b89dSVishal Moola (Oracle)  * Check if the folio's nid is in qp->nmask.
43188aaa2a1SNaoya Horiguchi  *
43288aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
43388aaa2a1SNaoya Horiguchi  * in the inverse of qp->nmask.
43488aaa2a1SNaoya Horiguchi  */
435d451b89dSVishal Moola (Oracle) static inline bool queue_folio_required(struct folio *folio,
43688aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43788aaa2a1SNaoya Horiguchi {
438d451b89dSVishal Moola (Oracle) 	int nid = folio_nid(folio);
43988aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
44088aaa2a1SNaoya Horiguchi 
44188aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
44288aaa2a1SNaoya Horiguchi }
44388aaa2a1SNaoya Horiguchi 
444a7f40cfeSYang Shi /*
445de1f5055SVishal Moola (Oracle)  * queue_folios_pmd() has three possible return values:
446de1f5055SVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or
447e5947d23SYang Shi  *     a special page is met, i.e. the huge zero page.
448de1f5055SVishal Moola (Oracle)  * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449d8835445SYang Shi  *     specified.
450d8835445SYang Shi  * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified and an
451de1f5055SVishal Moola (Oracle)  *        existing folio was already on a node that does not follow the
452d8835445SYang Shi  *        policy.
453a7f40cfeSYang Shi  */
454de1f5055SVishal Moola (Oracle) static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
456959a7e13SJules Irenge 	__releases(ptl)
457c8633798SNaoya Horiguchi {
458c8633798SNaoya Horiguchi 	int ret = 0;
459de1f5055SVishal Moola (Oracle) 	struct folio *folio;
460c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
461c8633798SNaoya Horiguchi 	unsigned long flags;
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
464a7f40cfeSYang Shi 		ret = -EIO;
465c8633798SNaoya Horiguchi 		goto unlock;
466c8633798SNaoya Horiguchi 	}
467de1f5055SVishal Moola (Oracle) 	folio = pfn_folio(pmd_pfn(*pmd));
468de1f5055SVishal Moola (Oracle) 	if (is_huge_zero_page(&folio->page)) {
469e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
4706d97cf88SMiaohe Lin 		goto unlock;
471c8633798SNaoya Horiguchi 	}
472d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
473c8633798SNaoya Horiguchi 		goto unlock;
474c8633798SNaoya Horiguchi 
475c8633798SNaoya Horiguchi 	flags = qp->flags;
476de1f5055SVishal Moola (Oracle) 	/* go to folio migration */
477a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
4794a64981dSVishal Moola (Oracle) 		    migrate_folio_add(folio, qp->pagelist, flags)) {
480d8835445SYang Shi 			ret = 1;
481a7f40cfeSYang Shi 			goto unlock;
482a7f40cfeSYang Shi 		}
483a7f40cfeSYang Shi 	} else
484a7f40cfeSYang Shi 		ret = -EIO;
485c8633798SNaoya Horiguchi unlock:
486c8633798SNaoya Horiguchi 	spin_unlock(ptl);
487c8633798SNaoya Horiguchi 	return ret;
488c8633798SNaoya Horiguchi }
489c8633798SNaoya Horiguchi 
49088aaa2a1SNaoya Horiguchi /*
49198094945SNaoya Horiguchi  * Scan through pages, checking if they follow certain conditions,
49298094945SNaoya Horiguchi  * and move them to the pagelist if they do.
493d8835445SYang Shi  *
4943dae02bbSVishal Moola (Oracle)  * queue_folios_pte_range() has three possible return values:
4953dae02bbSVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or
496e5947d23SYang Shi  *     a special page is met, i.e. the zero page.
4973dae02bbSVishal Moola (Oracle)  * 1 - there is an unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498d8835445SYang Shi  *     specified.
4993dae02bbSVishal Moola (Oracle)  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
500d8835445SYang Shi  *        on a node that does not follow the policy.
50198094945SNaoya Horiguchi  */
5023dae02bbSVishal Moola (Oracle) static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
5036f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5041da177e4SLinus Torvalds {
5056f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5063dae02bbSVishal Moola (Oracle) 	struct folio *folio;
5076f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5086f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
509d8835445SYang Shi 	bool has_unmovable = false;
5103f088420SShijie Luo 	pte_t *pte, *mapped_pte;
511705e87c0SHugh Dickins 	spinlock_t *ptl;
512941150a3SHugh Dickins 
513c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
514bc78b5edSMiaohe Lin 	if (ptl)
515de1f5055SVishal Moola (Oracle) 		return queue_folios_pmd(pmd, ptl, addr, end, walk);
51691612e0dSHugh Dickins 
5173f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
518*7780d040SHugh Dickins 	if (!pte) {
519*7780d040SHugh Dickins 		walk->action = ACTION_AGAIN;
520*7780d040SHugh Dickins 		return 0;
521*7780d040SHugh Dickins 	}
5226f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
52391612e0dSHugh Dickins 		if (!pte_present(*pte))
52491612e0dSHugh Dickins 			continue;
5253dae02bbSVishal Moola (Oracle) 		folio = vm_normal_folio(vma, addr, *pte);
5263dae02bbSVishal Moola (Oracle) 		if (!folio || folio_is_zone_device(folio))
52791612e0dSHugh Dickins 			continue;
528053837fcSNick Piggin 		/*
5293dae02bbSVishal Moola (Oracle) 		 * vm_normal_folio() filters out zero pages, but there might
5303dae02bbSVishal Moola (Oracle) 		 * still be reserved folios to skip, perhaps in a VDSO.
531053837fcSNick Piggin 		 */
5323dae02bbSVishal Moola (Oracle) 		if (folio_test_reserved(folio))
533f4598c8bSChristoph Lameter 			continue;
534d451b89dSVishal Moola (Oracle) 		if (!queue_folio_required(folio, qp))
53538e35860SChristoph Lameter 			continue;
536a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
537d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
538d8835445SYang Shi 			if (!vma_migratable(vma)) {
539d8835445SYang Shi 				has_unmovable = true;
540a7f40cfeSYang Shi 				break;
541d8835445SYang Shi 			}
542a53190a4SYang Shi 
543a53190a4SYang Shi 			/*
544a53190a4SYang Shi 			 * Do not abort immediately since there may be
545a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  Still
546a53190a4SYang Shi 			 * need to migrate other LRU pages.
547a53190a4SYang Shi 			 */
5484a64981dSVishal Moola (Oracle) 			if (migrate_folio_add(folio, qp->pagelist, flags))
549a53190a4SYang Shi 				has_unmovable = true;
550a7f40cfeSYang Shi 		} else
551a7f40cfeSYang Shi 			break;
5526f4576e3SNaoya Horiguchi 	}
5533f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5546f4576e3SNaoya Horiguchi 	cond_resched();
555d8835445SYang Shi 
556d8835445SYang Shi 	if (has_unmovable)
557d8835445SYang Shi 		return 1;
558d8835445SYang Shi 
559a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
56091612e0dSHugh Dickins }
56191612e0dSHugh Dickins 
5620a2c1e81SVishal Moola (Oracle) static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
5636f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5646f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
565e2d8cf40SNaoya Horiguchi {
566dcf17635SLi Xinhai 	int ret = 0;
567e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5686f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
569dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
5700a2c1e81SVishal Moola (Oracle) 	struct folio *folio;
571cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
572d4c54919SNaoya Horiguchi 	pte_t entry;
573e2d8cf40SNaoya Horiguchi 
5746f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5756f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
576d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
577d4c54919SNaoya Horiguchi 		goto unlock;
5780a2c1e81SVishal Moola (Oracle) 	folio = pfn_folio(pte_pfn(entry));
579d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
580e2d8cf40SNaoya Horiguchi 		goto unlock;
581dcf17635SLi Xinhai 
582dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
583dcf17635SLi Xinhai 		/*
5840a2c1e81SVishal Moola (Oracle) 		 * STRICT alone means only detecting misplaced folio and no
585dcf17635SLi Xinhai 		 * need to further check other vma.
586dcf17635SLi Xinhai 		 */
587dcf17635SLi Xinhai 		ret = -EIO;
588dcf17635SLi Xinhai 		goto unlock;
589dcf17635SLi Xinhai 	}
590dcf17635SLi Xinhai 
591dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
592dcf17635SLi Xinhai 		/*
593dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would
5940a2c1e81SVishal Moola (Oracle) 		 * have stopped walking the current vma.
5950a2c1e81SVishal Moola (Oracle) 		 * Detecting misplaced folio but allow migrating folios which
596dcf17635SLi Xinhai 		 * have been queued.
597dcf17635SLi Xinhai 		 */
598dcf17635SLi Xinhai 		ret = 1;
599dcf17635SLi Xinhai 		goto unlock;
600dcf17635SLi Xinhai 	}
601dcf17635SLi Xinhai 
6020a2c1e81SVishal Moola (Oracle) 	/*
6030a2c1e81SVishal Moola (Oracle) 	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If a
6040a2c1e81SVishal Moola (Oracle) 	 * folio is shared, it is likely not worth migrating.
6050a2c1e81SVishal Moola (Oracle) 	 *
6060a2c1e81SVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
6070a2c1e81SVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
6080a2c1e81SVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
6090a2c1e81SVishal Moola (Oracle) 	 */
610e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
6110a2c1e81SVishal Moola (Oracle) 	    (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
61273bdf65eSMike Kravetz 	     !hugetlb_pmd_shared(pte))) {
6139747b9e9SBaolin Wang 		if (!isolate_hugetlb(folio, qp->pagelist) &&
614dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
615dcf17635SLi Xinhai 			/*
6160a2c1e81SVishal Moola (Oracle) 			 * Failed to isolate the folio, but allow migrating folios
617dcf17635SLi Xinhai 			 * which have been queued.
618dcf17635SLi Xinhai 			 */
619dcf17635SLi Xinhai 			ret = 1;
620dcf17635SLi Xinhai 	}
621e2d8cf40SNaoya Horiguchi unlock:
622cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
623e2d8cf40SNaoya Horiguchi #else
624e2d8cf40SNaoya Horiguchi 	BUG();
625e2d8cf40SNaoya Horiguchi #endif
626dcf17635SLi Xinhai 	return ret;
6271da177e4SLinus Torvalds }
6281da177e4SLinus Torvalds 
6295877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
630b24f53a0SLee Schermerhorn /*
6314b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6324b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6334b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6344b10e7d5SMel Gorman  *
6354b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6364b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6374b10e7d5SMel Gorman  * changes to the core.
638b24f53a0SLee Schermerhorn  */
6394b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6404b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
641b24f53a0SLee Schermerhorn {
6424a18419fSNadav Amit 	struct mmu_gather tlb;
643a79390f5SPeter Xu 	long nr_updated;
644b24f53a0SLee Schermerhorn 
6454a18419fSNadav Amit 	tlb_gather_mmu(&tlb, vma->vm_mm);
6464a18419fSNadav Amit 
6471ef488edSDavid Hildenbrand 	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
648d1751118SPeter Xu 	if (nr_updated > 0)
64903c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
650b24f53a0SLee Schermerhorn 
6514a18419fSNadav Amit 	tlb_finish_mmu(&tlb);
6524a18419fSNadav Amit 
6534b10e7d5SMel Gorman 	return nr_updated;
654b24f53a0SLee Schermerhorn }
655b24f53a0SLee Schermerhorn #else
656b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
657b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
658b24f53a0SLee Schermerhorn {
659b24f53a0SLee Schermerhorn 	return 0;
660b24f53a0SLee Schermerhorn }
6615877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
662b24f53a0SLee Schermerhorn 
6636f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6646f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6651da177e4SLinus Torvalds {
66666850be5SLiam R. Howlett 	struct vm_area_struct *next, *vma = walk->vma;
6676f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6685b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6696f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
670dc9aa5b9SChristoph Lameter 
671a18b3ac2SLi Xinhai 	/* range check first */
672ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
673f18da660SLi Xinhai 
674f18da660SLi Xinhai 	if (!qp->first) {
675f18da660SLi Xinhai 		qp->first = vma;
676f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
677f18da660SLi Xinhai 			(qp->start < vma->vm_start))
678f18da660SLi Xinhai 			/* hole at head side of range */
679a18b3ac2SLi Xinhai 			return -EFAULT;
680a18b3ac2SLi Xinhai 	}
68166850be5SLiam R. Howlett 	next = find_vma(vma->vm_mm, vma->vm_end);
682f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
683f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
68466850be5SLiam R. Howlett 		(!next || vma->vm_end < next->vm_start)))
685f18da660SLi Xinhai 		/* hole at middle or tail of range */
686f18da660SLi Xinhai 		return -EFAULT;
687a18b3ac2SLi Xinhai 
688a7f40cfeSYang Shi 	/*
689a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
690a7f40cfeSYang Shi 	 * regardless of vma_migratable
691a7f40cfeSYang Shi 	 */
692a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
693a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
69448684a65SNaoya Horiguchi 		return 1;
69548684a65SNaoya Horiguchi 
6965b952b3cSAndi Kleen 	if (endvma > end)
6975b952b3cSAndi Kleen 		endvma = end;
698b24f53a0SLee Schermerhorn 
699b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
7002c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
7013122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
7024355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
703b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
7046f4576e3SNaoya Horiguchi 		return 1;
705b24f53a0SLee Schermerhorn 	}
706b24f53a0SLee Schermerhorn 
7076f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
708a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7096f4576e3SNaoya Horiguchi 		return 0;
7106f4576e3SNaoya Horiguchi 	return 1;
7116f4576e3SNaoya Horiguchi }
712b24f53a0SLee Schermerhorn 
7137b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7140a2c1e81SVishal Moola (Oracle) 	.hugetlb_entry		= queue_folios_hugetlb,
7153dae02bbSVishal Moola (Oracle) 	.pmd_entry		= queue_folios_pte_range,
7167b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7177b86ac33SChristoph Hellwig };
7187b86ac33SChristoph Hellwig 
7196f4576e3SNaoya Horiguchi /*
7206f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7216f4576e3SNaoya Horiguchi  *
7226f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7236f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist, which
724d8835445SYang Shi  * is passed via @private.
725d8835445SYang Shi  *
726d8835445SYang Shi  * queue_pages_range() has three possible return values:
727d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
728d8835445SYang Shi  *     specified.
729d8835445SYang Shi  * 0 - queue pages successfully or no misplaced page.
730a85dfc30SYang Shi  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
731a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
732a85dfc30SYang Shi  *         your accessible address space (-EFAULT)
7336f4576e3SNaoya Horiguchi  */
7346f4576e3SNaoya Horiguchi static int
7356f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7366f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7376f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7386f4576e3SNaoya Horiguchi {
739f18da660SLi Xinhai 	int err;
7406f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7416f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7426f4576e3SNaoya Horiguchi 		.flags = flags,
7436f4576e3SNaoya Horiguchi 		.nmask = nodes,
744f18da660SLi Xinhai 		.start = start,
745f18da660SLi Xinhai 		.end = end,
746f18da660SLi Xinhai 		.first = NULL,
7476f4576e3SNaoya Horiguchi 	};
7486f4576e3SNaoya Horiguchi 
749f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
750f18da660SLi Xinhai 
751f18da660SLi Xinhai 	if (!qp.first)
752f18da660SLi Xinhai 		/* whole range in hole */
753f18da660SLi Xinhai 		err = -EFAULT;
754f18da660SLi Xinhai 
755f18da660SLi Xinhai 	return err;
7561da177e4SLinus Torvalds }
7571da177e4SLinus Torvalds 
758869833f2SKOSAKI Motohiro /*
759869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
760c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
761869833f2SKOSAKI Motohiro  */
762869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
763869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7648d34694cSKOSAKI Motohiro {
765869833f2SKOSAKI Motohiro 	int err;
766869833f2SKOSAKI Motohiro 	struct mempolicy *old;
767869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7688d34694cSKOSAKI Motohiro 
7698d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7708d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7718d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7728d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7738d34694cSKOSAKI Motohiro 
774869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
775869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
776869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
777869833f2SKOSAKI Motohiro 
778869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7798d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
780869833f2SKOSAKI Motohiro 		if (err)
781869833f2SKOSAKI Motohiro 			goto err_out;
7828d34694cSKOSAKI Motohiro 	}
783869833f2SKOSAKI Motohiro 
784869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
785c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
786869833f2SKOSAKI Motohiro 	mpol_put(old);
787869833f2SKOSAKI Motohiro 
788869833f2SKOSAKI Motohiro 	return 0;
789869833f2SKOSAKI Motohiro  err_out:
790869833f2SKOSAKI Motohiro 	mpol_put(new);
7918d34694cSKOSAKI Motohiro 	return err;
7928d34694cSKOSAKI Motohiro }
7938d34694cSKOSAKI Motohiro 
794f4e9e0e6SLiam R. Howlett /* Split or merge the VMA (if required) and apply the new policy */
795f4e9e0e6SLiam R. Howlett static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
796f4e9e0e6SLiam R. Howlett 		struct vm_area_struct **prev, unsigned long start,
7979d8cebd4SKOSAKI Motohiro 		unsigned long end, struct mempolicy *new_pol)
7981da177e4SLinus Torvalds {
799f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *merged;
800f4e9e0e6SLiam R. Howlett 	unsigned long vmstart, vmend;
801e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
802f4e9e0e6SLiam R. Howlett 	int err;
8031da177e4SLinus Torvalds 
804f4e9e0e6SLiam R. Howlett 	vmend = min(end, vma->vm_end);
805f4e9e0e6SLiam R. Howlett 	if (start > vma->vm_start) {
806f4e9e0e6SLiam R. Howlett 		*prev = vma;
807f4e9e0e6SLiam R. Howlett 		vmstart = start;
808f4e9e0e6SLiam R. Howlett 	} else {
809f4e9e0e6SLiam R. Howlett 		vmstart = vma->vm_start;
810f4e9e0e6SLiam R. Howlett 	}
8119d8cebd4SKOSAKI Motohiro 
81200ca0f2eSLorenzo Stoakes 	if (mpol_equal(vma_policy(vma), new_pol)) {
81300ca0f2eSLorenzo Stoakes 		*prev = vma;
814f4e9e0e6SLiam R. Howlett 		return 0;
81500ca0f2eSLorenzo Stoakes 	}
816e26a5114SKOSAKI Motohiro 
817f4e9e0e6SLiam R. Howlett 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
818f4e9e0e6SLiam R. Howlett 	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
819f4e9e0e6SLiam R. Howlett 			 vma->anon_vma, vma->vm_file, pgoff, new_pol,
820f4e9e0e6SLiam R. Howlett 			 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
821f4e9e0e6SLiam R. Howlett 	if (merged) {
822f4e9e0e6SLiam R. Howlett 		*prev = merged;
823f4e9e0e6SLiam R. Howlett 		return vma_replace_policy(merged, new_pol);
8241da177e4SLinus Torvalds 	}
825f4e9e0e6SLiam R. Howlett 
8269d8cebd4SKOSAKI Motohiro 	if (vma->vm_start != vmstart) {
827f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmstart, 1);
8289d8cebd4SKOSAKI Motohiro 		if (err)
8291da177e4SLinus Torvalds 			return err;
8301da177e4SLinus Torvalds 	}
8311da177e4SLinus Torvalds 
832f4e9e0e6SLiam R. Howlett 	if (vma->vm_end != vmend) {
833f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmend, 0);
834f4e9e0e6SLiam R. Howlett 		if (err)
835f4e9e0e6SLiam R. Howlett 			return err;
836f4e9e0e6SLiam R. Howlett 	}
837f4e9e0e6SLiam R. Howlett 
838f4e9e0e6SLiam R. Howlett 	*prev = vma;
839f4e9e0e6SLiam R. Howlett 	return vma_replace_policy(vma, new_pol);
840f4e9e0e6SLiam R. Howlett }
841f4e9e0e6SLiam R. Howlett 
8421da177e4SLinus Torvalds /* Set the process memory policy */
843028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
844028fec41SDavid Rientjes 			     nodemask_t *nodes)
8451da177e4SLinus Torvalds {
84658568d2aSMiao Xie 	struct mempolicy *new, *old;
8474bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
84858568d2aSMiao Xie 	int ret;
8491da177e4SLinus Torvalds 
8504bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8514bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
852f4e53d91SLee Schermerhorn 
8534bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8544bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8554bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8564bfc4495SKAMEZAWA Hiroyuki 		goto out;
8574bfc4495SKAMEZAWA Hiroyuki 	}
8582c7c3a7dSOleg Nesterov 
85912c1dc8eSAbel Wu 	task_lock(current);
8604bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
86158568d2aSMiao Xie 	if (ret) {
86212c1dc8eSAbel Wu 		task_unlock(current);
86358568d2aSMiao Xie 		mpol_put(new);
8644bfc4495SKAMEZAWA Hiroyuki 		goto out;
86558568d2aSMiao Xie 	}
86612c1dc8eSAbel Wu 
86758568d2aSMiao Xie 	old = current->mempolicy;
8681da177e4SLinus Torvalds 	current->mempolicy = new;
86945816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87045816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
87158568d2aSMiao Xie 	task_unlock(current);
87258568d2aSMiao Xie 	mpol_put(old);
8734bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8744bfc4495SKAMEZAWA Hiroyuki out:
8754bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8764bfc4495SKAMEZAWA Hiroyuki 	return ret;
8771da177e4SLinus Torvalds }
8781da177e4SLinus Torvalds 
879bea904d5SLee Schermerhorn /*
880bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
88158568d2aSMiao Xie  *
88258568d2aSMiao Xie  * Called with task's alloc_lock held
883bea904d5SLee Schermerhorn  */
884bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8851da177e4SLinus Torvalds {
886dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
887bea904d5SLee Schermerhorn 	if (p == &default_policy)
888bea904d5SLee Schermerhorn 		return;
889bea904d5SLee Schermerhorn 
89045c4745aSLee Schermerhorn 	switch (p->mode) {
89119770b32SMel Gorman 	case MPOL_BIND:
8921da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
893269fbe72SBen Widawsky 	case MPOL_PREFERRED:
894b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
895269fbe72SBen Widawsky 		*nodes = p->nodes;
8961da177e4SLinus Torvalds 		break;
8977858d7bcSFeng Tang 	case MPOL_LOCAL:
8987858d7bcSFeng Tang 		/* return empty node mask for local allocation */
8997858d7bcSFeng Tang 		break;
9001da177e4SLinus Torvalds 	default:
9011da177e4SLinus Torvalds 		BUG();
9021da177e4SLinus Torvalds 	}
9031da177e4SLinus Torvalds }
9041da177e4SLinus Torvalds 
9053b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9061da177e4SLinus Torvalds {
907ba841078SPeter Xu 	struct page *p = NULL;
908f728b9c4SJohn Hubbard 	int ret;
9091da177e4SLinus Torvalds 
910f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
911f728b9c4SJohn Hubbard 	if (ret > 0) {
912f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9131da177e4SLinus Torvalds 		put_page(p);
9141da177e4SLinus Torvalds 	}
915f728b9c4SJohn Hubbard 	return ret;
9161da177e4SLinus Torvalds }
9171da177e4SLinus Torvalds 
9181da177e4SLinus Torvalds /* Retrieve NUMA policy */
919dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9201da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9211da177e4SLinus Torvalds {
9228bccd85fSChristoph Lameter 	int err;
9231da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9241da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9253b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9261da177e4SLinus Torvalds 
927754af6f5SLee Schermerhorn 	if (flags &
928754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9291da177e4SLinus Torvalds 		return -EINVAL;
930754af6f5SLee Schermerhorn 
931754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
932754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
933754af6f5SLee Schermerhorn 			return -EINVAL;
934754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
93558568d2aSMiao Xie 		task_lock(current);
936754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
93758568d2aSMiao Xie 		task_unlock(current);
938754af6f5SLee Schermerhorn 		return 0;
939754af6f5SLee Schermerhorn 	}
940754af6f5SLee Schermerhorn 
9411da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
942bea904d5SLee Schermerhorn 		/*
943bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
944bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
945bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
946bea904d5SLee Schermerhorn 		 */
947d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
94833e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9491da177e4SLinus Torvalds 		if (!vma) {
950d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9511da177e4SLinus Torvalds 			return -EFAULT;
9521da177e4SLinus Torvalds 		}
9531da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9541da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9551da177e4SLinus Torvalds 		else
9561da177e4SLinus Torvalds 			pol = vma->vm_policy;
9571da177e4SLinus Torvalds 	} else if (addr)
9581da177e4SLinus Torvalds 		return -EINVAL;
9591da177e4SLinus Torvalds 
9601da177e4SLinus Torvalds 	if (!pol)
961bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9621da177e4SLinus Torvalds 
9631da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9641da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9653b9aadf7SAndrea Arcangeli 			/*
966f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
967f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
968f728b9c4SJohn Hubbard 			 * valid, "vma" is stale.
9693b9aadf7SAndrea Arcangeli 			 */
9703b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9713b9aadf7SAndrea Arcangeli 			vma = NULL;
9723b9aadf7SAndrea Arcangeli 			mpol_get(pol);
973f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9743b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9751da177e4SLinus Torvalds 			if (err < 0)
9761da177e4SLinus Torvalds 				goto out;
9778bccd85fSChristoph Lameter 			*policy = err;
9781da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
97945c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
980269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9811da177e4SLinus Torvalds 		} else {
9821da177e4SLinus Torvalds 			err = -EINVAL;
9831da177e4SLinus Torvalds 			goto out;
9841da177e4SLinus Torvalds 		}
985bea904d5SLee Schermerhorn 	} else {
986bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
987bea904d5SLee Schermerhorn 						pol->mode;
988d79df630SDavid Rientjes 		/*
989d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
990d79df630SDavid Rientjes 		 * the policy to userspace.
991d79df630SDavid Rientjes 		 */
992d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
993bea904d5SLee Schermerhorn 	}
9941da177e4SLinus Torvalds 
9951da177e4SLinus Torvalds 	err = 0;
99658568d2aSMiao Xie 	if (nmask) {
997c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
998c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
999c6b6ef8bSLee Schermerhorn 		} else {
100058568d2aSMiao Xie 			task_lock(current);
1001bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
100258568d2aSMiao Xie 			task_unlock(current);
100358568d2aSMiao Xie 		}
1004c6b6ef8bSLee Schermerhorn 	}
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds  out:
100752cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10081da177e4SLinus Torvalds 	if (vma)
1009d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10103b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10113b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10121da177e4SLinus Torvalds 	return err;
10131da177e4SLinus Torvalds }
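
/*
 * Illustrative userspace counterpart, not part of the kernel: querying the
 * node backing an existing page with get_mempolicy(2) and
 * MPOL_F_NODE | MPOL_F_ADDR, which reaches lookup_node() above.  The addr
 * below is assumed to be a mapped, faulted-in address.
 *
 *	int node;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 */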
10141da177e4SLinus Torvalds 
1015b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10164a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1017fc301289SChristoph Lameter 				unsigned long flags)
10186ce3c4c0SChristoph Lameter {
10196ce3c4c0SChristoph Lameter 	/*
10204a64981dSVishal Moola (Oracle) 	 * We try to migrate only unshared folios. If a folio is shared,
10214a64981dSVishal Moola (Oracle) 	 * it is likely not worth migrating.
10224a64981dSVishal Moola (Oracle) 	 *
10234a64981dSVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
10244a64981dSVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
10254a64981dSVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
10266ce3c4c0SChristoph Lameter 	 */
10274a64981dSVishal Moola (Oracle) 	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1028be2d5756SBaolin Wang 		if (folio_isolate_lru(folio)) {
10294a64981dSVishal Moola (Oracle) 			list_add_tail(&folio->lru, foliolist);
10304a64981dSVishal Moola (Oracle) 			node_stat_mod_folio(folio,
10314a64981dSVishal Moola (Oracle) 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
10324a64981dSVishal Moola (Oracle) 				folio_nr_pages(folio));
1033a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1034a53190a4SYang Shi 			/*
10354a64981dSVishal Moola (Oracle) 			 * Non-movable folio may reach here.  And, there may be
10364a64981dSVishal Moola (Oracle) 			 * temporary off LRU folios or non-LRU movable folios.
10374a64981dSVishal Moola (Oracle) 			 * Treat them as unmovable folios since they can't be
1038a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1039a53190a4SYang Shi 			 * should return -EIO for this case too.
1040a53190a4SYang Shi 			 */
1041a53190a4SYang Shi 			return -EIO;
104262695a84SNick Piggin 		}
104362695a84SNick Piggin 	}
1044a53190a4SYang Shi 
1045a53190a4SYang Shi 	return 0;
10466ce3c4c0SChristoph Lameter }
10476ce3c4c0SChristoph Lameter 
10486ce3c4c0SChristoph Lameter /*
10497e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10507e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10517e2ab150SChristoph Lameter  */
1052dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1053dbcb0f19SAdrian Bunk 			   int flags)
10547e2ab150SChristoph Lameter {
10557e2ab150SChristoph Lameter 	nodemask_t nmask;
105666850be5SLiam R. Howlett 	struct vm_area_struct *vma;
10577e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10587e2ab150SChristoph Lameter 	int err = 0;
1059a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1060a0976311SJoonsoo Kim 		.nid = dest,
1061a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1062a0976311SJoonsoo Kim 	};
10637e2ab150SChristoph Lameter 
10647e2ab150SChristoph Lameter 	nodes_clear(nmask);
10657e2ab150SChristoph Lameter 	node_set(source, nmask);
10667e2ab150SChristoph Lameter 
106708270807SMinchan Kim 	/*
106808270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
106908270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107008270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
107108270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
107266850be5SLiam R. Howlett 	vma = find_vma(mm, 0);
107308270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
107466850be5SLiam R. Howlett 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
10757e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10767e2ab150SChristoph Lameter 
1077cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1078a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10795ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1080cf608ac1SMinchan Kim 		if (err)
1081e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1082cf608ac1SMinchan Kim 	}
108395a402c3SChristoph Lameter 
10847e2ab150SChristoph Lameter 	return err;
10857e2ab150SChristoph Lameter }
10867e2ab150SChristoph Lameter 
10877e2ab150SChristoph Lameter /*
10887e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10897e2ab150SChristoph Lameter  * layout as much as possible.
109039743889SChristoph Lameter  *
109139743889SChristoph Lameter  * Returns the number of pages that could not be moved.
109239743889SChristoph Lameter  */
10930ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10940ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
109539743889SChristoph Lameter {
10967e2ab150SChristoph Lameter 	int busy = 0;
1097f555befdSJan Stancek 	int err = 0;
10987e2ab150SChristoph Lameter 	nodemask_t tmp;
109939743889SChristoph Lameter 
1100361a2a22SMinchan Kim 	lru_cache_disable();
11010aedadf9SChristoph Lameter 
1102d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1103d4984711SChristoph Lameter 
11047e2ab150SChristoph Lameter 	/*
11057e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11067e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11077e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11087e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11097e2ab150SChristoph Lameter 	 *
11107e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11117e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11127e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11137e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11147e2ab150SChristoph Lameter 	 *
11157e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits in
11167e2ab150SChristoph Lameter 	 * 'tmp' correspond to the same bit in 'to', we are done
11177e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11187e2ab150SChristoph Lameter 	 *
11197e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11207e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11217e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11227e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11237e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11247e2ab150SChristoph Lameter 	 *
11257e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11267e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11277e2ab150SChristoph Lameter 	 * that not only moved, but, better still, moved to an empty slot
11287e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out with that pair.
1129ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
11307e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11317e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11327e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11337e2ab150SChristoph Lameter 	 */
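	/*
	 * Illustrative trace (not part of the original source): with
	 * *from = {0,1} and *to = {1,2}, tmp starts as {0,1} and
	 * node_remap() maps 0->1 and 1->2.  Scanning tmp, s=0 yields
	 * d=1, which is still set in tmp, so the pair is only
	 * memorized; s=1 yields d=2, which is not in tmp, so we break
	 * out and migrate 1->2 first.  The next pass of the while loop
	 * then migrates 0->1 into the node just vacated.
	 */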
11347e2ab150SChristoph Lameter 
11350ce72d4fSAndrew Morton 	tmp = *from;
11367e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11377e2ab150SChristoph Lameter 		int s, d;
1138b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11397e2ab150SChristoph Lameter 		int dest = 0;
11407e2ab150SChristoph Lameter 
11417e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11424a5b18ccSLarry Woodman 
11434a5b18ccSLarry Woodman 			/*
11444a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11454a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11464a5b18ccSLarry Woodman 			 * threads and memory areas.
11474a5b18ccSLarry Woodman 			 *
11484a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11494a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11504a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11514a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11524a5b18ccSLarry Woodman 			 * mask.
11534a5b18ccSLarry Woodman 			 *
11544a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11554a5b18ccSLarry Woodman 			 *          [0-7]  -> [3,4,5] moves only 0,1,2,6,7.
11564a5b18ccSLarry Woodman 			 */
11574a5b18ccSLarry Woodman 
11580ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11590ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11604a5b18ccSLarry Woodman 				continue;
11614a5b18ccSLarry Woodman 
11620ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11637e2ab150SChristoph Lameter 			if (s == d)
11647e2ab150SChristoph Lameter 				continue;
11657e2ab150SChristoph Lameter 
11667e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11677e2ab150SChristoph Lameter 			dest = d;
11687e2ab150SChristoph Lameter 
11697e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11707e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11717e2ab150SChristoph Lameter 				break;
11727e2ab150SChristoph Lameter 		}
1173b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11747e2ab150SChristoph Lameter 			break;
11757e2ab150SChristoph Lameter 
11767e2ab150SChristoph Lameter 		node_clear(source, tmp);
11777e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11787e2ab150SChristoph Lameter 		if (err > 0)
11797e2ab150SChristoph Lameter 			busy += err;
11807e2ab150SChristoph Lameter 		if (err < 0)
11817e2ab150SChristoph Lameter 			break;
118239743889SChristoph Lameter 	}
1183d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1184d479960eSMinchan Kim 
1185361a2a22SMinchan Kim 	lru_cache_enable();
11867e2ab150SChristoph Lameter 	if (err < 0)
11877e2ab150SChristoph Lameter 		return err;
11887e2ab150SChristoph Lameter 	return busy;
119039743889SChristoph Lameter }
119139743889SChristoph Lameter 
11923ad33b24SLee Schermerhorn /*
11933ad33b24SLee Schermerhorn  * Allocate a new folio for folio migration, based on vma policy.
1194d05f0cdcSHugh Dickins  * Start by assuming the folio is mapped by the same vma that contains @start.
11953ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11963ad33b24SLee Schermerhorn  * list of folios handed to migrate_pages()--which is how we get here--
11973ad33b24SLee Schermerhorn  * is in virtual address order.
11983ad33b24SLee Schermerhorn  */
11994e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
120095a402c3SChristoph Lameter {
1201d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12023f649ab7SKees Cook 	unsigned long address;
120366850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, current->mm, start);
1204ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
120595a402c3SChristoph Lameter 
120666850be5SLiam R. Howlett 	for_each_vma(vmi, vma) {
12074e096ae1SMatthew Wilcox (Oracle) 		address = page_address_in_vma(&src->page, vma);
12083ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12093ad33b24SLee Schermerhorn 			break;
12103ad33b24SLee Schermerhorn 	}
12113ad33b24SLee Schermerhorn 
1212d0ce0e47SSidhartha Kumar 	if (folio_test_hugetlb(src)) {
12134e096ae1SMatthew Wilcox (Oracle) 		return alloc_hugetlb_folio_vma(folio_hstate(src),
1214389c8178SMichal Hocko 				vma, address);
1215d0ce0e47SSidhartha Kumar 	}
1216c8633798SNaoya Horiguchi 
1217ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1218ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1219ec4858e0SMatthew Wilcox (Oracle) 
122011c731e8SWanpeng Li 	/*
1221ec4858e0SMatthew Wilcox (Oracle) 	 * if !vma, vma_alloc_folio() will use task or system default policy
122211c731e8SWanpeng Li 	 */
12234e096ae1SMatthew Wilcox (Oracle) 	return vma_alloc_folio(gfp, folio_order(src), vma, address,
1224ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
122595a402c3SChristoph Lameter }
1226b20a3503SChristoph Lameter #else
1227b20a3503SChristoph Lameter 
12284a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1229b20a3503SChristoph Lameter 				unsigned long flags)
1230b20a3503SChristoph Lameter {
1231a53190a4SYang Shi 	return -EIO;
1232b20a3503SChristoph Lameter }
1233b20a3503SChristoph Lameter 
12340ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12350ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1236b20a3503SChristoph Lameter {
1237b20a3503SChristoph Lameter 	return -ENOSYS;
1238b20a3503SChristoph Lameter }
123995a402c3SChristoph Lameter 
12404e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
124195a402c3SChristoph Lameter {
124295a402c3SChristoph Lameter 	return NULL;
124395a402c3SChristoph Lameter }
1244b20a3503SChristoph Lameter #endif
1245b20a3503SChristoph Lameter 
1246dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1247028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1248028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12496ce3c4c0SChristoph Lameter {
12506ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
1251f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1252f4e9e0e6SLiam R. Howlett 	struct vma_iterator vmi;
12536ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12546ce3c4c0SChristoph Lameter 	unsigned long end;
12556ce3c4c0SChristoph Lameter 	int err;
1256d8835445SYang Shi 	int ret;
12576ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12586ce3c4c0SChristoph Lameter 
1259b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12606ce3c4c0SChristoph Lameter 		return -EINVAL;
126174c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12626ce3c4c0SChristoph Lameter 		return -EPERM;
12636ce3c4c0SChristoph Lameter 
12646ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12656ce3c4c0SChristoph Lameter 		return -EINVAL;
12666ce3c4c0SChristoph Lameter 
12676ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12686ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12696ce3c4c0SChristoph Lameter 
1270aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
12716ce3c4c0SChristoph Lameter 	end = start + len;
12726ce3c4c0SChristoph Lameter 
12736ce3c4c0SChristoph Lameter 	if (end < start)
12746ce3c4c0SChristoph Lameter 		return -EINVAL;
12756ce3c4c0SChristoph Lameter 	if (end == start)
12766ce3c4c0SChristoph Lameter 		return 0;
12776ce3c4c0SChristoph Lameter 
1278028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12796ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12806ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12816ce3c4c0SChristoph Lameter 
1282b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1283b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1284b24f53a0SLee Schermerhorn 
12856ce3c4c0SChristoph Lameter 	/*
12866ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operating
12876ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all.
12886ce3c4c0SChristoph Lameter 	 */
12896ce3c4c0SChristoph Lameter 	if (!new)
12906ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12916ce3c4c0SChristoph Lameter 
1292028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1293028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
129400ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12956ce3c4c0SChristoph Lameter 
12960aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1298361a2a22SMinchan Kim 		lru_cache_disable();
12990aedadf9SChristoph Lameter 	}
13004bfc4495SKAMEZAWA Hiroyuki 	{
13014bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13024bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1303d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13044bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13054bfc4495SKAMEZAWA Hiroyuki 			if (err)
1306d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13074bfc4495SKAMEZAWA Hiroyuki 		} else
13084bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13094bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13104bfc4495SKAMEZAWA Hiroyuki 	}
1311b05ca738SKOSAKI Motohiro 	if (err)
1312b05ca738SKOSAKI Motohiro 		goto mpol_out;
1313b05ca738SKOSAKI Motohiro 
1314d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13156ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1316d8835445SYang Shi 
1317d8835445SYang Shi 	if (ret < 0) {
1318a85dfc30SYang Shi 		err = ret;
1319d8835445SYang Shi 		goto up_out;
1320d8835445SYang Shi 	}
1321d8835445SYang Shi 
1322f4e9e0e6SLiam R. Howlett 	vma_iter_init(&vmi, mm, start);
1323f4e9e0e6SLiam R. Howlett 	prev = vma_prev(&vmi);
1324f4e9e0e6SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1325f4e9e0e6SLiam R. Howlett 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1326f4e9e0e6SLiam R. Howlett 		if (err)
1327f4e9e0e6SLiam R. Howlett 			break;
1328f4e9e0e6SLiam R. Howlett 	}
13297e2ab150SChristoph Lameter 
1330b24f53a0SLee Schermerhorn 	if (!err) {
1331b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1332b24f53a0SLee Schermerhorn 
1333cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1334b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
13354e096ae1SMatthew Wilcox (Oracle) 			nr_failed = migrate_pages(&pagelist, new_folio, NULL,
13365ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1337cf608ac1SMinchan Kim 			if (nr_failed)
133874060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1339cf608ac1SMinchan Kim 		}
13406ce3c4c0SChristoph Lameter 
1341d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13426ce3c4c0SChristoph Lameter 			err = -EIO;
1343a85dfc30SYang Shi 	} else {
1344d8835445SYang Shi up_out:
1345a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1346a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1347a85dfc30SYang Shi 	}
1348a85dfc30SYang Shi 
1349d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1350b05ca738SKOSAKI Motohiro mpol_out:
1351f0be3d32SLee Schermerhorn 	mpol_put(new);
1352d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1353361a2a22SMinchan Kim 		lru_cache_enable();
13546ce3c4c0SChristoph Lameter 	return err;
13556ce3c4c0SChristoph Lameter }
13566ce3c4c0SChristoph Lameter 
135739743889SChristoph Lameter /*
13588bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13598bccd85fSChristoph Lameter  */
1360e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1361e130242dSArnd Bergmann 		      unsigned long maxnode)
1362e130242dSArnd Bergmann {
1363e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1364e130242dSArnd Bergmann 	int ret;
1365e130242dSArnd Bergmann 
1366e130242dSArnd Bergmann 	if (in_compat_syscall())
1367e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1368e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1369e130242dSArnd Bergmann 					maxnode);
1370e130242dSArnd Bergmann 	else
1371e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1372e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1373e130242dSArnd Bergmann 
1374e130242dSArnd Bergmann 	if (ret)
1375e130242dSArnd Bergmann 		return -EFAULT;
1376e130242dSArnd Bergmann 
1377e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1378e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1379e130242dSArnd Bergmann 
1380e130242dSArnd Bergmann 	return 0;
1381e130242dSArnd Bergmann }
13828bccd85fSChristoph Lameter 
13838bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138439743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13858bccd85fSChristoph Lameter 		     unsigned long maxnode)
13868bccd85fSChristoph Lameter {
13878bccd85fSChristoph Lameter 	--maxnode;
13888bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13898bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13908bccd85fSChristoph Lameter 		return 0;
1391a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1392636f13c1SChris Wright 		return -EINVAL;
13938bccd85fSChristoph Lameter 
139456521e7aSYisheng Xie 	/*
139656521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1396e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1397e130242dSArnd Bergmann 	 * starting at the end.
139856521e7aSYisheng Xie 	 */
1399e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1400e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1401e130242dSArnd Bergmann 		unsigned long t;
14028bccd85fSChristoph Lameter 
1403000eca5dSTianyu Li 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
140456521e7aSYisheng Xie 			return -EFAULT;
1405e130242dSArnd Bergmann 
1406e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1407e130242dSArnd Bergmann 			maxnode -= bits;
1408e130242dSArnd Bergmann 		} else {
1409e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1410e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1411e130242dSArnd Bergmann 		}
1412e130242dSArnd Bergmann 		if (t)
141356521e7aSYisheng Xie 			return -EINVAL;
141456521e7aSYisheng Xie 	}
141556521e7aSYisheng Xie 
1416e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14178bccd85fSChristoph Lameter }
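/*
 * Illustrative (not part of the original source): assuming
 * MAX_NUMNODES == 64 and BITS_PER_LONG == 64, a caller passing
 * maxnode == 129 leaves 128 bits to consider after the initial
 * --maxnode; the loop above then reads the user's second long
 * (bits 64-127) and fails with -EINVAL unless it is entirely clear,
 * after which only the first long is copied into *nodes.
 */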
14188bccd85fSChristoph Lameter 
14198bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14208bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14218bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14228bccd85fSChristoph Lameter {
14238bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1424050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1425e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1426e130242dSArnd Bergmann 
1427e130242dSArnd Bergmann 	if (compat)
1428e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14298bccd85fSChristoph Lameter 
14308bccd85fSChristoph Lameter 	if (copy > nbytes) {
14318bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14328bccd85fSChristoph Lameter 			return -EINVAL;
14338bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14348bccd85fSChristoph Lameter 			return -EFAULT;
14358bccd85fSChristoph Lameter 		copy = nbytes;
1436e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14378bccd85fSChristoph Lameter 	}
1438e130242dSArnd Bergmann 
1439e130242dSArnd Bergmann 	if (compat)
1440e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1441e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1442e130242dSArnd Bergmann 
14438bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14448bccd85fSChristoph Lameter }
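/*
 * Illustrative (not part of the original source): assuming
 * nr_node_ids == 64 on a 64-bit, non-compat kernel, a caller passing
 * maxnode == 1024 asks for 128 bytes; the trailing 120 bytes of the
 * user buffer are cleared and only the first 8 bytes carry the
 * actual nodemask.
 */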
14458bccd85fSChristoph Lameter 
144695837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
144795837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
144895837924SFeng Tang {
144995837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
145095837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1451b27abaccSDave Hansen 
1452a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >= MPOL_MAX)
145395837924SFeng Tang 		return -EINVAL;
145495837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
145595837924SFeng Tang 		return -EINVAL;
14566d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14576d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14586d2aec9eSEric Dumazet 			return -EINVAL;
14596d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14606d2aec9eSEric Dumazet 	}
146195837924SFeng Tang 	return 0;
146295837924SFeng Tang }
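/*
 * Illustrative (not part of the original source): a userspace mode
 * argument of MPOL_INTERLEAVE | MPOL_F_STATIC_NODES splits into
 * *mode == MPOL_INTERLEAVE and *flags == MPOL_F_STATIC_NODES above;
 * combining MPOL_F_STATIC_NODES with MPOL_F_RELATIVE_NODES, or
 * passing MPOL_F_NUMA_BALANCING with any mode other than MPOL_BIND,
 * is rejected with -EINVAL.
 */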
146395837924SFeng Tang 
1464e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1465e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1466e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14678bccd85fSChristoph Lameter {
1468028fec41SDavid Rientjes 	unsigned short mode_flags;
146995837924SFeng Tang 	nodemask_t nodes;
147095837924SFeng Tang 	int lmode = mode;
147195837924SFeng Tang 	int err;
14728bccd85fSChristoph Lameter 
1473057d3389SAndrey Konovalov 	start = untagged_addr(start);
147495837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
147595837924SFeng Tang 	if (err)
147695837924SFeng Tang 		return err;
147795837924SFeng Tang 
14788bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14798bccd85fSChristoph Lameter 	if (err)
14808bccd85fSChristoph Lameter 		return err;
148195837924SFeng Tang 
148295837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14838bccd85fSChristoph Lameter }
14848bccd85fSChristoph Lameter 
1485c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1486c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1487c6018b4bSAneesh Kumar K.V {
1488c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1489f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1490e976936cSMichal Hocko 	struct mempolicy *new, *old;
1491c6018b4bSAneesh Kumar K.V 	unsigned long end;
1492c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
149366850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
1494c6018b4bSAneesh Kumar K.V 
1495c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1496c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1497c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1498c6018b4bSAneesh Kumar K.V 	/*
1499c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extensions and must be zero for now.
1500c6018b4bSAneesh Kumar K.V 	 */
1501c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1502c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1503c6018b4bSAneesh Kumar K.V 
1504c6018b4bSAneesh Kumar K.V 	/*
1505c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1506c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1507c6018b4bSAneesh Kumar K.V 	 */
1508c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1509c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1510c6018b4bSAneesh Kumar K.V 
1511aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
1512c6018b4bSAneesh Kumar K.V 	end = start + len;
1513c6018b4bSAneesh Kumar K.V 
1514c6018b4bSAneesh Kumar K.V 	if (end < start)
1515c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1516c6018b4bSAneesh Kumar K.V 	if (end == start)
1517c6018b4bSAneesh Kumar K.V 		return 0;
1518c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
1519f4e9e0e6SLiam R. Howlett 	prev = vma_prev(&vmi);
152066850be5SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1521c6018b4bSAneesh Kumar K.V 		/*
1522c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1523c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, we return an error.  We don't reset
1524c6018b4bSAneesh Kumar K.V 		 * the home node for vmas we have already updated.
1525c6018b4bSAneesh Kumar K.V 		 */
1526e976936cSMichal Hocko 		old = vma_policy(vma);
1527e976936cSMichal Hocko 		if (!old)
1528e976936cSMichal Hocko 			continue;
1529e976936cSMichal Hocko 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1530c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1531c6018b4bSAneesh Kumar K.V 			break;
1532c6018b4bSAneesh Kumar K.V 		}
1533e976936cSMichal Hocko 		new = mpol_dup(old);
1534e976936cSMichal Hocko 		if (IS_ERR(new)) {
1535e976936cSMichal Hocko 			err = PTR_ERR(new);
1536e976936cSMichal Hocko 			break;
1537e976936cSMichal Hocko 		}
1538c6018b4bSAneesh Kumar K.V 
1539c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1540f4e9e0e6SLiam R. Howlett 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1541c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1542c6018b4bSAneesh Kumar K.V 		if (err)
1543c6018b4bSAneesh Kumar K.V 			break;
1544c6018b4bSAneesh Kumar K.V 	}
1545c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1546c6018b4bSAneesh Kumar K.V 	return err;
1547c6018b4bSAneesh Kumar K.V }
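/*
 * Illustrative userspace sketch (not part of the original source):
 * set node 1 as the home node for a range that already carries an
 * MPOL_BIND or MPOL_PREFERRED_MANY policy (anything else fails with
 * EOPNOTSUPP, as above).  glibc has no wrapper, so the raw syscall is
 * used; __NR_set_mempolicy_home_node is assumed to come from the
 * kernel headers.
 *
 *	if (syscall(__NR_set_mempolicy_home_node,
 *		    (unsigned long)addr, len, 1UL, 0UL))
 *		perror("set_mempolicy_home_node");
 */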
1548c6018b4bSAneesh Kumar K.V 
1549e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1550e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1551e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1552e7dc9ad6SDominik Brodowski {
1553e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1554e7dc9ad6SDominik Brodowski }
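/*
 * Illustrative userspace sketch (not part of the original source):
 * bind an existing mapping at addr to nodes 0 and 1 and move any
 * misplaced pages, using the mbind(2) wrapper from libnuma's
 * <numaif.h>.  With MPOL_MF_STRICT, the call fails with EIO if some
 * pages could not be moved.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	if (mbind(addr, len, MPOL_BIND, &nodes, sizeof(nodes) * 8,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */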
1555e7dc9ad6SDominik Brodowski 
15568bccd85fSChristoph Lameter /* Set the process memory policy */
1557af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1558af03c4acSDominik Brodowski 				 unsigned long maxnode)
15598bccd85fSChristoph Lameter {
156095837924SFeng Tang 	unsigned short mode_flags;
15618bccd85fSChristoph Lameter 	nodemask_t nodes;
156295837924SFeng Tang 	int lmode = mode;
156395837924SFeng Tang 	int err;
15648bccd85fSChristoph Lameter 
156595837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
156695837924SFeng Tang 	if (err)
156795837924SFeng Tang 		return err;
156895837924SFeng Tang 
15698bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15708bccd85fSChristoph Lameter 	if (err)
15718bccd85fSChristoph Lameter 		return err;
157295837924SFeng Tang 
157395837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15748bccd85fSChristoph Lameter }
15758bccd85fSChristoph Lameter 
1576af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1577af03c4acSDominik Brodowski 		unsigned long, maxnode)
1578af03c4acSDominik Brodowski {
1579af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1580af03c4acSDominik Brodowski }
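/*
 * Illustrative userspace sketch (not part of the original source):
 * interleave all future allocations of the calling task across nodes
 * 0-3 (assumed to exist and to be allowed by the task's cpuset),
 * using the set_mempolicy(2) wrapper from libnuma's <numaif.h>.
 *
 *	unsigned long nodes = 0xfUL;
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8))
 *		perror("set_mempolicy");
 */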
1581af03c4acSDominik Brodowski 
1582b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1583b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1584b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
158539743889SChristoph Lameter {
1586596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
158739743889SChristoph Lameter 	struct task_struct *task;
158839743889SChristoph Lameter 	nodemask_t task_nodes;
158939743889SChristoph Lameter 	int err;
1590596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1591596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1592596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
159339743889SChristoph Lameter 
1594596d7cfaSKOSAKI Motohiro 	if (!scratch)
1595596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
159639743889SChristoph Lameter 
1597596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1598596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1599596d7cfaSKOSAKI Motohiro 
1600596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
160139743889SChristoph Lameter 	if (err)
1602596d7cfaSKOSAKI Motohiro 		goto out;
1603596d7cfaSKOSAKI Motohiro 
1604596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1605596d7cfaSKOSAKI Motohiro 	if (err)
1606596d7cfaSKOSAKI Motohiro 		goto out;
160739743889SChristoph Lameter 
160839743889SChristoph Lameter 	/* Find the mm_struct */
160955cfaa3cSZeng Zhaoming 	rcu_read_lock();
1610228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
161139743889SChristoph Lameter 	if (!task) {
161255cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1613596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1614596d7cfaSKOSAKI Motohiro 		goto out;
161539743889SChristoph Lameter 	}
16163268c63eSChristoph Lameter 	get_task_struct(task);
161739743889SChristoph Lameter 
1618596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
161939743889SChristoph Lameter 
162039743889SChristoph Lameter 	/*
162131367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
162231367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
162339743889SChristoph Lameter 	 */
162431367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1625c69e8d9cSDavid Howells 		rcu_read_unlock();
162639743889SChristoph Lameter 		err = -EPERM;
16273268c63eSChristoph Lameter 		goto out_put;
162839743889SChristoph Lameter 	}
1629c69e8d9cSDavid Howells 	rcu_read_unlock();
163039743889SChristoph Lameter 
163139743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
163239743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1633596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
163439743889SChristoph Lameter 		err = -EPERM;
16353268c63eSChristoph Lameter 		goto out_put;
163639743889SChristoph Lameter 	}
163739743889SChristoph Lameter 
16380486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16390486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16400486a38bSYisheng Xie 	if (nodes_empty(*new))
16413268c63eSChristoph Lameter 		goto out_put;
16420486a38bSYisheng Xie 
164386c3a764SDavid Quigley 	err = security_task_movememory(task);
164486c3a764SDavid Quigley 	if (err)
16453268c63eSChristoph Lameter 		goto out_put;
164686c3a764SDavid Quigley 
16473268c63eSChristoph Lameter 	mm = get_task_mm(task);
16483268c63eSChristoph Lameter 	put_task_struct(task);
1649f2a9ef88SSasha Levin 
1650f2a9ef88SSasha Levin 	if (!mm) {
1651f2a9ef88SSasha Levin 		err = -EINVAL;
1652f2a9ef88SSasha Levin 		goto out;
1653f2a9ef88SSasha Levin 	}
1654f2a9ef88SSasha Levin 
1655596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
165674c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16573268c63eSChristoph Lameter 
165839743889SChristoph Lameter 	mmput(mm);
16593268c63eSChristoph Lameter out:
1660596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1661596d7cfaSKOSAKI Motohiro 
166239743889SChristoph Lameter 	return err;
16633268c63eSChristoph Lameter 
16643268c63eSChristoph Lameter out_put:
16653268c63eSChristoph Lameter 	put_task_struct(task);
16663268c63eSChristoph Lameter 	goto out;
166839743889SChristoph Lameter }
166939743889SChristoph Lameter 
1670b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1671b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1672b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1673b6e9b0baSDominik Brodowski {
1674b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1675b6e9b0baSDominik Brodowski }
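/*
 * Illustrative userspace sketch (not part of the original source):
 * move the calling task's pages from node 0 to node 1 via the
 * migrate_pages(2) wrapper from libnuma's <numaif.h>; pid 0 means
 * the calling task.
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *	long ret = migrate_pages(0, sizeof(old) * 8, &old, &new);
 *
 *	if (ret < 0)
 *		perror("migrate_pages");
 *	else if (ret > 0)
 *		fprintf(stderr, "%ld pages could not be moved\n", ret);
 */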
1676b6e9b0baSDominik Brodowski 
16788bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1679af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1680af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1681af03c4acSDominik Brodowski 				unsigned long maxnode,
1682af03c4acSDominik Brodowski 				unsigned long addr,
1683af03c4acSDominik Brodowski 				unsigned long flags)
16848bccd85fSChristoph Lameter {
1685dbcb0f19SAdrian Bunk 	int err;
16863f649ab7SKees Cook 	int pval;
16878bccd85fSChristoph Lameter 	nodemask_t nodes;
16888bccd85fSChristoph Lameter 
1689050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16908bccd85fSChristoph Lameter 		return -EINVAL;
16918bccd85fSChristoph Lameter 
16924605f057SWenchao Hao 	addr = untagged_addr(addr);
16934605f057SWenchao Hao 
16948bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16958bccd85fSChristoph Lameter 
16968bccd85fSChristoph Lameter 	if (err)
16978bccd85fSChristoph Lameter 		return err;
16988bccd85fSChristoph Lameter 
16998bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
17008bccd85fSChristoph Lameter 		return -EFAULT;
17018bccd85fSChristoph Lameter 
17028bccd85fSChristoph Lameter 	if (nmask)
17038bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
17048bccd85fSChristoph Lameter 
17058bccd85fSChristoph Lameter 	return err;
17068bccd85fSChristoph Lameter }
17078bccd85fSChristoph Lameter 
1708af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1709af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1710af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1711af03c4acSDominik Brodowski {
1712af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1713af03c4acSDominik Brodowski }
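/*
 * Illustrative userspace sketch (not part of the original source):
 * query which node currently backs the page at addr, using the
 * get_mempolicy(2) wrapper from libnuma's <numaif.h>.
 *
 *	int node = -1;
 *
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 *	else
 *		printf("page is on node %d\n", node);
 */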
1714af03c4acSDominik Brodowski 
171520ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
171620ca87f2SLi Xinhai {
171720ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
171820ca87f2SLi Xinhai 		return false;
171920ca87f2SLi Xinhai 
172020ca87f2SLi Xinhai 	/*
172120ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
172220ca87f2SLi Xinhai 	 * incurring periodic faults.
172320ca87f2SLi Xinhai 	 */
172420ca87f2SLi Xinhai 	if (vma_is_dax(vma))
172520ca87f2SLi Xinhai 		return false;
172620ca87f2SLi Xinhai 
172720ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
172820ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
172920ca87f2SLi Xinhai 		return false;
173020ca87f2SLi Xinhai 
173120ca87f2SLi Xinhai 	/*
173220ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
173320ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
173420ca87f2SLi Xinhai 	 * possible.
173520ca87f2SLi Xinhai 	 */
173620ca87f2SLi Xinhai 	if (vma->vm_file &&
173720ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
173820ca87f2SLi Xinhai 			< policy_zone)
173920ca87f2SLi Xinhai 		return false;
174020ca87f2SLi Xinhai 	return true;
174120ca87f2SLi Xinhai }
174220ca87f2SLi Xinhai 
174374d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
174474d2c3a0SOleg Nesterov 						unsigned long addr)
17451da177e4SLinus Torvalds {
17468d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17471da177e4SLinus Torvalds 
17481da177e4SLinus Torvalds 	if (vma) {
1749480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17508d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
175100442ad0SMel Gorman 		} else if (vma->vm_policy) {
17521da177e4SLinus Torvalds 			pol = vma->vm_policy;
175300442ad0SMel Gorman 
175400442ad0SMel Gorman 			/*
175500442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
175600442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
175700442ad0SMel Gorman 			 * count on these policies which will be dropped by
175800442ad0SMel Gorman 			 * mpol_cond_put() later
175900442ad0SMel Gorman 			 */
176000442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
176100442ad0SMel Gorman 				mpol_get(pol);
176200442ad0SMel Gorman 		}
17631da177e4SLinus Torvalds 	}
1764f15ca78eSOleg Nesterov 
176574d2c3a0SOleg Nesterov 	return pol;
176674d2c3a0SOleg Nesterov }
176774d2c3a0SOleg Nesterov 
176874d2c3a0SOleg Nesterov /*
1769dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
177074d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
177174d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
177274d2c3a0SOleg Nesterov  *
177374d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1774dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
177574d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
177674d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
177774d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
177874d2c3a0SOleg Nesterov  * extra reference for shared policies.
177974d2c3a0SOleg Nesterov  */
1780ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1781dd6eecb9SOleg Nesterov 						unsigned long addr)
178274d2c3a0SOleg Nesterov {
178374d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
178474d2c3a0SOleg Nesterov 
17858d90274bSOleg Nesterov 	if (!pol)
1786dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17878d90274bSOleg Nesterov 
17881da177e4SLinus Torvalds 	return pol;
17891da177e4SLinus Torvalds }
17901da177e4SLinus Torvalds 
17916b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1792fc314724SMel Gorman {
17936b6482bbSOleg Nesterov 	struct mempolicy *pol;
1794f15ca78eSOleg Nesterov 
1795fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1796fc314724SMel Gorman 		bool ret = false;
1797fc314724SMel Gorman 
1798fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1799fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1800fc314724SMel Gorman 			ret = true;
1801fc314724SMel Gorman 		mpol_cond_put(pol);
1802fc314724SMel Gorman 
1803fc314724SMel Gorman 		return ret;
18048d90274bSOleg Nesterov 	}
18058d90274bSOleg Nesterov 
1806fc314724SMel Gorman 	pol = vma->vm_policy;
18078d90274bSOleg Nesterov 	if (!pol)
18086b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1809fc314724SMel Gorman 
1810fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1811fc314724SMel Gorman }
1812fc314724SMel Gorman 
1813d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1814d3eb1570SLai Jiangshan {
1815d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1816d3eb1570SLai Jiangshan 
1817d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1818d3eb1570SLai Jiangshan 
1819d3eb1570SLai Jiangshan 	/*
1820269fbe72SBen Widawsky 	 * If policy->nodes has movable memory only,
1821d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1822d3eb1570SLai Jiangshan 	 *
1823269fbe72SBen Widawsky 	 * policy->nodes is intersected with node_states[N_MEMORY],
1824f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1825269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1826d3eb1570SLai Jiangshan 	 */
1827269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1828d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1829d3eb1570SLai Jiangshan 
1830d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1831d3eb1570SLai Jiangshan }
1832d3eb1570SLai Jiangshan 
183352cd3b07SLee Schermerhorn /*
183452cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
183552cd3b07SLee Schermerhorn  * page allocation
183652cd3b07SLee Schermerhorn  */
18378ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
183819770b32SMel Gorman {
1839b27abaccSDave Hansen 	int mode = policy->mode;
1840b27abaccSDave Hansen 
184119770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1842b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1843d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1844269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1845269fbe72SBen Widawsky 		return &policy->nodes;
184619770b32SMel Gorman 
1847b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1848b27abaccSDave Hansen 		return &policy->nodes;
1849b27abaccSDave Hansen 
185019770b32SMel Gorman 	return NULL;
185119770b32SMel Gorman }
185219770b32SMel Gorman 
1853b27abaccSDave Hansen /*
1854b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1855b27abaccSDave Hansen  * the given id for all other policies.
1856b27abaccSDave Hansen  *
1857b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1858b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1859b27abaccSDave Hansen  */
1860f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18611da177e4SLinus Torvalds {
18627858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1863269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18647858d7bcSFeng Tang 	} else {
186519770b32SMel Gorman 		/*
18666d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18676d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18686d840958SMichal Hocko 		 * requested node and not break the policy.
186919770b32SMel Gorman 		 */
18706d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18711da177e4SLinus Torvalds 	}
18726d840958SMichal Hocko 
1873c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1874c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1875c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1876c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1877c6018b4bSAneesh Kumar K.V 
187804ec6264SVlastimil Babka 	return nd;
18791da177e4SLinus Torvalds }
18801da177e4SLinus Torvalds 
18811da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18821da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18831da177e4SLinus Torvalds {
188445816682SVlastimil Babka 	unsigned next;
18851da177e4SLinus Torvalds 	struct task_struct *me = current;
18861da177e4SLinus Torvalds 
1887269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1888f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
188945816682SVlastimil Babka 		me->il_prev = next;
189045816682SVlastimil Babka 	return next;
18911da177e4SLinus Torvalds }
18921da177e4SLinus Torvalds 
1893dc85da15SChristoph Lameter /*
1894dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1895dc85da15SChristoph Lameter  * next slab entry.
1896dc85da15SChristoph Lameter  */
18972a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1898dc85da15SChristoph Lameter {
1899e7b691b0SAndi Kleen 	struct mempolicy *policy;
19002a389610SDavid Rientjes 	int node = numa_mem_id();
1901e7b691b0SAndi Kleen 
190238b031ddSVasily Averin 	if (!in_task())
19032a389610SDavid Rientjes 		return node;
1904e7b691b0SAndi Kleen 
1905e7b691b0SAndi Kleen 	policy = current->mempolicy;
19067858d7bcSFeng Tang 	if (!policy)
19072a389610SDavid Rientjes 		return node;
1908765c4507SChristoph Lameter 
1909bea904d5SLee Schermerhorn 	switch (policy->mode) {
1910bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1911269fbe72SBen Widawsky 		return first_node(policy->nodes);
1912bea904d5SLee Schermerhorn 
1913dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1914dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1915dc85da15SChristoph Lameter 
1916b27abaccSDave Hansen 	case MPOL_BIND:
1917b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1918b27abaccSDave Hansen 	{
1919c33d6c06SMel Gorman 		struct zoneref *z;
1920c33d6c06SMel Gorman 
1921dc85da15SChristoph Lameter 		/*
1922dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1923dc85da15SChristoph Lameter 		 * first node.
1924dc85da15SChristoph Lameter 		 */
192519770b32SMel Gorman 		struct zonelist *zonelist;
192619770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1927c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1928c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1929269fbe72SBen Widawsky 							&policy->nodes);
1930c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1931dd1a239fSMel Gorman 	}
19327858d7bcSFeng Tang 	case MPOL_LOCAL:
19337858d7bcSFeng Tang 		return node;
1934dc85da15SChristoph Lameter 
1935dc85da15SChristoph Lameter 	default:
1936bea904d5SLee Schermerhorn 		BUG();
1937dc85da15SChristoph Lameter 	}
1938dc85da15SChristoph Lameter }
1939dc85da15SChristoph Lameter 
1940fee83b3aSAndrew Morton /*
1941fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1942269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1943fee83b3aSAndrew Morton  * number of present nodes.
1944fee83b3aSAndrew Morton  */
194598c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19461da177e4SLinus Torvalds {
1947276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1948276aeee1Syanghui 	unsigned int target, nnodes;
1949fee83b3aSAndrew Morton 	int i;
1950fee83b3aSAndrew Morton 	int nid;
1951276aeee1Syanghui 	/*
1952276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1953276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1954276aeee1Syanghui 	 *
1955276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1956276aeee1Syanghui 	 * by other threads. So we put pol->nodes in a local stack.
1957276aeee1Syanghui 	 */
1958276aeee1Syanghui 	barrier();
19591da177e4SLinus Torvalds 
1960276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1961f5b087b5SDavid Rientjes 	if (!nnodes)
1962f5b087b5SDavid Rientjes 		return numa_node_id();
1963fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1964276aeee1Syanghui 	nid = first_node(nodemask);
1965fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1966276aeee1Syanghui 		nid = next_node(nid, nodemask);
19671da177e4SLinus Torvalds 	return nid;
19681da177e4SLinus Torvalds }
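/*
 * Worked example (illustrative, not part of the original source):
 * with pol->nodes = {0,2,4} and n = 7, nnodes = 3 and
 * target = 7 % 3 = 1, so the walk starts at node 0 and advances
 * once, returning node 2.
 */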
19691da177e4SLinus Torvalds 
19705da7ca86SChristoph Lameter /* Determine a node number for interleave */
19715da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19725da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19735da7ca86SChristoph Lameter {
19745da7ca86SChristoph Lameter 	if (vma) {
19755da7ca86SChristoph Lameter 		unsigned long off;
19765da7ca86SChristoph Lameter 
19773b98b087SNishanth Aravamudan 		/*
19783b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19793b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19803b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19813b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19823b98b087SNishanth Aravamudan 		 * a useful offset.
19833b98b087SNishanth Aravamudan 		 */
19843b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19853b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19865da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
198798c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19885da7ca86SChristoph Lameter 	} else
19895da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19905da7ca86SChristoph Lameter }
19915da7ca86SChristoph Lameter 
199200ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1993480eccf9SLee Schermerhorn /*
199404ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol, @nodemask)
1995b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1996b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1997b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1998b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1999b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2000480eccf9SLee Schermerhorn  *
200104ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
200252cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2003b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2004b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2005c0ff7453SMiao Xie  *
2006d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2007480eccf9SLee Schermerhorn  */
200804ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
200904ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20105da7ca86SChristoph Lameter {
201104ec6264SVlastimil Babka 	int nid;
2012b27abaccSDave Hansen 	int mode;
20135da7ca86SChristoph Lameter 
2014dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2015b27abaccSDave Hansen 	*nodemask = NULL;
2016b27abaccSDave Hansen 	mode = (*mpol)->mode;
20175da7ca86SChristoph Lameter 
2018b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
201904ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
202004ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
202152cd3b07SLee Schermerhorn 	} else {
202204ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2023b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2024269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2025480eccf9SLee Schermerhorn 	}
202604ec6264SVlastimil Babka 	return nid;
20275da7ca86SChristoph Lameter }
202806808b08SLee Schermerhorn 
202906808b08SLee Schermerhorn /*
203006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
203106808b08SLee Schermerhorn  *
203206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
203306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
203406808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
203506808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
203606808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
203706808b08SLee Schermerhorn  * of non-default mempolicy.
203806808b08SLee Schermerhorn  *
203906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
204006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
204106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
204206808b08SLee Schermerhorn  *
204306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
204406808b08SLee Schermerhorn  */
204506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
204606808b08SLee Schermerhorn {
204706808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
204806808b08SLee Schermerhorn 
204906808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
205006808b08SLee Schermerhorn 		return false;
205106808b08SLee Schermerhorn 
2052c0ff7453SMiao Xie 	task_lock(current);
205306808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
205406808b08SLee Schermerhorn 	switch (mempolicy->mode) {
205506808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2056b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
205706808b08SLee Schermerhorn 	case MPOL_BIND:
205806808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2059269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
206006808b08SLee Schermerhorn 		break;
206106808b08SLee Schermerhorn 
20627858d7bcSFeng Tang 	case MPOL_LOCAL:
2063269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20647858d7bcSFeng Tang 		break;
20657858d7bcSFeng Tang 
206606808b08SLee Schermerhorn 	default:
206706808b08SLee Schermerhorn 		BUG();
206806808b08SLee Schermerhorn 	}
2069c0ff7453SMiao Xie 	task_unlock(current);
207006808b08SLee Schermerhorn 
207106808b08SLee Schermerhorn 	return true;
207206808b08SLee Schermerhorn }
207300ac59adSChen, Kenneth W #endif
20745da7ca86SChristoph Lameter 
20756f48d0ebSDavid Rientjes /*
2076b26e517aSFeng Tang  * mempolicy_in_oom_domain
20776f48d0ebSDavid Rientjes  *
2078b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2079b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2080b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2081b26e517aSFeng Tang  * memory allocated from all nodes in system.
20826f48d0ebSDavid Rientjes  *
20836f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20846f48d0ebSDavid Rientjes  */
2085b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20866f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20876f48d0ebSDavid Rientjes {
20886f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20896f48d0ebSDavid Rientjes 	bool ret = true;
20906f48d0ebSDavid Rientjes 
20916f48d0ebSDavid Rientjes 	if (!mask)
20926f48d0ebSDavid Rientjes 		return ret;
2093b26e517aSFeng Tang 
20946f48d0ebSDavid Rientjes 	task_lock(tsk);
20956f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2096b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2097269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
20986f48d0ebSDavid Rientjes 	task_unlock(tsk);
2099b26e517aSFeng Tang 
21006f48d0ebSDavid Rientjes 	return ret;
21016f48d0ebSDavid Rientjes }
21026f48d0ebSDavid Rientjes 
21031da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21041da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2105662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2106662f3a0bSAndi Kleen 					unsigned nid)
21071da177e4SLinus Torvalds {
21081da177e4SLinus Torvalds 	struct page *page;
21091da177e4SLinus Torvalds 
211084172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21114518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21124518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21134518085eSKemi Wang 		return page;
2114de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2115de55c8b2SAndrey Ryabinin 		preempt_disable();
2116f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2117de55c8b2SAndrey Ryabinin 		preempt_enable();
2118de55c8b2SAndrey Ryabinin 	}
21191da177e4SLinus Torvalds 	return page;
21201da177e4SLinus Torvalds }
21211da177e4SLinus Torvalds 
21224c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21234c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21244c54d949SFeng Tang {
21254c54d949SFeng Tang 	struct page *page;
21264c54d949SFeng Tang 	gfp_t preferred_gfp;
21274c54d949SFeng Tang 
21284c54d949SFeng Tang 	/*
21294c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
21304c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
21314c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21324c54d949SFeng Tang 	 * nodes in system.
21334c54d949SFeng Tang 	 * nodes in the system.
21344c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21354c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21364c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21374c54d949SFeng Tang 	if (!page)
2138c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21394c54d949SFeng Tang 
21404c54d949SFeng Tang 	return page;
21414c54d949SFeng Tang }
21424c54d949SFeng Tang 
21431da177e4SLinus Torvalds /**
2144adf88aa8SMatthew Wilcox (Oracle)  * vma_alloc_folio - Allocate a folio for a VMA.
2145eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2146adf88aa8SMatthew Wilcox (Oracle)  * @order: Order of the folio.
21471da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2148eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2149eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21501da177e4SLinus Torvalds  *
2151adf88aa8SMatthew Wilcox (Oracle)  * Allocate a folio for a specific address in @vma, using the appropriate
2152eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2153eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2154adf88aa8SMatthew Wilcox (Oracle)  * used for all allocations for folios that will be mapped into user space.
2155eb350739SMatthew Wilcox (Oracle)  *
2156adf88aa8SMatthew Wilcox (Oracle)  * Return: The folio on success or NULL if allocation fails.
21571da177e4SLinus Torvalds  */
2158adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2159be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21601da177e4SLinus Torvalds {
2161cc9a6c87SMel Gorman 	struct mempolicy *pol;
2162be1a13ebSMichal Hocko 	int node = numa_node_id();
2163adf88aa8SMatthew Wilcox (Oracle) 	struct folio *folio;
216404ec6264SVlastimil Babka 	int preferred_nid;
2165be97a41bSVlastimil Babka 	nodemask_t *nmask;
21661da177e4SLinus Torvalds 
2167dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2168cc9a6c87SMel Gorman 
2169be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
2170adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
21711da177e4SLinus Torvalds 		unsigned nid;
21725da7ca86SChristoph Lameter 
21738eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
217452cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
2175adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21760bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2177adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2178adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2179adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
2180be97a41bSVlastimil Babka 		goto out;
21811da177e4SLinus Torvalds 	}
21821da177e4SLinus Torvalds 
21834c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2184adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
2185adf88aa8SMatthew Wilcox (Oracle) 
2186c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
2187adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21884c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21894c54d949SFeng Tang 		mpol_cond_put(pol);
2190adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2191adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2192adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
21934c54d949SFeng Tang 		goto out;
21944c54d949SFeng Tang 	}
21954c54d949SFeng Tang 
219619deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
219719deb769SDavid Rientjes 		int hpage_node = node;
219819deb769SDavid Rientjes 
219919deb769SDavid Rientjes 		/*
220019deb769SDavid Rientjes 		 * For hugepage allocation and a non-interleave policy that
220119deb769SDavid Rientjes 		 * allows the current node (or other explicitly preferred
220219deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
220319deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
220419deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
220519deb769SDavid Rientjes 		 *
2206b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
220719deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
220819deb769SDavid Rientjes 		 */
22097858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2210269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
221119deb769SDavid Rientjes 
221219deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
221319deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
221419deb769SDavid Rientjes 			mpol_cond_put(pol);
2215cc638f32SVlastimil Babka 			/*
2216cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2217cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2218cc638f32SVlastimil Babka 			 */
2219adf88aa8SMatthew Wilcox (Oracle) 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2220adf88aa8SMatthew Wilcox (Oracle) 					__GFP_NORETRY, order, hpage_node);
222176e654ccSDavid Rientjes 
222276e654ccSDavid Rientjes 			/*
222376e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
222476e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
222576e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2226cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
222776e654ccSDavid Rientjes 			 */
2228adf88aa8SMatthew Wilcox (Oracle) 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2229adf88aa8SMatthew Wilcox (Oracle) 				folio = __folio_alloc(gfp, order, hpage_node,
2230adf88aa8SMatthew Wilcox (Oracle) 						      nmask);
223176e654ccSDavid Rientjes 
223219deb769SDavid Rientjes 			goto out;
223319deb769SDavid Rientjes 		}
223419deb769SDavid Rientjes 	}
223519deb769SDavid Rientjes 
2236077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
223704ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2238adf88aa8SMatthew Wilcox (Oracle) 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2239d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2240be97a41bSVlastimil Babka out:
2241f584b680SMatthew Wilcox (Oracle) 	return folio;
2242f584b680SMatthew Wilcox (Oracle) }
2243adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio);
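
/*
 * Illustrative usage (a sketch, not a caller in this file): a fault
 * handler allocating an order-0 folio for a user address, with the
 * mmap_lock held as required above:
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 */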
2244f584b680SMatthew Wilcox (Oracle) 
22451da177e4SLinus Torvalds /**
2246d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22476421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22486421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22491da177e4SLinus Torvalds  *
22506421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22516421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (eg an order-3 allocation will be aligned
22526421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22536421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22541da177e4SLinus Torvalds  *
22556421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22566421ec76SMatthew Wilcox (Oracle)  * flags are used.
22576421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22581da177e4SLinus Torvalds  */
2259d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22601da177e4SLinus Torvalds {
22618d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2262c0ff7453SMiao Xie 	struct page *page;
22631da177e4SLinus Torvalds 
22648d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22658d90274bSOleg Nesterov 		pol = get_task_policy(current);
226652cd3b07SLee Schermerhorn 
226752cd3b07SLee Schermerhorn 	/*
226852cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
226952cd3b07SLee Schermerhorn 	 * nor system default_policy
227052cd3b07SLee Schermerhorn 	 */
227145c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2272c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22734c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22744c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2275c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2276c0ff7453SMiao Xie 	else
227784172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
227804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22795c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2280cc9a6c87SMel Gorman 
2281c0ff7453SMiao Xie 	return page;
22821da177e4SLinus Torvalds }
2283d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
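
/*
 * Illustrative usage (sketch): allocate 1 << 2 = 4 contiguous pages
 * under the current task's policy and free them again:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (p)
 *		__free_pages(p, 2);
 */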
22841da177e4SLinus Torvalds 
2285cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2286cc09cb13SMatthew Wilcox (Oracle) {
2287cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2288cc09cb13SMatthew Wilcox (Oracle) 
2289cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2290cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2291cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2292cc09cb13SMatthew Wilcox (Oracle) }
2293cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
2294cc09cb13SMatthew Wilcox (Oracle) 
2295c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2296c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2297c00b6b96SChen Wandun 		struct page **page_array)
2298c00b6b96SChen Wandun {
2299c00b6b96SChen Wandun 	int nodes;
2300c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2301c00b6b96SChen Wandun 	int delta;
2302c00b6b96SChen Wandun 	int i;
2303c00b6b96SChen Wandun 	unsigned long nr_allocated;
2304c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2305c00b6b96SChen Wandun 
2306c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2307c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2308c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2309c00b6b96SChen Wandun 
2310c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2311c00b6b96SChen Wandun 		if (delta) {
2312c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2313c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2314c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2315c00b6b96SChen Wandun 					page_array);
2316c00b6b96SChen Wandun 			delta--;
2317c00b6b96SChen Wandun 		} else {
2318c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2319c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2320c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2321c00b6b96SChen Wandun 		}
2322c00b6b96SChen Wandun 
2323c00b6b96SChen Wandun 		page_array += nr_allocated;
2324c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2325c00b6b96SChen Wandun 	}
2326c00b6b96SChen Wandun 
2327c00b6b96SChen Wandun 	return total_allocated;
2328c00b6b96SChen Wandun }
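
/*
 * Worked example of the split above (illustrative): nr_pages = 10 over
 * nodes = 3 gives nr_pages_per_node = 3 and delta = 1, so the first
 * node visited receives 4 pages and the other two receive 3 each.
 */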
2329c00b6b96SChen Wandun 
2330c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2331c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2332c00b6b96SChen Wandun 		struct page **page_array)
2333c00b6b96SChen Wandun {
2334c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2335c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2336c00b6b96SChen Wandun 
2337c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2338c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2339c00b6b96SChen Wandun 
2340c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2341c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2342c00b6b96SChen Wandun 
2343c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2344c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2345c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2346c00b6b96SChen Wandun 				page_array + nr_allocated);
2347c00b6b96SChen Wandun 	return nr_allocated;
2348c00b6b96SChen Wandun }
2349c00b6b96SChen Wandun 
2350c00b6b96SChen Wandun /*
2350c00b6b96SChen Wandun  * Bulk page allocation and the mempolicy must be considered together
2351c00b6b96SChen Wandun  * in some situations, such as vmalloc.
2352c00b6b96SChen Wandun  *
2353c00b6b96SChen Wandun  * This can accelerate memory allocation, especially for interleaved
2354c00b6b96SChen Wandun  * allocations.
2355c00b6b96SChen Wandun  */
2356c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2357c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2358c00b6b96SChen Wandun {
2359c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2360c00b6b96SChen Wandun 
2361c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2362c00b6b96SChen Wandun 		pol = get_task_policy(current);
2363c00b6b96SChen Wandun 
2364c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2365c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2366c00b6b96SChen Wandun 							 nr_pages, page_array);
2367c00b6b96SChen Wandun 
2368c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2369c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2370c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2371c00b6b96SChen Wandun 
2372c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2373c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2374c00b6b96SChen Wandun 				  page_array);
2375c00b6b96SChen Wandun }
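
/*
 * Illustrative caller (a sketch of the vmalloc-style use mentioned
 * above; 'pages' and 'nr' are hypothetical):
 *
 *	nr_allocated = alloc_pages_bulk_array_mempolicy(GFP_KERNEL,
 *							nr, pages);
 *
 * nr_allocated may be less than nr, so callers must cope with a
 * partially filled array.
 */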
2376c00b6b96SChen Wandun 
2377ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2378ef0855d3SOleg Nesterov {
2379ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2380ef0855d3SOleg Nesterov 
2381ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2382ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2383ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2384ef0855d3SOleg Nesterov 	return 0;
2385ef0855d3SOleg Nesterov }
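
/*
 * Illustrative use (sketch): VMA copy/split paths duplicate the source
 * policy and propagate failure:
 *
 *	err = vma_dup_policy(old_vma, new_vma);
 *	if (err)
 *		return err;
 */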
2386ef0855d3SOleg Nesterov 
23874225399aSPaul Jackson /*
2388846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23894225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
23904225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23914225399aSPaul Jackson  * keeps mempolicies cpuset-relative after the task's cpuset moves.  See
23924225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2393708c1bbcSMiao Xie  *
2394708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2395708c1bbcSMiao Xie  * the cpuset's mems), so we need not do the rebind work for the current task.
23964225399aSPaul Jackson  */
23974225399aSPaul Jackson 
2398846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2399846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24001da177e4SLinus Torvalds {
24011da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24021da177e4SLinus Torvalds 
24031da177e4SLinus Torvalds 	if (!new)
24041da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2405708c1bbcSMiao Xie 
2406708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2407708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2408708c1bbcSMiao Xie 		task_lock(current);
2409708c1bbcSMiao Xie 		*new = *old;
2410708c1bbcSMiao Xie 		task_unlock(current);
2411708c1bbcSMiao Xie 	} else
2412708c1bbcSMiao Xie 		*new = *old;
2413708c1bbcSMiao Xie 
24144225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24154225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2416213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24174225399aSPaul Jackson 	}
24181da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24191da177e4SLinus Torvalds 	return new;
24201da177e4SLinus Torvalds }
24211da177e4SLinus Torvalds 
24221da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2423fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24241da177e4SLinus Torvalds {
24251da177e4SLinus Torvalds 	if (!a || !b)
2426fcfb4dccSKOSAKI Motohiro 		return false;
242745c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2428fcfb4dccSKOSAKI Motohiro 		return false;
242919800502SBob Liu 	if (a->flags != b->flags)
2430fcfb4dccSKOSAKI Motohiro 		return false;
2431c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2432c6018b4bSAneesh Kumar K.V 		return false;
243319800502SBob Liu 	if (mpol_store_user_nodemask(a))
243419800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2435fcfb4dccSKOSAKI Motohiro 			return false;
243619800502SBob Liu 
243745c4745aSLee Schermerhorn 	switch (a->mode) {
243819770b32SMel Gorman 	case MPOL_BIND:
24391da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24401da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2441b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2442269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24437858d7bcSFeng Tang 	case MPOL_LOCAL:
24447858d7bcSFeng Tang 		return true;
24451da177e4SLinus Torvalds 	default:
24461da177e4SLinus Torvalds 		BUG();
2447fcfb4dccSKOSAKI Motohiro 		return false;
24481da177e4SLinus Torvalds 	}
24491da177e4SLinus Torvalds }
24501da177e4SLinus Torvalds 
24511da177e4SLinus Torvalds /*
24521da177e4SLinus Torvalds  * Shared memory backing store policy support.
24531da177e4SLinus Torvalds  *
24541da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24551da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
24564a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24571da177e4SLinus Torvalds  * for any accesses to the tree.
24581da177e4SLinus Torvalds  */
24591da177e4SLinus Torvalds 
24604a8c7bb5SNathan Zimmer /*
24614a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
24624a8c7bb5SNathan Zimmer  * for reading or for writing.
24634a8c7bb5SNathan Zimmer  */
24641da177e4SLinus Torvalds static struct sp_node *
24651da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24661da177e4SLinus Torvalds {
24671da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24681da177e4SLinus Torvalds 
24691da177e4SLinus Torvalds 	while (n) {
24701da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24711da177e4SLinus Torvalds 
24721da177e4SLinus Torvalds 		if (start >= p->end)
24731da177e4SLinus Torvalds 			n = n->rb_right;
24741da177e4SLinus Torvalds 		else if (end <= p->start)
24751da177e4SLinus Torvalds 			n = n->rb_left;
24761da177e4SLinus Torvalds 		else
24771da177e4SLinus Torvalds 			break;
24781da177e4SLinus Torvalds 	}
24791da177e4SLinus Torvalds 	if (!n)
24801da177e4SLinus Torvalds 		return NULL;
24811da177e4SLinus Torvalds 	for (;;) {
24821da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24831da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24841da177e4SLinus Torvalds 		if (!prev)
24851da177e4SLinus Torvalds 			break;
24861da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24871da177e4SLinus Torvalds 		if (w->end <= start)
24881da177e4SLinus Torvalds 			break;
24891da177e4SLinus Torvalds 		n = prev;
24901da177e4SLinus Torvalds 	}
24911da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24921da177e4SLinus Torvalds }
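
/*
 * Worked example (illustrative): with stored ranges [0,4) and [4,10),
 * sp_lookup(sp, 2, 6) may first land on [4,10); the rb_prev() loop then
 * walks back to [0,4), the first range intersecting [2,6).
 */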
24931da177e4SLinus Torvalds 
24944a8c7bb5SNathan Zimmer /*
24954a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
24964a8c7bb5SNathan Zimmer  * writing.
24974a8c7bb5SNathan Zimmer  */
24981da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
24991da177e4SLinus Torvalds {
25001da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
25011da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
25021da177e4SLinus Torvalds 	struct sp_node *nd;
25031da177e4SLinus Torvalds 
25041da177e4SLinus Torvalds 	while (*p) {
25051da177e4SLinus Torvalds 		parent = *p;
25061da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25071da177e4SLinus Torvalds 		if (new->start < nd->start)
25081da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25091da177e4SLinus Torvalds 		else if (new->end > nd->end)
25101da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25111da177e4SLinus Torvalds 		else
25121da177e4SLinus Torvalds 			BUG();
25131da177e4SLinus Torvalds 	}
25141da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25151da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2516140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
251745c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25181da177e4SLinus Torvalds }
25191da177e4SLinus Torvalds 
25201da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25211da177e4SLinus Torvalds struct mempolicy *
25221da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25231da177e4SLinus Torvalds {
25241da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25251da177e4SLinus Torvalds 	struct sp_node *sn;
25261da177e4SLinus Torvalds 
25271da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25281da177e4SLinus Torvalds 		return NULL;
25294a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25301da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25311da177e4SLinus Torvalds 	if (sn) {
25321da177e4SLinus Torvalds 		mpol_get(sn->policy);
25331da177e4SLinus Torvalds 		pol = sn->policy;
25341da177e4SLinus Torvalds 	}
25354a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25361da177e4SLinus Torvalds 	return pol;
25371da177e4SLinus Torvalds }
25381da177e4SLinus Torvalds 
253963f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
254063f74ca2SKOSAKI Motohiro {
254163f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
254263f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
254363f74ca2SKOSAKI Motohiro }
254463f74ca2SKOSAKI Motohiro 
2545771fb4d8SLee Schermerhorn /**
2546771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2547771fb4d8SLee Schermerhorn  *
2548b46e14acSFabian Frederick  * @page: page to be checked
2549b46e14acSFabian Frederick  * @vma: vm area where page mapped
2550b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2551771fb4d8SLee Schermerhorn  *
2552771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the
25535f076944SMatthew Wilcox (Oracle)  * page's node id.  Policy determination "mimics" alloc_page_vma().
2554771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25555f076944SMatthew Wilcox (Oracle)  *
2556062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2557062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2558771fb4d8SLee Schermerhorn  */
2559771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2560771fb4d8SLee Schermerhorn {
2561771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2562c33d6c06SMel Gorman 	struct zoneref *z;
2563771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2564771fb4d8SLee Schermerhorn 	unsigned long pgoff;
256590572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
256690572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
256798fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2568062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2569771fb4d8SLee Schermerhorn 
2570dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2571771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2572771fb4d8SLee Schermerhorn 		goto out;
2573771fb4d8SLee Schermerhorn 
2574771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2575771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2576771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2577771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
257898c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2579771fb4d8SLee Schermerhorn 		break;
2580771fb4d8SLee Schermerhorn 
2581771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2582b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2583b27abaccSDave Hansen 			goto out;
2584269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2585771fb4d8SLee Schermerhorn 		break;
2586771fb4d8SLee Schermerhorn 
25877858d7bcSFeng Tang 	case MPOL_LOCAL:
25887858d7bcSFeng Tang 		polnid = numa_node_id();
25897858d7bcSFeng Tang 		break;
25907858d7bcSFeng Tang 
2591771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2592bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2593bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2594269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2595bda420b9SHuang Ying 				break;
2596bda420b9SHuang Ying 			goto out;
2597bda420b9SHuang Ying 		}
2598b27abaccSDave Hansen 		fallthrough;
2599c33d6c06SMel Gorman 
2600b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2601771fb4d8SLee Schermerhorn 		/*
2602771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2603771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2604771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2605771fb4d8SLee Schermerhorn 		 */
2606269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2607771fb4d8SLee Schermerhorn 			goto out;
2608c33d6c06SMel Gorman 		z = first_zones_zonelist(
2609771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2610771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2611269fbe72SBen Widawsky 				&pol->nodes);
2612c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2613771fb4d8SLee Schermerhorn 		break;
2614771fb4d8SLee Schermerhorn 
2615771fb4d8SLee Schermerhorn 	default:
2616771fb4d8SLee Schermerhorn 		BUG();
2617771fb4d8SLee Schermerhorn 	}
26185606e387SMel Gorman 
26195606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2620e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
262190572890SPeter Zijlstra 		polnid = thisnid;
26225606e387SMel Gorman 
262310f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2624de1c9ce6SRik van Riel 			goto out;
2625de1c9ce6SRik van Riel 	}
2626e42c8ff2SMel Gorman 
2627771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2628771fb4d8SLee Schermerhorn 		ret = polnid;
2629771fb4d8SLee Schermerhorn out:
2630771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2631771fb4d8SLee Schermerhorn 
2632771fb4d8SLee Schermerhorn 	return ret;
2633771fb4d8SLee Schermerhorn }
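
/*
 * Illustrative fault-path use (a sketch; the real NUMA-hinting callers
 * live elsewhere in mm/, and migrate_towards() below is a hypothetical
 * placeholder for the migration step):
 *
 *	int target = mpol_misplaced(page, vma, addr);
 *
 *	if (target != NUMA_NO_NODE)
 *		migrate_towards(page, target);
 */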
2634771fb4d8SLee Schermerhorn 
2635c11600e4SDavid Rientjes /*
2636c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2637c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2638c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2639c11600e4SDavid Rientjes  * policy.
2640c11600e4SDavid Rientjes  */
2641c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2642c11600e4SDavid Rientjes {
2643c11600e4SDavid Rientjes 	struct mempolicy *pol;
2644c11600e4SDavid Rientjes 
2645c11600e4SDavid Rientjes 	task_lock(task);
2646c11600e4SDavid Rientjes 	pol = task->mempolicy;
2647c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2648c11600e4SDavid Rientjes 	task_unlock(task);
2649c11600e4SDavid Rientjes 	mpol_put(pol);
2650c11600e4SDavid Rientjes }
2651c11600e4SDavid Rientjes 
26521da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26531da177e4SLinus Torvalds {
2654140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26551da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
265663f74ca2SKOSAKI Motohiro 	sp_free(n);
26571da177e4SLinus Torvalds }
26581da177e4SLinus Torvalds 
265942288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
266042288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
266142288fe3SMel Gorman {
266242288fe3SMel Gorman 	node->start = start;
266342288fe3SMel Gorman 	node->end = end;
266442288fe3SMel Gorman 	node->policy = pol;
266542288fe3SMel Gorman }
266642288fe3SMel Gorman 
2667dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2668dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26691da177e4SLinus Torvalds {
2670869833f2SKOSAKI Motohiro 	struct sp_node *n;
2671869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26721da177e4SLinus Torvalds 
2673869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26741da177e4SLinus Torvalds 	if (!n)
26751da177e4SLinus Torvalds 		return NULL;
2676869833f2SKOSAKI Motohiro 
2677869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2678869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2679869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2680869833f2SKOSAKI Motohiro 		return NULL;
2681869833f2SKOSAKI Motohiro 	}
2682869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
268342288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2684869833f2SKOSAKI Motohiro 
26851da177e4SLinus Torvalds 	return n;
26861da177e4SLinus Torvalds }
26871da177e4SLinus Torvalds 
26881da177e4SLinus Torvalds /* Replace a policy range. */
26891da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26901da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26911da177e4SLinus Torvalds {
2692b22d127aSMel Gorman 	struct sp_node *n;
269342288fe3SMel Gorman 	struct sp_node *n_new = NULL;
269442288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2695b22d127aSMel Gorman 	int ret = 0;
26961da177e4SLinus Torvalds 
269742288fe3SMel Gorman restart:
26984a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
26991da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
27001da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
27011da177e4SLinus Torvalds 	while (n && n->start < end) {
27021da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
27031da177e4SLinus Torvalds 		if (n->start >= start) {
27041da177e4SLinus Torvalds 			if (n->end <= end)
27051da177e4SLinus Torvalds 				sp_delete(sp, n);
27061da177e4SLinus Torvalds 			else
27071da177e4SLinus Torvalds 				n->start = end;
27081da177e4SLinus Torvalds 		} else {
27091da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27101da177e4SLinus Torvalds 			if (n->end > end) {
271142288fe3SMel Gorman 				if (!n_new)
271242288fe3SMel Gorman 					goto alloc_new;
271342288fe3SMel Gorman 
271442288fe3SMel Gorman 				*mpol_new = *n->policy;
271542288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27167880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27171da177e4SLinus Torvalds 				n->end = start;
27185ca39575SHillf Danton 				sp_insert(sp, n_new);
271942288fe3SMel Gorman 				n_new = NULL;
272042288fe3SMel Gorman 				mpol_new = NULL;
27211da177e4SLinus Torvalds 				break;
27221da177e4SLinus Torvalds 			} else
27231da177e4SLinus Torvalds 				n->end = start;
27241da177e4SLinus Torvalds 		}
27251da177e4SLinus Torvalds 		if (!next)
27261da177e4SLinus Torvalds 			break;
27271da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27281da177e4SLinus Torvalds 	}
27291da177e4SLinus Torvalds 	if (new)
27301da177e4SLinus Torvalds 		sp_insert(sp, new);
27314a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
273242288fe3SMel Gorman 	ret = 0;
273342288fe3SMel Gorman 
273442288fe3SMel Gorman err_out:
273542288fe3SMel Gorman 	if (mpol_new)
273642288fe3SMel Gorman 		mpol_put(mpol_new);
273742288fe3SMel Gorman 	if (n_new)
273842288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
273942288fe3SMel Gorman 
2740b22d127aSMel Gorman 	return ret;
274142288fe3SMel Gorman 
274242288fe3SMel Gorman alloc_new:
27434a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
274442288fe3SMel Gorman 	ret = -ENOMEM;
274542288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
274642288fe3SMel Gorman 	if (!n_new)
274742288fe3SMel Gorman 		goto err_out;
274842288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
274942288fe3SMel Gorman 	if (!mpol_new)
275042288fe3SMel Gorman 		goto err_out;
27514ad09955SMiaohe Lin 	atomic_set(&mpol_new->refcnt, 1);
275242288fe3SMel Gorman 	goto restart;
27531da177e4SLinus Torvalds }
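
/*
 * Worked example (illustrative): replacing [3,7) inside an existing
 * node covering [0,10) trims the old node to [0,3), inserts a duplicate
 * of the old policy for [7,10) (which is what the preallocated n_new
 * and mpol_new are for), and finally inserts the new [3,7) node.
 */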
27541da177e4SLinus Torvalds 
275571fe804bSLee Schermerhorn /**
275671fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
275771fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
275871fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
275971fe804bSLee Schermerhorn  *
276071fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
276171fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
276271fe804bSLee Schermerhorn  * This must be released on exit.
27634bfc4495SKAMEZAWA Hiroyuki  * This is called at get_inode() calls and we can use GFP_KERNEL.
276471fe804bSLee Schermerhorn  */
276571fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27667339ff83SRobin Holt {
276758568d2aSMiao Xie 	int ret;
276858568d2aSMiao Xie 
276971fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27704a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27717339ff83SRobin Holt 
277271fe804bSLee Schermerhorn 	if (mpol) {
27737339ff83SRobin Holt 		struct vm_area_struct pvma;
277471fe804bSLee Schermerhorn 		struct mempolicy *new;
27754bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27767339ff83SRobin Holt 
27774bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27785c0c1654SLee Schermerhorn 			goto put_mpol;
277971fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
278071fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
278115d77835SLee Schermerhorn 		if (IS_ERR(new))
27820cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
278358568d2aSMiao Xie 
278458568d2aSMiao Xie 		task_lock(current);
27854bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
278658568d2aSMiao Xie 		task_unlock(current);
278715d77835SLee Schermerhorn 		if (ret)
27885c0c1654SLee Schermerhorn 			goto put_new;
278971fe804bSLee Schermerhorn 
279071fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27912c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
279271fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
279371fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
279415d77835SLee Schermerhorn 
27955c0c1654SLee Schermerhorn put_new:
279671fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
27970cae3457SDan Carpenter free_scratch:
27984bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
27995c0c1654SLee Schermerhorn put_mpol:
28005c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
28017339ff83SRobin Holt 	}
28027339ff83SRobin Holt }
28037339ff83SRobin Holt 
28041da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
28051da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28061da177e4SLinus Torvalds {
28071da177e4SLinus Torvalds 	int err;
28081da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28091da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28101da177e4SLinus Torvalds 
2811028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28121da177e4SLinus Torvalds 		 vma->vm_pgoff,
281345c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2814028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2815269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28161da177e4SLinus Torvalds 
28171da177e4SLinus Torvalds 	if (npol) {
28181da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28191da177e4SLinus Torvalds 		if (!new)
28201da177e4SLinus Torvalds 			return -ENOMEM;
28211da177e4SLinus Torvalds 	}
28221da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28231da177e4SLinus Torvalds 	if (err && new)
282463f74ca2SKOSAKI Motohiro 		sp_free(new);
28251da177e4SLinus Torvalds 	return err;
28261da177e4SLinus Torvalds }
28271da177e4SLinus Torvalds 
28281da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28291da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28301da177e4SLinus Torvalds {
28311da177e4SLinus Torvalds 	struct sp_node *n;
28321da177e4SLinus Torvalds 	struct rb_node *next;
28331da177e4SLinus Torvalds 
28341da177e4SLinus Torvalds 	if (!p->root.rb_node)
28351da177e4SLinus Torvalds 		return;
28364a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28371da177e4SLinus Torvalds 	next = rb_first(&p->root);
28381da177e4SLinus Torvalds 	while (next) {
28391da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28401da177e4SLinus Torvalds 		next = rb_next(&n->nd);
284163f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28421da177e4SLinus Torvalds 	}
28434a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28441da177e4SLinus Torvalds }
28451da177e4SLinus Torvalds 
28461a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2847c297663cSMel Gorman static int __initdata numabalancing_override;
28481a687c2eSMel Gorman 
28491a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28501a687c2eSMel Gorman {
28511a687c2eSMel Gorman 	bool numabalancing_default = false;
28521a687c2eSMel Gorman 
28531a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28541a687c2eSMel Gorman 		numabalancing_default = true;
28551a687c2eSMel Gorman 
2856c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2857c297663cSMel Gorman 	if (numabalancing_override)
2858c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2859c297663cSMel Gorman 
2860b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2861756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2862c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28631a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28641a687c2eSMel Gorman 	}
28651a687c2eSMel Gorman }
28661a687c2eSMel Gorman 
28671a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28681a687c2eSMel Gorman {
28691a687c2eSMel Gorman 	int ret = 0;
28701a687c2eSMel Gorman 	if (!str)
28711a687c2eSMel Gorman 		goto out;
28721a687c2eSMel Gorman 
28731a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2874c297663cSMel Gorman 		numabalancing_override = 1;
28751a687c2eSMel Gorman 		ret = 1;
28761a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2877c297663cSMel Gorman 		numabalancing_override = -1;
28781a687c2eSMel Gorman 		ret = 1;
28791a687c2eSMel Gorman 	}
28801a687c2eSMel Gorman out:
28811a687c2eSMel Gorman 	if (!ret)
28824a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28831a687c2eSMel Gorman 
28841a687c2eSMel Gorman 	return ret;
28851a687c2eSMel Gorman }
28861a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
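
/*
 * Example (illustrative): booting with numa_balancing=disable sets
 * numabalancing_override to -1, so check_numabalancing_enable() turns
 * the feature off even when CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y.
 */
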
28871a687c2eSMel Gorman #else
28881a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28891a687c2eSMel Gorman {
28901a687c2eSMel Gorman }
28911a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28921a687c2eSMel Gorman 
28931da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28941da177e4SLinus Torvalds void __init numa_policy_init(void)
28951da177e4SLinus Torvalds {
2896b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2897b71636e2SPaul Mundt 	unsigned long largest = 0;
2898b71636e2SPaul Mundt 	int nid, prefer = 0;
2899b71636e2SPaul Mundt 
29001da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
29011da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
290220c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
29031da177e4SLinus Torvalds 
29041da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
29051da177e4SLinus Torvalds 				     sizeof(struct sp_node),
290620c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29071da177e4SLinus Torvalds 
29085606e387SMel Gorman 	for_each_node(nid) {
29095606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29105606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29115606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29125606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2913269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29145606e387SMel Gorman 		};
29155606e387SMel Gorman 	}
29165606e387SMel Gorman 
2917b71636e2SPaul Mundt 	/*
2918b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2919b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB); we
2920b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2921b71636e2SPaul Mundt 	 */
2922b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
292301f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2924b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29251da177e4SLinus Torvalds 
2926b71636e2SPaul Mundt 		/* Preserve the largest node */
2927b71636e2SPaul Mundt 		if (largest < total_pages) {
2928b71636e2SPaul Mundt 			largest = total_pages;
2929b71636e2SPaul Mundt 			prefer = nid;
2930b71636e2SPaul Mundt 		}
2931b71636e2SPaul Mundt 
2932b71636e2SPaul Mundt 		/* Interleave this node? */
2933b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2934b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2935b71636e2SPaul Mundt 	}
2936b71636e2SPaul Mundt 
2937b71636e2SPaul Mundt 	/* All too small, use the largest */
2938b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2939b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2940b71636e2SPaul Mundt 
2941028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2942b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29431a687c2eSMel Gorman 
29441a687c2eSMel Gorman 	check_numabalancing_enable();
29451da177e4SLinus Torvalds }
29461da177e4SLinus Torvalds 
29478bccd85fSChristoph Lameter /* Reset policy of current process to default */
29481da177e4SLinus Torvalds void numa_default_policy(void)
29491da177e4SLinus Torvalds {
2950028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29511da177e4SLinus Torvalds }
295268860ec1SPaul Jackson 
29534225399aSPaul Jackson /*
2954095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2955095f1fc4SLee Schermerhorn  */
2956095f1fc4SLee Schermerhorn 
2957345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2958345ace9cSLee Schermerhorn {
2959345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2960345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2961345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2962345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2963d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2964b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2965345ace9cSLee Schermerhorn };
29661a75a6c8SChristoph Lameter 
2967095f1fc4SLee Schermerhorn 
2968095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2969095f1fc4SLee Schermerhorn /**
2970f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2971095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
297271fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2973095f1fc4SLee Schermerhorn  *
2974095f1fc4SLee Schermerhorn  * Format of input:
2975095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2976095f1fc4SLee Schermerhorn  *
2977dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2978095f1fc4SLee Schermerhorn  */
2979a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2980095f1fc4SLee Schermerhorn {
298171fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2982f2a07f40SHugh Dickins 	unsigned short mode_flags;
298371fe804bSLee Schermerhorn 	nodemask_t nodes;
2984095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2985095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2986dedf2c73Szhong jiang 	int err = 1, mode;
2987095f1fc4SLee Schermerhorn 
2988c7a91bc7SDan Carpenter 	if (flags)
2989c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2990c7a91bc7SDan Carpenter 
2991095f1fc4SLee Schermerhorn 	if (nodelist) {
2992095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2993095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
299471fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2995095f1fc4SLee Schermerhorn 			goto out;
299601f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2997095f1fc4SLee Schermerhorn 			goto out;
299871fe804bSLee Schermerhorn 	} else
299971fe804bSLee Schermerhorn 		nodes_clear(nodes);
300071fe804bSLee Schermerhorn 
3001dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
3002dedf2c73Szhong jiang 	if (mode < 0)
3003095f1fc4SLee Schermerhorn 		goto out;
3004095f1fc4SLee Schermerhorn 
300571fe804bSLee Schermerhorn 	switch (mode) {
3006095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
300771fe804bSLee Schermerhorn 		/*
3008aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only; later we use
3009aa9f7d51SRandy Dunlap 		 * first_node(nodes) to grab the single node, so the
3010aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty here.
301171fe804bSLee Schermerhorn 		 */
3012095f1fc4SLee Schermerhorn 		if (nodelist) {
3013095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3014095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3015095f1fc4SLee Schermerhorn 				rest++;
3016926f2ae0SKOSAKI Motohiro 			if (*rest)
3017926f2ae0SKOSAKI Motohiro 				goto out;
3018aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3019aa9f7d51SRandy Dunlap 				goto out;
3020095f1fc4SLee Schermerhorn 		}
3021095f1fc4SLee Schermerhorn 		break;
3022095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3023095f1fc4SLee Schermerhorn 		/*
3024095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3025095f1fc4SLee Schermerhorn 		 */
3026095f1fc4SLee Schermerhorn 		if (!nodelist)
302701f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30283f226aa1SLee Schermerhorn 		break;
302971fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30303f226aa1SLee Schermerhorn 		/*
303171fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30323f226aa1SLee Schermerhorn 		 */
303371fe804bSLee Schermerhorn 		if (nodelist)
30343f226aa1SLee Schermerhorn 			goto out;
30353f226aa1SLee Schermerhorn 		break;
3036413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3037413b43deSRavikiran G Thirumalai 		/*
3038413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3039413b43deSRavikiran G Thirumalai 		 */
3040413b43deSRavikiran G Thirumalai 		if (!nodelist)
3041413b43deSRavikiran G Thirumalai 			err = 0;
3042413b43deSRavikiran G Thirumalai 		goto out;
3043b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3044d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
304571fe804bSLee Schermerhorn 		/*
3046d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
304771fe804bSLee Schermerhorn 		 */
3048d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3049d69b2e63SKOSAKI Motohiro 			goto out;
3050095f1fc4SLee Schermerhorn 	}
3051095f1fc4SLee Schermerhorn 
305271fe804bSLee Schermerhorn 	mode_flags = 0;
3053095f1fc4SLee Schermerhorn 	if (flags) {
3054095f1fc4SLee Schermerhorn 		/*
3055095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3056095f1fc4SLee Schermerhorn 		 * mode flags.
3057095f1fc4SLee Schermerhorn 		 */
3058095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
305971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3060095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
306171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3062095f1fc4SLee Schermerhorn 		else
3063926f2ae0SKOSAKI Motohiro 			goto out;
3064095f1fc4SLee Schermerhorn 	}
306571fe804bSLee Schermerhorn 
306671fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
306771fe804bSLee Schermerhorn 	if (IS_ERR(new))
3068926f2ae0SKOSAKI Motohiro 		goto out;
3069926f2ae0SKOSAKI Motohiro 
3070f2a07f40SHugh Dickins 	/*
3071f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3072f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3073f2a07f40SHugh Dickins 	 */
3074269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3075269fbe72SBen Widawsky 		new->nodes = nodes;
3076269fbe72SBen Widawsky 	} else if (nodelist) {
3077269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3078269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3079269fbe72SBen Widawsky 	} else {
30807858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3081269fbe72SBen Widawsky 	}
3082f2a07f40SHugh Dickins 
3083f2a07f40SHugh Dickins 	/*
3084f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3085f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3086f2a07f40SHugh Dickins 	 */
3087e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3088f2a07f40SHugh Dickins 
3089926f2ae0SKOSAKI Motohiro 	err = 0;
309071fe804bSLee Schermerhorn 
3091095f1fc4SLee Schermerhorn out:
3092095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3093095f1fc4SLee Schermerhorn 	if (nodelist)
3094095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3095095f1fc4SLee Schermerhorn 	if (flags)
3096095f1fc4SLee Schermerhorn 		*--flags = '=';
309771fe804bSLee Schermerhorn 	if (!err)
309871fe804bSLee Schermerhorn 		*mpol = new;
3099095f1fc4SLee Schermerhorn 	return err;
3100095f1fc4SLee Schermerhorn }
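
/*
 * Example inputs accepted above (illustrative):
 *
 *	"interleave:0-3"	MPOL_INTERLEAVE across nodes 0-3
 *	"prefer=static:1"	MPOL_PREFERRED of node 1 with
 *				MPOL_F_STATIC_NODES
 *	"bind=relative:0,2"	MPOL_BIND on nodes 0 and 2 with
 *				MPOL_F_RELATIVE_NODES
 *	"local"			MPOL_LOCAL; a nodelist is rejected
 */
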
3101095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3102095f1fc4SLee Schermerhorn 
310371fe804bSLee Schermerhorn /**
310471fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
310571fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
310671fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
310771fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
310871fe804bSLee Schermerhorn  *
3109948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3110948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3111948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
31121a75a6c8SChristoph Lameter  */
3113948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31141a75a6c8SChristoph Lameter {
31151a75a6c8SChristoph Lameter 	char *p = buffer;
3116948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3117948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3118948927eeSDavid Rientjes 	unsigned short flags = 0;
31191a75a6c8SChristoph Lameter 
31208790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3121bea904d5SLee Schermerhorn 		mode = pol->mode;
3122948927eeSDavid Rientjes 		flags = pol->flags;
3123948927eeSDavid Rientjes 	}
3124bea904d5SLee Schermerhorn 
31251a75a6c8SChristoph Lameter 	switch (mode) {
31261a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31277858d7bcSFeng Tang 	case MPOL_LOCAL:
31281a75a6c8SChristoph Lameter 		break;
31291a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3130b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31311a75a6c8SChristoph Lameter 	case MPOL_BIND:
31321a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3133269fbe72SBen Widawsky 		nodes = pol->nodes;
31341a75a6c8SChristoph Lameter 		break;
31351a75a6c8SChristoph Lameter 	default:
3136948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3137948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3138948927eeSDavid Rientjes 		return;
31391a75a6c8SChristoph Lameter 	}
31401a75a6c8SChristoph Lameter 
3141b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31421a75a6c8SChristoph Lameter 
3143fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3144948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3145f5b087b5SDavid Rientjes 
31462291990aSLee Schermerhorn 		/*
31472291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
31482291990aSLee Schermerhorn 		 */
3149f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31502291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31512291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31522291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3153f5b087b5SDavid Rientjes 	}
3154f5b087b5SDavid Rientjes 
31559e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31569e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31579e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31581a75a6c8SChristoph Lameter }
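
/*
 * Example outputs (illustrative): "default", "local", "prefer:2",
 * "bind=static:0-1", "interleave:0-3".
 */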
3159