xref: /openbmc/linux/mm/mempolicy.c (revision be2d57563822b7e00b2b16d9354637c4b6d6d5cc)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a per-process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind truly restricted
268bccd85fSChristoph Lameter  *                the allocation to the given memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
481da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
581da177e4SLinus Torvalds 
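/*
 * Illustrative, standalone userspace sketch (not part of this file): how the
 * modes described above are typically requested via the set_mempolicy(2) and
 * mbind(2) syscalls declared in <numaif.h> (link with -lnuma).  The node
 * numbers and mapping size below are assumptions chosen only for the example.
 */
#include <numaif.h>
#include <sys/mman.h>

int main(void)
{
	/* Interleave this task's future allocations over nodes 0 and 1. */
	unsigned long interleave_nodes = (1UL << 0) | (1UL << 1);
	/* Restrict one mapping to node 0 only, with no fallback (bind). */
	unsigned long bind_node = 1UL << 0;
	size_t len = 1UL << 20;
	void *buf;

	if (set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes,
			  8 * sizeof(interleave_nodes) + 1))
		return 1;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	if (mbind(buf, len, MPOL_BIND, &bind_node,
		  8 * sizeof(bind_node) + 1, 0))
		return 1;

	return 0;
}
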
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
681da177e4SLinus Torvalds    kernel does not always handle that gracefully.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1074a18419fSNadav Amit #include <asm/tlb.h>
1087c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1091da177e4SLinus Torvalds 
11062695a84SNick Piggin #include "internal.h"
11162695a84SNick Piggin 
11238e35860SChristoph Lameter /* Internal flags */
113dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11438e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115dc9aa5b9SChristoph Lameter 
116fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
117fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1201da177e4SLinus Torvalds    policied. */
1216267276fSChristoph Lameter enum zone_type policy_zone = 0;
1221da177e4SLinus Torvalds 
123bea904d5SLee Schermerhorn /*
124bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
125bea904d5SLee Schermerhorn  */
126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1271da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1287858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1291da177e4SLinus Torvalds };
1301da177e4SLinus Torvalds 
1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1325606e387SMel Gorman 
133b2ca916cSDan Williams /**
134b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
135f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
136b2ca916cSDan Williams  *
137b2ca916cSDan Williams  * Lookup the next closest node by distance if @node is not online.
138dad5b023SRandy Dunlap  *
139dad5b023SRandy Dunlap  * Return: this @node if it is online, otherwise the closest node by distance
140b2ca916cSDan Williams  */
141b2ca916cSDan Williams int numa_map_to_online_node(int node)
142b2ca916cSDan Williams {
1434fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
144b2ca916cSDan Williams 
1454fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1464fcbe96eSDan Williams 		return node;
147b2ca916cSDan Williams 
148b2ca916cSDan Williams 	min_node = node;
149b2ca916cSDan Williams 	for_each_online_node(n) {
150b2ca916cSDan Williams 		dist = node_distance(node, n);
151b2ca916cSDan Williams 		if (dist < min_dist) {
152b2ca916cSDan Williams 			min_dist = dist;
153b2ca916cSDan Williams 			min_node = n;
154b2ca916cSDan Williams 		}
155b2ca916cSDan Williams 	}
156b2ca916cSDan Williams 
157b2ca916cSDan Williams 	return min_node;
158b2ca916cSDan Williams }
159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160b2ca916cSDan Williams 
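/*
 * Illustrative (hypothetical) caller sketch, not part of this file: a driver
 * wanting memory close to a device whose firmware-reported node might be
 * offline could do
 *
 *	int nid = numa_map_to_online_node(dev_to_node(dev));
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);
 *
 * dev_to_node() and alloc_pages_node() are existing kernel helpers; "dev" and
 * the allocation order are placeholders for the example.
 */
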
16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1625606e387SMel Gorman {
1635606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
164f15ca78eSOleg Nesterov 	int node;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	if (pol)
167f15ca78eSOleg Nesterov 		return pol;
1685606e387SMel Gorman 
169f15ca78eSOleg Nesterov 	node = numa_node_id();
1701da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1711da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
172f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
173f15ca78eSOleg Nesterov 		if (pol->mode)
174f15ca78eSOleg Nesterov 			return pol;
1751da6f0e1SJianguo Wu 	}
1765606e387SMel Gorman 
177f15ca78eSOleg Nesterov 	return &default_policy;
1785606e387SMel Gorman }
1795606e387SMel Gorman 
18037012946SDavid Rientjes static const struct mempolicy_operations {
18137012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18337012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18437012946SDavid Rientjes 
185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186f5b087b5SDavid Rientjes {
1876d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1884c50bc01SDavid Rientjes }
1894c50bc01SDavid Rientjes 
1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1914c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1924c50bc01SDavid Rientjes {
1934c50bc01SDavid Rientjes 	nodemask_t tmp;
1944c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1954c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
196f5b087b5SDavid Rientjes }
197f5b087b5SDavid Rientjes 
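/*
 * Worked example (added for illustration): with a user nodemask of {0,2} and
 * a relative mask of {1,3,5}, nodes_fold() wraps the user mask into the
 * weight (3) of the relative mask, leaving {0,2}, and nodes_onto() then maps
 * bit i to the i-th set bit of the relative mask, yielding {1,5}.
 */
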
198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19937012946SDavid Rientjes {
20037012946SDavid Rientjes 	if (nodes_empty(*nodes))
20137012946SDavid Rientjes 		return -EINVAL;
202269fbe72SBen Widawsky 	pol->nodes = *nodes;
20337012946SDavid Rientjes 	return 0;
20437012946SDavid Rientjes }
20537012946SDavid Rientjes 
20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20737012946SDavid Rientjes {
2087858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2097858d7bcSFeng Tang 		return -EINVAL;
210269fbe72SBen Widawsky 
211269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
212269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21337012946SDavid Rientjes 	return 0;
21437012946SDavid Rientjes }
21537012946SDavid Rientjes 
21658568d2aSMiao Xie /*
21758568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21858568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2197858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
22058568d2aSMiao Xie  *
22158568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
222c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22358568d2aSMiao Xie  */
2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2254bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22658568d2aSMiao Xie {
22758568d2aSMiao Xie 	int ret;
22858568d2aSMiao Xie 
2297858d7bcSFeng Tang 	/*
2307858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2317858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2327858d7bcSFeng Tang 	 * constructor.
2337858d7bcSFeng Tang 	 */
2347858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23558568d2aSMiao Xie 		return 0;
2367858d7bcSFeng Tang 
23701f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2384bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23901f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
24058568d2aSMiao Xie 
24158568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2427858d7bcSFeng Tang 
24358568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2444bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24558568d2aSMiao Xie 	else
2464bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2474bfc4495SKAMEZAWA Hiroyuki 
24858568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24958568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
25058568d2aSMiao Xie 	else
2517858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
25258568d2aSMiao Xie 
2534bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25458568d2aSMiao Xie 	return ret;
25558568d2aSMiao Xie }
25658568d2aSMiao Xie 
25758568d2aSMiao Xie /*
25858568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25958568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
26058568d2aSMiao Xie  */
261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262028fec41SDavid Rientjes 				  nodemask_t *nodes)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds 	struct mempolicy *policy;
2651da177e4SLinus Torvalds 
266028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26700ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268140d5a49SPaul Mundt 
2693e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2703e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27137012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
272d3a71033SLee Schermerhorn 		return NULL;
27337012946SDavid Rientjes 	}
2743e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2753e1f0645SDavid Rientjes 
2763e1f0645SDavid Rientjes 	/*
2773e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2783e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2793e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2803e1f0645SDavid Rientjes 	 */
2813e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2823e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2833e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2843e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2853e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2867858d7bcSFeng Tang 
2877858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2883e1f0645SDavid Rientjes 		}
289479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2908d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2918d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2928d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
293479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2943e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2953e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2961da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2971da177e4SLinus Torvalds 	if (!policy)
2981da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2991da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30045c4745aSLee Schermerhorn 	policy->mode = mode;
30137012946SDavid Rientjes 	policy->flags = flags;
302c6018b4bSAneesh Kumar K.V 	policy->home_node = NUMA_NO_NODE;
3033e1f0645SDavid Rientjes 
30437012946SDavid Rientjes 	return policy;
30537012946SDavid Rientjes }
30637012946SDavid Rientjes 
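/*
 * Illustrative userspace view of the validation above (a sketch, not part of
 * this file); "mask" and "maxnode" are placeholders:
 *
 *	set_mempolicy(MPOL_PREFERRED, NULL, 0);
 *		-> accepted, silently treated as MPOL_LOCAL
 *	set_mempolicy(MPOL_PREFERRED | MPOL_F_STATIC_NODES, NULL, 0);
 *		-> -EINVAL (empty nodemask with STATIC/RELATIVE flags)
 *	set_mempolicy(MPOL_LOCAL | MPOL_F_STATIC_NODES, &mask, maxnode);
 *		-> -EINVAL (MPOL_LOCAL accepts neither a nodemask nor these flags)
 */
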
30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30952cd3b07SLee Schermerhorn {
31052cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31152cd3b07SLee Schermerhorn 		return;
31252cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31352cd3b07SLee Schermerhorn }
31452cd3b07SLee Schermerhorn 
315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31637012946SDavid Rientjes {
31737012946SDavid Rientjes }
31837012946SDavid Rientjes 
319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3201d0d2680SDavid Rientjes {
3211d0d2680SDavid Rientjes 	nodemask_t tmp;
3221d0d2680SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3271d0d2680SDavid Rientjes 	else {
328269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329213980c0SVlastimil Babka 								*nodes);
33029b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3311d0d2680SDavid Rientjes 	}
33237012946SDavid Rientjes 
333708c1bbcSMiao Xie 	if (nodes_empty(tmp))
334708c1bbcSMiao Xie 		tmp = *nodes;
335708c1bbcSMiao Xie 
336269fbe72SBen Widawsky 	pol->nodes = tmp;
33737012946SDavid Rientjes }
33837012946SDavid Rientjes 
33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
340213980c0SVlastimil Babka 						const nodemask_t *nodes)
34137012946SDavid Rientjes {
34237012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3431d0d2680SDavid Rientjes }
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie /*
346708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347708c1bbcSMiao Xie  *
348c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
350213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
351708c1bbcSMiao Xie  */
352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35337012946SDavid Rientjes {
354018160adSWang Cheng 	if (!pol || pol->mode == MPOL_LOCAL)
35537012946SDavid Rientjes 		return;
3567858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35737012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35837012946SDavid Rientjes 		return;
359708c1bbcSMiao Xie 
360213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3611d0d2680SDavid Rientjes }
3621d0d2680SDavid Rientjes 
3631d0d2680SDavid Rientjes /*
3641d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3651d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36658568d2aSMiao Xie  *
36758568d2aSMiao Xie  * Called with task's alloc_lock held.
3681d0d2680SDavid Rientjes  */
3691d0d2680SDavid Rientjes 
370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3711d0d2680SDavid Rientjes {
372213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3731d0d2680SDavid Rientjes }
3741d0d2680SDavid Rientjes 
3751d0d2680SDavid Rientjes /*
3761d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3771d0d2680SDavid Rientjes  *
378c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3791d0d2680SDavid Rientjes  */
3801d0d2680SDavid Rientjes 
3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3821d0d2680SDavid Rientjes {
3831d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
38466850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
3851d0d2680SDavid Rientjes 
386d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
38766850be5SLiam R. Howlett 	for_each_vma(vmi, vma)
388213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
389d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3901d0d2680SDavid Rientjes }
3911d0d2680SDavid Rientjes 
39237012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
39337012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39437012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
397be897d48SFeng Tang 		.create = mpol_new_nodemask,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_PREFERRED] = {
40137012946SDavid Rientjes 		.create = mpol_new_preferred,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes 	[MPOL_BIND] = {
405be897d48SFeng Tang 		.create = mpol_new_nodemask,
40637012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40737012946SDavid Rientjes 	},
4087858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4097858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4107858d7bcSFeng Tang 	},
411b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
412be897d48SFeng Tang 		.create = mpol_new_nodemask,
413b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
414b27abaccSDave Hansen 	},
41537012946SDavid Rientjes };
41637012946SDavid Rientjes 
4174a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
418fc301289SChristoph Lameter 				unsigned long flags);
4191a75a6c8SChristoph Lameter 
4206f4576e3SNaoya Horiguchi struct queue_pages {
4216f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4226f4576e3SNaoya Horiguchi 	unsigned long flags;
4236f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
424f18da660SLi Xinhai 	unsigned long start;
425f18da660SLi Xinhai 	unsigned long end;
426f18da660SLi Xinhai 	struct vm_area_struct *first;
4276f4576e3SNaoya Horiguchi };
4286f4576e3SNaoya Horiguchi 
42998094945SNaoya Horiguchi /*
430d451b89dSVishal Moola (Oracle)  * Check if the folio's nid is in qp->nmask.
43188aaa2a1SNaoya Horiguchi  *
43288aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check instead that the
43388aaa2a1SNaoya Horiguchi  * nid is not in qp->nmask.
43488aaa2a1SNaoya Horiguchi  */
435d451b89dSVishal Moola (Oracle) static inline bool queue_folio_required(struct folio *folio,
43688aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43788aaa2a1SNaoya Horiguchi {
438d451b89dSVishal Moola (Oracle) 	int nid = folio_nid(folio);
43988aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
44088aaa2a1SNaoya Horiguchi 
44188aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
44288aaa2a1SNaoya Horiguchi }
44388aaa2a1SNaoya Horiguchi 
444a7f40cfeSYang Shi /*
445de1f5055SVishal Moola (Oracle)  * queue_folios_pmd() has three possible return values:
446de1f5055SVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or a
447e5947d23SYang Shi  *     special page (e.g. the huge zero page) was met.
448de1f5055SVishal Moola (Oracle)  * 1 - an unmovable folio was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449d8835445SYang Shi  *     specified.
450d8835445SYang Shi  * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified and an
451de1f5055SVishal Moola (Oracle)  *        existing folio was already on a node that does not follow the
452d8835445SYang Shi  *        policy.
453a7f40cfeSYang Shi  */
454de1f5055SVishal Moola (Oracle) static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
456959a7e13SJules Irenge 	__releases(ptl)
457c8633798SNaoya Horiguchi {
458c8633798SNaoya Horiguchi 	int ret = 0;
459de1f5055SVishal Moola (Oracle) 	struct folio *folio;
460c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
461c8633798SNaoya Horiguchi 	unsigned long flags;
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
464a7f40cfeSYang Shi 		ret = -EIO;
465c8633798SNaoya Horiguchi 		goto unlock;
466c8633798SNaoya Horiguchi 	}
467de1f5055SVishal Moola (Oracle) 	folio = pfn_folio(pmd_pfn(*pmd));
468de1f5055SVishal Moola (Oracle) 	if (is_huge_zero_page(&folio->page)) {
469e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
4706d97cf88SMiaohe Lin 		goto unlock;
471c8633798SNaoya Horiguchi 	}
472d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
473c8633798SNaoya Horiguchi 		goto unlock;
474c8633798SNaoya Horiguchi 
475c8633798SNaoya Horiguchi 	flags = qp->flags;
476de1f5055SVishal Moola (Oracle) 	/* go to folio migration */
477a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
4794a64981dSVishal Moola (Oracle) 		    migrate_folio_add(folio, qp->pagelist, flags)) {
480d8835445SYang Shi 			ret = 1;
481a7f40cfeSYang Shi 			goto unlock;
482a7f40cfeSYang Shi 		}
483a7f40cfeSYang Shi 	} else
484a7f40cfeSYang Shi 		ret = -EIO;
485c8633798SNaoya Horiguchi unlock:
486c8633798SNaoya Horiguchi 	spin_unlock(ptl);
487c8633798SNaoya Horiguchi 	return ret;
488c8633798SNaoya Horiguchi }
489c8633798SNaoya Horiguchi 
49088aaa2a1SNaoya Horiguchi /*
49198094945SNaoya Horiguchi  * Scan through the pages, checking if they follow certain conditions,
49298094945SNaoya Horiguchi  * and move them to the pagelist if they do.
493d8835445SYang Shi  *
4943dae02bbSVishal Moola (Oracle)  * queue_folios_pte_range() has three possible return values:
4953dae02bbSVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or a
496e5947d23SYang Shi  *     special page (e.g. the zero page) was met.
4973dae02bbSVishal Moola (Oracle)  * 1 - an unmovable folio was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498d8835445SYang Shi  *     specified.
4993dae02bbSVishal Moola (Oracle)  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
500d8835445SYang Shi  *        on a node that does not follow the policy.
50198094945SNaoya Horiguchi  */
5023dae02bbSVishal Moola (Oracle) static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
5036f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5041da177e4SLinus Torvalds {
5056f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5063dae02bbSVishal Moola (Oracle) 	struct folio *folio;
5076f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5086f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
509d8835445SYang Shi 	bool has_unmovable = false;
5103f088420SShijie Luo 	pte_t *pte, *mapped_pte;
511705e87c0SHugh Dickins 	spinlock_t *ptl;
512941150a3SHugh Dickins 
513c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
514bc78b5edSMiaohe Lin 	if (ptl)
515de1f5055SVishal Moola (Oracle) 		return queue_folios_pmd(pmd, ptl, addr, end, walk);
51691612e0dSHugh Dickins 
517337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
518337d9abfSNaoya Horiguchi 		return 0;
51994723aafSMichal Hocko 
5203f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5216f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
52291612e0dSHugh Dickins 		if (!pte_present(*pte))
52391612e0dSHugh Dickins 			continue;
5243dae02bbSVishal Moola (Oracle) 		folio = vm_normal_folio(vma, addr, *pte);
5253dae02bbSVishal Moola (Oracle) 		if (!folio || folio_is_zone_device(folio))
52691612e0dSHugh Dickins 			continue;
527053837fcSNick Piggin 		/*
5283dae02bbSVishal Moola (Oracle) 		 * vm_normal_folio() filters out zero pages, but there might
5293dae02bbSVishal Moola (Oracle) 		 * still be reserved folios to skip, perhaps in a VDSO.
530053837fcSNick Piggin 		 */
5313dae02bbSVishal Moola (Oracle) 		if (folio_test_reserved(folio))
532f4598c8bSChristoph Lameter 			continue;
533d451b89dSVishal Moola (Oracle) 		if (!queue_folio_required(folio, qp))
53438e35860SChristoph Lameter 			continue;
535a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
536d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
537d8835445SYang Shi 			if (!vma_migratable(vma)) {
538d8835445SYang Shi 				has_unmovable = true;
539a7f40cfeSYang Shi 				break;
540d8835445SYang Shi 			}
541a53190a4SYang Shi 
542a53190a4SYang Shi 			/*
543a53190a4SYang Shi 			 * Do not abort immediately since there may be
544a53190a4SYang Shi 			 * temporary off LRU pages in the range.  Still
545a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  Still
546a53190a4SYang Shi 			 * need to migrate other LRU pages.
5474a64981dSVishal Moola (Oracle) 			if (migrate_folio_add(folio, qp->pagelist, flags))
548a53190a4SYang Shi 				has_unmovable = true;
549a7f40cfeSYang Shi 		} else
550a7f40cfeSYang Shi 			break;
5516f4576e3SNaoya Horiguchi 	}
5523f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5536f4576e3SNaoya Horiguchi 	cond_resched();
554d8835445SYang Shi 
555d8835445SYang Shi 	if (has_unmovable)
556d8835445SYang Shi 		return 1;
557d8835445SYang Shi 
558a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
55991612e0dSHugh Dickins }
56091612e0dSHugh Dickins 
5610a2c1e81SVishal Moola (Oracle) static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
5626f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5636f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
564e2d8cf40SNaoya Horiguchi {
565dcf17635SLi Xinhai 	int ret = 0;
566e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5676f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
568dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
5690a2c1e81SVishal Moola (Oracle) 	struct folio *folio;
570cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
571d4c54919SNaoya Horiguchi 	pte_t entry;
572e2d8cf40SNaoya Horiguchi 
5736f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5746f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
575d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
576d4c54919SNaoya Horiguchi 		goto unlock;
5770a2c1e81SVishal Moola (Oracle) 	folio = pfn_folio(pte_pfn(entry));
578d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
579e2d8cf40SNaoya Horiguchi 		goto unlock;
580dcf17635SLi Xinhai 
581dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
582dcf17635SLi Xinhai 		/*
5830a2c1e81SVishal Moola (Oracle) 		 * STRICT alone means only detecting misplaced folios and no
584dcf17635SLi Xinhai 		 * need to further check other vmas.
585dcf17635SLi Xinhai 		 */
586dcf17635SLi Xinhai 		ret = -EIO;
587dcf17635SLi Xinhai 		goto unlock;
588dcf17635SLi Xinhai 	}
589dcf17635SLi Xinhai 
590dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
591dcf17635SLi Xinhai 		/*
592dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
593dcf17635SLi Xinhai 		 * stopped walking the current vma.
5940a2c1e81SVishal Moola (Oracle) 		 * Detect misplaced folios but allow migrating folios which
595dcf17635SLi Xinhai 		 * have been queued.
596dcf17635SLi Xinhai 		 */
597dcf17635SLi Xinhai 		ret = 1;
598dcf17635SLi Xinhai 		goto unlock;
599dcf17635SLi Xinhai 	}
600dcf17635SLi Xinhai 
6010a2c1e81SVishal Moola (Oracle) 	/*
6020a2c1e81SVishal Moola (Oracle) 	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
6030a2c1e81SVishal Moola (Oracle) 	 * is shared it is likely not worth migrating.
6040a2c1e81SVishal Moola (Oracle) 	 *
6050a2c1e81SVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
6060a2c1e81SVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
6070a2c1e81SVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
6080a2c1e81SVishal Moola (Oracle) 	 */
609e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
6100a2c1e81SVishal Moola (Oracle) 	    (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
61173bdf65eSMike Kravetz 	     !hugetlb_pmd_shared(pte))) {
6120a2c1e81SVishal Moola (Oracle) 		if (isolate_hugetlb(folio, qp->pagelist) &&
613dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
614dcf17635SLi Xinhai 			/*
6150a2c1e81SVishal Moola (Oracle) 			 * Failed to isolate folio but allow migrating pages
616dcf17635SLi Xinhai 			 * which have been queued.
617dcf17635SLi Xinhai 			 */
618dcf17635SLi Xinhai 			ret = 1;
619dcf17635SLi Xinhai 	}
620e2d8cf40SNaoya Horiguchi unlock:
621cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
622e2d8cf40SNaoya Horiguchi #else
623e2d8cf40SNaoya Horiguchi 	BUG();
624e2d8cf40SNaoya Horiguchi #endif
625dcf17635SLi Xinhai 	return ret;
6261da177e4SLinus Torvalds }
6271da177e4SLinus Torvalds 
6285877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
629b24f53a0SLee Schermerhorn /*
6304b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6314b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6324b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6334b10e7d5SMel Gorman  *
6344b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6354b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6364b10e7d5SMel Gorman  * changes to the core.
637b24f53a0SLee Schermerhorn  */
6384b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6394b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
640b24f53a0SLee Schermerhorn {
6414a18419fSNadav Amit 	struct mmu_gather tlb;
642a79390f5SPeter Xu 	long nr_updated;
643b24f53a0SLee Schermerhorn 
6444a18419fSNadav Amit 	tlb_gather_mmu(&tlb, vma->vm_mm);
6454a18419fSNadav Amit 
6461ef488edSDavid Hildenbrand 	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
647d1751118SPeter Xu 	if (nr_updated > 0)
64803c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
649b24f53a0SLee Schermerhorn 
6504a18419fSNadav Amit 	tlb_finish_mmu(&tlb);
6514a18419fSNadav Amit 
6524b10e7d5SMel Gorman 	return nr_updated;
653b24f53a0SLee Schermerhorn }
654b24f53a0SLee Schermerhorn #else
655b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
656b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
657b24f53a0SLee Schermerhorn {
658b24f53a0SLee Schermerhorn 	return 0;
659b24f53a0SLee Schermerhorn }
6605877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
661b24f53a0SLee Schermerhorn 
6626f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6636f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6641da177e4SLinus Torvalds {
66566850be5SLiam R. Howlett 	struct vm_area_struct *next, *vma = walk->vma;
6666f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6675b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6686f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
669dc9aa5b9SChristoph Lameter 
670a18b3ac2SLi Xinhai 	/* range check first */
671ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
672f18da660SLi Xinhai 
673f18da660SLi Xinhai 	if (!qp->first) {
674f18da660SLi Xinhai 		qp->first = vma;
675f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
676f18da660SLi Xinhai 			(qp->start < vma->vm_start))
677f18da660SLi Xinhai 			/* hole at head side of range */
678a18b3ac2SLi Xinhai 			return -EFAULT;
679a18b3ac2SLi Xinhai 	}
68066850be5SLiam R. Howlett 	next = find_vma(vma->vm_mm, vma->vm_end);
681f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
682f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
68366850be5SLiam R. Howlett 		(!next || vma->vm_end < next->vm_start)))
684f18da660SLi Xinhai 		/* hole at middle or tail of range */
685f18da660SLi Xinhai 		return -EFAULT;
686a18b3ac2SLi Xinhai 
687a7f40cfeSYang Shi 	/*
688a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
689a7f40cfeSYang Shi 	 * regardless of vma_migratable
690a7f40cfeSYang Shi 	 */
691a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
692a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
69348684a65SNaoya Horiguchi 		return 1;
69448684a65SNaoya Horiguchi 
6955b952b3cSAndi Kleen 	if (endvma > end)
6965b952b3cSAndi Kleen 		endvma = end;
697b24f53a0SLee Schermerhorn 
698b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6992c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
7003122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
7014355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
702b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
7036f4576e3SNaoya Horiguchi 		return 1;
704b24f53a0SLee Schermerhorn 	}
705b24f53a0SLee Schermerhorn 
7066f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
707a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7086f4576e3SNaoya Horiguchi 		return 0;
7096f4576e3SNaoya Horiguchi 	return 1;
7106f4576e3SNaoya Horiguchi }
711b24f53a0SLee Schermerhorn 
7127b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7130a2c1e81SVishal Moola (Oracle) 	.hugetlb_entry		= queue_folios_hugetlb,
7143dae02bbSVishal Moola (Oracle) 	.pmd_entry		= queue_folios_pte_range,
7157b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7167b86ac33SChristoph Hellwig };
7177b86ac33SChristoph Hellwig 
7186f4576e3SNaoya Horiguchi /*
7196f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7206f4576e3SNaoya Horiguchi  *
7216f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7226f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist which is
723d8835445SYang Shi  * passed via @private.
724d8835445SYang Shi  *
725d8835445SYang Shi  * queue_pages_range() has three possible return values:
726d8835445SYang Shi  * 1 - an unmovable page was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
727d8835445SYang Shi  *     specified.
728d8835445SYang Shi  * 0 - pages queued successfully or no misplaced page found.
729a85dfc30SYang Shi  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
730a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
731a85dfc30SYang Shi  *         your accessible address space (-EFAULT)
7326f4576e3SNaoya Horiguchi  */
7336f4576e3SNaoya Horiguchi static int
7346f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7356f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7366f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7376f4576e3SNaoya Horiguchi {
738f18da660SLi Xinhai 	int err;
7396f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7406f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7416f4576e3SNaoya Horiguchi 		.flags = flags,
7426f4576e3SNaoya Horiguchi 		.nmask = nodes,
743f18da660SLi Xinhai 		.start = start,
744f18da660SLi Xinhai 		.end = end,
745f18da660SLi Xinhai 		.first = NULL,
7466f4576e3SNaoya Horiguchi 	};
7476f4576e3SNaoya Horiguchi 
748f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
749f18da660SLi Xinhai 
750f18da660SLi Xinhai 	if (!qp.first)
751f18da660SLi Xinhai 		/* whole range in hole */
752f18da660SLi Xinhai 		err = -EFAULT;
753f18da660SLi Xinhai 
754f18da660SLi Xinhai 	return err;
7551da177e4SLinus Torvalds }
7561da177e4SLinus Torvalds 
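/*
 * Illustrative userspace trigger for this walk (a sketch, not part of this
 * file): mbind(2) with MPOL_MF_MOVE | MPOL_MF_STRICT ends up in
 * queue_pages_range() to collect misplaced pages for migration, e.g.
 *
 *	unsigned long node1 = 1UL << 1;
 *	mbind(addr, len, MPOL_BIND, &node1, 8 * sizeof(node1) + 1,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 *
 * where "addr" and "len" are placeholders for an existing mapping.
 */
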
757869833f2SKOSAKI Motohiro /*
758869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
759c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
760869833f2SKOSAKI Motohiro  */
761869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
762869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7638d34694cSKOSAKI Motohiro {
764869833f2SKOSAKI Motohiro 	int err;
765869833f2SKOSAKI Motohiro 	struct mempolicy *old;
766869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7678d34694cSKOSAKI Motohiro 
7688d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7698d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7708d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7718d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7728d34694cSKOSAKI Motohiro 
773869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
774869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
775869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
776869833f2SKOSAKI Motohiro 
777869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7788d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
779869833f2SKOSAKI Motohiro 		if (err)
780869833f2SKOSAKI Motohiro 			goto err_out;
7818d34694cSKOSAKI Motohiro 	}
782869833f2SKOSAKI Motohiro 
783869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
784c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
785869833f2SKOSAKI Motohiro 	mpol_put(old);
786869833f2SKOSAKI Motohiro 
787869833f2SKOSAKI Motohiro 	return 0;
788869833f2SKOSAKI Motohiro  err_out:
789869833f2SKOSAKI Motohiro 	mpol_put(new);
7908d34694cSKOSAKI Motohiro 	return err;
7918d34694cSKOSAKI Motohiro }
7928d34694cSKOSAKI Motohiro 
7931da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7949d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7959d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7961da177e4SLinus Torvalds {
797f10c2abcSLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
7989d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7999d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
8009d8cebd4SKOSAKI Motohiro 	int err = 0;
801e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
8021da177e4SLinus Torvalds 
803f10c2abcSLiam R. Howlett 	prev = vma_prev(&vmi);
804f10c2abcSLiam R. Howlett 	vma = vma_find(&vmi, end);
8057329e3ebSLiam Howlett 	if (WARN_ON(!vma))
8067329e3ebSLiam Howlett 		return 0;
8077329e3ebSLiam Howlett 
8087329e3ebSLiam Howlett 	if (start > vma->vm_start)
8097329e3ebSLiam Howlett 		prev = vma;
8109d8cebd4SKOSAKI Motohiro 
811f10c2abcSLiam R. Howlett 	do {
81266850be5SLiam R. Howlett 		unsigned long vmstart = max(start, vma->vm_start);
81366850be5SLiam R. Howlett 		unsigned long vmend = min(end, vma->vm_end);
8149d8cebd4SKOSAKI Motohiro 
815e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
81666850be5SLiam R. Howlett 			goto next;
817e26a5114SKOSAKI Motohiro 
818e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
819e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8209760ebffSLiam R. Howlett 		prev = vma_merge(&vmi, mm, prev, vmstart, vmend, vma->vm_flags,
821e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
8229a10064fSColin Cross 				 new_pol, vma->vm_userfaultfd_ctx,
8235c26f6acSSuren Baghdasaryan 				 anon_vma_name(vma));
8249d8cebd4SKOSAKI Motohiro 		if (prev) {
8259d8cebd4SKOSAKI Motohiro 			vma = prev;
8263964acd0SOleg Nesterov 			goto replace;
8271da177e4SLinus Torvalds 		}
8289d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8299760ebffSLiam R. Howlett 			err = split_vma(&vmi, vma, vmstart, 1);
8309d8cebd4SKOSAKI Motohiro 			if (err)
8319d8cebd4SKOSAKI Motohiro 				goto out;
8329d8cebd4SKOSAKI Motohiro 		}
8339d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8349760ebffSLiam R. Howlett 			err = split_vma(&vmi, vma, vmend, 0);
8359d8cebd4SKOSAKI Motohiro 			if (err)
8369d8cebd4SKOSAKI Motohiro 				goto out;
8379d8cebd4SKOSAKI Motohiro 		}
8383964acd0SOleg Nesterov replace:
839869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8409d8cebd4SKOSAKI Motohiro 		if (err)
8419d8cebd4SKOSAKI Motohiro 			goto out;
84266850be5SLiam R. Howlett next:
84366850be5SLiam R. Howlett 		prev = vma;
844f10c2abcSLiam R. Howlett 	} for_each_vma_range(vmi, vma, end);
8459d8cebd4SKOSAKI Motohiro 
8469d8cebd4SKOSAKI Motohiro out:
8471da177e4SLinus Torvalds 	return err;
8481da177e4SLinus Torvalds }
8491da177e4SLinus Torvalds 
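/*
 * Worked example (added for illustration): applying a new policy to the
 * middle of one VMA spanning [0x1000, 0x9000), e.g. mbind(0x3000, 0x3000,
 * ...), leaves three VMAs: [0x1000, 0x3000) and [0x6000, 0x9000) keep the
 * old policy while [0x3000, 0x6000) gets the new one, unless vma_merge()
 * can fold neighbouring VMAs back together.
 */
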
8501da177e4SLinus Torvalds /* Set the process memory policy */
851028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
852028fec41SDavid Rientjes 			     nodemask_t *nodes)
8531da177e4SLinus Torvalds {
85458568d2aSMiao Xie 	struct mempolicy *new, *old;
8554bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
85658568d2aSMiao Xie 	int ret;
8571da177e4SLinus Torvalds 
8584bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8594bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
860f4e53d91SLee Schermerhorn 
8614bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8624bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8634bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8644bfc4495SKAMEZAWA Hiroyuki 		goto out;
8654bfc4495SKAMEZAWA Hiroyuki 	}
8662c7c3a7dSOleg Nesterov 
86712c1dc8eSAbel Wu 	task_lock(current);
8684bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
86958568d2aSMiao Xie 	if (ret) {
87012c1dc8eSAbel Wu 		task_unlock(current);
87158568d2aSMiao Xie 		mpol_put(new);
8724bfc4495SKAMEZAWA Hiroyuki 		goto out;
87358568d2aSMiao Xie 	}
87412c1dc8eSAbel Wu 
87558568d2aSMiao Xie 	old = current->mempolicy;
8761da177e4SLinus Torvalds 	current->mempolicy = new;
87745816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87845816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
87958568d2aSMiao Xie 	task_unlock(current);
88058568d2aSMiao Xie 	mpol_put(old);
8814bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8824bfc4495SKAMEZAWA Hiroyuki out:
8834bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8844bfc4495SKAMEZAWA Hiroyuki 	return ret;
8851da177e4SLinus Torvalds }
8861da177e4SLinus Torvalds 
887bea904d5SLee Schermerhorn /*
888bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
88958568d2aSMiao Xie  *
89058568d2aSMiao Xie  * Called with task's alloc_lock held
891bea904d5SLee Schermerhorn  */
892bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8931da177e4SLinus Torvalds {
894dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
895bea904d5SLee Schermerhorn 	if (p == &default_policy)
896bea904d5SLee Schermerhorn 		return;
897bea904d5SLee Schermerhorn 
89845c4745aSLee Schermerhorn 	switch (p->mode) {
89919770b32SMel Gorman 	case MPOL_BIND:
9001da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
901269fbe72SBen Widawsky 	case MPOL_PREFERRED:
902b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
903269fbe72SBen Widawsky 		*nodes = p->nodes;
9041da177e4SLinus Torvalds 		break;
9057858d7bcSFeng Tang 	case MPOL_LOCAL:
9067858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9077858d7bcSFeng Tang 		break;
9081da177e4SLinus Torvalds 	default:
9091da177e4SLinus Torvalds 		BUG();
9101da177e4SLinus Torvalds 	}
9111da177e4SLinus Torvalds }
9121da177e4SLinus Torvalds 
9133b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9141da177e4SLinus Torvalds {
915ba841078SPeter Xu 	struct page *p = NULL;
916f728b9c4SJohn Hubbard 	int ret;
9171da177e4SLinus Torvalds 
918f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
919f728b9c4SJohn Hubbard 	if (ret > 0) {
920f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9211da177e4SLinus Torvalds 		put_page(p);
9221da177e4SLinus Torvalds 	}
923f728b9c4SJohn Hubbard 	return ret;
9241da177e4SLinus Torvalds }
9251da177e4SLinus Torvalds 
9261da177e4SLinus Torvalds /* Retrieve NUMA policy */
927dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9281da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9291da177e4SLinus Torvalds {
9308bccd85fSChristoph Lameter 	int err;
9311da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9321da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9333b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9341da177e4SLinus Torvalds 
935754af6f5SLee Schermerhorn 	if (flags &
936754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9371da177e4SLinus Torvalds 		return -EINVAL;
938754af6f5SLee Schermerhorn 
939754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
940754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
941754af6f5SLee Schermerhorn 			return -EINVAL;
942754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
94358568d2aSMiao Xie 		task_lock(current);
944754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
94558568d2aSMiao Xie 		task_unlock(current);
946754af6f5SLee Schermerhorn 		return 0;
947754af6f5SLee Schermerhorn 	}
948754af6f5SLee Schermerhorn 
9491da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
950bea904d5SLee Schermerhorn 		/*
951bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
952bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
953bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
954bea904d5SLee Schermerhorn 		 */
955d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
95633e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9571da177e4SLinus Torvalds 		if (!vma) {
958d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9591da177e4SLinus Torvalds 			return -EFAULT;
9601da177e4SLinus Torvalds 		}
9611da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9621da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9631da177e4SLinus Torvalds 		else
9641da177e4SLinus Torvalds 			pol = vma->vm_policy;
9651da177e4SLinus Torvalds 	} else if (addr)
9661da177e4SLinus Torvalds 		return -EINVAL;
9671da177e4SLinus Torvalds 
9681da177e4SLinus Torvalds 	if (!pol)
969bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9701da177e4SLinus Torvalds 
9711da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9721da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9733b9aadf7SAndrea Arcangeli 			/*
974f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
975f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
976f728b9c4SJohn Hubbard 			 * valid, "vma" is stale.
9773b9aadf7SAndrea Arcangeli 			 */
9783b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9793b9aadf7SAndrea Arcangeli 			vma = NULL;
9803b9aadf7SAndrea Arcangeli 			mpol_get(pol);
981f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9823b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9831da177e4SLinus Torvalds 			if (err < 0)
9841da177e4SLinus Torvalds 				goto out;
9858bccd85fSChristoph Lameter 			*policy = err;
9861da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
98745c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
988269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9891da177e4SLinus Torvalds 		} else {
9901da177e4SLinus Torvalds 			err = -EINVAL;
9911da177e4SLinus Torvalds 			goto out;
9921da177e4SLinus Torvalds 		}
993bea904d5SLee Schermerhorn 	} else {
994bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
995bea904d5SLee Schermerhorn 						pol->mode;
996d79df630SDavid Rientjes 		/*
997d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
998d79df630SDavid Rientjes 		 * the policy to userspace.
999d79df630SDavid Rientjes 		 */
1000d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1001bea904d5SLee Schermerhorn 	}
10021da177e4SLinus Torvalds 
10031da177e4SLinus Torvalds 	err = 0;
100458568d2aSMiao Xie 	if (nmask) {
1005c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1006c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1007c6b6ef8bSLee Schermerhorn 		} else {
100858568d2aSMiao Xie 			task_lock(current);
1009bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
101058568d2aSMiao Xie 			task_unlock(current);
101158568d2aSMiao Xie 		}
1012c6b6ef8bSLee Schermerhorn 	}
10131da177e4SLinus Torvalds 
10141da177e4SLinus Torvalds  out:
101552cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10161da177e4SLinus Torvalds 	if (vma)
1017d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10183b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10193b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10201da177e4SLinus Torvalds 	return err;
10211da177e4SLinus Torvalds }
10221da177e4SLinus Torvalds 
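/*
 * Illustrative userspace counterpart (a sketch, not part of this file): the
 * usual way to ask which node currently backs an address is
 *
 *	int node;
 *	get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *
 * which reaches the MPOL_F_ADDR path above via lookup_node(); "addr" is a
 * placeholder for an already-faulted-in address in the caller's space.
 */
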
1023b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10244a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1025fc301289SChristoph Lameter 				unsigned long flags)
10266ce3c4c0SChristoph Lameter {
10276ce3c4c0SChristoph Lameter 	/*
10284a64981dSVishal Moola (Oracle) 	 * We try to migrate only unshared folios. If a folio is shared, it
10294a64981dSVishal Moola (Oracle) 	 * is likely not worth migrating.
10304a64981dSVishal Moola (Oracle) 	 *
10314a64981dSVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
10324a64981dSVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
10334a64981dSVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
10346ce3c4c0SChristoph Lameter 	 */
10354a64981dSVishal Moola (Oracle) 	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1036*be2d5756SBaolin Wang 		if (folio_isolate_lru(folio)) {
10374a64981dSVishal Moola (Oracle) 			list_add_tail(&folio->lru, foliolist);
10384a64981dSVishal Moola (Oracle) 			node_stat_mod_folio(folio,
10394a64981dSVishal Moola (Oracle) 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
10404a64981dSVishal Moola (Oracle) 				folio_nr_pages(folio));
1041a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1042a53190a4SYang Shi 			/*
10434a64981dSVishal Moola (Oracle) 			 * A non-movable folio may reach here.  And there may be
10444a64981dSVishal Moola (Oracle) 			 * temporarily off-LRU folios or non-LRU movable folios.
10454a64981dSVishal Moola (Oracle) 			 * Treat them as unmovable folios since they can't be
1046a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1047a53190a4SYang Shi 			 * should return -EIO for this case too.
1048a53190a4SYang Shi 			 */
1049a53190a4SYang Shi 			return -EIO;
105062695a84SNick Piggin 		}
105162695a84SNick Piggin 	}
1052a53190a4SYang Shi 
1053a53190a4SYang Shi 	return 0;
10546ce3c4c0SChristoph Lameter }
10556ce3c4c0SChristoph Lameter 
10566ce3c4c0SChristoph Lameter /*
10577e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10587e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10597e2ab150SChristoph Lameter  */
1060dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1061dbcb0f19SAdrian Bunk 			   int flags)
10627e2ab150SChristoph Lameter {
10637e2ab150SChristoph Lameter 	nodemask_t nmask;
106466850be5SLiam R. Howlett 	struct vm_area_struct *vma;
10657e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10667e2ab150SChristoph Lameter 	int err = 0;
1067a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1068a0976311SJoonsoo Kim 		.nid = dest,
1069a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1070a0976311SJoonsoo Kim 	};
10717e2ab150SChristoph Lameter 
10727e2ab150SChristoph Lameter 	nodes_clear(nmask);
10737e2ab150SChristoph Lameter 	node_set(source, nmask);
10747e2ab150SChristoph Lameter 
107508270807SMinchan Kim 	/*
107608270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
107708270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107808270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
107908270807SMinchan Kim 	 */
108066850be5SLiam R. Howlett 	vma = find_vma(mm, 0);
108108270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
108266850be5SLiam R. Howlett 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
10837e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10847e2ab150SChristoph Lameter 
1085cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1086a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10875ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1088cf608ac1SMinchan Kim 		if (err)
1089e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1090cf608ac1SMinchan Kim 	}
109195a402c3SChristoph Lameter 
10927e2ab150SChristoph Lameter 	return err;
10937e2ab150SChristoph Lameter }
10947e2ab150SChristoph Lameter 
10957e2ab150SChristoph Lameter /*
10967e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10977e2ab150SChristoph Lameter  * layout as much as possible.
109839743889SChristoph Lameter  *
109939743889SChristoph Lameter  * Returns the number of pages that could not be moved.
110039743889SChristoph Lameter  */
11010ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11020ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110339743889SChristoph Lameter {
11047e2ab150SChristoph Lameter 	int busy = 0;
1105f555befdSJan Stancek 	int err = 0;
11067e2ab150SChristoph Lameter 	nodemask_t tmp;
110739743889SChristoph Lameter 
1108361a2a22SMinchan Kim 	lru_cache_disable();
11090aedadf9SChristoph Lameter 
1110d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1111d4984711SChristoph Lameter 
11127e2ab150SChristoph Lameter 	/*
11137e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11147e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11157e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11167e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11177e2ab150SChristoph Lameter 	 *
11187e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11197e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11207e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11217e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11227e2ab150SChristoph Lameter 	 *
11237e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11247e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11257e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11267e2ab150SChristoph Lameter 	 *
11277e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11287e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11297e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11307e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11317e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
11327e2ab150SChristoph Lameter 	 *
11337e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11347e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11357e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11367e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out immediately, with that pair.
1137ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11387e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11397e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11407e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11417e2ab150SChristoph Lameter 	 */
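
	/*
	 * Illustrative trace (editor's sketch, not from the original
	 * source) of the scan above, using the example from the loop
	 * comment below, from = [2,3,4] and to = [3,4,5]:
	 *
	 *   pass 1: tmp = {2,3,4}: 2->3 (3 in tmp), 3->4 (4 in tmp),
	 *           4->5 (5 not in tmp) => migrate 4 to 5, clear 4
	 *   pass 2: tmp = {2,3}:   2->3 (3 in tmp),
	 *           3->4 (4 not in tmp) => migrate 3 to 4, clear 3
	 *   pass 3: tmp = {2}:     2->3 (3 not in tmp) => migrate 2 to 3
	 *
	 * Each destination sends its own pages away before it receives
	 * any, which is exactly the property described above.
	 */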
11427e2ab150SChristoph Lameter 
11430ce72d4fSAndrew Morton 	tmp = *from;
11447e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11457e2ab150SChristoph Lameter 		int s, d;
1146b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11477e2ab150SChristoph Lameter 		int dest = 0;
11487e2ab150SChristoph Lameter 
11497e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11504a5b18ccSLarry Woodman 
11514a5b18ccSLarry Woodman 			/*
11524a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11534a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11544a5b18ccSLarry Woodman 			 * threads and memory areas.
11554a5b18ccSLarry Woodman 			 *
11564a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11574a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11584a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
11594a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11604a5b18ccSLarry Woodman 			 * mask.
11614a5b18ccSLarry Woodman 			 *
11624a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11634a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11644a5b18ccSLarry Woodman 			 */
11654a5b18ccSLarry Woodman 
11660ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11670ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11684a5b18ccSLarry Woodman 				continue;
11694a5b18ccSLarry Woodman 
11700ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11717e2ab150SChristoph Lameter 			if (s == d)
11727e2ab150SChristoph Lameter 				continue;
11737e2ab150SChristoph Lameter 
11747e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11757e2ab150SChristoph Lameter 			dest = d;
11767e2ab150SChristoph Lameter 
11777e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11787e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11797e2ab150SChristoph Lameter 				break;
11807e2ab150SChristoph Lameter 		}
1181b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11827e2ab150SChristoph Lameter 			break;
11837e2ab150SChristoph Lameter 
11847e2ab150SChristoph Lameter 		node_clear(source, tmp);
11857e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11867e2ab150SChristoph Lameter 		if (err > 0)
11877e2ab150SChristoph Lameter 			busy += err;
11887e2ab150SChristoph Lameter 		if (err < 0)
11897e2ab150SChristoph Lameter 			break;
119039743889SChristoph Lameter 	}
1191d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1192d479960eSMinchan Kim 
1193361a2a22SMinchan Kim 	lru_cache_enable();
11947e2ab150SChristoph Lameter 	if (err < 0)
11957e2ab150SChristoph Lameter 		return err;
11967e2ab150SChristoph Lameter 	return busy;
1197b20a3503SChristoph Lameter 
119839743889SChristoph Lameter }
119939743889SChristoph Lameter 
12003ad33b24SLee Schermerhorn /*
12013ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1202d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
12033ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12043ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12053ad33b24SLee Schermerhorn  * is in virtual address order.
12063ad33b24SLee Schermerhorn  */
1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
120895a402c3SChristoph Lameter {
1209ec4858e0SMatthew Wilcox (Oracle) 	struct folio *dst, *src = page_folio(page);
1210d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12113f649ab7SKees Cook 	unsigned long address;
121266850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, current->mm, start);
1213ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
121495a402c3SChristoph Lameter 
121566850be5SLiam R. Howlett 	for_each_vma(vmi, vma) {
12163ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12173ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12183ad33b24SLee Schermerhorn 			break;
12193ad33b24SLee Schermerhorn 	}
12203ad33b24SLee Schermerhorn 
1221d0ce0e47SSidhartha Kumar 	if (folio_test_hugetlb(src)) {
1222d0ce0e47SSidhartha Kumar 		dst = alloc_hugetlb_folio_vma(folio_hstate(src),
1223389c8178SMichal Hocko 				vma, address);
1224d0ce0e47SSidhartha Kumar 		return &dst->page;
1225d0ce0e47SSidhartha Kumar 	}
1226c8633798SNaoya Horiguchi 
1227ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1228ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1229ec4858e0SMatthew Wilcox (Oracle) 
123011c731e8SWanpeng Li 	/*
1231ec4858e0SMatthew Wilcox (Oracle) 	 * if !vma, vma_alloc_folio() will use task or system default policy
123211c731e8SWanpeng Li 	 */
1233ec4858e0SMatthew Wilcox (Oracle) 	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1234ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
1235ec4858e0SMatthew Wilcox (Oracle) 	return &dst->page;
123695a402c3SChristoph Lameter }
1237b20a3503SChristoph Lameter #else
1238b20a3503SChristoph Lameter 
12394a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1240b20a3503SChristoph Lameter 				unsigned long flags)
1241b20a3503SChristoph Lameter {
1242a53190a4SYang Shi 	return -EIO;
1243b20a3503SChristoph Lameter }
1244b20a3503SChristoph Lameter 
12450ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12460ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1247b20a3503SChristoph Lameter {
1248b20a3503SChristoph Lameter 	return -ENOSYS;
1249b20a3503SChristoph Lameter }
125095a402c3SChristoph Lameter 
1251666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
125295a402c3SChristoph Lameter {
125395a402c3SChristoph Lameter 	return NULL;
125495a402c3SChristoph Lameter }
1255b20a3503SChristoph Lameter #endif
1256b20a3503SChristoph Lameter 
1257dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1258028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1259028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12606ce3c4c0SChristoph Lameter {
12616ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12626ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12636ce3c4c0SChristoph Lameter 	unsigned long end;
12646ce3c4c0SChristoph Lameter 	int err;
1265d8835445SYang Shi 	int ret;
12666ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12676ce3c4c0SChristoph Lameter 
1268b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12696ce3c4c0SChristoph Lameter 		return -EINVAL;
127074c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12716ce3c4c0SChristoph Lameter 		return -EPERM;
12726ce3c4c0SChristoph Lameter 
12736ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12746ce3c4c0SChristoph Lameter 		return -EINVAL;
12756ce3c4c0SChristoph Lameter 
12766ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12776ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12786ce3c4c0SChristoph Lameter 
1279aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
12806ce3c4c0SChristoph Lameter 	end = start + len;
12816ce3c4c0SChristoph Lameter 
12826ce3c4c0SChristoph Lameter 	if (end < start)
12836ce3c4c0SChristoph Lameter 		return -EINVAL;
12846ce3c4c0SChristoph Lameter 	if (end == start)
12856ce3c4c0SChristoph Lameter 		return 0;
12866ce3c4c0SChristoph Lameter 
1287028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12886ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12896ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12906ce3c4c0SChristoph Lameter 
1291b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1292b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1293b24f53a0SLee Schermerhorn 
12946ce3c4c0SChristoph Lameter 	/*
12956ce3c4c0SChristoph Lameter 	 * If we are using the default policy, then operating
12966ce3c4c0SChristoph Lameter 	 * on discontiguous address spaces is okay after all.
12976ce3c4c0SChristoph Lameter 	 */
12986ce3c4c0SChristoph Lameter 	if (!new)
12996ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13006ce3c4c0SChristoph Lameter 
1301028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1302028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130300ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13046ce3c4c0SChristoph Lameter 
13050aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13060aedadf9SChristoph Lameter 
1307361a2a22SMinchan Kim 		lru_cache_disable();
13080aedadf9SChristoph Lameter 	}
13094bfc4495SKAMEZAWA Hiroyuki 	{
13104bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13114bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1312d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13134bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13144bfc4495SKAMEZAWA Hiroyuki 			if (err)
1315d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13164bfc4495SKAMEZAWA Hiroyuki 		} else
13174bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13184bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13194bfc4495SKAMEZAWA Hiroyuki 	}
1320b05ca738SKOSAKI Motohiro 	if (err)
1321b05ca738SKOSAKI Motohiro 		goto mpol_out;
1322b05ca738SKOSAKI Motohiro 
1323d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13246ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1325d8835445SYang Shi 
1326d8835445SYang Shi 	if (ret < 0) {
1327a85dfc30SYang Shi 		err = ret;
1328d8835445SYang Shi 		goto up_out;
1329d8835445SYang Shi 	}
1330d8835445SYang Shi 
13319d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13327e2ab150SChristoph Lameter 
1333b24f53a0SLee Schermerhorn 	if (!err) {
1334b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1335b24f53a0SLee Schermerhorn 
1336cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1337b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1338d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
13395ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1340cf608ac1SMinchan Kim 			if (nr_failed)
134174060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1342cf608ac1SMinchan Kim 		}
13436ce3c4c0SChristoph Lameter 
1344d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13456ce3c4c0SChristoph Lameter 			err = -EIO;
1346a85dfc30SYang Shi 	} else {
1347d8835445SYang Shi up_out:
1348a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1349a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1350a85dfc30SYang Shi 	}
1351a85dfc30SYang Shi 
1352d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1353b05ca738SKOSAKI Motohiro mpol_out:
1354f0be3d32SLee Schermerhorn 	mpol_put(new);
1355d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1356361a2a22SMinchan Kim 		lru_cache_enable();
13576ce3c4c0SChristoph Lameter 	return err;
13586ce3c4c0SChristoph Lameter }
13596ce3c4c0SChristoph Lameter 
136039743889SChristoph Lameter /*
13618bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13628bccd85fSChristoph Lameter  */
1363e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1364e130242dSArnd Bergmann 		      unsigned long maxnode)
1365e130242dSArnd Bergmann {
1366e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1367e130242dSArnd Bergmann 	int ret;
1368e130242dSArnd Bergmann 
1369e130242dSArnd Bergmann 	if (in_compat_syscall())
1370e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1371e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1372e130242dSArnd Bergmann 					maxnode);
1373e130242dSArnd Bergmann 	else
1374e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1375e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1376e130242dSArnd Bergmann 
1377e130242dSArnd Bergmann 	if (ret)
1378e130242dSArnd Bergmann 		return -EFAULT;
1379e130242dSArnd Bergmann 
1380e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1381e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1382e130242dSArnd Bergmann 
1383e130242dSArnd Bergmann 	return 0;
1384e130242dSArnd Bergmann }
13858bccd85fSChristoph Lameter 
13868bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138739743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13888bccd85fSChristoph Lameter 		     unsigned long maxnode)
13898bccd85fSChristoph Lameter {
13908bccd85fSChristoph Lameter 	--maxnode;
13918bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13928bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13938bccd85fSChristoph Lameter 		return 0;
1394a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1395636f13c1SChris Wright 		return -EINVAL;
13968bccd85fSChristoph Lameter 
139756521e7aSYisheng Xie 	/*
139856521e7aSYisheng Xie 	 * When the user specified more nodes than supported, just check
1399e130242dSArnd Bergmann 	 * if the unsupported part is all zero, one word at a time,
1400e130242dSArnd Bergmann 	 * starting at the end.
140156521e7aSYisheng Xie 	 */
1402e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1403e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1404e130242dSArnd Bergmann 		unsigned long t;
14058bccd85fSChristoph Lameter 
1406000eca5dSTianyu Li 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
140756521e7aSYisheng Xie 			return -EFAULT;
1408e130242dSArnd Bergmann 
1409e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1410e130242dSArnd Bergmann 			maxnode -= bits;
1411e130242dSArnd Bergmann 		} else {
1412e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1413e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1414e130242dSArnd Bergmann 		}
1415e130242dSArnd Bergmann 		if (t)
141656521e7aSYisheng Xie 			return -EINVAL;
141756521e7aSYisheng Xie 	}
141856521e7aSYisheng Xie 
1419e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14208bccd85fSChristoph Lameter }
14218bccd85fSChristoph Lameter 
14228bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14238bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14248bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14258bccd85fSChristoph Lameter {
14268bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1427050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1428e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1429e130242dSArnd Bergmann 
1430e130242dSArnd Bergmann 	if (compat)
1431e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14328bccd85fSChristoph Lameter 
14338bccd85fSChristoph Lameter 	if (copy > nbytes) {
14348bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14358bccd85fSChristoph Lameter 			return -EINVAL;
14368bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14378bccd85fSChristoph Lameter 			return -EFAULT;
14388bccd85fSChristoph Lameter 		copy = nbytes;
1439e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14408bccd85fSChristoph Lameter 	}
1441e130242dSArnd Bergmann 
1442e130242dSArnd Bergmann 	if (compat)
1443e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1444e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1445e130242dSArnd Bergmann 
14468bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14478bccd85fSChristoph Lameter }
14488bccd85fSChristoph Lameter 
144995837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
145095837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
145195837924SFeng Tang {
145295837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
145395837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1454b27abaccSDave Hansen 
1455a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >=  MPOL_MAX)
145695837924SFeng Tang 		return -EINVAL;
145795837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
145895837924SFeng Tang 		return -EINVAL;
14596d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14606d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14616d2aec9eSEric Dumazet 			return -EINVAL;
14626d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14636d2aec9eSEric Dumazet 	}
146495837924SFeng Tang 	return 0;
146595837924SFeng Tang }
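
/*
 * Illustrative example (editor's note): from userspace the optional mode
 * flags arrive OR'ed into the mode argument, e.g.
 * MPOL_INTERLEAVE | MPOL_F_STATIC_NODES.  sanitize_mpol_flags() splits
 * that into *mode = MPOL_INTERLEAVE and *flags = MPOL_F_STATIC_NODES
 * before the values reach do_mbind() or do_set_mempolicy().
 */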
146695837924SFeng Tang 
1467e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1468e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1469e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14708bccd85fSChristoph Lameter {
1471028fec41SDavid Rientjes 	unsigned short mode_flags;
147295837924SFeng Tang 	nodemask_t nodes;
147395837924SFeng Tang 	int lmode = mode;
147495837924SFeng Tang 	int err;
14758bccd85fSChristoph Lameter 
1476057d3389SAndrey Konovalov 	start = untagged_addr(start);
147795837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
147895837924SFeng Tang 	if (err)
147995837924SFeng Tang 		return err;
148095837924SFeng Tang 
14818bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14828bccd85fSChristoph Lameter 	if (err)
14838bccd85fSChristoph Lameter 		return err;
148495837924SFeng Tang 
148595837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14868bccd85fSChristoph Lameter }
14878bccd85fSChristoph Lameter 
1488c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1489c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1490c6018b4bSAneesh Kumar K.V {
1491c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1492c6018b4bSAneesh Kumar K.V 	struct vm_area_struct *vma;
1493e976936cSMichal Hocko 	struct mempolicy *new, *old;
1494c6018b4bSAneesh Kumar K.V 	unsigned long vmstart;
1495c6018b4bSAneesh Kumar K.V 	unsigned long vmend;
1496c6018b4bSAneesh Kumar K.V 	unsigned long end;
1497c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
149866850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
1499c6018b4bSAneesh Kumar K.V 
1500c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1501c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1502c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1503c6018b4bSAneesh Kumar K.V 	/*
1504c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extensions and must be zero for now.
1505c6018b4bSAneesh Kumar K.V 	 */
1506c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1507c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1508c6018b4bSAneesh Kumar K.V 
1509c6018b4bSAneesh Kumar K.V 	/*
1510c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1511c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1512c6018b4bSAneesh Kumar K.V 	 */
1513c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1514c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1515c6018b4bSAneesh Kumar K.V 
1516aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
1517c6018b4bSAneesh Kumar K.V 	end = start + len;
1518c6018b4bSAneesh Kumar K.V 
1519c6018b4bSAneesh Kumar K.V 	if (end < start)
1520c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1521c6018b4bSAneesh Kumar K.V 	if (end == start)
1522c6018b4bSAneesh Kumar K.V 		return 0;
1523c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
152466850be5SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1525c6018b4bSAneesh Kumar K.V 		/*
1526c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1527c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, we return an error. We don't reset
1528c6018b4bSAneesh Kumar K.V 		 * the home node for the vmas we already updated.
1529c6018b4bSAneesh Kumar K.V 		 */
1530e976936cSMichal Hocko 		old = vma_policy(vma);
1531e976936cSMichal Hocko 		if (!old)
1532e976936cSMichal Hocko 			continue;
1533e976936cSMichal Hocko 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1534c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1535c6018b4bSAneesh Kumar K.V 			break;
1536c6018b4bSAneesh Kumar K.V 		}
1537e976936cSMichal Hocko 		new = mpol_dup(old);
1538e976936cSMichal Hocko 		if (IS_ERR(new)) {
1539e976936cSMichal Hocko 			err = PTR_ERR(new);
1540e976936cSMichal Hocko 			break;
1541e976936cSMichal Hocko 		}
1542c6018b4bSAneesh Kumar K.V 
1543c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1544e976936cSMichal Hocko 		vmstart = max(start, vma->vm_start);
1545e976936cSMichal Hocko 		vmend   = min(end, vma->vm_end);
1546c6018b4bSAneesh Kumar K.V 		err = mbind_range(mm, vmstart, vmend, new);
1547c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1548c6018b4bSAneesh Kumar K.V 		if (err)
1549c6018b4bSAneesh Kumar K.V 			break;
1550c6018b4bSAneesh Kumar K.V 	}
1551c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1552c6018b4bSAneesh Kumar K.V 	return err;
1553c6018b4bSAneesh Kumar K.V }
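
/*
 * Userspace sketch (illustrative, not part of this file): glibc provides
 * no wrapper for this syscall, so it is typically invoked via syscall(2).
 * Assumes headers that define __NR_set_mempolicy_home_node, that addr/len
 * already carry an MPOL_BIND or MPOL_PREFERRED_MANY policy set up with
 * mbind(2), and that node 1 as the home node is purely hypothetical.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *
 *	if (syscall(__NR_set_mempolicy_home_node, (unsigned long)addr,
 *		    len, 1UL, 0UL) < 0)
 *		perror("set_mempolicy_home_node");
 */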
1554c6018b4bSAneesh Kumar K.V 
1555e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1556e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1557e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1558e7dc9ad6SDominik Brodowski {
1559e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1560e7dc9ad6SDominik Brodowski }
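
/*
 * Userspace sketch (illustrative, not part of this file): bind a fresh
 * anonymous mapping to node 0 with mbind(2).  Assumes libnuma's
 * <numaif.h> (link with -lnuma), a system with at most 64 nodes so a
 * single unsigned long suffices as the nodemask, and trims most error
 * handling; node 0 is just an example.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	size_t len = 1UL << 20;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;
 *	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */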
1561e7dc9ad6SDominik Brodowski 
15628bccd85fSChristoph Lameter /* Set the process memory policy */
1563af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1564af03c4acSDominik Brodowski 				 unsigned long maxnode)
15658bccd85fSChristoph Lameter {
156695837924SFeng Tang 	unsigned short mode_flags;
15678bccd85fSChristoph Lameter 	nodemask_t nodes;
156895837924SFeng Tang 	int lmode = mode;
156995837924SFeng Tang 	int err;
15708bccd85fSChristoph Lameter 
157195837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
157295837924SFeng Tang 	if (err)
157395837924SFeng Tang 		return err;
157495837924SFeng Tang 
15758bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15768bccd85fSChristoph Lameter 	if (err)
15778bccd85fSChristoph Lameter 		return err;
157895837924SFeng Tang 
157995837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15808bccd85fSChristoph Lameter }
15818bccd85fSChristoph Lameter 
1582af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1583af03c4acSDominik Brodowski 		unsigned long, maxnode)
1584af03c4acSDominik Brodowski {
1585af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1586af03c4acSDominik Brodowski }
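
/*
 * Userspace sketch (illustrative, not part of this file): interleave all
 * future allocations of the calling task across nodes 0 and 1 via
 * set_mempolicy(2).  Assumes libnuma's <numaif.h> and a single-word
 * nodemask; the node numbers are hypothetical.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
 *		perror("set_mempolicy");
 */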
1587af03c4acSDominik Brodowski 
1588b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1589b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1590b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
159139743889SChristoph Lameter {
1592596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
159339743889SChristoph Lameter 	struct task_struct *task;
159439743889SChristoph Lameter 	nodemask_t task_nodes;
159539743889SChristoph Lameter 	int err;
1596596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1597596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1598596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
159939743889SChristoph Lameter 
1600596d7cfaSKOSAKI Motohiro 	if (!scratch)
1601596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
160239743889SChristoph Lameter 
1603596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1604596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1605596d7cfaSKOSAKI Motohiro 
1606596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
160739743889SChristoph Lameter 	if (err)
1608596d7cfaSKOSAKI Motohiro 		goto out;
1609596d7cfaSKOSAKI Motohiro 
1610596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1611596d7cfaSKOSAKI Motohiro 	if (err)
1612596d7cfaSKOSAKI Motohiro 		goto out;
161339743889SChristoph Lameter 
161439743889SChristoph Lameter 	/* Find the mm_struct */
161555cfaa3cSZeng Zhaoming 	rcu_read_lock();
1616228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
161739743889SChristoph Lameter 	if (!task) {
161855cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1619596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1620596d7cfaSKOSAKI Motohiro 		goto out;
162139743889SChristoph Lameter 	}
16223268c63eSChristoph Lameter 	get_task_struct(task);
162339743889SChristoph Lameter 
1624596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
162539743889SChristoph Lameter 
162639743889SChristoph Lameter 	/*
162731367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
162831367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
162939743889SChristoph Lameter 	 */
163031367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1631c69e8d9cSDavid Howells 		rcu_read_unlock();
163239743889SChristoph Lameter 		err = -EPERM;
16333268c63eSChristoph Lameter 		goto out_put;
163439743889SChristoph Lameter 	}
1635c69e8d9cSDavid Howells 	rcu_read_unlock();
163639743889SChristoph Lameter 
163739743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
163839743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1639596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
164039743889SChristoph Lameter 		err = -EPERM;
16413268c63eSChristoph Lameter 		goto out_put;
164239743889SChristoph Lameter 	}
164339743889SChristoph Lameter 
16440486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16450486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16460486a38bSYisheng Xie 	if (nodes_empty(*new))
16473268c63eSChristoph Lameter 		goto out_put;
16480486a38bSYisheng Xie 
164986c3a764SDavid Quigley 	err = security_task_movememory(task);
165086c3a764SDavid Quigley 	if (err)
16513268c63eSChristoph Lameter 		goto out_put;
165286c3a764SDavid Quigley 
16533268c63eSChristoph Lameter 	mm = get_task_mm(task);
16543268c63eSChristoph Lameter 	put_task_struct(task);
1655f2a9ef88SSasha Levin 
1656f2a9ef88SSasha Levin 	if (!mm) {
1657f2a9ef88SSasha Levin 		err = -EINVAL;
1658f2a9ef88SSasha Levin 		goto out;
1659f2a9ef88SSasha Levin 	}
1660f2a9ef88SSasha Levin 
1661596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
166274c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16633268c63eSChristoph Lameter 
166439743889SChristoph Lameter 	mmput(mm);
16653268c63eSChristoph Lameter out:
1666596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1667596d7cfaSKOSAKI Motohiro 
166839743889SChristoph Lameter 	return err;
16693268c63eSChristoph Lameter 
16703268c63eSChristoph Lameter out_put:
16713268c63eSChristoph Lameter 	put_task_struct(task);
16723268c63eSChristoph Lameter 	goto out;
16733268c63eSChristoph Lameter 
167439743889SChristoph Lameter }
167539743889SChristoph Lameter 
1676b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1677b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1678b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1679b6e9b0baSDominik Brodowski {
1680b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1681b6e9b0baSDominik Brodowski }
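
/*
 * Userspace sketch (illustrative, not part of this file): ask the kernel
 * to move the calling process's pages from node 0 to node 1 via
 * migrate_pages(2).  Assumes libnuma's <numaif.h> and a single-word
 * nodemask; the return value is the number of pages that could not be
 * moved (or -1 on error), mirroring do_migrate_pages() above.
 *
 *	#include <numaif.h>
 *	#include <unistd.h>
 *
 *	unsigned long from = 1UL << 0, to = 1UL << 1;
 *	long not_moved = migrate_pages(getpid(), sizeof(from) * 8,
 *				       &from, &to);
 */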
1682b6e9b0baSDominik Brodowski 
168339743889SChristoph Lameter 
16848bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1685af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1686af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1687af03c4acSDominik Brodowski 				unsigned long maxnode,
1688af03c4acSDominik Brodowski 				unsigned long addr,
1689af03c4acSDominik Brodowski 				unsigned long flags)
16908bccd85fSChristoph Lameter {
1691dbcb0f19SAdrian Bunk 	int err;
16923f649ab7SKees Cook 	int pval;
16938bccd85fSChristoph Lameter 	nodemask_t nodes;
16948bccd85fSChristoph Lameter 
1695050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16968bccd85fSChristoph Lameter 		return -EINVAL;
16978bccd85fSChristoph Lameter 
16984605f057SWenchao Hao 	addr = untagged_addr(addr);
16994605f057SWenchao Hao 
17008bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
17018bccd85fSChristoph Lameter 
17028bccd85fSChristoph Lameter 	if (err)
17038bccd85fSChristoph Lameter 		return err;
17048bccd85fSChristoph Lameter 
17058bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
17068bccd85fSChristoph Lameter 		return -EFAULT;
17078bccd85fSChristoph Lameter 
17088bccd85fSChristoph Lameter 	if (nmask)
17098bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
17108bccd85fSChristoph Lameter 
17118bccd85fSChristoph Lameter 	return err;
17128bccd85fSChristoph Lameter }
17138bccd85fSChristoph Lameter 
1714af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1715af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1716af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1717af03c4acSDominik Brodowski {
1718af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1719af03c4acSDominik Brodowski }
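
/*
 * Userspace sketch (illustrative, not part of this file): query which
 * policy governs the VMA containing a hypothetical address 'addr' via
 * get_mempolicy(2).  Assumes libnuma's <numaif.h> and that nr_node_ids
 * fits in a single unsigned long nodemask.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int mode;
 *	unsigned long nodes = 0;
 *	if (get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, addr,
 *			  MPOL_F_ADDR) == 0)
 *		printf("mode %d nodemask %#lx\n", mode, nodes);
 */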
1720af03c4acSDominik Brodowski 
172120ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
172220ca87f2SLi Xinhai {
172320ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
172420ca87f2SLi Xinhai 		return false;
172520ca87f2SLi Xinhai 
172620ca87f2SLi Xinhai 	/*
172720ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
172820ca87f2SLi Xinhai 	 * incurring periodic faults.
172920ca87f2SLi Xinhai 	 */
173020ca87f2SLi Xinhai 	if (vma_is_dax(vma))
173120ca87f2SLi Xinhai 		return false;
173220ca87f2SLi Xinhai 
173320ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
173420ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
173520ca87f2SLi Xinhai 		return false;
173620ca87f2SLi Xinhai 
173720ca87f2SLi Xinhai 	/*
173820ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
173920ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
174020ca87f2SLi Xinhai 	 * possible.
174120ca87f2SLi Xinhai 	 */
174220ca87f2SLi Xinhai 	if (vma->vm_file &&
174320ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
174420ca87f2SLi Xinhai 			< policy_zone)
174520ca87f2SLi Xinhai 		return false;
174620ca87f2SLi Xinhai 	return true;
174720ca87f2SLi Xinhai }
174820ca87f2SLi Xinhai 
174974d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
175074d2c3a0SOleg Nesterov 						unsigned long addr)
17511da177e4SLinus Torvalds {
17528d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds 	if (vma) {
1755480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17568d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
175700442ad0SMel Gorman 		} else if (vma->vm_policy) {
17581da177e4SLinus Torvalds 			pol = vma->vm_policy;
175900442ad0SMel Gorman 
176000442ad0SMel Gorman 			/*
176100442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
176200442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
176300442ad0SMel Gorman 			 * count on these policies which will be dropped by
176400442ad0SMel Gorman 			 * mpol_cond_put() later
176500442ad0SMel Gorman 			 */
176600442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
176700442ad0SMel Gorman 				mpol_get(pol);
176800442ad0SMel Gorman 		}
17691da177e4SLinus Torvalds 	}
1770f15ca78eSOleg Nesterov 
177174d2c3a0SOleg Nesterov 	return pol;
177274d2c3a0SOleg Nesterov }
177374d2c3a0SOleg Nesterov 
177474d2c3a0SOleg Nesterov /*
1775dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
177674d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
177774d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
177874d2c3a0SOleg Nesterov  *
177974d2c3a0SOleg Nesterov  * Returns the effective policy for a VMA at the specified address.
1780dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
178174d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
178274d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
178374d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
178474d2c3a0SOleg Nesterov  * extra reference for shared policies.
178574d2c3a0SOleg Nesterov  */
1786ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1787dd6eecb9SOleg Nesterov 						unsigned long addr)
178874d2c3a0SOleg Nesterov {
178974d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
179074d2c3a0SOleg Nesterov 
17918d90274bSOleg Nesterov 	if (!pol)
1792dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17938d90274bSOleg Nesterov 
17941da177e4SLinus Torvalds 	return pol;
17951da177e4SLinus Torvalds }
17961da177e4SLinus Torvalds 
17976b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1798fc314724SMel Gorman {
17996b6482bbSOleg Nesterov 	struct mempolicy *pol;
1800f15ca78eSOleg Nesterov 
1801fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1802fc314724SMel Gorman 		bool ret = false;
1803fc314724SMel Gorman 
1804fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1805fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1806fc314724SMel Gorman 			ret = true;
1807fc314724SMel Gorman 		mpol_cond_put(pol);
1808fc314724SMel Gorman 
1809fc314724SMel Gorman 		return ret;
18108d90274bSOleg Nesterov 	}
18118d90274bSOleg Nesterov 
1812fc314724SMel Gorman 	pol = vma->vm_policy;
18138d90274bSOleg Nesterov 	if (!pol)
18146b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1815fc314724SMel Gorman 
1816fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1817fc314724SMel Gorman }
1818fc314724SMel Gorman 
1819d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1820d3eb1570SLai Jiangshan {
1821d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1822d3eb1570SLai Jiangshan 
1823d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1824d3eb1570SLai Jiangshan 
1825d3eb1570SLai Jiangshan 	/*
1826269fbe72SBen Widawsky 	 * if policy->nodes has movable memory only,
1827d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1828d3eb1570SLai Jiangshan 	 *
1829269fbe72SBen Widawsky 	 * policy->nodes is intersect with node_states[N_MEMORY].
1830f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1831269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1832d3eb1570SLai Jiangshan 	 */
1833269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1834d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1835d3eb1570SLai Jiangshan 
1836d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1837d3eb1570SLai Jiangshan }
1838d3eb1570SLai Jiangshan 
183952cd3b07SLee Schermerhorn /*
184052cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
184152cd3b07SLee Schermerhorn  * page allocation
184252cd3b07SLee Schermerhorn  */
18438ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
184419770b32SMel Gorman {
1845b27abaccSDave Hansen 	int mode = policy->mode;
1846b27abaccSDave Hansen 
184719770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1848b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1849d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1850269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1851269fbe72SBen Widawsky 		return &policy->nodes;
185219770b32SMel Gorman 
1853b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1854b27abaccSDave Hansen 		return &policy->nodes;
1855b27abaccSDave Hansen 
185619770b32SMel Gorman 	return NULL;
185719770b32SMel Gorman }
185819770b32SMel Gorman 
1859b27abaccSDave Hansen /*
1860b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1861b27abaccSDave Hansen  * the given id for all other policies.
1862b27abaccSDave Hansen  *
1863b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1864b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1865b27abaccSDave Hansen  */
1866f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18671da177e4SLinus Torvalds {
18687858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1869269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18707858d7bcSFeng Tang 	} else {
187119770b32SMel Gorman 		/*
18726d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18736d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18746d840958SMichal Hocko 		 * requested node and not break the policy.
187519770b32SMel Gorman 		 */
18766d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18771da177e4SLinus Torvalds 	}
18786d840958SMichal Hocko 
1879c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1880c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1881c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1882c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1883c6018b4bSAneesh Kumar K.V 
188404ec6264SVlastimil Babka 	return nd;
18851da177e4SLinus Torvalds }
18861da177e4SLinus Torvalds 
18871da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18881da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18891da177e4SLinus Torvalds {
189045816682SVlastimil Babka 	unsigned next;
18911da177e4SLinus Torvalds 	struct task_struct *me = current;
18921da177e4SLinus Torvalds 
1893269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1894f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
189545816682SVlastimil Babka 		me->il_prev = next;
189645816682SVlastimil Babka 	return next;
18971da177e4SLinus Torvalds }
18981da177e4SLinus Torvalds 
1899dc85da15SChristoph Lameter /*
1900dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1901dc85da15SChristoph Lameter  * next slab entry.
1902dc85da15SChristoph Lameter  */
19032a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1904dc85da15SChristoph Lameter {
1905e7b691b0SAndi Kleen 	struct mempolicy *policy;
19062a389610SDavid Rientjes 	int node = numa_mem_id();
1907e7b691b0SAndi Kleen 
190838b031ddSVasily Averin 	if (!in_task())
19092a389610SDavid Rientjes 		return node;
1910e7b691b0SAndi Kleen 
1911e7b691b0SAndi Kleen 	policy = current->mempolicy;
19127858d7bcSFeng Tang 	if (!policy)
19132a389610SDavid Rientjes 		return node;
1914765c4507SChristoph Lameter 
1915bea904d5SLee Schermerhorn 	switch (policy->mode) {
1916bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1917269fbe72SBen Widawsky 		return first_node(policy->nodes);
1918bea904d5SLee Schermerhorn 
1919dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1920dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1921dc85da15SChristoph Lameter 
1922b27abaccSDave Hansen 	case MPOL_BIND:
1923b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1924b27abaccSDave Hansen 	{
1925c33d6c06SMel Gorman 		struct zoneref *z;
1926c33d6c06SMel Gorman 
1927dc85da15SChristoph Lameter 		/*
1928dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1929dc85da15SChristoph Lameter 		 * first node.
1930dc85da15SChristoph Lameter 		 */
193119770b32SMel Gorman 		struct zonelist *zonelist;
193219770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1933c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1934c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1935269fbe72SBen Widawsky 							&policy->nodes);
1936c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1937dd1a239fSMel Gorman 	}
19387858d7bcSFeng Tang 	case MPOL_LOCAL:
19397858d7bcSFeng Tang 		return node;
1940dc85da15SChristoph Lameter 
1941dc85da15SChristoph Lameter 	default:
1942bea904d5SLee Schermerhorn 		BUG();
1943dc85da15SChristoph Lameter 	}
1944dc85da15SChristoph Lameter }
1945dc85da15SChristoph Lameter 
1946fee83b3aSAndrew Morton /*
1947fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1948269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1949fee83b3aSAndrew Morton  * number of present nodes.
1950fee83b3aSAndrew Morton  */
195198c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19521da177e4SLinus Torvalds {
1953276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1954276aeee1Syanghui 	unsigned int target, nnodes;
1955fee83b3aSAndrew Morton 	int i;
1956fee83b3aSAndrew Morton 	int nid;
1957276aeee1Syanghui 	/*
1958276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1959276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1960276aeee1Syanghui 	 *
1961276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1962276aeee1Syanghui 	 * by other threads. So we put pol->nodes in a local stack.
1963276aeee1Syanghui 	 * by other threads. So we put pol->nodes on the local stack.
1964276aeee1Syanghui 	barrier();
19651da177e4SLinus Torvalds 
1966276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1967f5b087b5SDavid Rientjes 	if (!nnodes)
1968f5b087b5SDavid Rientjes 		return numa_node_id();
1969fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1970276aeee1Syanghui 	nid = first_node(nodemask);
1971fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1972276aeee1Syanghui 		nid = next_node(nid, nodemask);
19731da177e4SLinus Torvalds 	return nid;
19741da177e4SLinus Torvalds }
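
/*
 * Worked example (editor's sketch): with pol->nodes = {0,2,5} and n = 4,
 * nnodes = 3 and target = 4 % 3 = 1, so the walk starts at node 0 and
 * takes one next_node() step, returning node 2.
 */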
19751da177e4SLinus Torvalds 
19765da7ca86SChristoph Lameter /* Determine a node number for interleave */
19775da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19785da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19795da7ca86SChristoph Lameter {
19805da7ca86SChristoph Lameter 	if (vma) {
19815da7ca86SChristoph Lameter 		unsigned long off;
19825da7ca86SChristoph Lameter 
19833b98b087SNishanth Aravamudan 		/*
19843b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19853b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19863b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19873b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19883b98b087SNishanth Aravamudan 		 * a useful offset.
19893b98b087SNishanth Aravamudan 		 */
19903b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19913b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19925da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
199398c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19945da7ca86SChristoph Lameter 	} else
19955da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19965da7ca86SChristoph Lameter }
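
/*
 * Worked example (editor's sketch, assuming 2MB huge pages over 4KB base
 * pages, i.e. shift = 21): for a VMA with vm_pgoff = 0 and
 * vm_start = 0x40000000, an address of 0x40400000 yields
 * off = 0 + (0x400000 >> 21) = 2, i.e. the third huge page in the
 * mapping, which offset_il_node() then maps to an interleave node.
 */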
19975da7ca86SChristoph Lameter 
199800ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1999480eccf9SLee Schermerhorn /*
200004ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2001b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2002b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2003b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2004b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2005b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2006480eccf9SLee Schermerhorn  *
200704ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
200852cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2009b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2010b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2011c0ff7453SMiao Xie  *
2012d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2013480eccf9SLee Schermerhorn  */
201404ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
201504ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20165da7ca86SChristoph Lameter {
201704ec6264SVlastimil Babka 	int nid;
2018b27abaccSDave Hansen 	int mode;
20195da7ca86SChristoph Lameter 
2020dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2021b27abaccSDave Hansen 	*nodemask = NULL;
2022b27abaccSDave Hansen 	mode = (*mpol)->mode;
20235da7ca86SChristoph Lameter 
2024b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
202504ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
202604ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
202752cd3b07SLee Schermerhorn 	} else {
202804ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2029b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2030269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2031480eccf9SLee Schermerhorn 	}
203204ec6264SVlastimil Babka 	return nid;
20335da7ca86SChristoph Lameter }
203406808b08SLee Schermerhorn 
203506808b08SLee Schermerhorn /*
203606808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
203706808b08SLee Schermerhorn  *
203806808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
203906808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
204006808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
204106808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
204206808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
204306808b08SLee Schermerhorn  * of non-default mempolicy.
204406808b08SLee Schermerhorn  *
204506808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
204606808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
204706808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
204806808b08SLee Schermerhorn  *
204906808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
205006808b08SLee Schermerhorn  */
205106808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
205206808b08SLee Schermerhorn {
205306808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
205406808b08SLee Schermerhorn 
205506808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
205606808b08SLee Schermerhorn 		return false;
205706808b08SLee Schermerhorn 
2058c0ff7453SMiao Xie 	task_lock(current);
205906808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
206006808b08SLee Schermerhorn 	switch (mempolicy->mode) {
206106808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2062b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
206306808b08SLee Schermerhorn 	case MPOL_BIND:
206406808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2065269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
206606808b08SLee Schermerhorn 		break;
206706808b08SLee Schermerhorn 
20687858d7bcSFeng Tang 	case MPOL_LOCAL:
2069269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20707858d7bcSFeng Tang 		break;
20717858d7bcSFeng Tang 
207206808b08SLee Schermerhorn 	default:
207306808b08SLee Schermerhorn 		BUG();
207406808b08SLee Schermerhorn 	}
2075c0ff7453SMiao Xie 	task_unlock(current);
207606808b08SLee Schermerhorn 
207706808b08SLee Schermerhorn 	return true;
207806808b08SLee Schermerhorn }
207900ac59adSChen, Kenneth W #endif
20805da7ca86SChristoph Lameter 
20816f48d0ebSDavid Rientjes /*
2082b26e517aSFeng Tang  * mempolicy_in_oom_domain
20836f48d0ebSDavid Rientjes  *
2084b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2085b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2086b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2087b26e517aSFeng Tang  * memory allocated from all nodes in the system.
20886f48d0ebSDavid Rientjes  *
20896f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20906f48d0ebSDavid Rientjes  */
2091b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20926f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20936f48d0ebSDavid Rientjes {
20946f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20956f48d0ebSDavid Rientjes 	bool ret = true;
20966f48d0ebSDavid Rientjes 
20976f48d0ebSDavid Rientjes 	if (!mask)
20986f48d0ebSDavid Rientjes 		return ret;
2099b26e517aSFeng Tang 
21006f48d0ebSDavid Rientjes 	task_lock(tsk);
21016f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2102b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2103269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
21046f48d0ebSDavid Rientjes 	task_unlock(tsk);
2105b26e517aSFeng Tang 
21066f48d0ebSDavid Rientjes 	return ret;
21076f48d0ebSDavid Rientjes }
21086f48d0ebSDavid Rientjes 
21091da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21101da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2111662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2112662f3a0bSAndi Kleen 					unsigned nid)
21131da177e4SLinus Torvalds {
21141da177e4SLinus Torvalds 	struct page *page;
21151da177e4SLinus Torvalds 
211684172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21174518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21184518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21194518085eSKemi Wang 		return page;
2120de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2121de55c8b2SAndrey Ryabinin 		preempt_disable();
2122f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2123de55c8b2SAndrey Ryabinin 		preempt_enable();
2124de55c8b2SAndrey Ryabinin 	}
21251da177e4SLinus Torvalds 	return page;
21261da177e4SLinus Torvalds }
21271da177e4SLinus Torvalds 
21284c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21294c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21304c54d949SFeng Tang {
21314c54d949SFeng Tang 	struct page *page;
21324c54d949SFeng Tang 	gfp_t preferred_gfp;
21334c54d949SFeng Tang 
21344c54d949SFeng Tang 	/*
21354c54d949SFeng Tang 	 * This is a two-pass approach. The first pass will only try the
21364c54d949SFeng Tang 	 * preferred nodes but skip direct reclaim and allow the
21374c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21384c54d949SFeng Tang 	 * nodes in the system.
21394c54d949SFeng Tang 	 */
21404c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21414c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21424c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21434c54d949SFeng Tang 	if (!page)
2144c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21454c54d949SFeng Tang 
21464c54d949SFeng Tang 	return page;
21474c54d949SFeng Tang }
21484c54d949SFeng Tang 
21491da177e4SLinus Torvalds /**
2150adf88aa8SMatthew Wilcox (Oracle)  * vma_alloc_folio - Allocate a folio for a VMA.
2151eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2152adf88aa8SMatthew Wilcox (Oracle)  * @order: Order of the folio.
21531da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2154eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2155eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21561da177e4SLinus Torvalds  *
2157adf88aa8SMatthew Wilcox (Oracle)  * Allocate a folio for a specific address in @vma, using the appropriate
2158eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2159eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2160adf88aa8SMatthew Wilcox (Oracle)  * used for all allocations for folios that will be mapped into user space.
2161eb350739SMatthew Wilcox (Oracle)  *
2162adf88aa8SMatthew Wilcox (Oracle)  * Return: The folio on success or NULL if allocation fails.
21631da177e4SLinus Torvalds  */
2164adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2165be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21661da177e4SLinus Torvalds {
2167cc9a6c87SMel Gorman 	struct mempolicy *pol;
2168be1a13ebSMichal Hocko 	int node = numa_node_id();
2169adf88aa8SMatthew Wilcox (Oracle) 	struct folio *folio;
217004ec6264SVlastimil Babka 	int preferred_nid;
2171be97a41bSVlastimil Babka 	nodemask_t *nmask;
21721da177e4SLinus Torvalds 
2173dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2174cc9a6c87SMel Gorman 
2175be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
2176adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
21771da177e4SLinus Torvalds 		unsigned nid;
21785da7ca86SChristoph Lameter 
21798eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
218052cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
2181adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21820bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2183adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2184adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2185adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
2186be97a41bSVlastimil Babka 		goto out;
21871da177e4SLinus Torvalds 	}
21881da177e4SLinus Torvalds 
21894c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2190adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
2191adf88aa8SMatthew Wilcox (Oracle) 
2192c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
2193adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21944c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21954c54d949SFeng Tang 		mpol_cond_put(pol);
2196adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2197adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2198adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
21994c54d949SFeng Tang 		goto out;
22004c54d949SFeng Tang 	}
22014c54d949SFeng Tang 
220219deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
220319deb769SDavid Rientjes 		int hpage_node = node;
220419deb769SDavid Rientjes 
220519deb769SDavid Rientjes 		/*
220619deb769SDavid Rientjes 		 * For hugepage allocation and a non-interleave policy that
220719deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
220819deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
220919deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
221019deb769SDavid Rientjes 		 * remote accesses would likely offset the THP benefits.
221119deb769SDavid Rientjes 		 *
2212b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
221319deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
221419deb769SDavid Rientjes 		 */
22157858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2216269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
221719deb769SDavid Rientjes 
221819deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
221919deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
222019deb769SDavid Rientjes 			mpol_cond_put(pol);
2221cc638f32SVlastimil Babka 			/*
2222cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2223cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2224cc638f32SVlastimil Babka 			 */
2225adf88aa8SMatthew Wilcox (Oracle) 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2226adf88aa8SMatthew Wilcox (Oracle) 					__GFP_NORETRY, order, hpage_node);
222776e654ccSDavid Rientjes 
222876e654ccSDavid Rientjes 			/*
222976e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
223076e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
223176e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2232cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
223376e654ccSDavid Rientjes 			 */
2234adf88aa8SMatthew Wilcox (Oracle) 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2235adf88aa8SMatthew Wilcox (Oracle) 				folio = __folio_alloc(gfp, order, hpage_node,
2236adf88aa8SMatthew Wilcox (Oracle) 						      nmask);
223776e654ccSDavid Rientjes 
223819deb769SDavid Rientjes 			goto out;
223919deb769SDavid Rientjes 		}
224019deb769SDavid Rientjes 	}
224119deb769SDavid Rientjes 
2242077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
224304ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2244adf88aa8SMatthew Wilcox (Oracle) 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2245d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2246be97a41bSVlastimil Babka out:
2247f584b680SMatthew Wilcox (Oracle) 	return folio;
2248f584b680SMatthew Wilcox (Oracle) }
2249adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio);
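/*
 * Usage sketch (illustrative; the fault-handler context is assumed, not
 * taken from this file): allocate a single zeroed, user-mappable folio
 * that honours the VMA's NUMA policy.  The mmap_lock must be held, as
 * documented above.
 *
 *	struct folio *folio;
 *
 *	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0,
 *				vma, addr, false);
 *	if (!folio)
 *		return VM_FAULT_OOM;
 *	... map folio_page(folio, 0) at addr, or folio_put() on failure ...
 */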
2250f584b680SMatthew Wilcox (Oracle) 
22511da177e4SLinus Torvalds /**
2252d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22536421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22546421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of the number of pages to allocate.
22551da177e4SLinus Torvalds  *
22566421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22576421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (eg an order-3 allocation will be aligned
22586421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22596421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22601da177e4SLinus Torvalds  *
22616421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22626421ec76SMatthew Wilcox (Oracle)  * flags are used.
22636421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22641da177e4SLinus Torvalds  */
2265d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22661da177e4SLinus Torvalds {
22678d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2268c0ff7453SMiao Xie 	struct page *page;
22691da177e4SLinus Torvalds 
22708d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22718d90274bSOleg Nesterov 		pol = get_task_policy(current);
227252cd3b07SLee Schermerhorn 
227352cd3b07SLee Schermerhorn 	/*
227452cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
227552cd3b07SLee Schermerhorn 	 * nor system default_policy
227652cd3b07SLee Schermerhorn 	 */
227745c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2278c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22794c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22804c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2281c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2282c0ff7453SMiao Xie 	else
228384172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
228404ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22855c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2286cc9a6c87SMel Gorman 
2287c0ff7453SMiao Xie 	return page;
22881da177e4SLinus Torvalds }
2289d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
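/*
 * Usage sketch (illustrative): allocate four physically contiguous pages
 * under the current task's policy and free them again.  page_address() is
 * valid here because GFP_KERNEL never returns highmem.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *buf = page_address(page);
 *
 *		memset(buf, 0, 4 * PAGE_SIZE);
 *		__free_pages(page, 2);
 *	}
 */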
22901da177e4SLinus Torvalds 
2291cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2292cc09cb13SMatthew Wilcox (Oracle) {
2293cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2294cc09cb13SMatthew Wilcox (Oracle) 
2295cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2296cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2297cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2298cc09cb13SMatthew Wilcox (Oracle) }
2299cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
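/*
 * Usage sketch (illustrative): folio_alloc() pairs with folio_put(); an
 * order-2 folio covers four pages.
 *
 *	struct folio *folio = folio_alloc(GFP_KERNEL, 2);
 *
 *	if (folio) {
 *		void *buf = folio_address(folio);
 *
 *		...
 *		folio_put(folio);
 *	}
 */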
2300cc09cb13SMatthew Wilcox (Oracle) 
2301c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2302c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2303c00b6b96SChen Wandun 		struct page **page_array)
2304c00b6b96SChen Wandun {
2305c00b6b96SChen Wandun 	int nodes;
2306c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2307c00b6b96SChen Wandun 	int delta;
2308c00b6b96SChen Wandun 	int i;
2309c00b6b96SChen Wandun 	unsigned long nr_allocated;
2310c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2311c00b6b96SChen Wandun 
2312c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2313c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2314c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2315c00b6b96SChen Wandun 
2316c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2317c00b6b96SChen Wandun 		if (delta) {
2318c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2319c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2320c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2321c00b6b96SChen Wandun 					page_array);
2322c00b6b96SChen Wandun 			delta--;
2323c00b6b96SChen Wandun 		} else {
2324c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2325c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2326c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2327c00b6b96SChen Wandun 		}
2328c00b6b96SChen Wandun 
2329c00b6b96SChen Wandun 		page_array += nr_allocated;
2330c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2331c00b6b96SChen Wandun 	}
2332c00b6b96SChen Wandun 
2333c00b6b96SChen Wandun 	return total_allocated;
2334c00b6b96SChen Wandun }
2335c00b6b96SChen Wandun 
2336c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2337c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2338c00b6b96SChen Wandun 		struct page **page_array)
2339c00b6b96SChen Wandun {
2340c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2341c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2342c00b6b96SChen Wandun 
2343c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2344c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2345c00b6b96SChen Wandun 
2346c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2347c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2348c00b6b96SChen Wandun 
2349c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2350c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2351c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2352c00b6b96SChen Wandun 				page_array + nr_allocated);
2353c00b6b96SChen Wandun 	return nr_allocated;
2354c00b6b96SChen Wandun }
2355c00b6b96SChen Wandun 
2356c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy need to be considered
2357c00b6b96SChen Wandun  * together in some situations, such as vmalloc.
2358c00b6b96SChen Wandun  *
2359c00b6b96SChen Wandun  * Doing so can accelerate memory allocation, especially for
2360c00b6b96SChen Wandun  * interleaved allocations.
2361c00b6b96SChen Wandun  */
2362c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2363c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2364c00b6b96SChen Wandun {
2365c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2366c00b6b96SChen Wandun 
2367c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2368c00b6b96SChen Wandun 		pol = get_task_policy(current);
2369c00b6b96SChen Wandun 
2370c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2371c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2372c00b6b96SChen Wandun 							 nr_pages, page_array);
2373c00b6b96SChen Wandun 
2374c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2375c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2376c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2377c00b6b96SChen Wandun 
2378c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2379c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2380c00b6b96SChen Wandun 				  page_array);
2381c00b6b96SChen Wandun }
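/*
 * Usage sketch (illustrative; "nr" and the fallback step are assumed
 * caller context): a vmalloc-like user can fill a page array in one call
 * instead of looping over alloc_pages().  The return value is the number
 * of pages actually allocated, which may be less than requested.
 *
 *	struct page **pages = kvcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *	unsigned long got;
 *
 *	got = alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr, pages);
 *	if (got < nr)
 *		... allocate the remaining nr - got pages one by one ...
 */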
2382c00b6b96SChen Wandun 
2383ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2384ef0855d3SOleg Nesterov {
2385ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2386ef0855d3SOleg Nesterov 
2387ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2388ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2389ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2390ef0855d3SOleg Nesterov 	return 0;
2391ef0855d3SOleg Nesterov }
2392ef0855d3SOleg Nesterov 
23934225399aSPaul Jackson /*
2394846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23954225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
23964225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23974225399aSPaul Jackson  * keeps mempolicies cpuset-relative after their cpuset moves.  See
23984225399aSPaul Jackson  * also kernel/cpuset.c update_nodemask().
2399708c1bbcSMiao Xie  *
2400708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2401708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
24024225399aSPaul Jackson  */
24034225399aSPaul Jackson 
2404846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2405846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24061da177e4SLinus Torvalds {
24071da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24081da177e4SLinus Torvalds 
24091da177e4SLinus Torvalds 	if (!new)
24101da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2411708c1bbcSMiao Xie 
2412708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2413708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2414708c1bbcSMiao Xie 		task_lock(current);
2415708c1bbcSMiao Xie 		*new = *old;
2416708c1bbcSMiao Xie 		task_unlock(current);
2417708c1bbcSMiao Xie 	} else
2418708c1bbcSMiao Xie 		*new = *old;
2419708c1bbcSMiao Xie 
24204225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24214225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2422213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24234225399aSPaul Jackson 	}
24241da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24251da177e4SLinus Torvalds 	return new;
24261da177e4SLinus Torvalds }
24271da177e4SLinus Torvalds 
24281da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2429fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24301da177e4SLinus Torvalds {
24311da177e4SLinus Torvalds 	if (!a || !b)
2432fcfb4dccSKOSAKI Motohiro 		return false;
243345c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2434fcfb4dccSKOSAKI Motohiro 		return false;
243519800502SBob Liu 	if (a->flags != b->flags)
2436fcfb4dccSKOSAKI Motohiro 		return false;
2437c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2438c6018b4bSAneesh Kumar K.V 		return false;
243919800502SBob Liu 	if (mpol_store_user_nodemask(a))
244019800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2441fcfb4dccSKOSAKI Motohiro 			return false;
244219800502SBob Liu 
244345c4745aSLee Schermerhorn 	switch (a->mode) {
244419770b32SMel Gorman 	case MPOL_BIND:
24451da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24461da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2447b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2448269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24497858d7bcSFeng Tang 	case MPOL_LOCAL:
24507858d7bcSFeng Tang 		return true;
24511da177e4SLinus Torvalds 	default:
24521da177e4SLinus Torvalds 		BUG();
2453fcfb4dccSKOSAKI Motohiro 		return false;
24541da177e4SLinus Torvalds 	}
24551da177e4SLinus Torvalds }
24561da177e4SLinus Torvalds 
24571da177e4SLinus Torvalds /*
24581da177e4SLinus Torvalds  * Shared memory backing store policy support.
24591da177e4SLinus Torvalds  *
24601da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24611da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
24624a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24631da177e4SLinus Torvalds  * for any accesses to the tree.
24641da177e4SLinus Torvalds  */
24651da177e4SLinus Torvalds 
24664a8c7bb5SNathan Zimmer /*
24674a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock for
24684a8c7bb5SNathan Zimmer  * reading or for writing
24694a8c7bb5SNathan Zimmer  */
24701da177e4SLinus Torvalds static struct sp_node *
24711da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24721da177e4SLinus Torvalds {
24731da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24741da177e4SLinus Torvalds 
24751da177e4SLinus Torvalds 	while (n) {
24761da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24771da177e4SLinus Torvalds 
24781da177e4SLinus Torvalds 		if (start >= p->end)
24791da177e4SLinus Torvalds 			n = n->rb_right;
24801da177e4SLinus Torvalds 		else if (end <= p->start)
24811da177e4SLinus Torvalds 			n = n->rb_left;
24821da177e4SLinus Torvalds 		else
24831da177e4SLinus Torvalds 			break;
24841da177e4SLinus Torvalds 	}
24851da177e4SLinus Torvalds 	if (!n)
24861da177e4SLinus Torvalds 		return NULL;
24871da177e4SLinus Torvalds 	for (;;) {
24881da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24891da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24901da177e4SLinus Torvalds 		if (!prev)
24911da177e4SLinus Torvalds 			break;
24921da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24931da177e4SLinus Torvalds 		if (w->end <= start)
24941da177e4SLinus Torvalds 			break;
24951da177e4SLinus Torvalds 		n = prev;
24961da177e4SLinus Torvalds 	}
24971da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24981da177e4SLinus Torvalds }
24991da177e4SLinus Torvalds 
25004a8c7bb5SNathan Zimmer /*
25014a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
25024a8c7bb5SNathan Zimmer  * writing.
25034a8c7bb5SNathan Zimmer  */
25041da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
25051da177e4SLinus Torvalds {
25061da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
25071da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
25081da177e4SLinus Torvalds 	struct sp_node *nd;
25091da177e4SLinus Torvalds 
25101da177e4SLinus Torvalds 	while (*p) {
25111da177e4SLinus Torvalds 		parent = *p;
25121da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25131da177e4SLinus Torvalds 		if (new->start < nd->start)
25141da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25151da177e4SLinus Torvalds 		else if (new->end > nd->end)
25161da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25171da177e4SLinus Torvalds 		else
25181da177e4SLinus Torvalds 			BUG();
25191da177e4SLinus Torvalds 	}
25201da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25211da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2522140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
252345c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25241da177e4SLinus Torvalds }
25251da177e4SLinus Torvalds 
25261da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25271da177e4SLinus Torvalds struct mempolicy *
25281da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25291da177e4SLinus Torvalds {
25301da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25311da177e4SLinus Torvalds 	struct sp_node *sn;
25321da177e4SLinus Torvalds 
25331da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25341da177e4SLinus Torvalds 		return NULL;
25354a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25361da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25371da177e4SLinus Torvalds 	if (sn) {
25381da177e4SLinus Torvalds 		mpol_get(sn->policy);
25391da177e4SLinus Torvalds 		pol = sn->policy;
25401da177e4SLinus Torvalds 	}
25414a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25421da177e4SLinus Torvalds 	return pol;
25431da177e4SLinus Torvalds }
25441da177e4SLinus Torvalds 
254563f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
254663f74ca2SKOSAKI Motohiro {
254763f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
254863f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
254963f74ca2SKOSAKI Motohiro }
255063f74ca2SKOSAKI Motohiro 
2551771fb4d8SLee Schermerhorn /**
2552771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2553771fb4d8SLee Schermerhorn  *
2554b46e14acSFabian Frederick  * @page: page to be checked
2555b46e14acSFabian Frederick  * @vma: vm area where page mapped
2556b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2557771fb4d8SLee Schermerhorn  *
2558771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma, addr and "compare to" the page's
25595f076944SMatthew Wilcox (Oracle)  * node id.  Policy determination "mimics" alloc_page_vma().
2560771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25615f076944SMatthew Wilcox (Oracle)  *
2562062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2563062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2564771fb4d8SLee Schermerhorn  */
2565771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2566771fb4d8SLee Schermerhorn {
2567771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2568c33d6c06SMel Gorman 	struct zoneref *z;
2569771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2570771fb4d8SLee Schermerhorn 	unsigned long pgoff;
257190572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
257290572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
257398fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2574062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2575771fb4d8SLee Schermerhorn 
2576dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2577771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2578771fb4d8SLee Schermerhorn 		goto out;
2579771fb4d8SLee Schermerhorn 
2580771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2581771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2582771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2583771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
258498c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2585771fb4d8SLee Schermerhorn 		break;
2586771fb4d8SLee Schermerhorn 
2587771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2588b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2589b27abaccSDave Hansen 			goto out;
2590269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2591771fb4d8SLee Schermerhorn 		break;
2592771fb4d8SLee Schermerhorn 
25937858d7bcSFeng Tang 	case MPOL_LOCAL:
25947858d7bcSFeng Tang 		polnid = numa_node_id();
25957858d7bcSFeng Tang 		break;
25967858d7bcSFeng Tang 
2597771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2598bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2599bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2600269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2601bda420b9SHuang Ying 				break;
2602bda420b9SHuang Ying 			goto out;
2603bda420b9SHuang Ying 		}
2604b27abaccSDave Hansen 		fallthrough;
2605c33d6c06SMel Gorman 
2606b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2607771fb4d8SLee Schermerhorn 		/*
2608771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2609771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2610771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2611771fb4d8SLee Schermerhorn 		 */
2612269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2613771fb4d8SLee Schermerhorn 			goto out;
2614c33d6c06SMel Gorman 		z = first_zones_zonelist(
2615771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2616771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2617269fbe72SBen Widawsky 				&pol->nodes);
2618c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2619771fb4d8SLee Schermerhorn 		break;
2620771fb4d8SLee Schermerhorn 
2621771fb4d8SLee Schermerhorn 	default:
2622771fb4d8SLee Schermerhorn 		BUG();
2623771fb4d8SLee Schermerhorn 	}
26245606e387SMel Gorman 
26255606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2626e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
262790572890SPeter Zijlstra 		polnid = thisnid;
26285606e387SMel Gorman 
262910f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2630de1c9ce6SRik van Riel 			goto out;
2631de1c9ce6SRik van Riel 	}
2632e42c8ff2SMel Gorman 
2633771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2634771fb4d8SLee Schermerhorn 		ret = polnid;
2635771fb4d8SLee Schermerhorn out:
2636771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2637771fb4d8SLee Schermerhorn 
2638771fb4d8SLee Schermerhorn 	return ret;
2639771fb4d8SLee Schermerhorn }
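/*
 * Usage sketch (illustrative; the hinting-fault context is assumed): a
 * NUMA hinting fault handler asks whether the faulting page is misplaced
 * and, if so, tries to migrate it to the suggested node.
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid == NUMA_NO_NODE)
 *		... leave the page where it is ...
 *	else if (migrate_misplaced_page(page, vma, target_nid))
 *		... the page now lives on target_nid ...
 */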
2640771fb4d8SLee Schermerhorn 
2641c11600e4SDavid Rientjes /*
2642c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2643c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2644c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2645c11600e4SDavid Rientjes  * policy.
2646c11600e4SDavid Rientjes  */
2647c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2648c11600e4SDavid Rientjes {
2649c11600e4SDavid Rientjes 	struct mempolicy *pol;
2650c11600e4SDavid Rientjes 
2651c11600e4SDavid Rientjes 	task_lock(task);
2652c11600e4SDavid Rientjes 	pol = task->mempolicy;
2653c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2654c11600e4SDavid Rientjes 	task_unlock(task);
2655c11600e4SDavid Rientjes 	mpol_put(pol);
2656c11600e4SDavid Rientjes }
2657c11600e4SDavid Rientjes 
26581da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26591da177e4SLinus Torvalds {
2660140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26611da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
266263f74ca2SKOSAKI Motohiro 	sp_free(n);
26631da177e4SLinus Torvalds }
26641da177e4SLinus Torvalds 
266542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
266642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
266742288fe3SMel Gorman {
266842288fe3SMel Gorman 	node->start = start;
266942288fe3SMel Gorman 	node->end = end;
267042288fe3SMel Gorman 	node->policy = pol;
267142288fe3SMel Gorman }
267242288fe3SMel Gorman 
2673dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2674dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26751da177e4SLinus Torvalds {
2676869833f2SKOSAKI Motohiro 	struct sp_node *n;
2677869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26781da177e4SLinus Torvalds 
2679869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26801da177e4SLinus Torvalds 	if (!n)
26811da177e4SLinus Torvalds 		return NULL;
2682869833f2SKOSAKI Motohiro 
2683869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2684869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2685869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2686869833f2SKOSAKI Motohiro 		return NULL;
2687869833f2SKOSAKI Motohiro 	}
2688869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
268942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2690869833f2SKOSAKI Motohiro 
26911da177e4SLinus Torvalds 	return n;
26921da177e4SLinus Torvalds }
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds /* Replace a policy range. */
26951da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26961da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26971da177e4SLinus Torvalds {
2698b22d127aSMel Gorman 	struct sp_node *n;
269942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
270042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2701b22d127aSMel Gorman 	int ret = 0;
27021da177e4SLinus Torvalds 
270342288fe3SMel Gorman restart:
27044a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
27051da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
27061da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
27071da177e4SLinus Torvalds 	while (n && n->start < end) {
27081da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
27091da177e4SLinus Torvalds 		if (n->start >= start) {
27101da177e4SLinus Torvalds 			if (n->end <= end)
27111da177e4SLinus Torvalds 				sp_delete(sp, n);
27121da177e4SLinus Torvalds 			else
27131da177e4SLinus Torvalds 				n->start = end;
27141da177e4SLinus Torvalds 		} else {
27151da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27161da177e4SLinus Torvalds 			if (n->end > end) {
271742288fe3SMel Gorman 				if (!n_new)
271842288fe3SMel Gorman 					goto alloc_new;
271942288fe3SMel Gorman 
272042288fe3SMel Gorman 				*mpol_new = *n->policy;
272142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27227880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27231da177e4SLinus Torvalds 				n->end = start;
27245ca39575SHillf Danton 				sp_insert(sp, n_new);
272542288fe3SMel Gorman 				n_new = NULL;
272642288fe3SMel Gorman 				mpol_new = NULL;
27271da177e4SLinus Torvalds 				break;
27281da177e4SLinus Torvalds 			} else
27291da177e4SLinus Torvalds 				n->end = start;
27301da177e4SLinus Torvalds 		}
27311da177e4SLinus Torvalds 		if (!next)
27321da177e4SLinus Torvalds 			break;
27331da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27341da177e4SLinus Torvalds 	}
27351da177e4SLinus Torvalds 	if (new)
27361da177e4SLinus Torvalds 		sp_insert(sp, new);
27374a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
273842288fe3SMel Gorman 	ret = 0;
273942288fe3SMel Gorman 
274042288fe3SMel Gorman err_out:
274142288fe3SMel Gorman 	if (mpol_new)
274242288fe3SMel Gorman 		mpol_put(mpol_new);
274342288fe3SMel Gorman 	if (n_new)
274442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
274542288fe3SMel Gorman 
2746b22d127aSMel Gorman 	return ret;
274742288fe3SMel Gorman 
274842288fe3SMel Gorman alloc_new:
27494a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
275042288fe3SMel Gorman 	ret = -ENOMEM;
275142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
275242288fe3SMel Gorman 	if (!n_new)
275342288fe3SMel Gorman 		goto err_out;
275442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275542288fe3SMel Gorman 	if (!mpol_new)
275642288fe3SMel Gorman 		goto err_out;
27574ad09955SMiaohe Lin 	atomic_set(&mpol_new->refcnt, 1);
275842288fe3SMel Gorman 	goto restart;
27591da177e4SLinus Torvalds }
27601da177e4SLinus Torvalds 
276171fe804bSLee Schermerhorn /**
276271fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
276371fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
276471fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
276571fe804bSLee Schermerhorn  *
276671fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
276771fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
276871fe804bSLee Schermerhorn  * This must be released on exit.
27694bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so we can use GFP_KERNEL.
277071fe804bSLee Schermerhorn  */
277171fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27727339ff83SRobin Holt {
277358568d2aSMiao Xie 	int ret;
277458568d2aSMiao Xie 
277571fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27764a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27777339ff83SRobin Holt 
277871fe804bSLee Schermerhorn 	if (mpol) {
27797339ff83SRobin Holt 		struct vm_area_struct pvma;
278071fe804bSLee Schermerhorn 		struct mempolicy *new;
27814bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27827339ff83SRobin Holt 
27834bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27845c0c1654SLee Schermerhorn 			goto put_mpol;
278571fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
278671fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
278715d77835SLee Schermerhorn 		if (IS_ERR(new))
27880cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
278958568d2aSMiao Xie 
279058568d2aSMiao Xie 		task_lock(current);
27914bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
279258568d2aSMiao Xie 		task_unlock(current);
279315d77835SLee Schermerhorn 		if (ret)
27945c0c1654SLee Schermerhorn 			goto put_new;
279571fe804bSLee Schermerhorn 
279671fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27972c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
279871fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
279971fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
280015d77835SLee Schermerhorn 
28015c0c1654SLee Schermerhorn put_new:
280271fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
28030cae3457SDan Carpenter free_scratch:
28044bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
28055c0c1654SLee Schermerhorn put_mpol:
28065c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
28077339ff83SRobin Holt 	}
28087339ff83SRobin Holt }
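/*
 * Usage sketch (illustrative; "info" and "sbmpol" are assumed names from a
 * tmpfs-like caller): initialize the per-inode tree at inode creation
 * time, handing over the reference on the mount-wide policy (which may be
 * NULL), and tear it down again from the eviction path.
 *
 *	mpol_shared_policy_init(&info->policy, sbmpol);
 *	...
 *	mpol_free_shared_policy(&info->policy);
 */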
28097339ff83SRobin Holt 
28101da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
28111da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28121da177e4SLinus Torvalds {
28131da177e4SLinus Torvalds 	int err;
28141da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28151da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28161da177e4SLinus Torvalds 
2817028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28181da177e4SLinus Torvalds 		 vma->vm_pgoff,
281945c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2820028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2821269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28221da177e4SLinus Torvalds 
28231da177e4SLinus Torvalds 	if (npol) {
28241da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28251da177e4SLinus Torvalds 		if (!new)
28261da177e4SLinus Torvalds 			return -ENOMEM;
28271da177e4SLinus Torvalds 	}
28281da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28291da177e4SLinus Torvalds 	if (err && new)
283063f74ca2SKOSAKI Motohiro 		sp_free(new);
28311da177e4SLinus Torvalds 	return err;
28321da177e4SLinus Torvalds }
28331da177e4SLinus Torvalds 
28341da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28351da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28361da177e4SLinus Torvalds {
28371da177e4SLinus Torvalds 	struct sp_node *n;
28381da177e4SLinus Torvalds 	struct rb_node *next;
28391da177e4SLinus Torvalds 
28401da177e4SLinus Torvalds 	if (!p->root.rb_node)
28411da177e4SLinus Torvalds 		return;
28424a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28431da177e4SLinus Torvalds 	next = rb_first(&p->root);
28441da177e4SLinus Torvalds 	while (next) {
28451da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28461da177e4SLinus Torvalds 		next = rb_next(&n->nd);
284763f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28481da177e4SLinus Torvalds 	}
28494a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28501da177e4SLinus Torvalds }
28511da177e4SLinus Torvalds 
28521a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2853c297663cSMel Gorman static int __initdata numabalancing_override;
28541a687c2eSMel Gorman 
28551a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28561a687c2eSMel Gorman {
28571a687c2eSMel Gorman 	bool numabalancing_default = false;
28581a687c2eSMel Gorman 
28591a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28601a687c2eSMel Gorman 		numabalancing_default = true;
28611a687c2eSMel Gorman 
2862c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2863c297663cSMel Gorman 	if (numabalancing_override)
2864c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2865c297663cSMel Gorman 
2866b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2867756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2868c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28691a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28701a687c2eSMel Gorman 	}
28711a687c2eSMel Gorman }
28721a687c2eSMel Gorman 
28731a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28741a687c2eSMel Gorman {
28751a687c2eSMel Gorman 	int ret = 0;
28761a687c2eSMel Gorman 	if (!str)
28771a687c2eSMel Gorman 		goto out;
28781a687c2eSMel Gorman 
28791a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2880c297663cSMel Gorman 		numabalancing_override = 1;
28811a687c2eSMel Gorman 		ret = 1;
28821a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2883c297663cSMel Gorman 		numabalancing_override = -1;
28841a687c2eSMel Gorman 		ret = 1;
28851a687c2eSMel Gorman 	}
28861a687c2eSMel Gorman out:
28871a687c2eSMel Gorman 	if (!ret)
28884a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28891a687c2eSMel Gorman 
28901a687c2eSMel Gorman 	return ret;
28911a687c2eSMel Gorman }
28921a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
28931a687c2eSMel Gorman #else
28941a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28951a687c2eSMel Gorman {
28961a687c2eSMel Gorman }
28971a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
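/*
 * Usage sketch (illustrative): setup_numabalancing() above consumes the
 * boot parameter, so booting with
 *
 *	numa_balancing=disable
 *
 * forces the feature off regardless of the node count and of the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default.
 */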
28981a687c2eSMel Gorman 
28991da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
29001da177e4SLinus Torvalds void __init numa_policy_init(void)
29011da177e4SLinus Torvalds {
2902b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2903b71636e2SPaul Mundt 	unsigned long largest = 0;
2904b71636e2SPaul Mundt 	int nid, prefer = 0;
2905b71636e2SPaul Mundt 
29061da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
29071da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
290820c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
29091da177e4SLinus Torvalds 
29101da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
29111da177e4SLinus Torvalds 				     sizeof(struct sp_node),
291220c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29131da177e4SLinus Torvalds 
29145606e387SMel Gorman 	for_each_node(nid) {
29155606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29165606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29175606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29185606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2919269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29205606e387SMel Gorman 		};
29215606e387SMel Gorman 	}
29225606e387SMel Gorman 
2923b71636e2SPaul Mundt 	/*
2924b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2925b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2926b71636e2SPaul Mundt 	 * back to the largest node if they're all smaller.
2927b71636e2SPaul Mundt 	 */
2928b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
292901f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2930b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29311da177e4SLinus Torvalds 
2932b71636e2SPaul Mundt 		/* Preserve the largest node */
2933b71636e2SPaul Mundt 		if (largest < total_pages) {
2934b71636e2SPaul Mundt 			largest = total_pages;
2935b71636e2SPaul Mundt 			prefer = nid;
2936b71636e2SPaul Mundt 		}
2937b71636e2SPaul Mundt 
2938b71636e2SPaul Mundt 		/* Interleave this node? */
2939b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2940b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2941b71636e2SPaul Mundt 	}
2942b71636e2SPaul Mundt 
2943b71636e2SPaul Mundt 	/* All too small, use the largest */
2944b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2945b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2946b71636e2SPaul Mundt 
2947028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2948b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29491a687c2eSMel Gorman 
29501a687c2eSMel Gorman 	check_numabalancing_enable();
29511da177e4SLinus Torvalds }
29521da177e4SLinus Torvalds 
29538bccd85fSChristoph Lameter /* Reset policy of current process to default */
29541da177e4SLinus Torvalds void numa_default_policy(void)
29551da177e4SLinus Torvalds {
2956028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29571da177e4SLinus Torvalds }
295868860ec1SPaul Jackson 
29594225399aSPaul Jackson /*
2960095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2961095f1fc4SLee Schermerhorn  */
2962095f1fc4SLee Schermerhorn 
2963345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2964345ace9cSLee Schermerhorn {
2965345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2966345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2967345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2968345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2969d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2970b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2971345ace9cSLee Schermerhorn };
29721a75a6c8SChristoph Lameter 
2973095f1fc4SLee Schermerhorn 
2974095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2975095f1fc4SLee Schermerhorn /**
2976f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2977095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
297871fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2979095f1fc4SLee Schermerhorn  *
2980095f1fc4SLee Schermerhorn  * Format of input:
2981095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2982095f1fc4SLee Schermerhorn  *
2983dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2984095f1fc4SLee Schermerhorn  */
2985a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2986095f1fc4SLee Schermerhorn {
298771fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2988f2a07f40SHugh Dickins 	unsigned short mode_flags;
298971fe804bSLee Schermerhorn 	nodemask_t nodes;
2990095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2991095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2992dedf2c73Szhong jiang 	int err = 1, mode;
2993095f1fc4SLee Schermerhorn 
2994c7a91bc7SDan Carpenter 	if (flags)
2995c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2996c7a91bc7SDan Carpenter 
2997095f1fc4SLee Schermerhorn 	if (nodelist) {
2998095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2999095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
300071fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
3001095f1fc4SLee Schermerhorn 			goto out;
300201f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3003095f1fc4SLee Schermerhorn 			goto out;
300471fe804bSLee Schermerhorn 	} else
300571fe804bSLee Schermerhorn 		nodes_clear(nodes);
300671fe804bSLee Schermerhorn 
3007dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
3008dedf2c73Szhong jiang 	if (mode < 0)
3009095f1fc4SLee Schermerhorn 		goto out;
3010095f1fc4SLee Schermerhorn 
301171fe804bSLee Schermerhorn 	switch (mode) {
3012095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
301371fe804bSLee Schermerhorn 		/*
3014aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only, although later
3015aa9f7d51SRandy Dunlap 		 * we use first_node(nodes) to grab a single node, so here
3016aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
301771fe804bSLee Schermerhorn 		 */
3018095f1fc4SLee Schermerhorn 		if (nodelist) {
3019095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3020095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3021095f1fc4SLee Schermerhorn 				rest++;
3022926f2ae0SKOSAKI Motohiro 			if (*rest)
3023926f2ae0SKOSAKI Motohiro 				goto out;
3024aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3025aa9f7d51SRandy Dunlap 				goto out;
3026095f1fc4SLee Schermerhorn 		}
3027095f1fc4SLee Schermerhorn 		break;
3028095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3029095f1fc4SLee Schermerhorn 		/*
3030095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3031095f1fc4SLee Schermerhorn 		 */
3032095f1fc4SLee Schermerhorn 		if (!nodelist)
303301f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30343f226aa1SLee Schermerhorn 		break;
303571fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30363f226aa1SLee Schermerhorn 		/*
303771fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30383f226aa1SLee Schermerhorn 		 */
303971fe804bSLee Schermerhorn 		if (nodelist)
30403f226aa1SLee Schermerhorn 			goto out;
30413f226aa1SLee Schermerhorn 		break;
3042413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3043413b43deSRavikiran G Thirumalai 		/*
3044413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3045413b43deSRavikiran G Thirumalai 		 */
3046413b43deSRavikiran G Thirumalai 		if (!nodelist)
3047413b43deSRavikiran G Thirumalai 			err = 0;
3048413b43deSRavikiran G Thirumalai 		goto out;
3049b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3050d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
305171fe804bSLee Schermerhorn 		/*
3052d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
305371fe804bSLee Schermerhorn 		 */
3054d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3055d69b2e63SKOSAKI Motohiro 			goto out;
3056095f1fc4SLee Schermerhorn 	}
3057095f1fc4SLee Schermerhorn 
305871fe804bSLee Schermerhorn 	mode_flags = 0;
3059095f1fc4SLee Schermerhorn 	if (flags) {
3060095f1fc4SLee Schermerhorn 		/*
3061095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3062095f1fc4SLee Schermerhorn 		 * mode flags.
3063095f1fc4SLee Schermerhorn 		 */
3064095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
306571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3066095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
306771fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3068095f1fc4SLee Schermerhorn 		else
3069926f2ae0SKOSAKI Motohiro 			goto out;
3070095f1fc4SLee Schermerhorn 	}
307171fe804bSLee Schermerhorn 
307271fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
307371fe804bSLee Schermerhorn 	if (IS_ERR(new))
3074926f2ae0SKOSAKI Motohiro 		goto out;
3075926f2ae0SKOSAKI Motohiro 
3076f2a07f40SHugh Dickins 	/*
3077f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3078f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3079f2a07f40SHugh Dickins 	 */
3080269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3081269fbe72SBen Widawsky 		new->nodes = nodes;
3082269fbe72SBen Widawsky 	} else if (nodelist) {
3083269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3084269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3085269fbe72SBen Widawsky 	} else {
30867858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3087269fbe72SBen Widawsky 	}
3088f2a07f40SHugh Dickins 
3089f2a07f40SHugh Dickins 	/*
3090f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3091f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3092f2a07f40SHugh Dickins 	 */
3093e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3094f2a07f40SHugh Dickins 
3095926f2ae0SKOSAKI Motohiro 	err = 0;
309671fe804bSLee Schermerhorn 
3097095f1fc4SLee Schermerhorn out:
3098095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3099095f1fc4SLee Schermerhorn 	if (nodelist)
3100095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3101095f1fc4SLee Schermerhorn 	if (flags)
3102095f1fc4SLee Schermerhorn 		*--flags = '=';
310371fe804bSLee Schermerhorn 	if (!err)
310471fe804bSLee Schermerhorn 		*mpol = new;
3105095f1fc4SLee Schermerhorn 	return err;
3106095f1fc4SLee Schermerhorn }
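/*
 * Usage sketch (illustrative): parsing a tmpfs-style "mpol=" string.  The
 * buffer must be writable because parsing NUL-terminates the mode and
 * flags substrings in place.
 *
 *	char str[] = "interleave:0-3";
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		... use mpol, then drop it with mpol_put(mpol) ...
 *	}
 */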
3107095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3108095f1fc4SLee Schermerhorn 
310971fe804bSLee Schermerhorn /**
311071fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
311171fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
311271fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
311371fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
311471fe804bSLee Schermerhorn  *
3115948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3116948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3117948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
31181a75a6c8SChristoph Lameter  */
3119948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31201a75a6c8SChristoph Lameter {
31211a75a6c8SChristoph Lameter 	char *p = buffer;
3122948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3123948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3124948927eeSDavid Rientjes 	unsigned short flags = 0;
31251a75a6c8SChristoph Lameter 
31268790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3127bea904d5SLee Schermerhorn 		mode = pol->mode;
3128948927eeSDavid Rientjes 		flags = pol->flags;
3129948927eeSDavid Rientjes 	}
3130bea904d5SLee Schermerhorn 
31311a75a6c8SChristoph Lameter 	switch (mode) {
31321a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31337858d7bcSFeng Tang 	case MPOL_LOCAL:
31341a75a6c8SChristoph Lameter 		break;
31351a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3136b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31371a75a6c8SChristoph Lameter 	case MPOL_BIND:
31381a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3139269fbe72SBen Widawsky 		nodes = pol->nodes;
31401a75a6c8SChristoph Lameter 		break;
31411a75a6c8SChristoph Lameter 	default:
3142948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3143948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3144948927eeSDavid Rientjes 		return;
31451a75a6c8SChristoph Lameter 	}
31461a75a6c8SChristoph Lameter 
3147b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31481a75a6c8SChristoph Lameter 
3149fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3150948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3151f5b087b5SDavid Rientjes 
31522291990aSLee Schermerhorn 		/*
31532291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
31542291990aSLee Schermerhorn 		 */
3155f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31562291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31572291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31582291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3159f5b087b5SDavid Rientjes 	}
3160f5b087b5SDavid Rientjes 
31619e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31629e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31639e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31641a75a6c8SChristoph Lameter }
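/*
 * Usage sketch (illustrative; "m" is an assumed seq_file): format a policy
 * for a procfs-style dump.  A 64-byte buffer comfortably exceeds the
 * recommended minimum of 32.
 *
 *	char buf[64];
 *
 *	mpol_to_str(buf, sizeof(buf), pol);
 *	seq_printf(m, "mpol=%s\n", buf);
 */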
3165