xref: /openbmc/linux/mm/mempolicy.c (revision 6c21e066f9256ea1df6f88768f6ae1080b7cf509)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support several memory policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a per-process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind truly restricted
268bccd85fSChristoph Lameter  *                the allocation to the specified memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local node. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
481da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
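/*
 * Illustrative sketch (not part of this file): the modes described above are
 * what userspace selects via set_mempolicy(2) for the process policy and
 * mbind(2) for a per-VMA policy.  The example below assumes libnuma's
 * <numaif.h> declarations and a machine with at least nodes 0 and 1; it is a
 * hedged usage sketch, not a definitive reference.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0 = 1UL << 0;
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// Per-VMA policy: bind one anonymous mapping to node 0, no fallback.
 *	void *buf = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mbind(buf, 1 << 20, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 */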
581da177e4SLinus Torvalds 
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger OOM much faster and the
681da177e4SLinus Torvalds    kernel is not always graceful about that.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1074a18419fSNadav Amit #include <asm/tlb.h>
1087c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1091da177e4SLinus Torvalds 
11062695a84SNick Piggin #include "internal.h"
11162695a84SNick Piggin 
11238e35860SChristoph Lameter /* Internal flags */
113dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11438e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115dc9aa5b9SChristoph Lameter 
116fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
117fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1201da177e4SLinus Torvalds    policied. */
1216267276fSChristoph Lameter enum zone_type policy_zone = 0;
1221da177e4SLinus Torvalds 
123bea904d5SLee Schermerhorn /*
124bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
125bea904d5SLee Schermerhorn  */
126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1271da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1287858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1291da177e4SLinus Torvalds };
1301da177e4SLinus Torvalds 
1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1325606e387SMel Gorman 
133b2ca916cSDan Williams /**
134b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
135f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
136b2ca916cSDan Williams  *
137b2ca916cSDan Williams  * Look up the next closest node by distance if @node is not online.
138dad5b023SRandy Dunlap  *
139dad5b023SRandy Dunlap  * Return: this @node if it is online, otherwise the closest node by distance
140b2ca916cSDan Williams  */
141b2ca916cSDan Williams int numa_map_to_online_node(int node)
142b2ca916cSDan Williams {
1434fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
144b2ca916cSDan Williams 
1454fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1464fcbe96eSDan Williams 		return node;
147b2ca916cSDan Williams 
148b2ca916cSDan Williams 	min_node = node;
149b2ca916cSDan Williams 	for_each_online_node(n) {
150b2ca916cSDan Williams 		dist = node_distance(node, n);
151b2ca916cSDan Williams 		if (dist < min_dist) {
152b2ca916cSDan Williams 			min_dist = dist;
153b2ca916cSDan Williams 			min_node = n;
154b2ca916cSDan Williams 		}
155b2ca916cSDan Williams 	}
156b2ca916cSDan Williams 
157b2ca916cSDan Williams 	return min_node;
158b2ca916cSDan Williams }
159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
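/*
 * Hypothetical example (topology assumed purely for illustration): on a
 * four-node machine where node 2 has been taken offline and node 3 is the
 * nearest online node by node_distance(), numa_map_to_online_node(2)
 * returns 3, while numa_map_to_online_node(0) returns 0 unchanged because
 * node 0 is still online.
 */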
160b2ca916cSDan Williams 
16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1625606e387SMel Gorman {
1635606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
164f15ca78eSOleg Nesterov 	int node;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	if (pol)
167f15ca78eSOleg Nesterov 		return pol;
1685606e387SMel Gorman 
169f15ca78eSOleg Nesterov 	node = numa_node_id();
1701da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1711da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
172f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
173f15ca78eSOleg Nesterov 		if (pol->mode)
174f15ca78eSOleg Nesterov 			return pol;
1751da6f0e1SJianguo Wu 	}
1765606e387SMel Gorman 
177f15ca78eSOleg Nesterov 	return &default_policy;
1785606e387SMel Gorman }
1795606e387SMel Gorman 
18037012946SDavid Rientjes static const struct mempolicy_operations {
18137012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18337012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18437012946SDavid Rientjes 
185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186f5b087b5SDavid Rientjes {
1876d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1884c50bc01SDavid Rientjes }
1894c50bc01SDavid Rientjes 
1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1914c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1924c50bc01SDavid Rientjes {
1934c50bc01SDavid Rientjes 	nodemask_t tmp;
1944c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1954c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
196f5b087b5SDavid Rientjes }
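/*
 * Worked example (illustrative only): with MPOL_F_RELATIVE_NODES the user
 * nodemask is interpreted relative to the allowed set.  If *orig is {0,1}
 * and *rel (e.g. the cpuset's mems_allowed) is {4,5,6}, nodes_fold() folds
 * *orig modulo nodes_weight(*rel) == 3, still giving {0,1}, and nodes_onto()
 * then maps bit n to the n-th set bit of *rel, so *ret ends up as {4,5}.
 */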
197f5b087b5SDavid Rientjes 
198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19937012946SDavid Rientjes {
20037012946SDavid Rientjes 	if (nodes_empty(*nodes))
20137012946SDavid Rientjes 		return -EINVAL;
202269fbe72SBen Widawsky 	pol->nodes = *nodes;
20337012946SDavid Rientjes 	return 0;
20437012946SDavid Rientjes }
20537012946SDavid Rientjes 
20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20737012946SDavid Rientjes {
2087858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2097858d7bcSFeng Tang 		return -EINVAL;
210269fbe72SBen Widawsky 
211269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
212269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21337012946SDavid Rientjes 	return 0;
21437012946SDavid Rientjes }
21537012946SDavid Rientjes 
21658568d2aSMiao Xie /*
21758568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21858568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2197858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
22058568d2aSMiao Xie  *
22158568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
222c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22358568d2aSMiao Xie  */
2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2254bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22658568d2aSMiao Xie {
22758568d2aSMiao Xie 	int ret;
22858568d2aSMiao Xie 
2297858d7bcSFeng Tang 	/*
2307858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2317858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2327858d7bcSFeng Tang 	 * constructor.
2337858d7bcSFeng Tang 	 */
2347858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23558568d2aSMiao Xie 		return 0;
2367858d7bcSFeng Tang 
23701f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2384bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23901f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
24058568d2aSMiao Xie 
24158568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2427858d7bcSFeng Tang 
24358568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2444bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24558568d2aSMiao Xie 	else
2464bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2474bfc4495SKAMEZAWA Hiroyuki 
24858568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24958568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
25058568d2aSMiao Xie 	else
2517858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
25258568d2aSMiao Xie 
2534bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25458568d2aSMiao Xie 	return ret;
25558568d2aSMiao Xie }
25658568d2aSMiao Xie 
25758568d2aSMiao Xie /*
25858568d2aSMiao Xie  * This function just creates a new policy, does some checking and simple
25958568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
26058568d2aSMiao Xie  */
261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262028fec41SDavid Rientjes 				  nodemask_t *nodes)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds 	struct mempolicy *policy;
2651da177e4SLinus Torvalds 
266028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26700ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268140d5a49SPaul Mundt 
2693e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2703e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27137012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
272d3a71033SLee Schermerhorn 		return NULL;
27337012946SDavid Rientjes 	}
2743e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2753e1f0645SDavid Rientjes 
2763e1f0645SDavid Rientjes 	/*
2773e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2783e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2793e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2803e1f0645SDavid Rientjes 	 */
2813e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2823e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2833e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2843e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2853e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2867858d7bcSFeng Tang 
2877858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2883e1f0645SDavid Rientjes 		}
289479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2908d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2918d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2928d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
293479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2943e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2953e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2961da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2971da177e4SLinus Torvalds 	if (!policy)
2981da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2991da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30045c4745aSLee Schermerhorn 	policy->mode = mode;
30137012946SDavid Rientjes 	policy->flags = flags;
302c6018b4bSAneesh Kumar K.V 	policy->home_node = NUMA_NO_NODE;
3033e1f0645SDavid Rientjes 
30437012946SDavid Rientjes 	return policy;
30537012946SDavid Rientjes }
30637012946SDavid Rientjes 
30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30952cd3b07SLee Schermerhorn {
31052cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31152cd3b07SLee Schermerhorn 		return;
31252cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31352cd3b07SLee Schermerhorn }
31452cd3b07SLee Schermerhorn 
315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31637012946SDavid Rientjes {
31737012946SDavid Rientjes }
31837012946SDavid Rientjes 
319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3201d0d2680SDavid Rientjes {
3211d0d2680SDavid Rientjes 	nodemask_t tmp;
3221d0d2680SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3271d0d2680SDavid Rientjes 	else {
328269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329213980c0SVlastimil Babka 								*nodes);
33029b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3311d0d2680SDavid Rientjes 	}
33237012946SDavid Rientjes 
333708c1bbcSMiao Xie 	if (nodes_empty(tmp))
334708c1bbcSMiao Xie 		tmp = *nodes;
335708c1bbcSMiao Xie 
336269fbe72SBen Widawsky 	pol->nodes = tmp;
33737012946SDavid Rientjes }
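/*
 * Worked example (illustrative only): consider a task bound to nodes {2,3}
 * whose cpuset mems_allowed changes to {0,1}.  With no mode flags the old
 * set is remapped onto the new one via nodes_remap(), giving {0,1}.  With
 * MPOL_F_STATIC_NODES the stored user nodemask is intersected with the new
 * set instead; if that intersection is empty (as here), the fallback above
 * resets the policy to the full new nodemask.
 */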
33837012946SDavid Rientjes 
33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
340213980c0SVlastimil Babka 						const nodemask_t *nodes)
34137012946SDavid Rientjes {
34237012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3431d0d2680SDavid Rientjes }
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie /*
346708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347708c1bbcSMiao Xie  *
348c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
350213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
351708c1bbcSMiao Xie  */
352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35337012946SDavid Rientjes {
354018160adSWang Cheng 	if (!pol || pol->mode == MPOL_LOCAL)
35537012946SDavid Rientjes 		return;
3567858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35737012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35837012946SDavid Rientjes 		return;
359708c1bbcSMiao Xie 
360213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3611d0d2680SDavid Rientjes }
3621d0d2680SDavid Rientjes 
3631d0d2680SDavid Rientjes /*
3641d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3651d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36658568d2aSMiao Xie  *
36758568d2aSMiao Xie  * Called with task's alloc_lock held.
3681d0d2680SDavid Rientjes  */
3691d0d2680SDavid Rientjes 
370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3711d0d2680SDavid Rientjes {
372213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3731d0d2680SDavid Rientjes }
3741d0d2680SDavid Rientjes 
3751d0d2680SDavid Rientjes /*
3761d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3771d0d2680SDavid Rientjes  *
378c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3791d0d2680SDavid Rientjes  */
3801d0d2680SDavid Rientjes 
3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3821d0d2680SDavid Rientjes {
3831d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
38466850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
3851d0d2680SDavid Rientjes 
386d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
387*6c21e066SJann Horn 	for_each_vma(vmi, vma) {
388*6c21e066SJann Horn 		vma_start_write(vma);
389213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
390*6c21e066SJann Horn 	}
391d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3921d0d2680SDavid Rientjes }
3931d0d2680SDavid Rientjes 
39437012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
39537012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39637012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39737012946SDavid Rientjes 	},
39837012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
399be897d48SFeng Tang 		.create = mpol_new_nodemask,
40037012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40137012946SDavid Rientjes 	},
40237012946SDavid Rientjes 	[MPOL_PREFERRED] = {
40337012946SDavid Rientjes 		.create = mpol_new_preferred,
40437012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
40537012946SDavid Rientjes 	},
40637012946SDavid Rientjes 	[MPOL_BIND] = {
407be897d48SFeng Tang 		.create = mpol_new_nodemask,
40837012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40937012946SDavid Rientjes 	},
4107858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4117858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4127858d7bcSFeng Tang 	},
413b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
414be897d48SFeng Tang 		.create = mpol_new_nodemask,
415b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
416b27abaccSDave Hansen 	},
41737012946SDavid Rientjes };
41837012946SDavid Rientjes 
4194a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
420fc301289SChristoph Lameter 				unsigned long flags);
4211a75a6c8SChristoph Lameter 
4226f4576e3SNaoya Horiguchi struct queue_pages {
4236f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4246f4576e3SNaoya Horiguchi 	unsigned long flags;
4256f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
426f18da660SLi Xinhai 	unsigned long start;
427f18da660SLi Xinhai 	unsigned long end;
428f18da660SLi Xinhai 	struct vm_area_struct *first;
4296f4576e3SNaoya Horiguchi };
4306f4576e3SNaoya Horiguchi 
43198094945SNaoya Horiguchi /*
432d451b89dSVishal Moola (Oracle)  * Check if the folio's nid is in qp->nmask.
43388aaa2a1SNaoya Horiguchi  *
43488aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, the check is inverted: the
43588aaa2a1SNaoya Horiguchi  * folio qualifies only if its nid is *not* in qp->nmask.
43688aaa2a1SNaoya Horiguchi  */
437d451b89dSVishal Moola (Oracle) static inline bool queue_folio_required(struct folio *folio,
43888aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43988aaa2a1SNaoya Horiguchi {
440d451b89dSVishal Moola (Oracle) 	int nid = folio_nid(folio);
44188aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
44288aaa2a1SNaoya Horiguchi 
44388aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
44488aaa2a1SNaoya Horiguchi }
44588aaa2a1SNaoya Horiguchi 
446a7f40cfeSYang Shi /*
447de1f5055SVishal Moola (Oracle)  * queue_folios_pmd() has three possible return values:
448de1f5055SVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or a
449e5947d23SYang Shi  *     special page was met, i.e. the huge zero page.
450de1f5055SVishal Moola (Oracle)  * 1 - an unmovable folio was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
451d8835445SYang Shi  *     specified.
452d8835445SYang Shi  * -EIO - a migration entry was found, or only MPOL_MF_STRICT was specified
453de1f5055SVishal Moola (Oracle)  *        and an existing folio was already on a node that does not follow
454d8835445SYang Shi  *        the policy.
455a7f40cfeSYang Shi  */
456de1f5055SVishal Moola (Oracle) static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
457c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
458959a7e13SJules Irenge 	__releases(ptl)
459c8633798SNaoya Horiguchi {
460c8633798SNaoya Horiguchi 	int ret = 0;
461de1f5055SVishal Moola (Oracle) 	struct folio *folio;
462c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
463c8633798SNaoya Horiguchi 	unsigned long flags;
464c8633798SNaoya Horiguchi 
465c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
466a7f40cfeSYang Shi 		ret = -EIO;
467c8633798SNaoya Horiguchi 		goto unlock;
468c8633798SNaoya Horiguchi 	}
469de1f5055SVishal Moola (Oracle) 	folio = pfn_folio(pmd_pfn(*pmd));
470de1f5055SVishal Moola (Oracle) 	if (is_huge_zero_page(&folio->page)) {
471e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
4726d97cf88SMiaohe Lin 		goto unlock;
473c8633798SNaoya Horiguchi 	}
474d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
475c8633798SNaoya Horiguchi 		goto unlock;
476c8633798SNaoya Horiguchi 
477c8633798SNaoya Horiguchi 	flags = qp->flags;
478de1f5055SVishal Moola (Oracle) 	/* go to folio migration */
479a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
480a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
4814a64981dSVishal Moola (Oracle) 		    migrate_folio_add(folio, qp->pagelist, flags)) {
482d8835445SYang Shi 			ret = 1;
483a7f40cfeSYang Shi 			goto unlock;
484a7f40cfeSYang Shi 		}
485a7f40cfeSYang Shi 	} else
486a7f40cfeSYang Shi 		ret = -EIO;
487c8633798SNaoya Horiguchi unlock:
488c8633798SNaoya Horiguchi 	spin_unlock(ptl);
489c8633798SNaoya Horiguchi 	return ret;
490c8633798SNaoya Horiguchi }
491c8633798SNaoya Horiguchi 
49288aaa2a1SNaoya Horiguchi /*
49398094945SNaoya Horiguchi  * Scan through pages, checking whether they satisfy the required conditions,
49498094945SNaoya Horiguchi  * and move them to the pagelist if they do.
495d8835445SYang Shi  *
4963dae02bbSVishal Moola (Oracle)  * queue_folios_pte_range() has three possible return values:
4973dae02bbSVishal Moola (Oracle)  * 0 - folios are placed on the right node or queued successfully, or
498e5947d23SYang Shi  *     a special page was met, i.e. the zero page.
4993dae02bbSVishal Moola (Oracle)  * 1 - an unmovable folio was found, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
500d8835445SYang Shi  *     specified.
5013dae02bbSVishal Moola (Oracle)  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
502d8835445SYang Shi  *        on a node that does not follow the policy.
50398094945SNaoya Horiguchi  */
5043dae02bbSVishal Moola (Oracle) static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
5056f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5061da177e4SLinus Torvalds {
5076f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5083dae02bbSVishal Moola (Oracle) 	struct folio *folio;
5096f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5106f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
511d8835445SYang Shi 	bool has_unmovable = false;
5123f088420SShijie Luo 	pte_t *pte, *mapped_pte;
513c33c7948SRyan Roberts 	pte_t ptent;
514705e87c0SHugh Dickins 	spinlock_t *ptl;
515941150a3SHugh Dickins 
516c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
517bc78b5edSMiaohe Lin 	if (ptl)
518de1f5055SVishal Moola (Oracle) 		return queue_folios_pmd(pmd, ptl, addr, end, walk);
51991612e0dSHugh Dickins 
5203f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5217780d040SHugh Dickins 	if (!pte) {
5227780d040SHugh Dickins 		walk->action = ACTION_AGAIN;
5237780d040SHugh Dickins 		return 0;
5247780d040SHugh Dickins 	}
5256f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
526c33c7948SRyan Roberts 		ptent = ptep_get(pte);
527c33c7948SRyan Roberts 		if (!pte_present(ptent))
52891612e0dSHugh Dickins 			continue;
529c33c7948SRyan Roberts 		folio = vm_normal_folio(vma, addr, ptent);
5303dae02bbSVishal Moola (Oracle) 		if (!folio || folio_is_zone_device(folio))
53191612e0dSHugh Dickins 			continue;
532053837fcSNick Piggin 		/*
5333dae02bbSVishal Moola (Oracle) 		 * vm_normal_folio() filters out zero pages, but there might
5343dae02bbSVishal Moola (Oracle) 		 * still be reserved folios to skip, perhaps in a VDSO.
535053837fcSNick Piggin 		 */
5363dae02bbSVishal Moola (Oracle) 		if (folio_test_reserved(folio))
537f4598c8bSChristoph Lameter 			continue;
538d451b89dSVishal Moola (Oracle) 		if (!queue_folio_required(folio, qp))
53938e35860SChristoph Lameter 			continue;
540a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
541d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
542d8835445SYang Shi 			if (!vma_migratable(vma)) {
543d8835445SYang Shi 				has_unmovable = true;
544a7f40cfeSYang Shi 				break;
545d8835445SYang Shi 			}
546a53190a4SYang Shi 
547a53190a4SYang Shi 			/*
548a53190a4SYang Shi 			 * Do not abort immediately since there may be
549a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  Still
550a53190a4SYang Shi 			 * need to migrate other LRU pages.
551a53190a4SYang Shi 			 */
5524a64981dSVishal Moola (Oracle) 			if (migrate_folio_add(folio, qp->pagelist, flags))
553a53190a4SYang Shi 				has_unmovable = true;
554a7f40cfeSYang Shi 		} else
555a7f40cfeSYang Shi 			break;
5566f4576e3SNaoya Horiguchi 	}
5573f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5586f4576e3SNaoya Horiguchi 	cond_resched();
559d8835445SYang Shi 
560d8835445SYang Shi 	if (has_unmovable)
561d8835445SYang Shi 		return 1;
562d8835445SYang Shi 
563a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
56491612e0dSHugh Dickins }
56591612e0dSHugh Dickins 
5660a2c1e81SVishal Moola (Oracle) static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
5676f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5686f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
569e2d8cf40SNaoya Horiguchi {
570dcf17635SLi Xinhai 	int ret = 0;
571e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5726f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
573dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
5740a2c1e81SVishal Moola (Oracle) 	struct folio *folio;
575cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
576d4c54919SNaoya Horiguchi 	pte_t entry;
577e2d8cf40SNaoya Horiguchi 
5786f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5796f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
580d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
581d4c54919SNaoya Horiguchi 		goto unlock;
5820a2c1e81SVishal Moola (Oracle) 	folio = pfn_folio(pte_pfn(entry));
583d451b89dSVishal Moola (Oracle) 	if (!queue_folio_required(folio, qp))
584e2d8cf40SNaoya Horiguchi 		goto unlock;
585dcf17635SLi Xinhai 
586dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
587dcf17635SLi Xinhai 		/*
5880a2c1e81SVishal Moola (Oracle) 		 * STRICT alone means we only detect misplaced folios; there is no
589dcf17635SLi Xinhai 		 * need to check any other vma further.
590dcf17635SLi Xinhai 		 */
591dcf17635SLi Xinhai 		ret = -EIO;
592dcf17635SLi Xinhai 		goto unlock;
593dcf17635SLi Xinhai 	}
594dcf17635SLi Xinhai 
595dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
596dcf17635SLi Xinhai 		/*
597dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
598dcf17635SLi Xinhai 		 * stopped walking the current vma.
5990a2c1e81SVishal Moola (Oracle) 		 * Detect the misplaced folio, but allow migrating folios which
600dcf17635SLi Xinhai 		 * have already been queued.
601dcf17635SLi Xinhai 		 */
602dcf17635SLi Xinhai 		ret = 1;
603dcf17635SLi Xinhai 		goto unlock;
604dcf17635SLi Xinhai 	}
605dcf17635SLi Xinhai 
6060a2c1e81SVishal Moola (Oracle) 	/*
6070a2c1e81SVishal Moola (Oracle) 	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
6080a2c1e81SVishal Moola (Oracle) 	 * is shared it is likely not worth migrating.
6090a2c1e81SVishal Moola (Oracle) 	 *
6100a2c1e81SVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
6110a2c1e81SVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
6120a2c1e81SVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
6130a2c1e81SVishal Moola (Oracle) 	 */
614e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
6150a2c1e81SVishal Moola (Oracle) 	    (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
61673bdf65eSMike Kravetz 	     !hugetlb_pmd_shared(pte))) {
6179747b9e9SBaolin Wang 		if (!isolate_hugetlb(folio, qp->pagelist) &&
618dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
619dcf17635SLi Xinhai 			/*
6200a2c1e81SVishal Moola (Oracle) 			 * Failed to isolate this folio, but allow migrating folios
621dcf17635SLi Xinhai 			 * which have already been queued.
622dcf17635SLi Xinhai 			 */
623dcf17635SLi Xinhai 			ret = 1;
624dcf17635SLi Xinhai 	}
625e2d8cf40SNaoya Horiguchi unlock:
626cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
627e2d8cf40SNaoya Horiguchi #else
628e2d8cf40SNaoya Horiguchi 	BUG();
629e2d8cf40SNaoya Horiguchi #endif
630dcf17635SLi Xinhai 	return ret;
6311da177e4SLinus Torvalds }
6321da177e4SLinus Torvalds 
6335877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
634b24f53a0SLee Schermerhorn /*
6354b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses as inaccessible.
6364b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6374b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6384b10e7d5SMel Gorman  *
6394b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6404b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6414b10e7d5SMel Gorman  * changes to the core.
642b24f53a0SLee Schermerhorn  */
6434b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6444b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
645b24f53a0SLee Schermerhorn {
6464a18419fSNadav Amit 	struct mmu_gather tlb;
647a79390f5SPeter Xu 	long nr_updated;
648b24f53a0SLee Schermerhorn 
6494a18419fSNadav Amit 	tlb_gather_mmu(&tlb, vma->vm_mm);
6504a18419fSNadav Amit 
6511ef488edSDavid Hildenbrand 	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
652d1751118SPeter Xu 	if (nr_updated > 0)
65303c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
654b24f53a0SLee Schermerhorn 
6554a18419fSNadav Amit 	tlb_finish_mmu(&tlb);
6564a18419fSNadav Amit 
6574b10e7d5SMel Gorman 	return nr_updated;
658b24f53a0SLee Schermerhorn }
659b24f53a0SLee Schermerhorn #else
660b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
661b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
662b24f53a0SLee Schermerhorn {
663b24f53a0SLee Schermerhorn 	return 0;
664b24f53a0SLee Schermerhorn }
6655877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
666b24f53a0SLee Schermerhorn 
6676f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6686f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6691da177e4SLinus Torvalds {
67066850be5SLiam R. Howlett 	struct vm_area_struct *next, *vma = walk->vma;
6716f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6725b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6736f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
674dc9aa5b9SChristoph Lameter 
675a18b3ac2SLi Xinhai 	/* range check first */
676ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
677f18da660SLi Xinhai 
678f18da660SLi Xinhai 	if (!qp->first) {
679f18da660SLi Xinhai 		qp->first = vma;
680f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
681f18da660SLi Xinhai 			(qp->start < vma->vm_start))
682f18da660SLi Xinhai 			/* hole at head side of range */
683a18b3ac2SLi Xinhai 			return -EFAULT;
684a18b3ac2SLi Xinhai 	}
68566850be5SLiam R. Howlett 	next = find_vma(vma->vm_mm, vma->vm_end);
686f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
687f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
68866850be5SLiam R. Howlett 		(!next || vma->vm_end < next->vm_start)))
689f18da660SLi Xinhai 		/* hole at middle or tail of range */
690f18da660SLi Xinhai 		return -EFAULT;
691a18b3ac2SLi Xinhai 
692a7f40cfeSYang Shi 	/*
693a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
694a7f40cfeSYang Shi 	 * regardless of vma_migratable
695a7f40cfeSYang Shi 	 */
696a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
697a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
69848684a65SNaoya Horiguchi 		return 1;
69948684a65SNaoya Horiguchi 
7005b952b3cSAndi Kleen 	if (endvma > end)
7015b952b3cSAndi Kleen 		endvma = end;
702b24f53a0SLee Schermerhorn 
703b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
7042c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
7053122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
7064355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
707b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
7086f4576e3SNaoya Horiguchi 		return 1;
709b24f53a0SLee Schermerhorn 	}
710b24f53a0SLee Schermerhorn 
7116f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
712a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7136f4576e3SNaoya Horiguchi 		return 0;
7146f4576e3SNaoya Horiguchi 	return 1;
7156f4576e3SNaoya Horiguchi }
716b24f53a0SLee Schermerhorn 
7177b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7180a2c1e81SVishal Moola (Oracle) 	.hugetlb_entry		= queue_folios_hugetlb,
7193dae02bbSVishal Moola (Oracle) 	.pmd_entry		= queue_folios_pte_range,
7207b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7217b86ac33SChristoph Hellwig };
7227b86ac33SChristoph Hellwig 
7236f4576e3SNaoya Horiguchi /*
7246f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7256f4576e3SNaoya Horiguchi  *
7266f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7276f4576e3SNaoya Horiguchi  * @nodes and @flags,) it's isolated and queued to the pagelist which is
728d8835445SYang Shi  * passed via @private.
729d8835445SYang Shi  *
730d8835445SYang Shi  * queue_pages_range() has three possible return values:
731d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
732d8835445SYang Shi  *     specified.
733d8835445SYang Shi  * 0 - queue pages successfully or no misplaced page.
734a85dfc30SYang Shi  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
735a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
736a85dfc30SYang Shi  *         your accessible address space (-EFAULT)
7376f4576e3SNaoya Horiguchi  */
7386f4576e3SNaoya Horiguchi static int
7396f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7406f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7416f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7426f4576e3SNaoya Horiguchi {
743f18da660SLi Xinhai 	int err;
7446f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7456f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7466f4576e3SNaoya Horiguchi 		.flags = flags,
7476f4576e3SNaoya Horiguchi 		.nmask = nodes,
748f18da660SLi Xinhai 		.start = start,
749f18da660SLi Xinhai 		.end = end,
750f18da660SLi Xinhai 		.first = NULL,
7516f4576e3SNaoya Horiguchi 	};
7526f4576e3SNaoya Horiguchi 
753f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
754f18da660SLi Xinhai 
755f18da660SLi Xinhai 	if (!qp.first)
756f18da660SLi Xinhai 		/* whole range in hole */
757f18da660SLi Xinhai 		err = -EFAULT;
758f18da660SLi Xinhai 
759f18da660SLi Xinhai 	return err;
7601da177e4SLinus Torvalds }
7611da177e4SLinus Torvalds 
762869833f2SKOSAKI Motohiro /*
763869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
764c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
765869833f2SKOSAKI Motohiro  */
766869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
767869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7688d34694cSKOSAKI Motohiro {
769869833f2SKOSAKI Motohiro 	int err;
770869833f2SKOSAKI Motohiro 	struct mempolicy *old;
771869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7728d34694cSKOSAKI Motohiro 
773*6c21e066SJann Horn 	vma_assert_write_locked(vma);
774*6c21e066SJann Horn 
7758d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7768d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7778d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7788d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7798d34694cSKOSAKI Motohiro 
780869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
781869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
782869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
783869833f2SKOSAKI Motohiro 
784869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7858d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
786869833f2SKOSAKI Motohiro 		if (err)
787869833f2SKOSAKI Motohiro 			goto err_out;
7888d34694cSKOSAKI Motohiro 	}
789869833f2SKOSAKI Motohiro 
790869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
791c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
792869833f2SKOSAKI Motohiro 	mpol_put(old);
793869833f2SKOSAKI Motohiro 
794869833f2SKOSAKI Motohiro 	return 0;
795869833f2SKOSAKI Motohiro  err_out:
796869833f2SKOSAKI Motohiro 	mpol_put(new);
7978d34694cSKOSAKI Motohiro 	return err;
7988d34694cSKOSAKI Motohiro }
7998d34694cSKOSAKI Motohiro 
800f4e9e0e6SLiam R. Howlett /* Split or merge the VMA (if required) and apply the new policy */
801f4e9e0e6SLiam R. Howlett static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
802f4e9e0e6SLiam R. Howlett 		struct vm_area_struct **prev, unsigned long start,
8039d8cebd4SKOSAKI Motohiro 		unsigned long end, struct mempolicy *new_pol)
8041da177e4SLinus Torvalds {
805f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *merged;
806f4e9e0e6SLiam R. Howlett 	unsigned long vmstart, vmend;
807e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
808f4e9e0e6SLiam R. Howlett 	int err;
8091da177e4SLinus Torvalds 
810f4e9e0e6SLiam R. Howlett 	vmend = min(end, vma->vm_end);
811f4e9e0e6SLiam R. Howlett 	if (start > vma->vm_start) {
812f4e9e0e6SLiam R. Howlett 		*prev = vma;
813f4e9e0e6SLiam R. Howlett 		vmstart = start;
814f4e9e0e6SLiam R. Howlett 	} else {
815f4e9e0e6SLiam R. Howlett 		vmstart = vma->vm_start;
816f4e9e0e6SLiam R. Howlett 	}
8179d8cebd4SKOSAKI Motohiro 
81800ca0f2eSLorenzo Stoakes 	if (mpol_equal(vma_policy(vma), new_pol)) {
81900ca0f2eSLorenzo Stoakes 		*prev = vma;
820f4e9e0e6SLiam R. Howlett 		return 0;
82100ca0f2eSLorenzo Stoakes 	}
822e26a5114SKOSAKI Motohiro 
823f4e9e0e6SLiam R. Howlett 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
824f4e9e0e6SLiam R. Howlett 	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
825f4e9e0e6SLiam R. Howlett 			 vma->anon_vma, vma->vm_file, pgoff, new_pol,
826f4e9e0e6SLiam R. Howlett 			 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
827f4e9e0e6SLiam R. Howlett 	if (merged) {
828f4e9e0e6SLiam R. Howlett 		*prev = merged;
829f4e9e0e6SLiam R. Howlett 		return vma_replace_policy(merged, new_pol);
8301da177e4SLinus Torvalds 	}
831f4e9e0e6SLiam R. Howlett 
8329d8cebd4SKOSAKI Motohiro 	if (vma->vm_start != vmstart) {
833f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmstart, 1);
8349d8cebd4SKOSAKI Motohiro 		if (err)
8351da177e4SLinus Torvalds 			return err;
8361da177e4SLinus Torvalds 	}
8371da177e4SLinus Torvalds 
838f4e9e0e6SLiam R. Howlett 	if (vma->vm_end != vmend) {
839f4e9e0e6SLiam R. Howlett 		err = split_vma(vmi, vma, vmend, 0);
840f4e9e0e6SLiam R. Howlett 		if (err)
841f4e9e0e6SLiam R. Howlett 			return err;
842f4e9e0e6SLiam R. Howlett 	}
843f4e9e0e6SLiam R. Howlett 
844f4e9e0e6SLiam R. Howlett 	*prev = vma;
845f4e9e0e6SLiam R. Howlett 	return vma_replace_policy(vma, new_pol);
846f4e9e0e6SLiam R. Howlett }
847f4e9e0e6SLiam R. Howlett 
8481da177e4SLinus Torvalds /* Set the process memory policy */
849028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
850028fec41SDavid Rientjes 			     nodemask_t *nodes)
8511da177e4SLinus Torvalds {
85258568d2aSMiao Xie 	struct mempolicy *new, *old;
8534bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
85458568d2aSMiao Xie 	int ret;
8551da177e4SLinus Torvalds 
8564bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8574bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
858f4e53d91SLee Schermerhorn 
8594bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8604bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8614bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8624bfc4495SKAMEZAWA Hiroyuki 		goto out;
8634bfc4495SKAMEZAWA Hiroyuki 	}
8642c7c3a7dSOleg Nesterov 
86512c1dc8eSAbel Wu 	task_lock(current);
8664bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
86758568d2aSMiao Xie 	if (ret) {
86812c1dc8eSAbel Wu 		task_unlock(current);
86958568d2aSMiao Xie 		mpol_put(new);
8704bfc4495SKAMEZAWA Hiroyuki 		goto out;
87158568d2aSMiao Xie 	}
87212c1dc8eSAbel Wu 
87358568d2aSMiao Xie 	old = current->mempolicy;
8741da177e4SLinus Torvalds 	current->mempolicy = new;
87545816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87645816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
87758568d2aSMiao Xie 	task_unlock(current);
87858568d2aSMiao Xie 	mpol_put(old);
8794bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8804bfc4495SKAMEZAWA Hiroyuki out:
8814bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8824bfc4495SKAMEZAWA Hiroyuki 	return ret;
8831da177e4SLinus Torvalds }
8841da177e4SLinus Torvalds 
885bea904d5SLee Schermerhorn /*
886bea904d5SLee Schermerhorn  * Return the nodemask of a policy, for a get_mempolicy() query
88758568d2aSMiao Xie  *
88858568d2aSMiao Xie  * Called with task's alloc_lock held
889bea904d5SLee Schermerhorn  */
890bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8911da177e4SLinus Torvalds {
892dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
893bea904d5SLee Schermerhorn 	if (p == &default_policy)
894bea904d5SLee Schermerhorn 		return;
895bea904d5SLee Schermerhorn 
89645c4745aSLee Schermerhorn 	switch (p->mode) {
89719770b32SMel Gorman 	case MPOL_BIND:
8981da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
899269fbe72SBen Widawsky 	case MPOL_PREFERRED:
900b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
901269fbe72SBen Widawsky 		*nodes = p->nodes;
9021da177e4SLinus Torvalds 		break;
9037858d7bcSFeng Tang 	case MPOL_LOCAL:
9047858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9057858d7bcSFeng Tang 		break;
9061da177e4SLinus Torvalds 	default:
9071da177e4SLinus Torvalds 		BUG();
9081da177e4SLinus Torvalds 	}
9091da177e4SLinus Torvalds }
9101da177e4SLinus Torvalds 
9113b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9121da177e4SLinus Torvalds {
913ba841078SPeter Xu 	struct page *p = NULL;
914f728b9c4SJohn Hubbard 	int ret;
9151da177e4SLinus Torvalds 
916f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
917f728b9c4SJohn Hubbard 	if (ret > 0) {
918f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9191da177e4SLinus Torvalds 		put_page(p);
9201da177e4SLinus Torvalds 	}
921f728b9c4SJohn Hubbard 	return ret;
9221da177e4SLinus Torvalds }
9231da177e4SLinus Torvalds 
9241da177e4SLinus Torvalds /* Retrieve NUMA policy */
925dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9261da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9271da177e4SLinus Torvalds {
9288bccd85fSChristoph Lameter 	int err;
9291da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9301da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9313b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9321da177e4SLinus Torvalds 
933754af6f5SLee Schermerhorn 	if (flags &
934754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9351da177e4SLinus Torvalds 		return -EINVAL;
936754af6f5SLee Schermerhorn 
937754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
938754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
939754af6f5SLee Schermerhorn 			return -EINVAL;
940754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
94158568d2aSMiao Xie 		task_lock(current);
942754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
94358568d2aSMiao Xie 		task_unlock(current);
944754af6f5SLee Schermerhorn 		return 0;
945754af6f5SLee Schermerhorn 	}
946754af6f5SLee Schermerhorn 
9471da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
948bea904d5SLee Schermerhorn 		/*
949bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
950bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
951bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
952bea904d5SLee Schermerhorn 		 */
953d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
95433e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9551da177e4SLinus Torvalds 		if (!vma) {
956d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9571da177e4SLinus Torvalds 			return -EFAULT;
9581da177e4SLinus Torvalds 		}
9591da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9601da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9611da177e4SLinus Torvalds 		else
9621da177e4SLinus Torvalds 			pol = vma->vm_policy;
9631da177e4SLinus Torvalds 	} else if (addr)
9641da177e4SLinus Torvalds 		return -EINVAL;
9651da177e4SLinus Torvalds 
9661da177e4SLinus Torvalds 	if (!pol)
967bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9681da177e4SLinus Torvalds 
9691da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9701da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9713b9aadf7SAndrea Arcangeli 			/*
972f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
973f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
974f728b9c4SJohn Hubbard 			 * valid, "vma" is stale.
9753b9aadf7SAndrea Arcangeli 			 */
9763b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9773b9aadf7SAndrea Arcangeli 			vma = NULL;
9783b9aadf7SAndrea Arcangeli 			mpol_get(pol);
979f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9803b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9811da177e4SLinus Torvalds 			if (err < 0)
9821da177e4SLinus Torvalds 				goto out;
9838bccd85fSChristoph Lameter 			*policy = err;
9841da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
98545c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
986269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9871da177e4SLinus Torvalds 		} else {
9881da177e4SLinus Torvalds 			err = -EINVAL;
9891da177e4SLinus Torvalds 			goto out;
9901da177e4SLinus Torvalds 		}
991bea904d5SLee Schermerhorn 	} else {
992bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
993bea904d5SLee Schermerhorn 						pol->mode;
994d79df630SDavid Rientjes 		/*
995d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
996d79df630SDavid Rientjes 		 * the policy to userspace.
997d79df630SDavid Rientjes 		 */
998d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
999bea904d5SLee Schermerhorn 	}
10001da177e4SLinus Torvalds 
10011da177e4SLinus Torvalds 	err = 0;
100258568d2aSMiao Xie 	if (nmask) {
1003c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1004c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1005c6b6ef8bSLee Schermerhorn 		} else {
100658568d2aSMiao Xie 			task_lock(current);
1007bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
100858568d2aSMiao Xie 			task_unlock(current);
100958568d2aSMiao Xie 		}
1010c6b6ef8bSLee Schermerhorn 	}
10111da177e4SLinus Torvalds 
10121da177e4SLinus Torvalds  out:
101352cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10141da177e4SLinus Torvalds 	if (vma)
1015d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10163b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10173b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10181da177e4SLinus Torvalds 	return err;
10191da177e4SLinus Torvalds }
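/*
 * Illustrative sketch (not kernel code): do_get_mempolicy() backs the
 * get_mempolicy(2) syscall.  Assuming libnuma's <numaif.h> and an already
 * mapped address "addr", userspace can ask which node currently backs a
 * page with:
 *
 *	int node = -1;
 *	if (get_mempolicy(&node, NULL, 0, addr,
 *			  MPOL_F_NODE | MPOL_F_ADDR) == 0)
 *		printf("page at %p is on node %d\n", addr, node);
 *
 * Note that the kernel may fault the page in to answer the query (see
 * lookup_node() above, which uses get_user_pages_fast()).
 */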
10201da177e4SLinus Torvalds 
1021b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10224a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1023fc301289SChristoph Lameter 				unsigned long flags)
10246ce3c4c0SChristoph Lameter {
10256ce3c4c0SChristoph Lameter 	/*
10264a64981dSVishal Moola (Oracle) 	 * We try to migrate only unshared folios. If it is shared it
10274a64981dSVishal Moola (Oracle) 	 * is likely not worth migrating.
10284a64981dSVishal Moola (Oracle) 	 *
10294a64981dSVishal Moola (Oracle) 	 * To check if the folio is shared, ideally we want to make sure
10304a64981dSVishal Moola (Oracle) 	 * every page is mapped to the same process. Doing that is very
10314a64981dSVishal Moola (Oracle) 	 * expensive, so check the estimated mapcount of the folio instead.
10326ce3c4c0SChristoph Lameter 	 */
10334a64981dSVishal Moola (Oracle) 	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
1034be2d5756SBaolin Wang 		if (folio_isolate_lru(folio)) {
10354a64981dSVishal Moola (Oracle) 			list_add_tail(&folio->lru, foliolist);
10364a64981dSVishal Moola (Oracle) 			node_stat_mod_folio(folio,
10374a64981dSVishal Moola (Oracle) 				NR_ISOLATED_ANON + folio_is_file_lru(folio),
10384a64981dSVishal Moola (Oracle) 				folio_nr_pages(folio));
1039a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1040a53190a4SYang Shi 			/*
10414a64981dSVishal Moola (Oracle) 			 * Non-movable folio may reach here.  And, there may be
10424a64981dSVishal Moola (Oracle) 			 * temporary off LRU folios or non-LRU movable folios.
10434a64981dSVishal Moola (Oracle) 			 * Treat them as unmovable folios since they can't be
1044a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1045a53190a4SYang Shi 			 * should return -EIO for this case too.
1046a53190a4SYang Shi 			 */
1047a53190a4SYang Shi 			return -EIO;
104862695a84SNick Piggin 		}
104962695a84SNick Piggin 	}
1050a53190a4SYang Shi 
1051a53190a4SYang Shi 	return 0;
10526ce3c4c0SChristoph Lameter }
10536ce3c4c0SChristoph Lameter 
10546ce3c4c0SChristoph Lameter /*
10557e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10567e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10577e2ab150SChristoph Lameter  */
1058dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1059dbcb0f19SAdrian Bunk 			   int flags)
10607e2ab150SChristoph Lameter {
10617e2ab150SChristoph Lameter 	nodemask_t nmask;
106266850be5SLiam R. Howlett 	struct vm_area_struct *vma;
10637e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10647e2ab150SChristoph Lameter 	int err = 0;
1065a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1066a0976311SJoonsoo Kim 		.nid = dest,
1067a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1068a0976311SJoonsoo Kim 	};
10697e2ab150SChristoph Lameter 
10707e2ab150SChristoph Lameter 	nodes_clear(nmask);
10717e2ab150SChristoph Lameter 	node_set(source, nmask);
10727e2ab150SChristoph Lameter 
107308270807SMinchan Kim 	/*
107408270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
107508270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107608270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
107708270807SMinchan Kim 	 */
107866850be5SLiam R. Howlett 	vma = find_vma(mm, 0);
107908270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
108066850be5SLiam R. Howlett 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
10817e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10827e2ab150SChristoph Lameter 
1083cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1084a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10855ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1086cf608ac1SMinchan Kim 		if (err)
1087e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1088cf608ac1SMinchan Kim 	}
108995a402c3SChristoph Lameter 
10907e2ab150SChristoph Lameter 	return err;
10917e2ab150SChristoph Lameter }
10927e2ab150SChristoph Lameter 
10937e2ab150SChristoph Lameter /*
10947e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10957e2ab150SChristoph Lameter  * layout as much as possible.
109639743889SChristoph Lameter  *
109739743889SChristoph Lameter  * Returns the number of pages that could not be moved.
109839743889SChristoph Lameter  */
10990ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11000ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110139743889SChristoph Lameter {
11027e2ab150SChristoph Lameter 	int busy = 0;
1103f555befdSJan Stancek 	int err = 0;
11047e2ab150SChristoph Lameter 	nodemask_t tmp;
110539743889SChristoph Lameter 
1106361a2a22SMinchan Kim 	lru_cache_disable();
11070aedadf9SChristoph Lameter 
1108d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1109d4984711SChristoph Lameter 
11107e2ab150SChristoph Lameter 	/*
11117e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11127e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11137e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11147e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11157e2ab150SChristoph Lameter 	 *
11167e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11177e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11187e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11197e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11207e2ab150SChristoph Lameter 	 *
11217e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11227e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11237e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11247e2ab150SChristoph Lameter 	 *
11257e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11267e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11277e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11287e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11297e2ab150SChristoph Lameter 	 * before migrating outgoing memory source that same node.
11307e2ab150SChristoph Lameter 	 *
11317e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11327e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11337e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11347e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out with that pair.
1135ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11367e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11377e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11387e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11397e2ab150SChristoph Lameter 	 */
11407e2ab150SChristoph Lameter 
11410ce72d4fSAndrew Morton 	tmp = *from;
11427e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11437e2ab150SChristoph Lameter 		int s, d;
1144b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11457e2ab150SChristoph Lameter 		int dest = 0;
11467e2ab150SChristoph Lameter 
11477e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11484a5b18ccSLarry Woodman 
11494a5b18ccSLarry Woodman 			/*
11504a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11514a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11524a5b18ccSLarry Woodman 			 * threads and memory areas.
11534a5b18ccSLarry Woodman 			 *
11544a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11554a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11564a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11574a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11584a5b18ccSLarry Woodman 			 * mask.
11594a5b18ccSLarry Woodman 			 *
11604a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11614a5b18ccSLarry Woodman 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
11624a5b18ccSLarry Woodman 			 */
11634a5b18ccSLarry Woodman 
11640ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11650ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11664a5b18ccSLarry Woodman 				continue;
11674a5b18ccSLarry Woodman 
11680ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11697e2ab150SChristoph Lameter 			if (s == d)
11707e2ab150SChristoph Lameter 				continue;
11717e2ab150SChristoph Lameter 
11727e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11737e2ab150SChristoph Lameter 			dest = d;
11747e2ab150SChristoph Lameter 
11757e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11767e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11777e2ab150SChristoph Lameter 				break;
11787e2ab150SChristoph Lameter 		}
1179b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11807e2ab150SChristoph Lameter 			break;
11817e2ab150SChristoph Lameter 
11827e2ab150SChristoph Lameter 		node_clear(source, tmp);
11837e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11847e2ab150SChristoph Lameter 		if (err > 0)
11857e2ab150SChristoph Lameter 			busy += err;
11867e2ab150SChristoph Lameter 		if (err < 0)
11877e2ab150SChristoph Lameter 			break;
118839743889SChristoph Lameter 	}
1189d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1190d479960eSMinchan Kim 
1191361a2a22SMinchan Kim 	lru_cache_enable();
11927e2ab150SChristoph Lameter 	if (err < 0)
11937e2ab150SChristoph Lameter 		return err;
11947e2ab150SChristoph Lameter 	return busy;
1195b20a3503SChristoph Lameter 
119639743889SChristoph Lameter }
119739743889SChristoph Lameter 
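/*
 * Illustrative example of the pair selection above: migrating from
 * nodes {0,1} to nodes {2,3}.  The node weights match, so nothing is
 * skipped.  Pass 1 picks s = 0, d = node_remap(0, {0,1}, {2,3}) = 2;
 * 2 is not a remaining source, so we break out and migrate 0 -> 2,
 * clearing 0 from tmp.  Pass 2 then picks s = 1, d = 3 and migrates
 * 1 -> 3, after which tmp is empty and the loop terminates.
 */
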
11983ad33b24SLee Schermerhorn /*
11993ad33b24SLee Schermerhorn  * Allocate a new folio for folio migration, based on vma policy.
1200d05f0cdcSHugh Dickins  * Start by assuming the folio is mapped by the same vma that contains @start.
12013ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12023ad33b24SLee Schermerhorn  * list of folios handed to migrate_pages()--which is how we get here--
12033ad33b24SLee Schermerhorn  * is in virtual address order.
12043ad33b24SLee Schermerhorn  */
12054e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
120695a402c3SChristoph Lameter {
1207d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12083f649ab7SKees Cook 	unsigned long address;
120966850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, current->mm, start);
1210ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
121195a402c3SChristoph Lameter 
121266850be5SLiam R. Howlett 	for_each_vma(vmi, vma) {
12134e096ae1SMatthew Wilcox (Oracle) 		address = page_address_in_vma(&src->page, vma);
12143ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12153ad33b24SLee Schermerhorn 			break;
12163ad33b24SLee Schermerhorn 	}
12173ad33b24SLee Schermerhorn 
1218d0ce0e47SSidhartha Kumar 	if (folio_test_hugetlb(src)) {
12194e096ae1SMatthew Wilcox (Oracle) 		return alloc_hugetlb_folio_vma(folio_hstate(src),
1220389c8178SMichal Hocko 				vma, address);
1221d0ce0e47SSidhartha Kumar 	}
1222c8633798SNaoya Horiguchi 
1223ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1224ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1225ec4858e0SMatthew Wilcox (Oracle) 
122611c731e8SWanpeng Li 	/*
1227ec4858e0SMatthew Wilcox (Oracle) 	 * if !vma, vma_alloc_folio() will use task or system default policy
122811c731e8SWanpeng Li 	 */
12294e096ae1SMatthew Wilcox (Oracle) 	return vma_alloc_folio(gfp, folio_order(src), vma, address,
1230ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
123195a402c3SChristoph Lameter }
1232b20a3503SChristoph Lameter #else
1233b20a3503SChristoph Lameter 
12344a64981dSVishal Moola (Oracle) static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
1235b20a3503SChristoph Lameter 				unsigned long flags)
1236b20a3503SChristoph Lameter {
1237a53190a4SYang Shi 	return -EIO;
1238b20a3503SChristoph Lameter }
1239b20a3503SChristoph Lameter 
12400ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12410ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1242b20a3503SChristoph Lameter {
1243b20a3503SChristoph Lameter 	return -ENOSYS;
1244b20a3503SChristoph Lameter }
124595a402c3SChristoph Lameter 
12464e096ae1SMatthew Wilcox (Oracle) static struct folio *new_folio(struct folio *src, unsigned long start)
124795a402c3SChristoph Lameter {
124895a402c3SChristoph Lameter 	return NULL;
124995a402c3SChristoph Lameter }
1250b20a3503SChristoph Lameter #endif
1251b20a3503SChristoph Lameter 
1252dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1253028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1254028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12556ce3c4c0SChristoph Lameter {
12566ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
1257f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1258f4e9e0e6SLiam R. Howlett 	struct vma_iterator vmi;
12596ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12606ce3c4c0SChristoph Lameter 	unsigned long end;
12616ce3c4c0SChristoph Lameter 	int err;
1262d8835445SYang Shi 	int ret;
12636ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12646ce3c4c0SChristoph Lameter 
1265b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12666ce3c4c0SChristoph Lameter 		return -EINVAL;
126774c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12686ce3c4c0SChristoph Lameter 		return -EPERM;
12696ce3c4c0SChristoph Lameter 
12706ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12716ce3c4c0SChristoph Lameter 		return -EINVAL;
12726ce3c4c0SChristoph Lameter 
12736ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12746ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12756ce3c4c0SChristoph Lameter 
1276aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
12776ce3c4c0SChristoph Lameter 	end = start + len;
12786ce3c4c0SChristoph Lameter 
12796ce3c4c0SChristoph Lameter 	if (end < start)
12806ce3c4c0SChristoph Lameter 		return -EINVAL;
12816ce3c4c0SChristoph Lameter 	if (end == start)
12826ce3c4c0SChristoph Lameter 		return 0;
12836ce3c4c0SChristoph Lameter 
1284028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12856ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12866ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12876ce3c4c0SChristoph Lameter 
1288b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1289b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1290b24f53a0SLee Schermerhorn 
12916ce3c4c0SChristoph Lameter 	/*
12926ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operations
12936ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces are okay after all.
12946ce3c4c0SChristoph Lameter 	 */
12956ce3c4c0SChristoph Lameter 	if (!new)
12966ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12976ce3c4c0SChristoph Lameter 
1298028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1299028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130000ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13016ce3c4c0SChristoph Lameter 
13020aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13030aedadf9SChristoph Lameter 
1304361a2a22SMinchan Kim 		lru_cache_disable();
13050aedadf9SChristoph Lameter 	}
13064bfc4495SKAMEZAWA Hiroyuki 	{
13074bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13084bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1309d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13104bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13114bfc4495SKAMEZAWA Hiroyuki 			if (err)
1312d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13134bfc4495SKAMEZAWA Hiroyuki 		} else
13144bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13154bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13164bfc4495SKAMEZAWA Hiroyuki 	}
1317b05ca738SKOSAKI Motohiro 	if (err)
1318b05ca738SKOSAKI Motohiro 		goto mpol_out;
1319b05ca738SKOSAKI Motohiro 
1320*6c21e066SJann Horn 	/*
1321*6c21e066SJann Horn 	 * Lock the VMAs before scanning for pages to migrate, to ensure we don't
1322*6c21e066SJann Horn 	 * miss a concurrently inserted page.
1323*6c21e066SJann Horn 	 */
1324*6c21e066SJann Horn 	vma_iter_init(&vmi, mm, start);
1325*6c21e066SJann Horn 	for_each_vma_range(vmi, vma, end)
1326*6c21e066SJann Horn 		vma_start_write(vma);
1327*6c21e066SJann Horn 
1328d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13296ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1330d8835445SYang Shi 
1331d8835445SYang Shi 	if (ret < 0) {
1332a85dfc30SYang Shi 		err = ret;
1333d8835445SYang Shi 		goto up_out;
1334d8835445SYang Shi 	}
1335d8835445SYang Shi 
1336f4e9e0e6SLiam R. Howlett 	vma_iter_init(&vmi, mm, start);
1337f4e9e0e6SLiam R. Howlett 	prev = vma_prev(&vmi);
1338f4e9e0e6SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1339f4e9e0e6SLiam R. Howlett 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1340f4e9e0e6SLiam R. Howlett 		if (err)
1341f4e9e0e6SLiam R. Howlett 			break;
1342f4e9e0e6SLiam R. Howlett 	}
13437e2ab150SChristoph Lameter 
1344b24f53a0SLee Schermerhorn 	if (!err) {
1345b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1346b24f53a0SLee Schermerhorn 
1347cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1348b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
13494e096ae1SMatthew Wilcox (Oracle) 			nr_failed = migrate_pages(&pagelist, new_folio, NULL,
13505ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1351cf608ac1SMinchan Kim 			if (nr_failed)
135274060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1353cf608ac1SMinchan Kim 		}
13546ce3c4c0SChristoph Lameter 
1355d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13566ce3c4c0SChristoph Lameter 			err = -EIO;
1357a85dfc30SYang Shi 	} else {
1358d8835445SYang Shi up_out:
1359a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1360a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1361a85dfc30SYang Shi 	}
1362a85dfc30SYang Shi 
1363d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1364b05ca738SKOSAKI Motohiro mpol_out:
1365f0be3d32SLee Schermerhorn 	mpol_put(new);
1366d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1367361a2a22SMinchan Kim 		lru_cache_enable();
13686ce3c4c0SChristoph Lameter 	return err;
13696ce3c4c0SChristoph Lameter }
13706ce3c4c0SChristoph Lameter 
137139743889SChristoph Lameter /*
13728bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13738bccd85fSChristoph Lameter  */
1374e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1375e130242dSArnd Bergmann 		      unsigned long maxnode)
1376e130242dSArnd Bergmann {
1377e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1378e130242dSArnd Bergmann 	int ret;
1379e130242dSArnd Bergmann 
1380e130242dSArnd Bergmann 	if (in_compat_syscall())
1381e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1382e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1383e130242dSArnd Bergmann 					maxnode);
1384e130242dSArnd Bergmann 	else
1385e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1386e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1387e130242dSArnd Bergmann 
1388e130242dSArnd Bergmann 	if (ret)
1389e130242dSArnd Bergmann 		return -EFAULT;
1390e130242dSArnd Bergmann 
1391e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1392e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1393e130242dSArnd Bergmann 
1394e130242dSArnd Bergmann 	return 0;
1395e130242dSArnd Bergmann }
13968bccd85fSChristoph Lameter 
13978bccd85fSChristoph Lameter /* Copy a node mask from user space. */
139839743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13998bccd85fSChristoph Lameter 		     unsigned long maxnode)
14008bccd85fSChristoph Lameter {
14018bccd85fSChristoph Lameter 	--maxnode;
14028bccd85fSChristoph Lameter 	nodes_clear(*nodes);
14038bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
14048bccd85fSChristoph Lameter 		return 0;
1405a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1406636f13c1SChris Wright 		return -EINVAL;
14078bccd85fSChristoph Lameter 
140856521e7aSYisheng Xie 	/*
140956521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1410e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1411e130242dSArnd Bergmann 	 * starting at the end.
141256521e7aSYisheng Xie 	 */
1413e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1414e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1415e130242dSArnd Bergmann 		unsigned long t;
14168bccd85fSChristoph Lameter 
1417000eca5dSTianyu Li 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
141856521e7aSYisheng Xie 			return -EFAULT;
1419e130242dSArnd Bergmann 
1420e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1421e130242dSArnd Bergmann 			maxnode -= bits;
1422e130242dSArnd Bergmann 		} else {
1423e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1424e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1425e130242dSArnd Bergmann 		}
1426e130242dSArnd Bergmann 		if (t)
142756521e7aSYisheng Xie 			return -EINVAL;
142856521e7aSYisheng Xie 	}
142956521e7aSYisheng Xie 
1430e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14318bccd85fSChristoph Lameter }
14328bccd85fSChristoph Lameter 
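/*
 * Illustrative example: on a kernel with MAX_NUMNODES == 64, a caller
 * passing maxnode == 1024 must have every bit from 64 up to 1022 clear
 * in the user bitmap, or get_nodes() fails with -EINVAL.  Note that
 * maxnode is decremented before use, so bit N is only examined when
 * the caller passes maxnode >= N + 2; userspace libraries typically
 * pass the bitmap size in bits plus one for this reason.
 */
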
14338bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14348bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14358bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14368bccd85fSChristoph Lameter {
14378bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1438050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1439e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1440e130242dSArnd Bergmann 
1441e130242dSArnd Bergmann 	if (compat)
1442e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14438bccd85fSChristoph Lameter 
14448bccd85fSChristoph Lameter 	if (copy > nbytes) {
14458bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14468bccd85fSChristoph Lameter 			return -EINVAL;
14478bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14488bccd85fSChristoph Lameter 			return -EFAULT;
14498bccd85fSChristoph Lameter 		copy = nbytes;
1450e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14518bccd85fSChristoph Lameter 	}
1452e130242dSArnd Bergmann 
1453e130242dSArnd Bergmann 	if (compat)
1454e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1455e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1456e130242dSArnd Bergmann 
14578bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14588bccd85fSChristoph Lameter }
14598bccd85fSChristoph Lameter 
146095837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
146195837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
146295837924SFeng Tang {
146395837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
146495837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1465b27abaccSDave Hansen 
1466a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >=  MPOL_MAX)
146795837924SFeng Tang 		return -EINVAL;
146895837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
146995837924SFeng Tang 		return -EINVAL;
14706d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14716d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14726d2aec9eSEric Dumazet 			return -EINVAL;
14736d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14746d2aec9eSEric Dumazet 	}
147595837924SFeng Tang 	return 0;
147695837924SFeng Tang }
147795837924SFeng Tang 
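/*
 * Illustrative example: a caller passing
 * mode == MPOL_INTERLEAVE | MPOL_F_STATIC_NODES comes out of
 * sanitize_mpol_flags() with *mode == MPOL_INTERLEAVE and
 * *flags == MPOL_F_STATIC_NODES, whereas combining
 * MPOL_F_STATIC_NODES with MPOL_F_RELATIVE_NODES is rejected
 * with -EINVAL.
 */
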
1478e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1479e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1480e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14818bccd85fSChristoph Lameter {
1482028fec41SDavid Rientjes 	unsigned short mode_flags;
148395837924SFeng Tang 	nodemask_t nodes;
148495837924SFeng Tang 	int lmode = mode;
148595837924SFeng Tang 	int err;
14868bccd85fSChristoph Lameter 
1487057d3389SAndrey Konovalov 	start = untagged_addr(start);
148895837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
148995837924SFeng Tang 	if (err)
149095837924SFeng Tang 		return err;
149195837924SFeng Tang 
14928bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14938bccd85fSChristoph Lameter 	if (err)
14948bccd85fSChristoph Lameter 		return err;
149595837924SFeng Tang 
149695837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14978bccd85fSChristoph Lameter }
14988bccd85fSChristoph Lameter 
1499c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1500c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1501c6018b4bSAneesh Kumar K.V {
1502c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1503f4e9e0e6SLiam R. Howlett 	struct vm_area_struct *vma, *prev;
1504e976936cSMichal Hocko 	struct mempolicy *new, *old;
1505c6018b4bSAneesh Kumar K.V 	unsigned long end;
1506c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
150766850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
1508c6018b4bSAneesh Kumar K.V 
1509c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1510c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1511c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1512c6018b4bSAneesh Kumar K.V 	/*
1513c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extension and must currently be zero.
1514c6018b4bSAneesh Kumar K.V 	 */
1515c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1516c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1517c6018b4bSAneesh Kumar K.V 
1518c6018b4bSAneesh Kumar K.V 	/*
1519c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1520c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1521c6018b4bSAneesh Kumar K.V 	 */
1522c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1523c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1524c6018b4bSAneesh Kumar K.V 
1525aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
1526c6018b4bSAneesh Kumar K.V 	end = start + len;
1527c6018b4bSAneesh Kumar K.V 
1528c6018b4bSAneesh Kumar K.V 	if (end < start)
1529c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1530c6018b4bSAneesh Kumar K.V 	if (end == start)
1531c6018b4bSAneesh Kumar K.V 		return 0;
1532c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
1533f4e9e0e6SLiam R. Howlett 	prev = vma_prev(&vmi);
153466850be5SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1535c6018b4bSAneesh Kumar K.V 		/*
1536c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1537c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, we return an error.  We don't reset
1538c6018b4bSAneesh Kumar K.V 		 * the home node for vmas we already updated.
1539c6018b4bSAneesh Kumar K.V 		 */
1540e976936cSMichal Hocko 		old = vma_policy(vma);
1541e976936cSMichal Hocko 		if (!old)
1542e976936cSMichal Hocko 			continue;
1543e976936cSMichal Hocko 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1544c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1545c6018b4bSAneesh Kumar K.V 			break;
1546c6018b4bSAneesh Kumar K.V 		}
1547e976936cSMichal Hocko 		new = mpol_dup(old);
1548e976936cSMichal Hocko 		if (IS_ERR(new)) {
1549e976936cSMichal Hocko 			err = PTR_ERR(new);
1550e976936cSMichal Hocko 			break;
1551e976936cSMichal Hocko 		}
1552c6018b4bSAneesh Kumar K.V 
1553*6c21e066SJann Horn 		vma_start_write(vma);
1554c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1555f4e9e0e6SLiam R. Howlett 		err = mbind_range(&vmi, vma, &prev, start, end, new);
1556c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1557c6018b4bSAneesh Kumar K.V 		if (err)
1558c6018b4bSAneesh Kumar K.V 			break;
1559c6018b4bSAneesh Kumar K.V 	}
1560c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1561c6018b4bSAneesh Kumar K.V 	return err;
1562c6018b4bSAneesh Kumar K.V }
1563c6018b4bSAneesh Kumar K.V 
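/*
 * Illustrative userspace example (addr and length are assumed to name
 * an existing mapping that already has an MPOL_BIND or
 * MPOL_PREFERRED_MANY policy):
 *
 *	syscall(__NR_set_mempolicy_home_node, addr, length, 1, 0);
 *
 * This asks for node 1 to be used as the home node for the range.
 */
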
1564e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1565e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1566e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1567e7dc9ad6SDominik Brodowski {
1568e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1569e7dc9ad6SDominik Brodowski }
1570e7dc9ad6SDominik Brodowski 
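/*
 * Illustrative userspace example (addr and length are assumed to name
 * an existing mapping; needs <sys/syscall.h> and <numaif.h> for the
 * MPOL_* constants): bind the range to nodes 0 and 2 and migrate the
 * pages this process has mapped there:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 2);
 *
 *	syscall(__NR_mbind, addr, length, MPOL_BIND, &nodes,
 *		sizeof(nodes) * 8 + 1, MPOL_MF_MOVE);
 */
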
15718bccd85fSChristoph Lameter /* Set the process memory policy */
1572af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1573af03c4acSDominik Brodowski 				 unsigned long maxnode)
15748bccd85fSChristoph Lameter {
157595837924SFeng Tang 	unsigned short mode_flags;
15768bccd85fSChristoph Lameter 	nodemask_t nodes;
157795837924SFeng Tang 	int lmode = mode;
157895837924SFeng Tang 	int err;
15798bccd85fSChristoph Lameter 
158095837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
158195837924SFeng Tang 	if (err)
158295837924SFeng Tang 		return err;
158395837924SFeng Tang 
15848bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15858bccd85fSChristoph Lameter 	if (err)
15868bccd85fSChristoph Lameter 		return err;
158795837924SFeng Tang 
158895837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15898bccd85fSChristoph Lameter }
15908bccd85fSChristoph Lameter 
1591af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1592af03c4acSDominik Brodowski 		unsigned long, maxnode)
1593af03c4acSDominik Brodowski {
1594af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1595af03c4acSDominik Brodowski }
1596af03c4acSDominik Brodowski 
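/*
 * Illustrative userspace example: interleave this task's future
 * allocations across nodes 0 and 1:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &nodes,
 *		sizeof(nodes) * 8 + 1);
 */
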
1597b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1598b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1599b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
160039743889SChristoph Lameter {
1601596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
160239743889SChristoph Lameter 	struct task_struct *task;
160339743889SChristoph Lameter 	nodemask_t task_nodes;
160439743889SChristoph Lameter 	int err;
1605596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1606596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1607596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
160839743889SChristoph Lameter 
1609596d7cfaSKOSAKI Motohiro 	if (!scratch)
1610596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
161139743889SChristoph Lameter 
1612596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1613596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1614596d7cfaSKOSAKI Motohiro 
1615596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
161639743889SChristoph Lameter 	if (err)
1617596d7cfaSKOSAKI Motohiro 		goto out;
1618596d7cfaSKOSAKI Motohiro 
1619596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1620596d7cfaSKOSAKI Motohiro 	if (err)
1621596d7cfaSKOSAKI Motohiro 		goto out;
162239743889SChristoph Lameter 
162339743889SChristoph Lameter 	/* Find the mm_struct */
162455cfaa3cSZeng Zhaoming 	rcu_read_lock();
1625228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
162639743889SChristoph Lameter 	if (!task) {
162755cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1628596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1629596d7cfaSKOSAKI Motohiro 		goto out;
163039743889SChristoph Lameter 	}
16313268c63eSChristoph Lameter 	get_task_struct(task);
163239743889SChristoph Lameter 
1633596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
163439743889SChristoph Lameter 
163539743889SChristoph Lameter 	/*
163631367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
163731367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
163839743889SChristoph Lameter 	 */
163931367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1640c69e8d9cSDavid Howells 		rcu_read_unlock();
164139743889SChristoph Lameter 		err = -EPERM;
16423268c63eSChristoph Lameter 		goto out_put;
164339743889SChristoph Lameter 	}
1644c69e8d9cSDavid Howells 	rcu_read_unlock();
164539743889SChristoph Lameter 
164639743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
164739743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1648596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
164939743889SChristoph Lameter 		err = -EPERM;
16503268c63eSChristoph Lameter 		goto out_put;
165139743889SChristoph Lameter 	}
165239743889SChristoph Lameter 
16530486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16540486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16550486a38bSYisheng Xie 	if (nodes_empty(*new))
16563268c63eSChristoph Lameter 		goto out_put;
16570486a38bSYisheng Xie 
165886c3a764SDavid Quigley 	err = security_task_movememory(task);
165986c3a764SDavid Quigley 	if (err)
16603268c63eSChristoph Lameter 		goto out_put;
166186c3a764SDavid Quigley 
16623268c63eSChristoph Lameter 	mm = get_task_mm(task);
16633268c63eSChristoph Lameter 	put_task_struct(task);
1664f2a9ef88SSasha Levin 
1665f2a9ef88SSasha Levin 	if (!mm) {
1666f2a9ef88SSasha Levin 		err = -EINVAL;
1667f2a9ef88SSasha Levin 		goto out;
1668f2a9ef88SSasha Levin 	}
1669f2a9ef88SSasha Levin 
1670596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
167174c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16723268c63eSChristoph Lameter 
167339743889SChristoph Lameter 	mmput(mm);
16743268c63eSChristoph Lameter out:
1675596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1676596d7cfaSKOSAKI Motohiro 
167739743889SChristoph Lameter 	return err;
16783268c63eSChristoph Lameter 
16793268c63eSChristoph Lameter out_put:
16803268c63eSChristoph Lameter 	put_task_struct(task);
16813268c63eSChristoph Lameter 	goto out;
16823268c63eSChristoph Lameter 
168339743889SChristoph Lameter }
168439743889SChristoph Lameter 
1685b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1686b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1687b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1688b6e9b0baSDominik Brodowski {
1689b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1690b6e9b0baSDominik Brodowski }
1691b6e9b0baSDominik Brodowski 
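/*
 * Illustrative userspace example: move the pages of task <pid> that
 * currently sit on node 0 over to node 1, subject to the permission
 * checks above:
 *
 *	unsigned long old = 1UL << 0, new = 1UL << 1;
 *
 *	syscall(__NR_migrate_pages, pid, sizeof(old) * 8 + 1, &old, &new);
 */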
169239743889SChristoph Lameter 
16938bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1694af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1695af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1696af03c4acSDominik Brodowski 				unsigned long maxnode,
1697af03c4acSDominik Brodowski 				unsigned long addr,
1698af03c4acSDominik Brodowski 				unsigned long flags)
16998bccd85fSChristoph Lameter {
1700dbcb0f19SAdrian Bunk 	int err;
17013f649ab7SKees Cook 	int pval;
17028bccd85fSChristoph Lameter 	nodemask_t nodes;
17038bccd85fSChristoph Lameter 
1704050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
17058bccd85fSChristoph Lameter 		return -EINVAL;
17068bccd85fSChristoph Lameter 
17074605f057SWenchao Hao 	addr = untagged_addr(addr);
17084605f057SWenchao Hao 
17098bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
17108bccd85fSChristoph Lameter 
17118bccd85fSChristoph Lameter 	if (err)
17128bccd85fSChristoph Lameter 		return err;
17138bccd85fSChristoph Lameter 
17148bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
17158bccd85fSChristoph Lameter 		return -EFAULT;
17168bccd85fSChristoph Lameter 
17178bccd85fSChristoph Lameter 	if (nmask)
17188bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
17198bccd85fSChristoph Lameter 
17208bccd85fSChristoph Lameter 	return err;
17218bccd85fSChristoph Lameter }
17228bccd85fSChristoph Lameter 
1723af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1724af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1725af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1726af03c4acSDominik Brodowski {
1727af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1728af03c4acSDominik Brodowski }
1729af03c4acSDominik Brodowski 
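/*
 * Illustrative userspace example: ask which node currently backs the
 * page at addr (addr is assumed to point into a mapped, faulted-in
 * page):
 *
 *	int node = -1;
 *
 *	syscall(__NR_get_mempolicy, &node, NULL, 0, addr,
 *		MPOL_F_NODE | MPOL_F_ADDR);
 */
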
173020ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
173120ca87f2SLi Xinhai {
173220ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
173320ca87f2SLi Xinhai 		return false;
173420ca87f2SLi Xinhai 
173520ca87f2SLi Xinhai 	/*
173620ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
173720ca87f2SLi Xinhai 	 * incurring periodic faults.
173820ca87f2SLi Xinhai 	 */
173920ca87f2SLi Xinhai 	if (vma_is_dax(vma))
174020ca87f2SLi Xinhai 		return false;
174120ca87f2SLi Xinhai 
174220ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
174320ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
174420ca87f2SLi Xinhai 		return false;
174520ca87f2SLi Xinhai 
174620ca87f2SLi Xinhai 	/*
174720ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
174820ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
174920ca87f2SLi Xinhai 	 * possible.
175020ca87f2SLi Xinhai 	 */
175120ca87f2SLi Xinhai 	if (vma->vm_file &&
175220ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
175320ca87f2SLi Xinhai 			< policy_zone)
175420ca87f2SLi Xinhai 		return false;
175520ca87f2SLi Xinhai 	return true;
175620ca87f2SLi Xinhai }
175720ca87f2SLi Xinhai 
175874d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
175974d2c3a0SOleg Nesterov 						unsigned long addr)
17601da177e4SLinus Torvalds {
17618d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17621da177e4SLinus Torvalds 
17631da177e4SLinus Torvalds 	if (vma) {
1764480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17658d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
176600442ad0SMel Gorman 		} else if (vma->vm_policy) {
17671da177e4SLinus Torvalds 			pol = vma->vm_policy;
176800442ad0SMel Gorman 
176900442ad0SMel Gorman 			/*
177000442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
177100442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
177200442ad0SMel Gorman 			 * count on these policies which will be dropped by
177300442ad0SMel Gorman 			 * mpol_cond_put() later
177400442ad0SMel Gorman 			 */
177500442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
177600442ad0SMel Gorman 				mpol_get(pol);
177700442ad0SMel Gorman 		}
17781da177e4SLinus Torvalds 	}
1779f15ca78eSOleg Nesterov 
178074d2c3a0SOleg Nesterov 	return pol;
178174d2c3a0SOleg Nesterov }
178274d2c3a0SOleg Nesterov 
178374d2c3a0SOleg Nesterov /*
1784dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
178574d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
178674d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
178774d2c3a0SOleg Nesterov  *
178874d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1789dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
179074d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
179174d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
179274d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
179374d2c3a0SOleg Nesterov  * extra reference for shared policies.
179474d2c3a0SOleg Nesterov  */
1795ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1796dd6eecb9SOleg Nesterov 						unsigned long addr)
179774d2c3a0SOleg Nesterov {
179874d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
179974d2c3a0SOleg Nesterov 
18008d90274bSOleg Nesterov 	if (!pol)
1801dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
18028d90274bSOleg Nesterov 
18031da177e4SLinus Torvalds 	return pol;
18041da177e4SLinus Torvalds }
18051da177e4SLinus Torvalds 
18066b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1807fc314724SMel Gorman {
18086b6482bbSOleg Nesterov 	struct mempolicy *pol;
1809f15ca78eSOleg Nesterov 
1810fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1811fc314724SMel Gorman 		bool ret = false;
1812fc314724SMel Gorman 
1813fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1814fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1815fc314724SMel Gorman 			ret = true;
1816fc314724SMel Gorman 		mpol_cond_put(pol);
1817fc314724SMel Gorman 
1818fc314724SMel Gorman 		return ret;
18198d90274bSOleg Nesterov 	}
18208d90274bSOleg Nesterov 
1821fc314724SMel Gorman 	pol = vma->vm_policy;
18228d90274bSOleg Nesterov 	if (!pol)
18236b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1824fc314724SMel Gorman 
1825fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1826fc314724SMel Gorman }
1827fc314724SMel Gorman 
1828d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1829d3eb1570SLai Jiangshan {
1830d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1831d3eb1570SLai Jiangshan 
1832d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1833d3eb1570SLai Jiangshan 
1834d3eb1570SLai Jiangshan 	/*
1835269fbe72SBen Widawsky 	 * if policy->nodes has movable memory only,
1836d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1837d3eb1570SLai Jiangshan 	 *
1838269fbe72SBen Widawsky 	 * policy->nodes is intersect with node_states[N_MEMORY].
1839f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1840269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1841d3eb1570SLai Jiangshan 	 */
1842269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1843d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1844d3eb1570SLai Jiangshan 
1845d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1846d3eb1570SLai Jiangshan }
1847d3eb1570SLai Jiangshan 
184852cd3b07SLee Schermerhorn /*
184952cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
185052cd3b07SLee Schermerhorn  * page allocation
185152cd3b07SLee Schermerhorn  */
18528ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
185319770b32SMel Gorman {
1854b27abaccSDave Hansen 	int mode = policy->mode;
1855b27abaccSDave Hansen 
185619770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1857b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1858d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1859269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1860269fbe72SBen Widawsky 		return &policy->nodes;
186119770b32SMel Gorman 
1862b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1863b27abaccSDave Hansen 		return &policy->nodes;
1864b27abaccSDave Hansen 
186519770b32SMel Gorman 	return NULL;
186619770b32SMel Gorman }
186719770b32SMel Gorman 
1868b27abaccSDave Hansen /*
1869b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1870b27abaccSDave Hansen  * the given id for all other policies.
1871b27abaccSDave Hansen  *
1872b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1873b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1874b27abaccSDave Hansen  */
1875f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18761da177e4SLinus Torvalds {
18777858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1878269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18797858d7bcSFeng Tang 	} else {
188019770b32SMel Gorman 		/*
18816d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18826d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18836d840958SMichal Hocko 		 * requested node and not break the policy.
188419770b32SMel Gorman 		 */
18856d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18861da177e4SLinus Torvalds 	}
18876d840958SMichal Hocko 
1888c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1889c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1890c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1891c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1892c6018b4bSAneesh Kumar K.V 
189304ec6264SVlastimil Babka 	return nd;
18941da177e4SLinus Torvalds }
18951da177e4SLinus Torvalds 
18961da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18971da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18981da177e4SLinus Torvalds {
189945816682SVlastimil Babka 	unsigned next;
19001da177e4SLinus Torvalds 	struct task_struct *me = current;
19011da177e4SLinus Torvalds 
1902269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1903f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
190445816682SVlastimil Babka 		me->il_prev = next;
190545816682SVlastimil Babka 	return next;
19061da177e4SLinus Torvalds }
19071da177e4SLinus Torvalds 
1908dc85da15SChristoph Lameter /*
1909dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1910dc85da15SChristoph Lameter  * next slab entry.
1911dc85da15SChristoph Lameter  */
19122a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1913dc85da15SChristoph Lameter {
1914e7b691b0SAndi Kleen 	struct mempolicy *policy;
19152a389610SDavid Rientjes 	int node = numa_mem_id();
1916e7b691b0SAndi Kleen 
191738b031ddSVasily Averin 	if (!in_task())
19182a389610SDavid Rientjes 		return node;
1919e7b691b0SAndi Kleen 
1920e7b691b0SAndi Kleen 	policy = current->mempolicy;
19217858d7bcSFeng Tang 	if (!policy)
19222a389610SDavid Rientjes 		return node;
1923765c4507SChristoph Lameter 
1924bea904d5SLee Schermerhorn 	switch (policy->mode) {
1925bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1926269fbe72SBen Widawsky 		return first_node(policy->nodes);
1927bea904d5SLee Schermerhorn 
1928dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1929dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1930dc85da15SChristoph Lameter 
1931b27abaccSDave Hansen 	case MPOL_BIND:
1932b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1933b27abaccSDave Hansen 	{
1934c33d6c06SMel Gorman 		struct zoneref *z;
1935c33d6c06SMel Gorman 
1936dc85da15SChristoph Lameter 		/*
1937dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1938dc85da15SChristoph Lameter 		 * first node.
1939dc85da15SChristoph Lameter 		 */
194019770b32SMel Gorman 		struct zonelist *zonelist;
194119770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1942c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1943c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1944269fbe72SBen Widawsky 							&policy->nodes);
1945c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1946dd1a239fSMel Gorman 	}
19477858d7bcSFeng Tang 	case MPOL_LOCAL:
19487858d7bcSFeng Tang 		return node;
1949dc85da15SChristoph Lameter 
1950dc85da15SChristoph Lameter 	default:
1951bea904d5SLee Schermerhorn 		BUG();
1952dc85da15SChristoph Lameter 	}
1953dc85da15SChristoph Lameter }
1954dc85da15SChristoph Lameter 
1955fee83b3aSAndrew Morton /*
1956fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1957269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1958fee83b3aSAndrew Morton  * number of present nodes.
1959fee83b3aSAndrew Morton  */
196098c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19611da177e4SLinus Torvalds {
1962276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1963276aeee1Syanghui 	unsigned int target, nnodes;
1964fee83b3aSAndrew Morton 	int i;
1965fee83b3aSAndrew Morton 	int nid;
1966276aeee1Syanghui 	/*
1967276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1968276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1969276aeee1Syanghui 	 *
1970276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1971276aeee1Syanghui 	 * by other threads. So we put pol->nodes in a local copy on the stack.
1972276aeee1Syanghui 	 */
1973276aeee1Syanghui 	barrier();
19741da177e4SLinus Torvalds 
1975276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1976f5b087b5SDavid Rientjes 	if (!nnodes)
1977f5b087b5SDavid Rientjes 		return numa_node_id();
1978fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1979276aeee1Syanghui 	nid = first_node(nodemask);
1980fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1981276aeee1Syanghui 		nid = next_node(nid, nodemask);
19821da177e4SLinus Torvalds 	return nid;
19831da177e4SLinus Torvalds }
19841da177e4SLinus Torvalds 
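/*
 * Illustrative example: with pol->nodes == {0,2,5} and n == 7, nnodes
 * is 3 and target is 7 % 3 == 1, so the walk starts at node 0 and
 * advances once, returning node 2.
 */
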
19855da7ca86SChristoph Lameter /* Determine a node number for interleave */
19865da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19875da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19885da7ca86SChristoph Lameter {
19895da7ca86SChristoph Lameter 	if (vma) {
19905da7ca86SChristoph Lameter 		unsigned long off;
19915da7ca86SChristoph Lameter 
19923b98b087SNishanth Aravamudan 		/*
19933b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19943b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19953b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19963b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19973b98b087SNishanth Aravamudan 		 * a useful offset.
19983b98b087SNishanth Aravamudan 		 */
19993b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
20003b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
20015da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
200298c70baaSLaurent Dufour 		return offset_il_node(pol, off);
20035da7ca86SChristoph Lameter 	} else
20045da7ca86SChristoph Lameter 		return interleave_nodes(pol);
20055da7ca86SChristoph Lameter }
20065da7ca86SChristoph Lameter 
200700ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
2008480eccf9SLee Schermerhorn /*
200904ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2010b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2011b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2012b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2013b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2014b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2015480eccf9SLee Schermerhorn  *
201604ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
201752cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2018b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2019b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2020c0ff7453SMiao Xie  *
2021d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2022480eccf9SLee Schermerhorn  */
202304ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
202404ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20255da7ca86SChristoph Lameter {
202604ec6264SVlastimil Babka 	int nid;
2027b27abaccSDave Hansen 	int mode;
20285da7ca86SChristoph Lameter 
2029dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2030b27abaccSDave Hansen 	*nodemask = NULL;
2031b27abaccSDave Hansen 	mode = (*mpol)->mode;
20325da7ca86SChristoph Lameter 
2033b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
203404ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
203504ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
203652cd3b07SLee Schermerhorn 	} else {
203704ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2038b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2039269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2040480eccf9SLee Schermerhorn 	}
204104ec6264SVlastimil Babka 	return nid;
20425da7ca86SChristoph Lameter }
204306808b08SLee Schermerhorn 
204406808b08SLee Schermerhorn /*
204506808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
204606808b08SLee Schermerhorn  *
204706808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
204806808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
204906808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
205006808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
205106808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
205206808b08SLee Schermerhorn  * of non-default mempolicy.
205306808b08SLee Schermerhorn  *
205406808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
205506808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
205606808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
205706808b08SLee Schermerhorn  *
205806808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
205906808b08SLee Schermerhorn  */
206006808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
206106808b08SLee Schermerhorn {
206206808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
206306808b08SLee Schermerhorn 
206406808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
206506808b08SLee Schermerhorn 		return false;
206606808b08SLee Schermerhorn 
2067c0ff7453SMiao Xie 	task_lock(current);
206806808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
206906808b08SLee Schermerhorn 	switch (mempolicy->mode) {
207006808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2071b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
207206808b08SLee Schermerhorn 	case MPOL_BIND:
207306808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2074269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
207506808b08SLee Schermerhorn 		break;
207606808b08SLee Schermerhorn 
20777858d7bcSFeng Tang 	case MPOL_LOCAL:
2078269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20797858d7bcSFeng Tang 		break;
20807858d7bcSFeng Tang 
208106808b08SLee Schermerhorn 	default:
208206808b08SLee Schermerhorn 		BUG();
208306808b08SLee Schermerhorn 	}
2084c0ff7453SMiao Xie 	task_unlock(current);
208506808b08SLee Schermerhorn 
208606808b08SLee Schermerhorn 	return true;
208706808b08SLee Schermerhorn }
208800ac59adSChen, Kenneth W #endif
20895da7ca86SChristoph Lameter 
20906f48d0ebSDavid Rientjes /*
2091b26e517aSFeng Tang  * mempolicy_in_oom_domain
20926f48d0ebSDavid Rientjes  *
2093b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2094b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2095b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2096b26e517aSFeng Tang  * memory allocated from all nodes in the system.
20976f48d0ebSDavid Rientjes  *
20986f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20996f48d0ebSDavid Rientjes  */
2100b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
21016f48d0ebSDavid Rientjes 					const nodemask_t *mask)
21026f48d0ebSDavid Rientjes {
21036f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
21046f48d0ebSDavid Rientjes 	bool ret = true;
21056f48d0ebSDavid Rientjes 
21066f48d0ebSDavid Rientjes 	if (!mask)
21076f48d0ebSDavid Rientjes 		return ret;
2108b26e517aSFeng Tang 
21096f48d0ebSDavid Rientjes 	task_lock(tsk);
21106f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2111b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2112269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
21136f48d0ebSDavid Rientjes 	task_unlock(tsk);
2114b26e517aSFeng Tang 
21156f48d0ebSDavid Rientjes 	return ret;
21166f48d0ebSDavid Rientjes }
21176f48d0ebSDavid Rientjes 
21181da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21191da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2120662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2121662f3a0bSAndi Kleen 					unsigned nid)
21221da177e4SLinus Torvalds {
21231da177e4SLinus Torvalds 	struct page *page;
21241da177e4SLinus Torvalds 
212584172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21264518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21274518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21284518085eSKemi Wang 		return page;
2129de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2130de55c8b2SAndrey Ryabinin 		preempt_disable();
2131f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2132de55c8b2SAndrey Ryabinin 		preempt_enable();
2133de55c8b2SAndrey Ryabinin 	}
21341da177e4SLinus Torvalds 	return page;
21351da177e4SLinus Torvalds }
21361da177e4SLinus Torvalds 
21374c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21384c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21394c54d949SFeng Tang {
21404c54d949SFeng Tang 	struct page *page;
21414c54d949SFeng Tang 	gfp_t preferred_gfp;
21424c54d949SFeng Tang 
21434c54d949SFeng Tang 	/*
21444c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
21454c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
21464c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21474c54d949SFeng Tang 	 * nodes in the system.
21484c54d949SFeng Tang 	 */
21494c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21504c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21514c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21524c54d949SFeng Tang 	if (!page)
2153c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21544c54d949SFeng Tang 
21554c54d949SFeng Tang 	return page;
21564c54d949SFeng Tang }
21574c54d949SFeng Tang 
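/*
 * Worked example of the two-pass gfp derivation above (illustrative, not
 * part of the original source): for gfp == GFP_KERNEL, which includes
 * __GFP_DIRECT_RECLAIM, the first pass allocates with
 * (GFP_KERNEL | __GFP_NOWARN) & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL),
 * i.e. a non-sleeping, no-warning attempt restricted to pol->nodes; only
 * if that fails does the second pass retry with the caller's unmodified
 * flags and no nodemask restriction.
 */
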
21581da177e4SLinus Torvalds /**
2159adf88aa8SMatthew Wilcox (Oracle)  * vma_alloc_folio - Allocate a folio for a VMA.
2160eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2161adf88aa8SMatthew Wilcox (Oracle)  * @order: Order of the folio.
21621da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2163eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2164eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21651da177e4SLinus Torvalds  *
2166adf88aa8SMatthew Wilcox (Oracle)  * Allocate a folio for a specific address in @vma, using the appropriate
2167eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2168eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2169adf88aa8SMatthew Wilcox (Oracle)  * used for all allocations for folios that will be mapped into user space.
2170eb350739SMatthew Wilcox (Oracle)  *
2171adf88aa8SMatthew Wilcox (Oracle)  * Return: The folio on success or NULL if allocation fails.
21721da177e4SLinus Torvalds  */
2173adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2174be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21751da177e4SLinus Torvalds {
2176cc9a6c87SMel Gorman 	struct mempolicy *pol;
2177be1a13ebSMichal Hocko 	int node = numa_node_id();
2178adf88aa8SMatthew Wilcox (Oracle) 	struct folio *folio;
217904ec6264SVlastimil Babka 	int preferred_nid;
2180be97a41bSVlastimil Babka 	nodemask_t *nmask;
21811da177e4SLinus Torvalds 
2182dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2183cc9a6c87SMel Gorman 
2184be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
2185adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
21861da177e4SLinus Torvalds 		unsigned nid;
21875da7ca86SChristoph Lameter 
21888eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
218952cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
2190adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21910bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2192adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2193adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2194adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
2195be97a41bSVlastimil Babka 		goto out;
21961da177e4SLinus Torvalds 	}
21971da177e4SLinus Torvalds 
21984c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2199adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
2200adf88aa8SMatthew Wilcox (Oracle) 
2201c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
2202adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
22034c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
22044c54d949SFeng Tang 		mpol_cond_put(pol);
2205adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2206adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2207adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
22084c54d949SFeng Tang 		goto out;
22094c54d949SFeng Tang 	}
22104c54d949SFeng Tang 
221119deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
221219deb769SDavid Rientjes 		int hpage_node = node;
221319deb769SDavid Rientjes 
221419deb769SDavid Rientjes 		/*
221519deb769SDavid Rientjes 		 * For hugepage allocation with a non-interleave policy that
221619deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
221719deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
221819deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
221919deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
222019deb769SDavid Rientjes 		 *
2221b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
222219deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
222319deb769SDavid Rientjes 		 */
22247858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2225269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
222619deb769SDavid Rientjes 
222719deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
222819deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
222919deb769SDavid Rientjes 			mpol_cond_put(pol);
2230cc638f32SVlastimil Babka 			/*
2231cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2232cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2233cc638f32SVlastimil Babka 			 */
2234adf88aa8SMatthew Wilcox (Oracle) 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2235adf88aa8SMatthew Wilcox (Oracle) 					__GFP_NORETRY, order, hpage_node);
223676e654ccSDavid Rientjes 
223776e654ccSDavid Rientjes 			/*
223876e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
223976e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
224076e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2241cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
224276e654ccSDavid Rientjes 			 */
2243adf88aa8SMatthew Wilcox (Oracle) 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2244adf88aa8SMatthew Wilcox (Oracle) 				folio = __folio_alloc(gfp, order, hpage_node,
2245adf88aa8SMatthew Wilcox (Oracle) 						      nmask);
224676e654ccSDavid Rientjes 
224719deb769SDavid Rientjes 			goto out;
224819deb769SDavid Rientjes 		}
224919deb769SDavid Rientjes 	}
225019deb769SDavid Rientjes 
2251077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
225204ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2253adf88aa8SMatthew Wilcox (Oracle) 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2254d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2255be97a41bSVlastimil Babka out:
2256f584b680SMatthew Wilcox (Oracle) 	return folio;
2257f584b680SMatthew Wilcox (Oracle) }
2258adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio);
2259f584b680SMatthew Wilcox (Oracle) 
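/*
 * Illustrative sketch, not part of the original source: a minimal
 * fault-path style caller.  GFP_HIGHUSER_MOVABLE is the usual choice for
 * memory mapped into user space; order 0 yields a single-page folio.  The
 * helper name demo_alloc_user_folio() is hypothetical.
 */
static inline struct folio *demo_alloc_user_folio(struct vm_area_struct *vma,
						  unsigned long addr)
{
	/* Caller must hold the mmap_lock of vma->vm_mm, per the kernel-doc. */
	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}
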
22601da177e4SLinus Torvalds /**
2261d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22626421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22636421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22641da177e4SLinus Torvalds  *
22656421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22666421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (eg an order-3 allocation will be aligned
22676421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22686421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22691da177e4SLinus Torvalds  *
22706421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22716421ec76SMatthew Wilcox (Oracle)  * flags are used.
22726421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22731da177e4SLinus Torvalds  */
2274d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22751da177e4SLinus Torvalds {
22768d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2277c0ff7453SMiao Xie 	struct page *page;
22781da177e4SLinus Torvalds 
22798d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22808d90274bSOleg Nesterov 		pol = get_task_policy(current);
228152cd3b07SLee Schermerhorn 
228252cd3b07SLee Schermerhorn 	/*
228352cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
228452cd3b07SLee Schermerhorn 	 * nor system default_policy
228552cd3b07SLee Schermerhorn 	 */
228645c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2287c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22884c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22894c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2290c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2291c0ff7453SMiao Xie 	else
229284172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
229304ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22945c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2295cc9a6c87SMel Gorman 
2296c0ff7453SMiao Xie 	return page;
22971da177e4SLinus Torvalds }
2298d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
22991da177e4SLinus Torvalds 
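/*
 * Illustrative sketch, not part of the original source: per the natural
 * alignment described above, an order-2 allocation returns four contiguous
 * pages aligned to 4 * PAGE_SIZE.  demo_alloc_quad() is a hypothetical
 * helper.
 */
static inline struct page *demo_alloc_quad(void)
{
	/* Four contiguous pages; release with __free_pages(page, 2). */
	return alloc_pages(GFP_KERNEL, 2);
}
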
2300cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2301cc09cb13SMatthew Wilcox (Oracle) {
2302cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2303cc09cb13SMatthew Wilcox (Oracle) 
2304cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2305cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2306cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2307cc09cb13SMatthew Wilcox (Oracle) }
2308cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
2309cc09cb13SMatthew Wilcox (Oracle) 
2310c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2311c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2312c00b6b96SChen Wandun 		struct page **page_array)
2313c00b6b96SChen Wandun {
2314c00b6b96SChen Wandun 	int nodes;
2315c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2316c00b6b96SChen Wandun 	int delta;
2317c00b6b96SChen Wandun 	int i;
2318c00b6b96SChen Wandun 	unsigned long nr_allocated;
2319c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2320c00b6b96SChen Wandun 
2321c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2322c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2323c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2324c00b6b96SChen Wandun 
2325c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2326c00b6b96SChen Wandun 		if (delta) {
2327c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2328c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2329c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2330c00b6b96SChen Wandun 					page_array);
2331c00b6b96SChen Wandun 			delta--;
2332c00b6b96SChen Wandun 		} else {
2333c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2334c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2335c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2336c00b6b96SChen Wandun 		}
2337c00b6b96SChen Wandun 
2338c00b6b96SChen Wandun 		page_array += nr_allocated;
2339c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2340c00b6b96SChen Wandun 	}
2341c00b6b96SChen Wandun 
2342c00b6b96SChen Wandun 	return total_allocated;
2343c00b6b96SChen Wandun }
2344c00b6b96SChen Wandun 
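/*
 * Worked example of the split above (illustrative, not part of the
 * original source): for nr_pages == 10 spread over nodes_weight() == 4
 * nodes, nr_pages_per_node == 2 and delta == 10 - 4 * 2 == 2, so the
 * first two interleave nodes receive 3 pages each and the remaining two
 * receive 2, i.e. 3 + 3 + 2 + 2 == 10 in total.
 */
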
2345c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2346c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2347c00b6b96SChen Wandun 		struct page **page_array)
2348c00b6b96SChen Wandun {
2349c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2350c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2351c00b6b96SChen Wandun 
2352c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2353c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2354c00b6b96SChen Wandun 
2355c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2356c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2357c00b6b96SChen Wandun 
2358c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2359c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2360c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2361c00b6b96SChen Wandun 				page_array + nr_allocated);
2362c00b6b96SChen Wandun 	return nr_allocated;
2363c00b6b96SChen Wandun }
2364c00b6b96SChen Wandun 
2365c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy must be considered at the
2366c00b6b96SChen Wandun  * same time in some situations, such as vmalloc.
2367c00b6b96SChen Wandun  *
2368c00b6b96SChen Wandun  * It can accelerate memory allocation, especially for interleaved
2369c00b6b96SChen Wandun  * allocations.
2370c00b6b96SChen Wandun  */
2371c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2372c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2373c00b6b96SChen Wandun {
2374c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2375c00b6b96SChen Wandun 
2376c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2377c00b6b96SChen Wandun 		pol = get_task_policy(current);
2378c00b6b96SChen Wandun 
2379c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2380c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2381c00b6b96SChen Wandun 							 nr_pages, page_array);
2382c00b6b96SChen Wandun 
2383c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2384c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2385c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2386c00b6b96SChen Wandun 
2387c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2388c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2389c00b6b96SChen Wandun 				  page_array);
2390c00b6b96SChen Wandun }
2391c00b6b96SChen Wandun 
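/*
 * Illustrative sketch, not part of the original source: a vmalloc-style
 * caller that tolerates partial success and retries for the remainder,
 * advancing the array cursor each round.  demo_bulk_fill() is a
 * hypothetical helper.
 */
static inline unsigned long demo_bulk_fill(struct page **pages,
					   unsigned long nr)
{
	unsigned long got = 0;

	while (got < nr) {
		unsigned long n;

		n = alloc_pages_bulk_array_mempolicy(GFP_KERNEL,
						     nr - got, pages + got);
		if (!n)
			break;	/* give up, or reclaim and retry */
		got += n;
	}
	return got;
}
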
2392ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2393ef0855d3SOleg Nesterov {
2394ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2395ef0855d3SOleg Nesterov 
2396ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2397ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2398ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2399ef0855d3SOleg Nesterov 	return 0;
2400ef0855d3SOleg Nesterov }
2401ef0855d3SOleg Nesterov 
24024225399aSPaul Jackson /*
2403846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
24044225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
24054225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
24064225399aSPaul Jackson  * keeps mempolicies cpuset relative after the task's cpuset moves.  See
24074225399aSPaul Jackson  * also kernel/cpuset.c update_nodemask().
2408708c1bbcSMiao Xie  *
2409708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the one that changes
2410708c1bbcSMiao Xie  * the cpuset's mems), so we need not do the rebind work for the current task.
24114225399aSPaul Jackson  */
24124225399aSPaul Jackson 
2413846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2414846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24151da177e4SLinus Torvalds {
24161da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24171da177e4SLinus Torvalds 
24181da177e4SLinus Torvalds 	if (!new)
24191da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2420708c1bbcSMiao Xie 
2421708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2422708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2423708c1bbcSMiao Xie 		task_lock(current);
2424708c1bbcSMiao Xie 		*new = *old;
2425708c1bbcSMiao Xie 		task_unlock(current);
2426708c1bbcSMiao Xie 	} else
2427708c1bbcSMiao Xie 		*new = *old;
2428708c1bbcSMiao Xie 
24294225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24304225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2431213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24324225399aSPaul Jackson 	}
24331da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24341da177e4SLinus Torvalds 	return new;
24351da177e4SLinus Torvalds }
24361da177e4SLinus Torvalds 
24371da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2438fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24391da177e4SLinus Torvalds {
24401da177e4SLinus Torvalds 	if (!a || !b)
2441fcfb4dccSKOSAKI Motohiro 		return false;
244245c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2443fcfb4dccSKOSAKI Motohiro 		return false;
244419800502SBob Liu 	if (a->flags != b->flags)
2445fcfb4dccSKOSAKI Motohiro 		return false;
2446c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2447c6018b4bSAneesh Kumar K.V 		return false;
244819800502SBob Liu 	if (mpol_store_user_nodemask(a))
244919800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2450fcfb4dccSKOSAKI Motohiro 			return false;
245119800502SBob Liu 
245245c4745aSLee Schermerhorn 	switch (a->mode) {
245319770b32SMel Gorman 	case MPOL_BIND:
24541da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24551da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2456b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2457269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24587858d7bcSFeng Tang 	case MPOL_LOCAL:
24597858d7bcSFeng Tang 		return true;
24601da177e4SLinus Torvalds 	default:
24611da177e4SLinus Torvalds 		BUG();
2462fcfb4dccSKOSAKI Motohiro 		return false;
24631da177e4SLinus Torvalds 	}
24641da177e4SLinus Torvalds }
24651da177e4SLinus Torvalds 
24661da177e4SLinus Torvalds /*
24671da177e4SLinus Torvalds  * Shared memory backing store policy support.
24681da177e4SLinus Torvalds  *
24691da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24701da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
24714a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24721da177e4SLinus Torvalds  * for any accesses to the tree.
24731da177e4SLinus Torvalds  */
24741da177e4SLinus Torvalds 
24754a8c7bb5SNathan Zimmer /*
24764a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock for
24774a8c7bb5SNathan Zimmer  * reading or for writing
24784a8c7bb5SNathan Zimmer  */
24791da177e4SLinus Torvalds static struct sp_node *
24801da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24811da177e4SLinus Torvalds {
24821da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24831da177e4SLinus Torvalds 
24841da177e4SLinus Torvalds 	while (n) {
24851da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24861da177e4SLinus Torvalds 
24871da177e4SLinus Torvalds 		if (start >= p->end)
24881da177e4SLinus Torvalds 			n = n->rb_right;
24891da177e4SLinus Torvalds 		else if (end <= p->start)
24901da177e4SLinus Torvalds 			n = n->rb_left;
24911da177e4SLinus Torvalds 		else
24921da177e4SLinus Torvalds 			break;
24931da177e4SLinus Torvalds 	}
24941da177e4SLinus Torvalds 	if (!n)
24951da177e4SLinus Torvalds 		return NULL;
24961da177e4SLinus Torvalds 	for (;;) {
24971da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24981da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24991da177e4SLinus Torvalds 		if (!prev)
25001da177e4SLinus Torvalds 			break;
25011da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
25021da177e4SLinus Torvalds 		if (w->end <= start)
25031da177e4SLinus Torvalds 			break;
25041da177e4SLinus Torvalds 		n = prev;
25051da177e4SLinus Torvalds 	}
25061da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
25071da177e4SLinus Torvalds }
25081da177e4SLinus Torvalds 
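/*
 * Worked example (illustrative, not part of the original source): with
 * ranges [2,5) and [5,9) in the tree, sp_lookup(sp, 4, 6) may first land
 * on [5,9), which intersects [4,6); the rb_prev() loop then walks back to
 * [2,5), which also intersects, and returns it -- the first intersecting
 * element, as required.
 */
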
25094a8c7bb5SNathan Zimmer /*
25104a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
25114a8c7bb5SNathan Zimmer  * writing.
25124a8c7bb5SNathan Zimmer  */
25131da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
25141da177e4SLinus Torvalds {
25151da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
25161da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
25171da177e4SLinus Torvalds 	struct sp_node *nd;
25181da177e4SLinus Torvalds 
25191da177e4SLinus Torvalds 	while (*p) {
25201da177e4SLinus Torvalds 		parent = *p;
25211da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25221da177e4SLinus Torvalds 		if (new->start < nd->start)
25231da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25241da177e4SLinus Torvalds 		else if (new->end > nd->end)
25251da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25261da177e4SLinus Torvalds 		else
25271da177e4SLinus Torvalds 			BUG();
25281da177e4SLinus Torvalds 	}
25291da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25301da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2531140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
253245c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25331da177e4SLinus Torvalds }
25341da177e4SLinus Torvalds 
25351da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25361da177e4SLinus Torvalds struct mempolicy *
25371da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25381da177e4SLinus Torvalds {
25391da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25401da177e4SLinus Torvalds 	struct sp_node *sn;
25411da177e4SLinus Torvalds 
25421da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25431da177e4SLinus Torvalds 		return NULL;
25444a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25451da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25461da177e4SLinus Torvalds 	if (sn) {
25471da177e4SLinus Torvalds 		mpol_get(sn->policy);
25481da177e4SLinus Torvalds 		pol = sn->policy;
25491da177e4SLinus Torvalds 	}
25504a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25511da177e4SLinus Torvalds 	return pol;
25521da177e4SLinus Torvalds }
25531da177e4SLinus Torvalds 
255463f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
255563f74ca2SKOSAKI Motohiro {
255663f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
255763f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
255863f74ca2SKOSAKI Motohiro }
255963f74ca2SKOSAKI Motohiro 
2560771fb4d8SLee Schermerhorn /**
2561771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether the current page's node is valid under the policy
2562771fb4d8SLee Schermerhorn  *
2563b46e14acSFabian Frederick  * @page: page to be checked
2564b46e14acSFabian Frederick  * @vma: vm area where page mapped
2565b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2566771fb4d8SLee Schermerhorn  *
2567771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and compare it to the page's
25685f076944SMatthew Wilcox (Oracle)  * node id.  Policy determination "mimics" alloc_page_vma().
2569771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25705f076944SMatthew Wilcox (Oracle)  *
2571062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2572062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2573771fb4d8SLee Schermerhorn  */
2574771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2575771fb4d8SLee Schermerhorn {
2576771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2577c33d6c06SMel Gorman 	struct zoneref *z;
2578771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2579771fb4d8SLee Schermerhorn 	unsigned long pgoff;
258090572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
258190572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
258298fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2583062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2584771fb4d8SLee Schermerhorn 
2585dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2586771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2587771fb4d8SLee Schermerhorn 		goto out;
2588771fb4d8SLee Schermerhorn 
2589771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2590771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2591771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2592771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
259398c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2594771fb4d8SLee Schermerhorn 		break;
2595771fb4d8SLee Schermerhorn 
2596771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2597b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2598b27abaccSDave Hansen 			goto out;
2599269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2600771fb4d8SLee Schermerhorn 		break;
2601771fb4d8SLee Schermerhorn 
26027858d7bcSFeng Tang 	case MPOL_LOCAL:
26037858d7bcSFeng Tang 		polnid = numa_node_id();
26047858d7bcSFeng Tang 		break;
26057858d7bcSFeng Tang 
2606771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2607bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2608bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2609269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2610bda420b9SHuang Ying 				break;
2611bda420b9SHuang Ying 			goto out;
2612bda420b9SHuang Ying 		}
2613b27abaccSDave Hansen 		fallthrough;
2614c33d6c06SMel Gorman 
2615b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2616771fb4d8SLee Schermerhorn 		/*
2617771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2618771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2619771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2620771fb4d8SLee Schermerhorn 		 */
2621269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2622771fb4d8SLee Schermerhorn 			goto out;
2623c33d6c06SMel Gorman 		z = first_zones_zonelist(
2624771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2625771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2626269fbe72SBen Widawsky 				&pol->nodes);
2627c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2628771fb4d8SLee Schermerhorn 		break;
2629771fb4d8SLee Schermerhorn 
2630771fb4d8SLee Schermerhorn 	default:
2631771fb4d8SLee Schermerhorn 		BUG();
2632771fb4d8SLee Schermerhorn 	}
26335606e387SMel Gorman 
26345606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2635e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
263690572890SPeter Zijlstra 		polnid = thisnid;
26375606e387SMel Gorman 
263810f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2639de1c9ce6SRik van Riel 			goto out;
2640de1c9ce6SRik van Riel 	}
2641e42c8ff2SMel Gorman 
2642771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2643771fb4d8SLee Schermerhorn 		ret = polnid;
2644771fb4d8SLee Schermerhorn out:
2645771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2646771fb4d8SLee Schermerhorn 
2647771fb4d8SLee Schermerhorn 	return ret;
2648771fb4d8SLee Schermerhorn }
2649771fb4d8SLee Schermerhorn 
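/*
 * Illustrative sketch, not part of the original source: a NUMA hinting
 * fault handler uses mpol_misplaced() roughly like this, migrating the
 * page when a better node is suggested.  demo_numa_hint() is a
 * hypothetical helper; migrate_misplaced_page() is the helper the real
 * fault path uses.
 */
static inline void demo_numa_hint(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long addr)
{
	int target = mpol_misplaced(page, vma, addr);

	if (target != NUMA_NO_NODE)
		migrate_misplaced_page(page, vma, target);
}
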
2650c11600e4SDavid Rientjes /*
2651c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2652c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2653c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2654c11600e4SDavid Rientjes  * policy.
2655c11600e4SDavid Rientjes  */
2656c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2657c11600e4SDavid Rientjes {
2658c11600e4SDavid Rientjes 	struct mempolicy *pol;
2659c11600e4SDavid Rientjes 
2660c11600e4SDavid Rientjes 	task_lock(task);
2661c11600e4SDavid Rientjes 	pol = task->mempolicy;
2662c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2663c11600e4SDavid Rientjes 	task_unlock(task);
2664c11600e4SDavid Rientjes 	mpol_put(pol);
2665c11600e4SDavid Rientjes }
2666c11600e4SDavid Rientjes 
26671da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26681da177e4SLinus Torvalds {
2669140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26701da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
267163f74ca2SKOSAKI Motohiro 	sp_free(n);
26721da177e4SLinus Torvalds }
26731da177e4SLinus Torvalds 
267442288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
267542288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
267642288fe3SMel Gorman {
267742288fe3SMel Gorman 	node->start = start;
267842288fe3SMel Gorman 	node->end = end;
267942288fe3SMel Gorman 	node->policy = pol;
268042288fe3SMel Gorman }
268142288fe3SMel Gorman 
2682dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2683dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26841da177e4SLinus Torvalds {
2685869833f2SKOSAKI Motohiro 	struct sp_node *n;
2686869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26871da177e4SLinus Torvalds 
2688869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26891da177e4SLinus Torvalds 	if (!n)
26901da177e4SLinus Torvalds 		return NULL;
2691869833f2SKOSAKI Motohiro 
2692869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2693869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2694869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2695869833f2SKOSAKI Motohiro 		return NULL;
2696869833f2SKOSAKI Motohiro 	}
2697869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
269842288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2699869833f2SKOSAKI Motohiro 
27001da177e4SLinus Torvalds 	return n;
27011da177e4SLinus Torvalds }
27021da177e4SLinus Torvalds 
27031da177e4SLinus Torvalds /* Replace a policy range. */
27041da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
27051da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
27061da177e4SLinus Torvalds {
2707b22d127aSMel Gorman 	struct sp_node *n;
270842288fe3SMel Gorman 	struct sp_node *n_new = NULL;
270942288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2710b22d127aSMel Gorman 	int ret = 0;
27111da177e4SLinus Torvalds 
271242288fe3SMel Gorman restart:
27134a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
27141da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
27151da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
27161da177e4SLinus Torvalds 	while (n && n->start < end) {
27171da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
27181da177e4SLinus Torvalds 		if (n->start >= start) {
27191da177e4SLinus Torvalds 			if (n->end <= end)
27201da177e4SLinus Torvalds 				sp_delete(sp, n);
27211da177e4SLinus Torvalds 			else
27221da177e4SLinus Torvalds 				n->start = end;
27231da177e4SLinus Torvalds 		} else {
27241da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27251da177e4SLinus Torvalds 			if (n->end > end) {
272642288fe3SMel Gorman 				if (!n_new)
272742288fe3SMel Gorman 					goto alloc_new;
272842288fe3SMel Gorman 
272942288fe3SMel Gorman 				*mpol_new = *n->policy;
273042288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27317880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27321da177e4SLinus Torvalds 				n->end = start;
27335ca39575SHillf Danton 				sp_insert(sp, n_new);
273442288fe3SMel Gorman 				n_new = NULL;
273542288fe3SMel Gorman 				mpol_new = NULL;
27361da177e4SLinus Torvalds 				break;
27371da177e4SLinus Torvalds 			} else
27381da177e4SLinus Torvalds 				n->end = start;
27391da177e4SLinus Torvalds 		}
27401da177e4SLinus Torvalds 		if (!next)
27411da177e4SLinus Torvalds 			break;
27421da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27431da177e4SLinus Torvalds 	}
27441da177e4SLinus Torvalds 	if (new)
27451da177e4SLinus Torvalds 		sp_insert(sp, new);
27464a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
274742288fe3SMel Gorman 	ret = 0;
274842288fe3SMel Gorman 
274942288fe3SMel Gorman err_out:
275042288fe3SMel Gorman 	if (mpol_new)
275142288fe3SMel Gorman 		mpol_put(mpol_new);
275242288fe3SMel Gorman 	if (n_new)
275342288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
275442288fe3SMel Gorman 
2755b22d127aSMel Gorman 	return ret;
275642288fe3SMel Gorman 
275742288fe3SMel Gorman alloc_new:
27584a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
275942288fe3SMel Gorman 	ret = -ENOMEM;
276042288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
276142288fe3SMel Gorman 	if (!n_new)
276242288fe3SMel Gorman 		goto err_out;
276342288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
276442288fe3SMel Gorman 	if (!mpol_new)
276542288fe3SMel Gorman 		goto err_out;
27664ad09955SMiaohe Lin 	atomic_set(&mpol_new->refcnt, 1);
276742288fe3SMel Gorman 	goto restart;
27681da177e4SLinus Torvalds }
27691da177e4SLinus Torvalds 
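/*
 * Worked example of the range surgery above (illustrative, not part of
 * the original source): if the tree holds a single node [0,10) with
 * policy A and shared_policy_replace() is called for [3,6) with policy B,
 * the old node is trimmed to [0,3), the preallocated n_new/mpol_new pair
 * becomes a copy of A covering [6,10), and the new node [3,6)->B is
 * inserted, leaving [0,3)->A, [3,6)->B, [6,10)->A.
 */
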
277071fe804bSLee Schermerhorn /**
277171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
277271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
277371fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
277471fe804bSLee Schermerhorn  *
277571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
277671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
277771fe804bSLee Schermerhorn  * This must be released on exit.
27784bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode(), so we can use GFP_KERNEL.
277971fe804bSLee Schermerhorn  */
278071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27817339ff83SRobin Holt {
278258568d2aSMiao Xie 	int ret;
278358568d2aSMiao Xie 
278471fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27854a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27867339ff83SRobin Holt 
278771fe804bSLee Schermerhorn 	if (mpol) {
27887339ff83SRobin Holt 		struct vm_area_struct pvma;
278971fe804bSLee Schermerhorn 		struct mempolicy *new;
27904bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27917339ff83SRobin Holt 
27924bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27935c0c1654SLee Schermerhorn 			goto put_mpol;
279471fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
279571fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
279615d77835SLee Schermerhorn 		if (IS_ERR(new))
27970cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
279858568d2aSMiao Xie 
279958568d2aSMiao Xie 		task_lock(current);
28004bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
280158568d2aSMiao Xie 		task_unlock(current);
280215d77835SLee Schermerhorn 		if (ret)
28035c0c1654SLee Schermerhorn 			goto put_new;
280471fe804bSLee Schermerhorn 
280571fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
28062c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
280771fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
280871fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
280915d77835SLee Schermerhorn 
28105c0c1654SLee Schermerhorn put_new:
281171fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
28120cae3457SDan Carpenter free_scratch:
28134bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
28145c0c1654SLee Schermerhorn put_mpol:
28155c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
28167339ff83SRobin Holt 	}
28177339ff83SRobin Holt }
28187339ff83SRobin Holt 
28191da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
28201da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28211da177e4SLinus Torvalds {
28221da177e4SLinus Torvalds 	int err;
28231da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28241da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28251da177e4SLinus Torvalds 
2826028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28271da177e4SLinus Torvalds 		 vma->vm_pgoff,
282845c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2829028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2830269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28311da177e4SLinus Torvalds 
28321da177e4SLinus Torvalds 	if (npol) {
28331da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28341da177e4SLinus Torvalds 		if (!new)
28351da177e4SLinus Torvalds 			return -ENOMEM;
28361da177e4SLinus Torvalds 	}
28371da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28381da177e4SLinus Torvalds 	if (err && new)
283963f74ca2SKOSAKI Motohiro 		sp_free(new);
28401da177e4SLinus Torvalds 	return err;
28411da177e4SLinus Torvalds }
28421da177e4SLinus Torvalds 
28431da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28441da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28451da177e4SLinus Torvalds {
28461da177e4SLinus Torvalds 	struct sp_node *n;
28471da177e4SLinus Torvalds 	struct rb_node *next;
28481da177e4SLinus Torvalds 
28491da177e4SLinus Torvalds 	if (!p->root.rb_node)
28501da177e4SLinus Torvalds 		return;
28514a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28521da177e4SLinus Torvalds 	next = rb_first(&p->root);
28531da177e4SLinus Torvalds 	while (next) {
28541da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28551da177e4SLinus Torvalds 		next = rb_next(&n->nd);
285663f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28571da177e4SLinus Torvalds 	}
28584a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28591da177e4SLinus Torvalds }
28601da177e4SLinus Torvalds 
28611a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2862c297663cSMel Gorman static int __initdata numabalancing_override;
28631a687c2eSMel Gorman 
28641a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28651a687c2eSMel Gorman {
28661a687c2eSMel Gorman 	bool numabalancing_default = false;
28671a687c2eSMel Gorman 
28681a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28691a687c2eSMel Gorman 		numabalancing_default = true;
28701a687c2eSMel Gorman 
2871c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2872c297663cSMel Gorman 	if (numabalancing_override)
2873c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2874c297663cSMel Gorman 
2875b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2876756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2877c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28781a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28791a687c2eSMel Gorman 	}
28801a687c2eSMel Gorman }
28811a687c2eSMel Gorman 
28821a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28831a687c2eSMel Gorman {
28841a687c2eSMel Gorman 	int ret = 0;
28851a687c2eSMel Gorman 	if (!str)
28861a687c2eSMel Gorman 		goto out;
28871a687c2eSMel Gorman 
28881a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2889c297663cSMel Gorman 		numabalancing_override = 1;
28901a687c2eSMel Gorman 		ret = 1;
28911a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2892c297663cSMel Gorman 		numabalancing_override = -1;
28931a687c2eSMel Gorman 		ret = 1;
28941a687c2eSMel Gorman 	}
28951a687c2eSMel Gorman out:
28961a687c2eSMel Gorman 	if (!ret)
28974a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28981a687c2eSMel Gorman 
28991a687c2eSMel Gorman 	return ret;
29001a687c2eSMel Gorman }
29011a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
29021a687c2eSMel Gorman #else
29031a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
29041a687c2eSMel Gorman {
29051a687c2eSMel Gorman }
29061a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
29071a687c2eSMel Gorman 
29081da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
29091da177e4SLinus Torvalds void __init numa_policy_init(void)
29101da177e4SLinus Torvalds {
2911b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2912b71636e2SPaul Mundt 	unsigned long largest = 0;
2913b71636e2SPaul Mundt 	int nid, prefer = 0;
2914b71636e2SPaul Mundt 
29151da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
29161da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
291720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
29181da177e4SLinus Torvalds 
29191da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
29201da177e4SLinus Torvalds 				     sizeof(struct sp_node),
292120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29221da177e4SLinus Torvalds 
29235606e387SMel Gorman 	for_each_node(nid) {
29245606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29255606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29265606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29275606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2928269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29295606e387SMel Gorman 		};
29305606e387SMel Gorman 	}
29315606e387SMel Gorman 
2932b71636e2SPaul Mundt 	/*
2933b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2934b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB),
2935b71636e2SPaul Mundt 	 * falling back to the largest node if they're all smaller.
2936b71636e2SPaul Mundt 	 */
2937b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
293801f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2939b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29401da177e4SLinus Torvalds 
2941b71636e2SPaul Mundt 		/* Preserve the largest node */
2942b71636e2SPaul Mundt 		if (largest < total_pages) {
2943b71636e2SPaul Mundt 			largest = total_pages;
2944b71636e2SPaul Mundt 			prefer = nid;
2945b71636e2SPaul Mundt 		}
2946b71636e2SPaul Mundt 
2947b71636e2SPaul Mundt 		/* Interleave this node? */
2948b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2949b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2950b71636e2SPaul Mundt 	}
2951b71636e2SPaul Mundt 
2952b71636e2SPaul Mundt 	/* All too small, use the largest */
2953b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2954b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2955b71636e2SPaul Mundt 
2956028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2957b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29581a687c2eSMel Gorman 
29591a687c2eSMel Gorman 	check_numabalancing_enable();
29601da177e4SLinus Torvalds }
29611da177e4SLinus Torvalds 
29628bccd85fSChristoph Lameter /* Reset policy of current process to default */
29631da177e4SLinus Torvalds void numa_default_policy(void)
29641da177e4SLinus Torvalds {
2965028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29661da177e4SLinus Torvalds }
296768860ec1SPaul Jackson 
29684225399aSPaul Jackson /*
2969095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2970095f1fc4SLee Schermerhorn  */
2971095f1fc4SLee Schermerhorn 
2972345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2973345ace9cSLee Schermerhorn {
2974345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2975345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2976345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2977345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2978d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2979b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2980345ace9cSLee Schermerhorn };
29811a75a6c8SChristoph Lameter 
2982095f1fc4SLee Schermerhorn 
2983095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2984095f1fc4SLee Schermerhorn /**
2985f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2986095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
298771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2988095f1fc4SLee Schermerhorn  *
2989095f1fc4SLee Schermerhorn  * Format of input:
2990095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2991095f1fc4SLee Schermerhorn  *
2992dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2993095f1fc4SLee Schermerhorn  */
2994a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2995095f1fc4SLee Schermerhorn {
299671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2997f2a07f40SHugh Dickins 	unsigned short mode_flags;
299871fe804bSLee Schermerhorn 	nodemask_t nodes;
2999095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
3000095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
3001dedf2c73Szhong jiang 	int err = 1, mode;
3002095f1fc4SLee Schermerhorn 
3003c7a91bc7SDan Carpenter 	if (flags)
3004c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
3005c7a91bc7SDan Carpenter 
3006095f1fc4SLee Schermerhorn 	if (nodelist) {
3007095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
3008095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
300971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
3010095f1fc4SLee Schermerhorn 			goto out;
301101f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3012095f1fc4SLee Schermerhorn 			goto out;
301371fe804bSLee Schermerhorn 	} else
301471fe804bSLee Schermerhorn 		nodes_clear(nodes);
301571fe804bSLee Schermerhorn 
3016dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
3017dedf2c73Szhong jiang 	if (mode < 0)
3018095f1fc4SLee Schermerhorn 		goto out;
3019095f1fc4SLee Schermerhorn 
302071fe804bSLee Schermerhorn 	switch (mode) {
3021095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
302271fe804bSLee Schermerhorn 		/*
3023aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only, although later
3024aa9f7d51SRandy Dunlap 		 * we use first_node(nodes) to grab a single node, so here
3025aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
302671fe804bSLee Schermerhorn 		 */
3027095f1fc4SLee Schermerhorn 		if (nodelist) {
3028095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3029095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3030095f1fc4SLee Schermerhorn 				rest++;
3031926f2ae0SKOSAKI Motohiro 			if (*rest)
3032926f2ae0SKOSAKI Motohiro 				goto out;
3033aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3034aa9f7d51SRandy Dunlap 				goto out;
3035095f1fc4SLee Schermerhorn 		}
3036095f1fc4SLee Schermerhorn 		break;
3037095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3038095f1fc4SLee Schermerhorn 		/*
3039095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3040095f1fc4SLee Schermerhorn 		 */
3041095f1fc4SLee Schermerhorn 		if (!nodelist)
304201f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30433f226aa1SLee Schermerhorn 		break;
304471fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30453f226aa1SLee Schermerhorn 		/*
304671fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30473f226aa1SLee Schermerhorn 		 */
304871fe804bSLee Schermerhorn 		if (nodelist)
30493f226aa1SLee Schermerhorn 			goto out;
30503f226aa1SLee Schermerhorn 		break;
3051413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3052413b43deSRavikiran G Thirumalai 		/*
3053413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3054413b43deSRavikiran G Thirumalai 		 */
3055413b43deSRavikiran G Thirumalai 		if (!nodelist)
3056413b43deSRavikiran G Thirumalai 			err = 0;
3057413b43deSRavikiran G Thirumalai 		goto out;
3058b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3059d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
306071fe804bSLee Schermerhorn 		/*
3061d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
306271fe804bSLee Schermerhorn 		 */
3063d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3064d69b2e63SKOSAKI Motohiro 			goto out;
3065095f1fc4SLee Schermerhorn 	}
3066095f1fc4SLee Schermerhorn 
306771fe804bSLee Schermerhorn 	mode_flags = 0;
3068095f1fc4SLee Schermerhorn 	if (flags) {
3069095f1fc4SLee Schermerhorn 		/*
3070095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3071095f1fc4SLee Schermerhorn 		 * mode flags.
3072095f1fc4SLee Schermerhorn 		 */
3073095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
307471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3075095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
307671fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3077095f1fc4SLee Schermerhorn 		else
3078926f2ae0SKOSAKI Motohiro 			goto out;
3079095f1fc4SLee Schermerhorn 	}
308071fe804bSLee Schermerhorn 
308171fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
308271fe804bSLee Schermerhorn 	if (IS_ERR(new))
3083926f2ae0SKOSAKI Motohiro 		goto out;
3084926f2ae0SKOSAKI Motohiro 
3085f2a07f40SHugh Dickins 	/*
3086f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3087f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3088f2a07f40SHugh Dickins 	 */
3089269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3090269fbe72SBen Widawsky 		new->nodes = nodes;
3091269fbe72SBen Widawsky 	} else if (nodelist) {
3092269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3093269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3094269fbe72SBen Widawsky 	} else {
30957858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3096269fbe72SBen Widawsky 	}
3097f2a07f40SHugh Dickins 
3098f2a07f40SHugh Dickins 	/*
3099f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3100f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3101f2a07f40SHugh Dickins 	 */
3102e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3103f2a07f40SHugh Dickins 
3104926f2ae0SKOSAKI Motohiro 	err = 0;
310571fe804bSLee Schermerhorn 
3106095f1fc4SLee Schermerhorn out:
3107095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3108095f1fc4SLee Schermerhorn 	if (nodelist)
3109095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3110095f1fc4SLee Schermerhorn 	if (flags)
3111095f1fc4SLee Schermerhorn 		*--flags = '=';
311271fe804bSLee Schermerhorn 	if (!err)
311371fe804bSLee Schermerhorn 		*mpol = new;
3114095f1fc4SLee Schermerhorn 	return err;
3115095f1fc4SLee Schermerhorn }
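
/*
 * Example option strings accepted above (illustrative, not part of the
 * original source): "interleave:0-3", "bind=static:0,2" and "prefer:1"
 * all parse successfully on a machine where nodes 0-3 have memory, while
 * "default:0" fails because MPOL_DEFAULT insists on an empty nodelist.
 */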
3116095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3117095f1fc4SLee Schermerhorn 
311871fe804bSLee Schermerhorn /**
311971fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
312071fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
312171fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
312271fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
312371fe804bSLee Schermerhorn  *
3124948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3125948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3126948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
31271a75a6c8SChristoph Lameter  */
3128948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31291a75a6c8SChristoph Lameter {
31301a75a6c8SChristoph Lameter 	char *p = buffer;
3131948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3132948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3133948927eeSDavid Rientjes 	unsigned short flags = 0;
31341a75a6c8SChristoph Lameter 
31358790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3136bea904d5SLee Schermerhorn 		mode = pol->mode;
3137948927eeSDavid Rientjes 		flags = pol->flags;
3138948927eeSDavid Rientjes 	}
3139bea904d5SLee Schermerhorn 
31401a75a6c8SChristoph Lameter 	switch (mode) {
31411a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31427858d7bcSFeng Tang 	case MPOL_LOCAL:
31431a75a6c8SChristoph Lameter 		break;
31441a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3145b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31461a75a6c8SChristoph Lameter 	case MPOL_BIND:
31471a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3148269fbe72SBen Widawsky 		nodes = pol->nodes;
31491a75a6c8SChristoph Lameter 		break;
31501a75a6c8SChristoph Lameter 	default:
3151948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3152948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3153948927eeSDavid Rientjes 		return;
31541a75a6c8SChristoph Lameter 	}
31551a75a6c8SChristoph Lameter 
3156b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31571a75a6c8SChristoph Lameter 
3158fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3159948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3160f5b087b5SDavid Rientjes 
31612291990aSLee Schermerhorn 		/*
31622291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
31632291990aSLee Schermerhorn 		 */
3164f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31652291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31662291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31672291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3168f5b087b5SDavid Rientjes 	}
3169f5b087b5SDavid Rientjes 
31709e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31719e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31729e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31731a75a6c8SChristoph Lameter }
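
/*
 * Example outputs (illustrative, not part of the original source): an
 * interleave policy over nodes 0-3 formats as "interleave:0-3", a static
 * bind over nodes 0 and 2 as "bind=static:0,2", and MPOL_LOCAL or
 * MPOL_DEFAULT as plain "local" or "default" with no nodelist.
 */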
3174