xref: /openbmc/linux/mm/mempolicy.c (revision d1751118c88673fe5a948ad82277898e9e284c55)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
441da177e4SLinus Torvalds  * allocations for a VMA in the VM.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
481da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
571da177e4SLinus Torvalds  */
581da177e4SLinus Torvalds 
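/*
 * Editorial sketch, not part of the original file: a minimal userspace
 * illustration of how the policies above are selected, using the
 * set_mempolicy(2) and mbind(2) wrappers declared in libnuma's <numaif.h>.
 * The node numbers, and the addr/length arguments (assumed to come from an
 * earlier mmap()), are illustrative assumptions for a two-node machine.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = 0x3;	// nodes 0 and 1
 *	unsigned long node0   = 0x1;	// node 0 only
 *
 *	// Process policy: interleave future allocations over nodes 0-1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01));
 *
 *	// VMA policy: bind one mapping to node 0, no fallback.
 *	mbind(addr, length, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *
 * As described above, the VMA policy installed by mbind() then takes
 * priority over the process policy for faults in that mapping.
 */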
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger OOM much faster and the
681da177e4SLinus Torvalds    kernel is not always graceful with that.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1074a18419fSNadav Amit #include <asm/tlb.h>
1087c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1091da177e4SLinus Torvalds 
11062695a84SNick Piggin #include "internal.h"
11162695a84SNick Piggin 
11238e35860SChristoph Lameter /* Internal flags */
113dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11438e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
115dc9aa5b9SChristoph Lameter 
116fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
117fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1201da177e4SLinus Torvalds    policied. */
1216267276fSChristoph Lameter enum zone_type policy_zone = 0;
1221da177e4SLinus Torvalds 
123bea904d5SLee Schermerhorn /*
124bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
125bea904d5SLee Schermerhorn  */
126e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1271da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1287858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1291da177e4SLinus Torvalds };
1301da177e4SLinus Torvalds 
1315606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1325606e387SMel Gorman 
133b2ca916cSDan Williams /**
134b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
135f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
136b2ca916cSDan Williams  *
137b2ca916cSDan Williams  * Lookup the next closest node by distance if @node is not online.
138dad5b023SRandy Dunlap  *
139dad5b023SRandy Dunlap  * Return: this @node if it is online, otherwise the closest node by distance
140b2ca916cSDan Williams  */
141b2ca916cSDan Williams int numa_map_to_online_node(int node)
142b2ca916cSDan Williams {
1434fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
144b2ca916cSDan Williams 
1454fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1464fcbe96eSDan Williams 		return node;
147b2ca916cSDan Williams 
148b2ca916cSDan Williams 	min_node = node;
149b2ca916cSDan Williams 	for_each_online_node(n) {
150b2ca916cSDan Williams 		dist = node_distance(node, n);
151b2ca916cSDan Williams 		if (dist < min_dist) {
152b2ca916cSDan Williams 			min_dist = dist;
153b2ca916cSDan Williams 			min_node = n;
154b2ca916cSDan Williams 		}
155b2ca916cSDan Williams 	}
156b2ca916cSDan Williams 
157b2ca916cSDan Williams 	return min_node;
158b2ca916cSDan Williams }
159b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
160b2ca916cSDan Williams 
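/*
 * Editorial sketch, not part of the original file: a hypothetical caller
 * that wants to allocate near a node which may be offline could first
 * remap it, e.g.:
 *
 *	nid = numa_map_to_online_node(nid);
 *	page = alloc_pages_node(nid, GFP_KERNEL, 0);
 */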
16174d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1625606e387SMel Gorman {
1635606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
164f15ca78eSOleg Nesterov 	int node;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	if (pol)
167f15ca78eSOleg Nesterov 		return pol;
1685606e387SMel Gorman 
169f15ca78eSOleg Nesterov 	node = numa_node_id();
1701da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1711da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
172f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
173f15ca78eSOleg Nesterov 		if (pol->mode)
174f15ca78eSOleg Nesterov 			return pol;
1751da6f0e1SJianguo Wu 	}
1765606e387SMel Gorman 
177f15ca78eSOleg Nesterov 	return &default_policy;
1785606e387SMel Gorman }
1795606e387SMel Gorman 
18037012946SDavid Rientjes static const struct mempolicy_operations {
18137012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
182213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18337012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18437012946SDavid Rientjes 
185f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
186f5b087b5SDavid Rientjes {
1876d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1884c50bc01SDavid Rientjes }
1894c50bc01SDavid Rientjes 
1904c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1914c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1924c50bc01SDavid Rientjes {
1934c50bc01SDavid Rientjes 	nodemask_t tmp;
1944c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1954c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
196f5b087b5SDavid Rientjes }
197f5b087b5SDavid Rientjes 
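/*
 * Editorial example, not part of the original file: MPOL_F_RELATIVE_NODES
 * remapping in terms of nodes_fold()/nodes_onto().  Roughly, with a
 * relative mask of {0,1} and an allowed mask of {4,7}, the relative mask
 * is folded to the weight of the allowed set and then mapped onto it,
 * yielding {4,7}: relative node n becomes the n-th node of the allowed set.
 */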
198be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19937012946SDavid Rientjes {
20037012946SDavid Rientjes 	if (nodes_empty(*nodes))
20137012946SDavid Rientjes 		return -EINVAL;
202269fbe72SBen Widawsky 	pol->nodes = *nodes;
20337012946SDavid Rientjes 	return 0;
20437012946SDavid Rientjes }
20537012946SDavid Rientjes 
20637012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20737012946SDavid Rientjes {
2087858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2097858d7bcSFeng Tang 		return -EINVAL;
210269fbe72SBen Widawsky 
211269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
212269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21337012946SDavid Rientjes 	return 0;
21437012946SDavid Rientjes }
21537012946SDavid Rientjes 
21658568d2aSMiao Xie /*
21758568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21858568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2197858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
22058568d2aSMiao Xie  *
22158568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
222c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22358568d2aSMiao Xie  */
2244bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2254bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22658568d2aSMiao Xie {
22758568d2aSMiao Xie 	int ret;
22858568d2aSMiao Xie 
2297858d7bcSFeng Tang 	/*
2307858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2317858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2327858d7bcSFeng Tang 	 * constructor.
2337858d7bcSFeng Tang 	 */
2347858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23558568d2aSMiao Xie 		return 0;
2367858d7bcSFeng Tang 
23701f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2384bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23901f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
24058568d2aSMiao Xie 
24158568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2427858d7bcSFeng Tang 
24358568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2444bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24558568d2aSMiao Xie 	else
2464bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2474bfc4495SKAMEZAWA Hiroyuki 
24858568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24958568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
25058568d2aSMiao Xie 	else
2517858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
25258568d2aSMiao Xie 
2534bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25458568d2aSMiao Xie 	return ret;
25558568d2aSMiao Xie }
25658568d2aSMiao Xie 
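/*
 * Editorial sketch, not part of the original file: the expected caller
 * sequence, mirroring do_set_mempolicy() further below:
 *
 *	new = mpol_new(mode, flags, nodes);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 *	task_lock(current);	// protects mems_allowed and mempolicy
 *	ret = mpol_set_nodemask(new, nodes, scratch);
 *	task_unlock(current);
 */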
25758568d2aSMiao Xie /*
25858568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25958568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
26058568d2aSMiao Xie  */
261028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
262028fec41SDavid Rientjes 				  nodemask_t *nodes)
2631da177e4SLinus Torvalds {
2641da177e4SLinus Torvalds 	struct mempolicy *policy;
2651da177e4SLinus Torvalds 
266028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26700ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
268140d5a49SPaul Mundt 
2693e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2703e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
27137012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
272d3a71033SLee Schermerhorn 		return NULL;
27337012946SDavid Rientjes 	}
2743e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2753e1f0645SDavid Rientjes 
2763e1f0645SDavid Rientjes 	/*
2773e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2783e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2793e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2803e1f0645SDavid Rientjes 	 */
2813e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2823e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2833e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2843e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2853e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2867858d7bcSFeng Tang 
2877858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2883e1f0645SDavid Rientjes 		}
289479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2908d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2918d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2928d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
293479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2943e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2953e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2961da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2971da177e4SLinus Torvalds 	if (!policy)
2981da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2991da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
30045c4745aSLee Schermerhorn 	policy->mode = mode;
30137012946SDavid Rientjes 	policy->flags = flags;
302c6018b4bSAneesh Kumar K.V 	policy->home_node = NUMA_NO_NODE;
3033e1f0645SDavid Rientjes 
30437012946SDavid Rientjes 	return policy;
30537012946SDavid Rientjes }
30637012946SDavid Rientjes 
30752cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30852cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30952cd3b07SLee Schermerhorn {
31052cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
31152cd3b07SLee Schermerhorn 		return;
31252cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
31352cd3b07SLee Schermerhorn }
31452cd3b07SLee Schermerhorn 
315213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31637012946SDavid Rientjes {
31737012946SDavid Rientjes }
31837012946SDavid Rientjes 
319213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3201d0d2680SDavid Rientjes {
3211d0d2680SDavid Rientjes 	nodemask_t tmp;
3221d0d2680SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32437012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32537012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32637012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3271d0d2680SDavid Rientjes 	else {
328269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
329213980c0SVlastimil Babka 								*nodes);
33029b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3311d0d2680SDavid Rientjes 	}
33237012946SDavid Rientjes 
333708c1bbcSMiao Xie 	if (nodes_empty(tmp))
334708c1bbcSMiao Xie 		tmp = *nodes;
335708c1bbcSMiao Xie 
336269fbe72SBen Widawsky 	pol->nodes = tmp;
33737012946SDavid Rientjes }
33837012946SDavid Rientjes 
33937012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
340213980c0SVlastimil Babka 						const nodemask_t *nodes)
34137012946SDavid Rientjes {
34237012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3431d0d2680SDavid Rientjes }
34437012946SDavid Rientjes 
345708c1bbcSMiao Xie /*
346708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
347708c1bbcSMiao Xie  *
348c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
349213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
350213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
351708c1bbcSMiao Xie  */
352213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35337012946SDavid Rientjes {
354018160adSWang Cheng 	if (!pol || pol->mode == MPOL_LOCAL)
35537012946SDavid Rientjes 		return;
3567858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35737012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35837012946SDavid Rientjes 		return;
359708c1bbcSMiao Xie 
360213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3611d0d2680SDavid Rientjes }
3621d0d2680SDavid Rientjes 
3631d0d2680SDavid Rientjes /*
3641d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires a task
3651d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
36658568d2aSMiao Xie  *
36758568d2aSMiao Xie  * Called with task's alloc_lock held.
3681d0d2680SDavid Rientjes  */
3691d0d2680SDavid Rientjes 
370213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3711d0d2680SDavid Rientjes {
372213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3731d0d2680SDavid Rientjes }
3741d0d2680SDavid Rientjes 
3751d0d2680SDavid Rientjes /*
3761d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3771d0d2680SDavid Rientjes  *
378c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3791d0d2680SDavid Rientjes  */
3801d0d2680SDavid Rientjes 
3811d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3821d0d2680SDavid Rientjes {
3831d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
38466850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, 0);
3851d0d2680SDavid Rientjes 
386d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
38766850be5SLiam R. Howlett 	for_each_vma(vmi, vma)
388213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
389d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3901d0d2680SDavid Rientjes }
3911d0d2680SDavid Rientjes 
39237012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
39337012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39437012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
397be897d48SFeng Tang 		.create = mpol_new_nodemask,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_PREFERRED] = {
40137012946SDavid Rientjes 		.create = mpol_new_preferred,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes 	[MPOL_BIND] = {
405be897d48SFeng Tang 		.create = mpol_new_nodemask,
40637012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40737012946SDavid Rientjes 	},
4087858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4097858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4107858d7bcSFeng Tang 	},
411b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
412be897d48SFeng Tang 		.create = mpol_new_nodemask,
413b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
414b27abaccSDave Hansen 	},
41537012946SDavid Rientjes };
41637012946SDavid Rientjes 
417a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
418fc301289SChristoph Lameter 				unsigned long flags);
4191a75a6c8SChristoph Lameter 
4206f4576e3SNaoya Horiguchi struct queue_pages {
4216f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4226f4576e3SNaoya Horiguchi 	unsigned long flags;
4236f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
424f18da660SLi Xinhai 	unsigned long start;
425f18da660SLi Xinhai 	unsigned long end;
426f18da660SLi Xinhai 	struct vm_area_struct *first;
4276f4576e3SNaoya Horiguchi };
4286f4576e3SNaoya Horiguchi 
42998094945SNaoya Horiguchi /*
43088aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
43188aaa2a1SNaoya Horiguchi  *
43288aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
43388aaa2a1SNaoya Horiguchi  * in the inverse of qp->nmask.
43488aaa2a1SNaoya Horiguchi  */
43588aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
43688aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43788aaa2a1SNaoya Horiguchi {
43888aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
43988aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
44088aaa2a1SNaoya Horiguchi 
44188aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
44288aaa2a1SNaoya Horiguchi }
44388aaa2a1SNaoya Horiguchi 
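/*
 * Editorial example, not part of the original file: with qp->nmask = {0,1}
 * and MPOL_MF_INVERT clear, a page on node 0 satisfies the check; with
 * MPOL_MF_INVERT set, only pages on nodes outside {0,1} do.
 */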
444a7f40cfeSYang Shi /*
445bc78b5edSMiaohe Lin  * queue_pages_pmd() has three possible return values:
446e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or
447e5947d23SYang Shi  *     a special page is met, i.e. the huge zero page.
448d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
449d8835445SYang Shi  *     specified.
450d8835445SYang Shi  * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
451d8835445SYang Shi  *        specified and an existing page was already on a node that does
452d8835445SYang Shi  *        not follow the policy.
453a7f40cfeSYang Shi  */
454c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
455c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
456959a7e13SJules Irenge 	__releases(ptl)
457c8633798SNaoya Horiguchi {
458c8633798SNaoya Horiguchi 	int ret = 0;
459c8633798SNaoya Horiguchi 	struct page *page;
460c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
461c8633798SNaoya Horiguchi 	unsigned long flags;
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
464a7f40cfeSYang Shi 		ret = -EIO;
465c8633798SNaoya Horiguchi 		goto unlock;
466c8633798SNaoya Horiguchi 	}
467c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
468c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
469e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
4706d97cf88SMiaohe Lin 		goto unlock;
471c8633798SNaoya Horiguchi 	}
472d8835445SYang Shi 	if (!queue_pages_required(page, qp))
473c8633798SNaoya Horiguchi 		goto unlock;
474c8633798SNaoya Horiguchi 
475c8633798SNaoya Horiguchi 	flags = qp->flags;
476c8633798SNaoya Horiguchi 	/* go to thp migration */
477a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
478a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
479a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
480d8835445SYang Shi 			ret = 1;
481a7f40cfeSYang Shi 			goto unlock;
482a7f40cfeSYang Shi 		}
483a7f40cfeSYang Shi 	} else
484a7f40cfeSYang Shi 		ret = -EIO;
485c8633798SNaoya Horiguchi unlock:
486c8633798SNaoya Horiguchi 	spin_unlock(ptl);
487c8633798SNaoya Horiguchi 	return ret;
488c8633798SNaoya Horiguchi }
489c8633798SNaoya Horiguchi 
49088aaa2a1SNaoya Horiguchi /*
49198094945SNaoya Horiguchi  * Scan through pages checking if pages follow certain conditions,
49298094945SNaoya Horiguchi  * and move them to the pagelist if they do.
493d8835445SYang Shi  *
494d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
495e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or
496e5947d23SYang Shi  *     a special page is met, i.e. the zero page.
497d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
498d8835445SYang Shi  *     specified.
499d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
500d8835445SYang Shi  *        on a node that does not follow the policy.
50198094945SNaoya Horiguchi  */
5026f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
5036f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5041da177e4SLinus Torvalds {
5056f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5066f4576e3SNaoya Horiguchi 	struct page *page;
5076f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5086f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
509d8835445SYang Shi 	bool has_unmovable = false;
5103f088420SShijie Luo 	pte_t *pte, *mapped_pte;
511705e87c0SHugh Dickins 	spinlock_t *ptl;
512941150a3SHugh Dickins 
513c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
514bc78b5edSMiaohe Lin 	if (ptl)
515bc78b5edSMiaohe Lin 		return queue_pages_pmd(pmd, ptl, addr, end, walk);
51691612e0dSHugh Dickins 
517337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
518337d9abfSNaoya Horiguchi 		return 0;
51994723aafSMichal Hocko 
5203f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5216f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
52291612e0dSHugh Dickins 		if (!pte_present(*pte))
52391612e0dSHugh Dickins 			continue;
5246aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5253218f871SAlex Sierra 		if (!page || is_zone_device_page(page))
52691612e0dSHugh Dickins 			continue;
527053837fcSNick Piggin 		/*
52862b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
52962b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
530053837fcSNick Piggin 		 */
531b79bc0a0SHugh Dickins 		if (PageReserved(page))
532f4598c8bSChristoph Lameter 			continue;
53388aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
53438e35860SChristoph Lameter 			continue;
535a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
536d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
537d8835445SYang Shi 			if (!vma_migratable(vma)) {
538d8835445SYang Shi 				has_unmovable = true;
539a7f40cfeSYang Shi 				break;
540d8835445SYang Shi 			}
541a53190a4SYang Shi 
542a53190a4SYang Shi 			/*
543a53190a4SYang Shi 			 * Do not abort immediately since there may be
544a53190a4SYang Shi 			 * pages temporarily off the LRU in the range.  We still
545a53190a4SYang Shi 			 * need to migrate the other LRU pages.
546a53190a4SYang Shi 			 */
547a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
548a53190a4SYang Shi 				has_unmovable = true;
549a7f40cfeSYang Shi 		} else
550a7f40cfeSYang Shi 			break;
5516f4576e3SNaoya Horiguchi 	}
5523f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5536f4576e3SNaoya Horiguchi 	cond_resched();
554d8835445SYang Shi 
555d8835445SYang Shi 	if (has_unmovable)
556d8835445SYang Shi 		return 1;
557d8835445SYang Shi 
558a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
55991612e0dSHugh Dickins }
56091612e0dSHugh Dickins 
5616f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5626f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5636f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
564e2d8cf40SNaoya Horiguchi {
565dcf17635SLi Xinhai 	int ret = 0;
566e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5676f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
568dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
569e2d8cf40SNaoya Horiguchi 	struct page *page;
570cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
571d4c54919SNaoya Horiguchi 	pte_t entry;
572e2d8cf40SNaoya Horiguchi 
5736f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5746f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
575d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
576d4c54919SNaoya Horiguchi 		goto unlock;
577d4c54919SNaoya Horiguchi 	page = pte_page(entry);
57888aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
579e2d8cf40SNaoya Horiguchi 		goto unlock;
580dcf17635SLi Xinhai 
581dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
582dcf17635SLi Xinhai 		/*
583dcf17635SLi Xinhai 		 * STRICT alone means only detecting misplaced pages and no
584dcf17635SLi Xinhai 		 * need to further check other vmas.
585dcf17635SLi Xinhai 		 */
586dcf17635SLi Xinhai 		ret = -EIO;
587dcf17635SLi Xinhai 		goto unlock;
588dcf17635SLi Xinhai 	}
589dcf17635SLi Xinhai 
590dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
591dcf17635SLi Xinhai 		/*
592dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
593dcf17635SLi Xinhai 		 * stopped walking the current vma.
594dcf17635SLi Xinhai 		 * Detect the misplaced page, but allow migrating pages which
595dcf17635SLi Xinhai 		 * have been queued.
596dcf17635SLi Xinhai 		 */
597dcf17635SLi Xinhai 		ret = 1;
598dcf17635SLi Xinhai 		goto unlock;
599dcf17635SLi Xinhai 	}
600dcf17635SLi Xinhai 
601e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
602e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
603dcf17635SLi Xinhai 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
6047ce82f4cSMiaohe Lin 		if (isolate_hugetlb(page, qp->pagelist) &&
605dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
606dcf17635SLi Xinhai 			/*
607dcf17635SLi Xinhai 			 * Failed to isolate the page, but allow migrating pages
608dcf17635SLi Xinhai 			 * which have been queued.
609dcf17635SLi Xinhai 			 */
610dcf17635SLi Xinhai 			ret = 1;
611dcf17635SLi Xinhai 	}
612e2d8cf40SNaoya Horiguchi unlock:
613cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
614e2d8cf40SNaoya Horiguchi #else
615e2d8cf40SNaoya Horiguchi 	BUG();
616e2d8cf40SNaoya Horiguchi #endif
617dcf17635SLi Xinhai 	return ret;
6181da177e4SLinus Torvalds }
6191da177e4SLinus Torvalds 
6205877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
621b24f53a0SLee Schermerhorn /*
6224b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses as inaccessible.
6234b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6244b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6254b10e7d5SMel Gorman  *
6264b10e7d5SMel Gorman  * This assumes that NUMA faults are handled using PROT_NONE. If
6274b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6284b10e7d5SMel Gorman  * changes to the core.
629b24f53a0SLee Schermerhorn  */
6304b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6314b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
632b24f53a0SLee Schermerhorn {
6334a18419fSNadav Amit 	struct mmu_gather tlb;
634a79390f5SPeter Xu 	long nr_updated;
635b24f53a0SLee Schermerhorn 
6364a18419fSNadav Amit 	tlb_gather_mmu(&tlb, vma->vm_mm);
6374a18419fSNadav Amit 
6381ef488edSDavid Hildenbrand 	nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA);
639*d1751118SPeter Xu 	if (nr_updated > 0)
64003c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
641b24f53a0SLee Schermerhorn 
6424a18419fSNadav Amit 	tlb_finish_mmu(&tlb);
6434a18419fSNadav Amit 
6444b10e7d5SMel Gorman 	return nr_updated;
645b24f53a0SLee Schermerhorn }
646b24f53a0SLee Schermerhorn #else
647b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
648b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
649b24f53a0SLee Schermerhorn {
650b24f53a0SLee Schermerhorn 	return 0;
651b24f53a0SLee Schermerhorn }
6525877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
653b24f53a0SLee Schermerhorn 
6546f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6556f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6561da177e4SLinus Torvalds {
65766850be5SLiam R. Howlett 	struct vm_area_struct *next, *vma = walk->vma;
6586f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6595b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6606f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
661dc9aa5b9SChristoph Lameter 
662a18b3ac2SLi Xinhai 	/* range check first */
663ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
664f18da660SLi Xinhai 
665f18da660SLi Xinhai 	if (!qp->first) {
666f18da660SLi Xinhai 		qp->first = vma;
667f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
668f18da660SLi Xinhai 			(qp->start < vma->vm_start))
669f18da660SLi Xinhai 			/* hole at head side of range */
670a18b3ac2SLi Xinhai 			return -EFAULT;
671a18b3ac2SLi Xinhai 	}
67266850be5SLiam R. Howlett 	next = find_vma(vma->vm_mm, vma->vm_end);
673f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
674f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
67566850be5SLiam R. Howlett 		(!next || vma->vm_end < next->vm_start)))
676f18da660SLi Xinhai 		/* hole at middle or tail of range */
677f18da660SLi Xinhai 		return -EFAULT;
678a18b3ac2SLi Xinhai 
679a7f40cfeSYang Shi 	/*
680a7f40cfeSYang Shi 	 * Need check MPOL_MF_STRICT to return -EIO if possible
681a7f40cfeSYang Shi 	 * regardless of vma_migratable
682a7f40cfeSYang Shi 	 */
683a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
684a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
68548684a65SNaoya Horiguchi 		return 1;
68648684a65SNaoya Horiguchi 
6875b952b3cSAndi Kleen 	if (endvma > end)
6885b952b3cSAndi Kleen 		endvma = end;
689b24f53a0SLee Schermerhorn 
690b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6912c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6923122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
6934355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
694b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6956f4576e3SNaoya Horiguchi 		return 1;
696b24f53a0SLee Schermerhorn 	}
697b24f53a0SLee Schermerhorn 
6986f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
699a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
7006f4576e3SNaoya Horiguchi 		return 0;
7016f4576e3SNaoya Horiguchi 	return 1;
7026f4576e3SNaoya Horiguchi }
703b24f53a0SLee Schermerhorn 
7047b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7057b86ac33SChristoph Hellwig 	.hugetlb_entry		= queue_pages_hugetlb,
7067b86ac33SChristoph Hellwig 	.pmd_entry		= queue_pages_pte_range,
7077b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7087b86ac33SChristoph Hellwig };
7097b86ac33SChristoph Hellwig 
7106f4576e3SNaoya Horiguchi /*
7116f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7126f4576e3SNaoya Horiguchi  *
7136f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7146f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued on the pagelist,
715d8835445SYang Shi  * which is passed via @private.
716d8835445SYang Shi  *
717d8835445SYang Shi  * queue_pages_range() has three possible return values:
718d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
719d8835445SYang Shi  *     specified.
720d8835445SYang Shi  * 0 - pages are queued successfully, or there is no misplaced page.
721a85dfc30SYang Shi  * errno - e.g. misplaced pages with only MPOL_MF_STRICT specified (-EIO),
722a85dfc30SYang Shi  *         or the memory range specified by nodemask and maxnode points
723a85dfc30SYang Shi  *         outside the accessible address space (-EFAULT)
7246f4576e3SNaoya Horiguchi  */
7256f4576e3SNaoya Horiguchi static int
7266f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7276f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7286f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7296f4576e3SNaoya Horiguchi {
730f18da660SLi Xinhai 	int err;
7316f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7326f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7336f4576e3SNaoya Horiguchi 		.flags = flags,
7346f4576e3SNaoya Horiguchi 		.nmask = nodes,
735f18da660SLi Xinhai 		.start = start,
736f18da660SLi Xinhai 		.end = end,
737f18da660SLi Xinhai 		.first = NULL,
7386f4576e3SNaoya Horiguchi 	};
7396f4576e3SNaoya Horiguchi 
740f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
741f18da660SLi Xinhai 
742f18da660SLi Xinhai 	if (!qp.first)
743f18da660SLi Xinhai 		/* whole range in hole */
744f18da660SLi Xinhai 		err = -EFAULT;
745f18da660SLi Xinhai 
746f18da660SLi Xinhai 	return err;
7471da177e4SLinus Torvalds }
7481da177e4SLinus Torvalds 
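/*
 * Editorial sketch, not part of the original file: a typical invocation,
 * mirroring migrate_to_node() further below, which queues every page of
 * the mm that sits on the nodes in @nmask:
 *
 *	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
 *			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 */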
749869833f2SKOSAKI Motohiro /*
750869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
751c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
752869833f2SKOSAKI Motohiro  */
753869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
754869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7558d34694cSKOSAKI Motohiro {
756869833f2SKOSAKI Motohiro 	int err;
757869833f2SKOSAKI Motohiro 	struct mempolicy *old;
758869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7598d34694cSKOSAKI Motohiro 
7608d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7618d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7628d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7638d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7648d34694cSKOSAKI Motohiro 
765869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
766869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
767869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
768869833f2SKOSAKI Motohiro 
769869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7708d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
771869833f2SKOSAKI Motohiro 		if (err)
772869833f2SKOSAKI Motohiro 			goto err_out;
7738d34694cSKOSAKI Motohiro 	}
774869833f2SKOSAKI Motohiro 
775869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
776c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
777869833f2SKOSAKI Motohiro 	mpol_put(old);
778869833f2SKOSAKI Motohiro 
779869833f2SKOSAKI Motohiro 	return 0;
780869833f2SKOSAKI Motohiro  err_out:
781869833f2SKOSAKI Motohiro 	mpol_put(new);
7828d34694cSKOSAKI Motohiro 	return err;
7838d34694cSKOSAKI Motohiro }
7848d34694cSKOSAKI Motohiro 
7851da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7869d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7879d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7881da177e4SLinus Torvalds {
7897329e3ebSLiam Howlett 	MA_STATE(mas, &mm->mm_mt, start, start);
7909d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7919d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7929d8cebd4SKOSAKI Motohiro 	int err = 0;
793e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7941da177e4SLinus Torvalds 
7957329e3ebSLiam Howlett 	prev = mas_prev(&mas, 0);
7967329e3ebSLiam Howlett 	if (unlikely(!prev))
7977329e3ebSLiam Howlett 		mas_set(&mas, start);
7987329e3ebSLiam Howlett 
7997329e3ebSLiam Howlett 	vma = mas_find(&mas, end - 1);
8007329e3ebSLiam Howlett 	if (WARN_ON(!vma))
8017329e3ebSLiam Howlett 		return 0;
8027329e3ebSLiam Howlett 
8037329e3ebSLiam Howlett 	if (start > vma->vm_start)
8047329e3ebSLiam Howlett 		prev = vma;
8059d8cebd4SKOSAKI Motohiro 
80666850be5SLiam R. Howlett 	for (; vma; vma = mas_next(&mas, end - 1)) {
80766850be5SLiam R. Howlett 		unsigned long vmstart = max(start, vma->vm_start);
80866850be5SLiam R. Howlett 		unsigned long vmend = min(end, vma->vm_end);
8099d8cebd4SKOSAKI Motohiro 
810e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
81166850be5SLiam R. Howlett 			goto next;
812e26a5114SKOSAKI Motohiro 
813e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
814e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8159d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
816e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
8179a10064fSColin Cross 				 new_pol, vma->vm_userfaultfd_ctx,
8185c26f6acSSuren Baghdasaryan 				 anon_vma_name(vma));
8199d8cebd4SKOSAKI Motohiro 		if (prev) {
82066850be5SLiam R. Howlett 			/* vma_merge() invalidated the mas */
82166850be5SLiam R. Howlett 			mas_pause(&mas);
8229d8cebd4SKOSAKI Motohiro 			vma = prev;
8233964acd0SOleg Nesterov 			goto replace;
8241da177e4SLinus Torvalds 		}
8259d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8269d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
8279d8cebd4SKOSAKI Motohiro 			if (err)
8289d8cebd4SKOSAKI Motohiro 				goto out;
82966850be5SLiam R. Howlett 			/* split_vma() invalidated the mas */
83066850be5SLiam R. Howlett 			mas_pause(&mas);
8319d8cebd4SKOSAKI Motohiro 		}
8329d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8339d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
8349d8cebd4SKOSAKI Motohiro 			if (err)
8359d8cebd4SKOSAKI Motohiro 				goto out;
83666850be5SLiam R. Howlett 			/* split_vma() invalidated the mas */
83766850be5SLiam R. Howlett 			mas_pause(&mas);
8389d8cebd4SKOSAKI Motohiro 		}
8393964acd0SOleg Nesterov replace:
840869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8419d8cebd4SKOSAKI Motohiro 		if (err)
8429d8cebd4SKOSAKI Motohiro 			goto out;
84366850be5SLiam R. Howlett next:
84466850be5SLiam R. Howlett 		prev = vma;
8459d8cebd4SKOSAKI Motohiro 	}
8469d8cebd4SKOSAKI Motohiro 
8479d8cebd4SKOSAKI Motohiro out:
8481da177e4SLinus Torvalds 	return err;
8491da177e4SLinus Torvalds }
8501da177e4SLinus Torvalds 
8511da177e4SLinus Torvalds /* Set the process memory policy */
852028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
853028fec41SDavid Rientjes 			     nodemask_t *nodes)
8541da177e4SLinus Torvalds {
85558568d2aSMiao Xie 	struct mempolicy *new, *old;
8564bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
85758568d2aSMiao Xie 	int ret;
8581da177e4SLinus Torvalds 
8594bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8604bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
861f4e53d91SLee Schermerhorn 
8624bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8634bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8644bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8654bfc4495SKAMEZAWA Hiroyuki 		goto out;
8664bfc4495SKAMEZAWA Hiroyuki 	}
8672c7c3a7dSOleg Nesterov 
86812c1dc8eSAbel Wu 	task_lock(current);
8694bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
87058568d2aSMiao Xie 	if (ret) {
87112c1dc8eSAbel Wu 		task_unlock(current);
87258568d2aSMiao Xie 		mpol_put(new);
8734bfc4495SKAMEZAWA Hiroyuki 		goto out;
87458568d2aSMiao Xie 	}
87512c1dc8eSAbel Wu 
87658568d2aSMiao Xie 	old = current->mempolicy;
8771da177e4SLinus Torvalds 	current->mempolicy = new;
87845816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87945816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
88058568d2aSMiao Xie 	task_unlock(current);
88158568d2aSMiao Xie 	mpol_put(old);
8824bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8834bfc4495SKAMEZAWA Hiroyuki out:
8844bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8854bfc4495SKAMEZAWA Hiroyuki 	return ret;
8861da177e4SLinus Torvalds }
8871da177e4SLinus Torvalds 
888bea904d5SLee Schermerhorn /*
889bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
89058568d2aSMiao Xie  *
89158568d2aSMiao Xie  * Called with task's alloc_lock held
892bea904d5SLee Schermerhorn  */
893bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8941da177e4SLinus Torvalds {
895dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
896bea904d5SLee Schermerhorn 	if (p == &default_policy)
897bea904d5SLee Schermerhorn 		return;
898bea904d5SLee Schermerhorn 
89945c4745aSLee Schermerhorn 	switch (p->mode) {
90019770b32SMel Gorman 	case MPOL_BIND:
9011da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
902269fbe72SBen Widawsky 	case MPOL_PREFERRED:
903b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
904269fbe72SBen Widawsky 		*nodes = p->nodes;
9051da177e4SLinus Torvalds 		break;
9067858d7bcSFeng Tang 	case MPOL_LOCAL:
9077858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9087858d7bcSFeng Tang 		break;
9091da177e4SLinus Torvalds 	default:
9101da177e4SLinus Torvalds 		BUG();
9111da177e4SLinus Torvalds 	}
9121da177e4SLinus Torvalds }
9131da177e4SLinus Torvalds 
9143b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9151da177e4SLinus Torvalds {
916ba841078SPeter Xu 	struct page *p = NULL;
917f728b9c4SJohn Hubbard 	int ret;
9181da177e4SLinus Torvalds 
919f728b9c4SJohn Hubbard 	ret = get_user_pages_fast(addr & PAGE_MASK, 1, 0, &p);
920f728b9c4SJohn Hubbard 	if (ret > 0) {
921f728b9c4SJohn Hubbard 		ret = page_to_nid(p);
9221da177e4SLinus Torvalds 		put_page(p);
9231da177e4SLinus Torvalds 	}
924f728b9c4SJohn Hubbard 	return ret;
9251da177e4SLinus Torvalds }
9261da177e4SLinus Torvalds 
9271da177e4SLinus Torvalds /* Retrieve NUMA policy */
928dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9291da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9301da177e4SLinus Torvalds {
9318bccd85fSChristoph Lameter 	int err;
9321da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9331da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9343b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9351da177e4SLinus Torvalds 
936754af6f5SLee Schermerhorn 	if (flags &
937754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9381da177e4SLinus Torvalds 		return -EINVAL;
939754af6f5SLee Schermerhorn 
940754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
941754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
942754af6f5SLee Schermerhorn 			return -EINVAL;
943754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
94458568d2aSMiao Xie 		task_lock(current);
945754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
94658568d2aSMiao Xie 		task_unlock(current);
947754af6f5SLee Schermerhorn 		return 0;
948754af6f5SLee Schermerhorn 	}
949754af6f5SLee Schermerhorn 
9501da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
951bea904d5SLee Schermerhorn 		/*
952bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
953bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
954bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
955bea904d5SLee Schermerhorn 		 */
956d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
95733e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9581da177e4SLinus Torvalds 		if (!vma) {
959d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9601da177e4SLinus Torvalds 			return -EFAULT;
9611da177e4SLinus Torvalds 		}
9621da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9631da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9641da177e4SLinus Torvalds 		else
9651da177e4SLinus Torvalds 			pol = vma->vm_policy;
9661da177e4SLinus Torvalds 	} else if (addr)
9671da177e4SLinus Torvalds 		return -EINVAL;
9681da177e4SLinus Torvalds 
9691da177e4SLinus Torvalds 	if (!pol)
970bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9711da177e4SLinus Torvalds 
9721da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9731da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9743b9aadf7SAndrea Arcangeli 			/*
975f728b9c4SJohn Hubbard 			 * Take a refcount on the mpol, because we are about to
976f728b9c4SJohn Hubbard 			 * drop the mmap_lock, after which only "pol" remains
977f728b9c4SJohn Hubbard 			 * valid, "vma" is stale.
9783b9aadf7SAndrea Arcangeli 			 */
9793b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9803b9aadf7SAndrea Arcangeli 			vma = NULL;
9813b9aadf7SAndrea Arcangeli 			mpol_get(pol);
982f728b9c4SJohn Hubbard 			mmap_read_unlock(mm);
9833b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9841da177e4SLinus Torvalds 			if (err < 0)
9851da177e4SLinus Torvalds 				goto out;
9868bccd85fSChristoph Lameter 			*policy = err;
9871da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
98845c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
989269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9901da177e4SLinus Torvalds 		} else {
9911da177e4SLinus Torvalds 			err = -EINVAL;
9921da177e4SLinus Torvalds 			goto out;
9931da177e4SLinus Torvalds 		}
994bea904d5SLee Schermerhorn 	} else {
995bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
996bea904d5SLee Schermerhorn 						pol->mode;
997d79df630SDavid Rientjes 		/*
998d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
999d79df630SDavid Rientjes 		 * the policy to userspace.
1000d79df630SDavid Rientjes 		 */
1001d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1002bea904d5SLee Schermerhorn 	}
10031da177e4SLinus Torvalds 
10041da177e4SLinus Torvalds 	err = 0;
100558568d2aSMiao Xie 	if (nmask) {
1006c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1007c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1008c6b6ef8bSLee Schermerhorn 		} else {
100958568d2aSMiao Xie 			task_lock(current);
1010bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
101158568d2aSMiao Xie 			task_unlock(current);
101258568d2aSMiao Xie 		}
1013c6b6ef8bSLee Schermerhorn 	}
10141da177e4SLinus Torvalds 
10151da177e4SLinus Torvalds  out:
101652cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10171da177e4SLinus Torvalds 	if (vma)
1018d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10193b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10203b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10211da177e4SLinus Torvalds 	return err;
10221da177e4SLinus Torvalds }
10231da177e4SLinus Torvalds 
1024b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10258bccd85fSChristoph Lameter /*
1026c8633798SNaoya Horiguchi  * page migration; THP tail pages can be passed.
10276ce3c4c0SChristoph Lameter  */
1028a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1029fc301289SChristoph Lameter 				unsigned long flags)
10306ce3c4c0SChristoph Lameter {
1031c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
10326ce3c4c0SChristoph Lameter 	/*
1033fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
10346ce3c4c0SChristoph Lameter 	 */
1035c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1036c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
1037c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
1038c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
10399de4f22aSHuang Ying 				NR_ISOLATED_ANON + page_is_file_lru(head),
10406c357848SMatthew Wilcox (Oracle) 				thp_nr_pages(head));
1041a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1042a53190a4SYang Shi 			/*
1043a53190a4SYang Shi 			 * A non-movable page may reach here.  Also, there may be
1044a53190a4SYang Shi 			 * pages temporarily off the LRU, or non-LRU movable
1045a53190a4SYang Shi 			 * pages.  Treat them as unmovable pages since they can't
1046a53190a4SYang Shi 			 * be isolated, so they can't be moved at the moment.
1047a53190a4SYang Shi 			 * Return -EIO for this case too.
1048a53190a4SYang Shi 			 */
1049a53190a4SYang Shi 			return -EIO;
105062695a84SNick Piggin 		}
105162695a84SNick Piggin 	}
1052a53190a4SYang Shi 
1053a53190a4SYang Shi 	return 0;
10546ce3c4c0SChristoph Lameter }
10556ce3c4c0SChristoph Lameter 
10566ce3c4c0SChristoph Lameter /*
10577e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10587e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10597e2ab150SChristoph Lameter  */
1060dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1061dbcb0f19SAdrian Bunk 			   int flags)
10627e2ab150SChristoph Lameter {
10637e2ab150SChristoph Lameter 	nodemask_t nmask;
106466850be5SLiam R. Howlett 	struct vm_area_struct *vma;
10657e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10667e2ab150SChristoph Lameter 	int err = 0;
1067a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1068a0976311SJoonsoo Kim 		.nid = dest,
1069a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1070a0976311SJoonsoo Kim 	};
10717e2ab150SChristoph Lameter 
10727e2ab150SChristoph Lameter 	nodes_clear(nmask);
10737e2ab150SChristoph Lameter 	node_set(source, nmask);
10747e2ab150SChristoph Lameter 
107508270807SMinchan Kim 	/*
107608270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
107708270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107808270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
107908270807SMinchan Kim 	 */
108066850be5SLiam R. Howlett 	vma = find_vma(mm, 0);
108108270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
108266850be5SLiam R. Howlett 	queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
10837e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10847e2ab150SChristoph Lameter 
1085cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1086a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10875ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1088cf608ac1SMinchan Kim 		if (err)
1089e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1090cf608ac1SMinchan Kim 	}
109195a402c3SChristoph Lameter 
10927e2ab150SChristoph Lameter 	return err;
10937e2ab150SChristoph Lameter }
10947e2ab150SChristoph Lameter 
10957e2ab150SChristoph Lameter /*
10967e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10977e2ab150SChristoph Lameter  * layout as much as possible.
109839743889SChristoph Lameter  *
109939743889SChristoph Lameter  * Returns the number of pages that could not be moved.
110039743889SChristoph Lameter  */
11010ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11020ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110339743889SChristoph Lameter {
11047e2ab150SChristoph Lameter 	int busy = 0;
1105f555befdSJan Stancek 	int err = 0;
11067e2ab150SChristoph Lameter 	nodemask_t tmp;
110739743889SChristoph Lameter 
1108361a2a22SMinchan Kim 	lru_cache_disable();
11090aedadf9SChristoph Lameter 
1110d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1111d4984711SChristoph Lameter 
11127e2ab150SChristoph Lameter 	/*
11137e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11147e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11157e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11167e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11177e2ab150SChristoph Lameter 	 *
11187e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
11197e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11207e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11217e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11227e2ab150SChristoph Lameter 	 *
11237e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11247e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11257e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11267e2ab150SChristoph Lameter 	 *
11277e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11287e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11297e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11307e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11317e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
11327e2ab150SChristoph Lameter 	 *
11337e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11347e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11357e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
11367e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out early, with that pair.
1137ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11387e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11397e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11407e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11417e2ab150SChristoph Lameter 	 */
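	/*
	 * An illustrative trace (assuming nodes 0-2 exist): with
	 * from = {0,1} and to = {1,2}, tmp starts as {0,1}.  Scanning tmp,
	 * s=0 remaps to d=1, but node 1 is still set in tmp, so keep going;
	 * s=1 remaps to d=2, which is not in tmp, so migrate 1 -> 2 first
	 * and clear node 1.  On the next pass tmp = {0} and s=0 remaps to
	 * the now-empty node 1, so migrate 0 -> 1.  Emptying node 1 before
	 * filling it avoids transiently overloading its memory.
	 */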
11427e2ab150SChristoph Lameter 
11430ce72d4fSAndrew Morton 	tmp = *from;
11447e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11457e2ab150SChristoph Lameter 		int s, d;
1146b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11477e2ab150SChristoph Lameter 		int dest = 0;
11487e2ab150SChristoph Lameter 
11497e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11504a5b18ccSLarry Woodman 
11514a5b18ccSLarry Woodman 			/*
11524a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11534a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11544a5b18ccSLarry Woodman 			 * threads and memory areas.
11554a5b18ccSLarry Woodman 			 *
11564a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11574a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11584a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
11594a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11604a5b18ccSLarry Woodman 			 * mask.
11614a5b18ccSLarry Woodman 			 *
11624a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11634a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11644a5b18ccSLarry Woodman 			 */
11654a5b18ccSLarry Woodman 
11660ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11670ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11684a5b18ccSLarry Woodman 				continue;
11694a5b18ccSLarry Woodman 
11700ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11717e2ab150SChristoph Lameter 			if (s == d)
11727e2ab150SChristoph Lameter 				continue;
11737e2ab150SChristoph Lameter 
11747e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11757e2ab150SChristoph Lameter 			dest = d;
11767e2ab150SChristoph Lameter 
11777e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11787e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11797e2ab150SChristoph Lameter 				break;
11807e2ab150SChristoph Lameter 		}
1181b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11827e2ab150SChristoph Lameter 			break;
11837e2ab150SChristoph Lameter 
11847e2ab150SChristoph Lameter 		node_clear(source, tmp);
11857e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11867e2ab150SChristoph Lameter 		if (err > 0)
11877e2ab150SChristoph Lameter 			busy += err;
11887e2ab150SChristoph Lameter 		if (err < 0)
11897e2ab150SChristoph Lameter 			break;
119039743889SChristoph Lameter 	}
1191d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1192d479960eSMinchan Kim 
1193361a2a22SMinchan Kim 	lru_cache_enable();
11947e2ab150SChristoph Lameter 	if (err < 0)
11957e2ab150SChristoph Lameter 		return err;
11967e2ab150SChristoph Lameter 	return busy;
1197b20a3503SChristoph Lameter 
119839743889SChristoph Lameter }
119939743889SChristoph Lameter 
12003ad33b24SLee Schermerhorn /*
12013ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1202d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma that contains @start.
12033ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12043ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12053ad33b24SLee Schermerhorn  * is in virtual address order.
12063ad33b24SLee Schermerhorn  */
1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
120895a402c3SChristoph Lameter {
1209ec4858e0SMatthew Wilcox (Oracle) 	struct folio *dst, *src = page_folio(page);
1210d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12113f649ab7SKees Cook 	unsigned long address;
121266850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, current->mm, start);
1213ec4858e0SMatthew Wilcox (Oracle) 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
121495a402c3SChristoph Lameter 
121566850be5SLiam R. Howlett 	for_each_vma(vmi, vma) {
12163ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12173ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12183ad33b24SLee Schermerhorn 			break;
12193ad33b24SLee Schermerhorn 	}
12203ad33b24SLee Schermerhorn 
1221ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_hugetlb(src))
1222ec4858e0SMatthew Wilcox (Oracle) 		return alloc_huge_page_vma(page_hstate(&src->page),
1223389c8178SMichal Hocko 				vma, address);
1224c8633798SNaoya Horiguchi 
1225ec4858e0SMatthew Wilcox (Oracle) 	if (folio_test_large(src))
1226ec4858e0SMatthew Wilcox (Oracle) 		gfp = GFP_TRANSHUGE;
1227ec4858e0SMatthew Wilcox (Oracle) 
122811c731e8SWanpeng Li 	/*
1229ec4858e0SMatthew Wilcox (Oracle) 	 * If !vma, vma_alloc_folio() will use the task or system default policy.
123011c731e8SWanpeng Li 	 */
1231ec4858e0SMatthew Wilcox (Oracle) 	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
1232ec4858e0SMatthew Wilcox (Oracle) 			folio_test_large(src));
1233ec4858e0SMatthew Wilcox (Oracle) 	return &dst->page;
123495a402c3SChristoph Lameter }
1235b20a3503SChristoph Lameter #else
1236b20a3503SChristoph Lameter 
1237a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1238b20a3503SChristoph Lameter 				unsigned long flags)
1239b20a3503SChristoph Lameter {
1240a53190a4SYang Shi 	return -EIO;
1241b20a3503SChristoph Lameter }
1242b20a3503SChristoph Lameter 
12430ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12440ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1245b20a3503SChristoph Lameter {
1246b20a3503SChristoph Lameter 	return -ENOSYS;
1247b20a3503SChristoph Lameter }
124895a402c3SChristoph Lameter 
1249666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
125095a402c3SChristoph Lameter {
125195a402c3SChristoph Lameter 	return NULL;
125295a402c3SChristoph Lameter }
1253b20a3503SChristoph Lameter #endif
1254b20a3503SChristoph Lameter 
1255dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1256028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1257028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12586ce3c4c0SChristoph Lameter {
12596ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12606ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12616ce3c4c0SChristoph Lameter 	unsigned long end;
12626ce3c4c0SChristoph Lameter 	int err;
1263d8835445SYang Shi 	int ret;
12646ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12656ce3c4c0SChristoph Lameter 
1266b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12676ce3c4c0SChristoph Lameter 		return -EINVAL;
126874c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12696ce3c4c0SChristoph Lameter 		return -EPERM;
12706ce3c4c0SChristoph Lameter 
12716ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12726ce3c4c0SChristoph Lameter 		return -EINVAL;
12736ce3c4c0SChristoph Lameter 
12746ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12756ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12766ce3c4c0SChristoph Lameter 
1277aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
12786ce3c4c0SChristoph Lameter 	end = start + len;
12796ce3c4c0SChristoph Lameter 
12806ce3c4c0SChristoph Lameter 	if (end < start)
12816ce3c4c0SChristoph Lameter 		return -EINVAL;
12826ce3c4c0SChristoph Lameter 	if (end == start)
12836ce3c4c0SChristoph Lameter 		return 0;
12846ce3c4c0SChristoph Lameter 
1285028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12866ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12876ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12886ce3c4c0SChristoph Lameter 
1289b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1290b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1291b24f53a0SLee Schermerhorn 
12926ce3c4c0SChristoph Lameter 	/*
12936ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operations
12946ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces are okay after all.
12956ce3c4c0SChristoph Lameter 	 */
12966ce3c4c0SChristoph Lameter 	if (!new)
12976ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12986ce3c4c0SChristoph Lameter 
1299028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1300028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130100ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13026ce3c4c0SChristoph Lameter 
13030aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13040aedadf9SChristoph Lameter 
1305361a2a22SMinchan Kim 		lru_cache_disable();
13060aedadf9SChristoph Lameter 	}
13074bfc4495SKAMEZAWA Hiroyuki 	{
13084bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13094bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1310d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13114bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13124bfc4495SKAMEZAWA Hiroyuki 			if (err)
1313d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13144bfc4495SKAMEZAWA Hiroyuki 		} else
13154bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13164bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13174bfc4495SKAMEZAWA Hiroyuki 	}
1318b05ca738SKOSAKI Motohiro 	if (err)
1319b05ca738SKOSAKI Motohiro 		goto mpol_out;
1320b05ca738SKOSAKI Motohiro 
1321d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13226ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1323d8835445SYang Shi 
1324d8835445SYang Shi 	if (ret < 0) {
1325a85dfc30SYang Shi 		err = ret;
1326d8835445SYang Shi 		goto up_out;
1327d8835445SYang Shi 	}
1328d8835445SYang Shi 
13299d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13307e2ab150SChristoph Lameter 
1331b24f53a0SLee Schermerhorn 	if (!err) {
1332b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1333b24f53a0SLee Schermerhorn 
1334cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1335b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1336d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
13375ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1338cf608ac1SMinchan Kim 			if (nr_failed)
133974060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1340cf608ac1SMinchan Kim 		}
13416ce3c4c0SChristoph Lameter 
1342d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13436ce3c4c0SChristoph Lameter 			err = -EIO;
1344a85dfc30SYang Shi 	} else {
1345d8835445SYang Shi up_out:
1346a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1347a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1348a85dfc30SYang Shi 	}
1349a85dfc30SYang Shi 
1350d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1351b05ca738SKOSAKI Motohiro mpol_out:
1352f0be3d32SLee Schermerhorn 	mpol_put(new);
1353d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1354361a2a22SMinchan Kim 		lru_cache_enable();
13556ce3c4c0SChristoph Lameter 	return err;
13566ce3c4c0SChristoph Lameter }
13576ce3c4c0SChristoph Lameter 
135839743889SChristoph Lameter /*
13598bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13608bccd85fSChristoph Lameter  */
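/*
 * From userspace these nodemasks are plain bitmaps of unsigned longs:
 * node n lives at bit (n % BITS_PER_LONG) of word (n / BITS_PER_LONG),
 * and maxnode gives the size of the bitmap in bits.  Note that
 * get_nodes() below decrements maxnode before copying, so passing a
 * value at least two greater than the highest node set keeps that node
 * in range.  A minimal sketch of building such a mask (the variable
 * names are placeholders):
 *
 *	unsigned long nodemask = 0;
 *	nodemask |= 1UL << 3;				// request node 3
 *	unsigned long maxnode = 8 * sizeof(nodemask);	// 64 bits on LP64
 */
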
1361e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1362e130242dSArnd Bergmann 		      unsigned long maxnode)
1363e130242dSArnd Bergmann {
1364e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1365e130242dSArnd Bergmann 	int ret;
1366e130242dSArnd Bergmann 
1367e130242dSArnd Bergmann 	if (in_compat_syscall())
1368e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1369e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1370e130242dSArnd Bergmann 					maxnode);
1371e130242dSArnd Bergmann 	else
1372e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1373e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1374e130242dSArnd Bergmann 
1375e130242dSArnd Bergmann 	if (ret)
1376e130242dSArnd Bergmann 		return -EFAULT;
1377e130242dSArnd Bergmann 
1378e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1379e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1380e130242dSArnd Bergmann 
1381e130242dSArnd Bergmann 	return 0;
1382e130242dSArnd Bergmann }
13838bccd85fSChristoph Lameter 
13848bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138539743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13868bccd85fSChristoph Lameter 		     unsigned long maxnode)
13878bccd85fSChristoph Lameter {
13888bccd85fSChristoph Lameter 	--maxnode;
13898bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13908bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13918bccd85fSChristoph Lameter 		return 0;
1392a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1393636f13c1SChris Wright 		return -EINVAL;
13948bccd85fSChristoph Lameter 
139556521e7aSYisheng Xie 	/*
139656521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1397e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1398e130242dSArnd Bergmann 	 * starting at the end.
139956521e7aSYisheng Xie 	 */
1400e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1401e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1402e130242dSArnd Bergmann 		unsigned long t;
14038bccd85fSChristoph Lameter 
1404000eca5dSTianyu Li 		if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))
140556521e7aSYisheng Xie 			return -EFAULT;
1406e130242dSArnd Bergmann 
1407e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1408e130242dSArnd Bergmann 			maxnode -= bits;
1409e130242dSArnd Bergmann 		} else {
1410e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1411e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1412e130242dSArnd Bergmann 		}
1413e130242dSArnd Bergmann 		if (t)
141456521e7aSYisheng Xie 			return -EINVAL;
141556521e7aSYisheng Xie 	}
141656521e7aSYisheng Xie 
1417e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14188bccd85fSChristoph Lameter }
14198bccd85fSChristoph Lameter 
14208bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14218bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14228bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14238bccd85fSChristoph Lameter {
14248bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1425050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1426e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1427e130242dSArnd Bergmann 
1428e130242dSArnd Bergmann 	if (compat)
1429e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14308bccd85fSChristoph Lameter 
14318bccd85fSChristoph Lameter 	if (copy > nbytes) {
14328bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14338bccd85fSChristoph Lameter 			return -EINVAL;
14348bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14358bccd85fSChristoph Lameter 			return -EFAULT;
14368bccd85fSChristoph Lameter 		copy = nbytes;
1437e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14388bccd85fSChristoph Lameter 	}
1439e130242dSArnd Bergmann 
1440e130242dSArnd Bergmann 	if (compat)
1441e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1442e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1443e130242dSArnd Bergmann 
14448bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14458bccd85fSChristoph Lameter }
14468bccd85fSChristoph Lameter 
144795837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
144895837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
144995837924SFeng Tang {
145095837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
145195837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1452b27abaccSDave Hansen 
1453a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >=  MPOL_MAX)
145495837924SFeng Tang 		return -EINVAL;
145595837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
145695837924SFeng Tang 		return -EINVAL;
14576d2aec9eSEric Dumazet 	if (*flags & MPOL_F_NUMA_BALANCING) {
14586d2aec9eSEric Dumazet 		if (*mode != MPOL_BIND)
14596d2aec9eSEric Dumazet 			return -EINVAL;
14606d2aec9eSEric Dumazet 		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
14616d2aec9eSEric Dumazet 	}
146295837924SFeng Tang 	return 0;
146395837924SFeng Tang }
146495837924SFeng Tang 
1465e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1466e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1467e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14688bccd85fSChristoph Lameter {
1469028fec41SDavid Rientjes 	unsigned short mode_flags;
147095837924SFeng Tang 	nodemask_t nodes;
147195837924SFeng Tang 	int lmode = mode;
147295837924SFeng Tang 	int err;
14738bccd85fSChristoph Lameter 
1474057d3389SAndrey Konovalov 	start = untagged_addr(start);
147595837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
147695837924SFeng Tang 	if (err)
147795837924SFeng Tang 		return err;
147895837924SFeng Tang 
14798bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14808bccd85fSChristoph Lameter 	if (err)
14818bccd85fSChristoph Lameter 		return err;
148295837924SFeng Tang 
148395837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14848bccd85fSChristoph Lameter }
14858bccd85fSChristoph Lameter 
1486c6018b4bSAneesh Kumar K.V SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, len,
1487c6018b4bSAneesh Kumar K.V 		unsigned long, home_node, unsigned long, flags)
1488c6018b4bSAneesh Kumar K.V {
1489c6018b4bSAneesh Kumar K.V 	struct mm_struct *mm = current->mm;
1490c6018b4bSAneesh Kumar K.V 	struct vm_area_struct *vma;
1491e976936cSMichal Hocko 	struct mempolicy *new, *old;
1492c6018b4bSAneesh Kumar K.V 	unsigned long vmstart;
1493c6018b4bSAneesh Kumar K.V 	unsigned long vmend;
1494c6018b4bSAneesh Kumar K.V 	unsigned long end;
1495c6018b4bSAneesh Kumar K.V 	int err = -ENOENT;
149666850be5SLiam R. Howlett 	VMA_ITERATOR(vmi, mm, start);
1497c6018b4bSAneesh Kumar K.V 
1498c6018b4bSAneesh Kumar K.V 	start = untagged_addr(start);
1499c6018b4bSAneesh Kumar K.V 	if (start & ~PAGE_MASK)
1500c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1501c6018b4bSAneesh Kumar K.V 	/*
1502c6018b4bSAneesh Kumar K.V 	 * flags is reserved for future extensions.
1503c6018b4bSAneesh Kumar K.V 	 */
1504c6018b4bSAneesh Kumar K.V 	if (flags != 0)
1505c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1506c6018b4bSAneesh Kumar K.V 
1507c6018b4bSAneesh Kumar K.V 	/*
1508c6018b4bSAneesh Kumar K.V 	 * Check home_node is online to avoid accessing uninitialized
1509c6018b4bSAneesh Kumar K.V 	 * NODE_DATA.
1510c6018b4bSAneesh Kumar K.V 	 */
1511c6018b4bSAneesh Kumar K.V 	if (home_node >= MAX_NUMNODES || !node_online(home_node))
1512c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1513c6018b4bSAneesh Kumar K.V 
1514aaa31e05Sze zuo 	len = PAGE_ALIGN(len);
1515c6018b4bSAneesh Kumar K.V 	end = start + len;
1516c6018b4bSAneesh Kumar K.V 
1517c6018b4bSAneesh Kumar K.V 	if (end < start)
1518c6018b4bSAneesh Kumar K.V 		return -EINVAL;
1519c6018b4bSAneesh Kumar K.V 	if (end == start)
1520c6018b4bSAneesh Kumar K.V 		return 0;
1521c6018b4bSAneesh Kumar K.V 	mmap_write_lock(mm);
152266850be5SLiam R. Howlett 	for_each_vma_range(vmi, vma, end) {
1523c6018b4bSAneesh Kumar K.V 		/*
1524c6018b4bSAneesh Kumar K.V 		 * If any vma in the range has a policy other than MPOL_BIND
1525c6018b4bSAneesh Kumar K.V 		 * or MPOL_PREFERRED_MANY, we return an error.  We don't reset
1526c6018b4bSAneesh Kumar K.V 		 * the home node for vmas we have already updated.
1527c6018b4bSAneesh Kumar K.V 		 */
1528e976936cSMichal Hocko 		old = vma_policy(vma);
1529e976936cSMichal Hocko 		if (!old)
1530e976936cSMichal Hocko 			continue;
1531e976936cSMichal Hocko 		if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
1532c6018b4bSAneesh Kumar K.V 			err = -EOPNOTSUPP;
1533c6018b4bSAneesh Kumar K.V 			break;
1534c6018b4bSAneesh Kumar K.V 		}
1535e976936cSMichal Hocko 		new = mpol_dup(old);
1536e976936cSMichal Hocko 		if (IS_ERR(new)) {
1537e976936cSMichal Hocko 			err = PTR_ERR(new);
1538e976936cSMichal Hocko 			break;
1539e976936cSMichal Hocko 		}
1540c6018b4bSAneesh Kumar K.V 
1541c6018b4bSAneesh Kumar K.V 		new->home_node = home_node;
1542e976936cSMichal Hocko 		vmstart = max(start, vma->vm_start);
1543e976936cSMichal Hocko 		vmend   = min(end, vma->vm_end);
1544c6018b4bSAneesh Kumar K.V 		err = mbind_range(mm, vmstart, vmend, new);
1545c6018b4bSAneesh Kumar K.V 		mpol_put(new);
1546c6018b4bSAneesh Kumar K.V 		if (err)
1547c6018b4bSAneesh Kumar K.V 			break;
1548c6018b4bSAneesh Kumar K.V 	}
1549c6018b4bSAneesh Kumar K.V 	mmap_write_unlock(mm);
1550c6018b4bSAneesh Kumar K.V 	return err;
1551c6018b4bSAneesh Kumar K.V }
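
/*
 * A minimal userspace sketch of the syscall above.  There is no glibc
 * wrapper, so it goes through syscall(2); this assumes headers that
 * define __NR_set_mempolicy_home_node, and 'buf'/'len' are placeholders
 * for a range that already carries an MPOL_BIND or MPOL_PREFERRED_MANY
 * policy (otherwise -EOPNOTSUPP is returned, as above):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	// Prefer satisfying the existing policy from node 1 first.
 *	if (syscall(__NR_set_mempolicy_home_node,
 *		    (unsigned long)buf, len, 1UL, 0UL) != 0)
 *		perror("set_mempolicy_home_node");
 */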
1552c6018b4bSAneesh Kumar K.V 
1553e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1554e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1555e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1556e7dc9ad6SDominik Brodowski {
1557e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1558e7dc9ad6SDominik Brodowski }
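
/*
 * A minimal userspace sketch of the mbind(2) interface above.  It
 * assumes the mbind() wrapper from libnuma's <numaif.h>, that node 0
 * exists, and uses 'len' as a placeholder length; it is illustrative,
 * not a definitive recipe.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long nodemask = 1UL << 0;	// node 0 only
 *	// Bind the range to node 0 and migrate pages already elsewhere.
 *	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT) != 0)
 *		perror("mbind");
 */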
1559e7dc9ad6SDominik Brodowski 
15608bccd85fSChristoph Lameter /* Set the process memory policy */
1561af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1562af03c4acSDominik Brodowski 				 unsigned long maxnode)
15638bccd85fSChristoph Lameter {
156495837924SFeng Tang 	unsigned short mode_flags;
15658bccd85fSChristoph Lameter 	nodemask_t nodes;
156695837924SFeng Tang 	int lmode = mode;
156795837924SFeng Tang 	int err;
15688bccd85fSChristoph Lameter 
156995837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
157095837924SFeng Tang 	if (err)
157195837924SFeng Tang 		return err;
157295837924SFeng Tang 
15738bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15748bccd85fSChristoph Lameter 	if (err)
15758bccd85fSChristoph Lameter 		return err;
157695837924SFeng Tang 
157795837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15788bccd85fSChristoph Lameter }
15798bccd85fSChristoph Lameter 
1580af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1581af03c4acSDominik Brodowski 		unsigned long, maxnode)
1582af03c4acSDominik Brodowski {
1583af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1584af03c4acSDominik Brodowski }
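
/*
 * A minimal userspace sketch of set_mempolicy(2) (assumes the wrapper
 * from libnuma's <numaif.h> and that nodes 0 and 1 exist; illustrative
 * only):
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *	// Interleave this task's future allocations across nodes 0 and 1.
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask)))
 *		perror("set_mempolicy");
 */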
1585af03c4acSDominik Brodowski 
1586b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1587b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1588b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
158939743889SChristoph Lameter {
1590596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
159139743889SChristoph Lameter 	struct task_struct *task;
159239743889SChristoph Lameter 	nodemask_t task_nodes;
159339743889SChristoph Lameter 	int err;
1594596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1595596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1596596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
159739743889SChristoph Lameter 
1598596d7cfaSKOSAKI Motohiro 	if (!scratch)
1599596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
160039743889SChristoph Lameter 
1601596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1602596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1603596d7cfaSKOSAKI Motohiro 
1604596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
160539743889SChristoph Lameter 	if (err)
1606596d7cfaSKOSAKI Motohiro 		goto out;
1607596d7cfaSKOSAKI Motohiro 
1608596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1609596d7cfaSKOSAKI Motohiro 	if (err)
1610596d7cfaSKOSAKI Motohiro 		goto out;
161139743889SChristoph Lameter 
161239743889SChristoph Lameter 	/* Find the mm_struct */
161355cfaa3cSZeng Zhaoming 	rcu_read_lock();
1614228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
161539743889SChristoph Lameter 	if (!task) {
161655cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1617596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1618596d7cfaSKOSAKI Motohiro 		goto out;
161939743889SChristoph Lameter 	}
16203268c63eSChristoph Lameter 	get_task_struct(task);
162139743889SChristoph Lameter 
1622596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
162339743889SChristoph Lameter 
162439743889SChristoph Lameter 	/*
162531367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
162631367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
162739743889SChristoph Lameter 	 */
162831367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1629c69e8d9cSDavid Howells 		rcu_read_unlock();
163039743889SChristoph Lameter 		err = -EPERM;
16313268c63eSChristoph Lameter 		goto out_put;
163239743889SChristoph Lameter 	}
1633c69e8d9cSDavid Howells 	rcu_read_unlock();
163439743889SChristoph Lameter 
163539743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
163639743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1637596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
163839743889SChristoph Lameter 		err = -EPERM;
16393268c63eSChristoph Lameter 		goto out_put;
164039743889SChristoph Lameter 	}
164139743889SChristoph Lameter 
16420486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
16430486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
16440486a38bSYisheng Xie 	if (nodes_empty(*new))
16453268c63eSChristoph Lameter 		goto out_put;
16460486a38bSYisheng Xie 
164786c3a764SDavid Quigley 	err = security_task_movememory(task);
164886c3a764SDavid Quigley 	if (err)
16493268c63eSChristoph Lameter 		goto out_put;
165086c3a764SDavid Quigley 
16513268c63eSChristoph Lameter 	mm = get_task_mm(task);
16523268c63eSChristoph Lameter 	put_task_struct(task);
1653f2a9ef88SSasha Levin 
1654f2a9ef88SSasha Levin 	if (!mm) {
1655f2a9ef88SSasha Levin 		err = -EINVAL;
1656f2a9ef88SSasha Levin 		goto out;
1657f2a9ef88SSasha Levin 	}
1658f2a9ef88SSasha Levin 
1659596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
166074c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
16613268c63eSChristoph Lameter 
166239743889SChristoph Lameter 	mmput(mm);
16633268c63eSChristoph Lameter out:
1664596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1665596d7cfaSKOSAKI Motohiro 
166639743889SChristoph Lameter 	return err;
16673268c63eSChristoph Lameter 
16683268c63eSChristoph Lameter out_put:
16693268c63eSChristoph Lameter 	put_task_struct(task);
16703268c63eSChristoph Lameter 	goto out;
16713268c63eSChristoph Lameter 
167239743889SChristoph Lameter }
167339743889SChristoph Lameter 
1674b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1675b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1676b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1677b6e9b0baSDominik Brodowski {
1678b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1679b6e9b0baSDominik Brodowski }
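
/*
 * A minimal userspace sketch of migrate_pages(2) (assumes the wrapper
 * from libnuma's <numaif.h>; 'pid' is a placeholder for the target
 * process, and nodes 0 and 1 are assumed to exist):
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0;	// move pages off node 0 ...
 *	unsigned long new_nodes = 1UL << 1;	// ... and onto node 1
 *	long not_moved = migrate_pages(pid, 8 * sizeof(unsigned long),
 *				       &old_nodes, &new_nodes);
 *	// >= 0: pages that could not be moved; -1: error reported in errno.
 */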
1680b6e9b0baSDominik Brodowski 
168139743889SChristoph Lameter 
16828bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1683af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1684af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1685af03c4acSDominik Brodowski 				unsigned long maxnode,
1686af03c4acSDominik Brodowski 				unsigned long addr,
1687af03c4acSDominik Brodowski 				unsigned long flags)
16888bccd85fSChristoph Lameter {
1689dbcb0f19SAdrian Bunk 	int err;
16903f649ab7SKees Cook 	int pval;
16918bccd85fSChristoph Lameter 	nodemask_t nodes;
16928bccd85fSChristoph Lameter 
1693050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16948bccd85fSChristoph Lameter 		return -EINVAL;
16958bccd85fSChristoph Lameter 
16964605f057SWenchao Hao 	addr = untagged_addr(addr);
16974605f057SWenchao Hao 
16988bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16998bccd85fSChristoph Lameter 
17008bccd85fSChristoph Lameter 	if (err)
17018bccd85fSChristoph Lameter 		return err;
17028bccd85fSChristoph Lameter 
17038bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
17048bccd85fSChristoph Lameter 		return -EFAULT;
17058bccd85fSChristoph Lameter 
17068bccd85fSChristoph Lameter 	if (nmask)
17078bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
17088bccd85fSChristoph Lameter 
17098bccd85fSChristoph Lameter 	return err;
17108bccd85fSChristoph Lameter }
17118bccd85fSChristoph Lameter 
1712af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1713af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1714af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1715af03c4acSDominik Brodowski {
1716af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1717af03c4acSDominik Brodowski }
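
/*
 * A minimal userspace sketch of get_mempolicy(2) (assumes the wrapper
 * from libnuma's <numaif.h>; 'addr' is a placeholder address inside an
 * existing, already-touched mapping):
 *
 *	#include <numaif.h>
 *
 *	int node;
 *	// Ask which node currently backs the page at 'addr'.
 *	if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
 *		perror("get_mempolicy");
 */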
1718af03c4acSDominik Brodowski 
171920ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
172020ca87f2SLi Xinhai {
172120ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
172220ca87f2SLi Xinhai 		return false;
172320ca87f2SLi Xinhai 
172420ca87f2SLi Xinhai 	/*
172520ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
172620ca87f2SLi Xinhai 	 * incurring periodic faults.
172720ca87f2SLi Xinhai 	 */
172820ca87f2SLi Xinhai 	if (vma_is_dax(vma))
172920ca87f2SLi Xinhai 		return false;
173020ca87f2SLi Xinhai 
173120ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
173220ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
173320ca87f2SLi Xinhai 		return false;
173420ca87f2SLi Xinhai 
173520ca87f2SLi Xinhai 	/*
173620ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
173720ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
173820ca87f2SLi Xinhai 	 * possible.
173920ca87f2SLi Xinhai 	 */
174020ca87f2SLi Xinhai 	if (vma->vm_file &&
174120ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
174220ca87f2SLi Xinhai 			< policy_zone)
174320ca87f2SLi Xinhai 		return false;
174420ca87f2SLi Xinhai 	return true;
174520ca87f2SLi Xinhai }
174620ca87f2SLi Xinhai 
174774d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
174874d2c3a0SOleg Nesterov 						unsigned long addr)
17491da177e4SLinus Torvalds {
17508d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17511da177e4SLinus Torvalds 
17521da177e4SLinus Torvalds 	if (vma) {
1753480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17548d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
175500442ad0SMel Gorman 		} else if (vma->vm_policy) {
17561da177e4SLinus Torvalds 			pol = vma->vm_policy;
175700442ad0SMel Gorman 
175800442ad0SMel Gorman 			/*
175900442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
176000442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
176100442ad0SMel Gorman 			 * count on these policies which will be dropped by
176200442ad0SMel Gorman 			 * mpol_cond_put() later
176300442ad0SMel Gorman 			 */
176400442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
176500442ad0SMel Gorman 				mpol_get(pol);
176600442ad0SMel Gorman 		}
17671da177e4SLinus Torvalds 	}
1768f15ca78eSOleg Nesterov 
176974d2c3a0SOleg Nesterov 	return pol;
177074d2c3a0SOleg Nesterov }
177174d2c3a0SOleg Nesterov 
177274d2c3a0SOleg Nesterov /*
1773dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
177474d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
177574d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
177674d2c3a0SOleg Nesterov  *
177774d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1778dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
177974d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
178074d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
178174d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
178274d2c3a0SOleg Nesterov  * extra reference for shared policies.
178374d2c3a0SOleg Nesterov  */
1784ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1785dd6eecb9SOleg Nesterov 						unsigned long addr)
178674d2c3a0SOleg Nesterov {
178774d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
178874d2c3a0SOleg Nesterov 
17898d90274bSOleg Nesterov 	if (!pol)
1790dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17918d90274bSOleg Nesterov 
17921da177e4SLinus Torvalds 	return pol;
17931da177e4SLinus Torvalds }
17941da177e4SLinus Torvalds 
17956b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1796fc314724SMel Gorman {
17976b6482bbSOleg Nesterov 	struct mempolicy *pol;
1798f15ca78eSOleg Nesterov 
1799fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1800fc314724SMel Gorman 		bool ret = false;
1801fc314724SMel Gorman 
1802fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1803fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1804fc314724SMel Gorman 			ret = true;
1805fc314724SMel Gorman 		mpol_cond_put(pol);
1806fc314724SMel Gorman 
1807fc314724SMel Gorman 		return ret;
18088d90274bSOleg Nesterov 	}
18098d90274bSOleg Nesterov 
1810fc314724SMel Gorman 	pol = vma->vm_policy;
18118d90274bSOleg Nesterov 	if (!pol)
18126b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1813fc314724SMel Gorman 
1814fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1815fc314724SMel Gorman }
1816fc314724SMel Gorman 
1817d2226ebdSFeng Tang bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1818d3eb1570SLai Jiangshan {
1819d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1820d3eb1570SLai Jiangshan 
1821d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1822d3eb1570SLai Jiangshan 
1823d3eb1570SLai Jiangshan 	/*
1824269fbe72SBen Widawsky 	 * If policy->nodes has movable memory only,
1825d3eb1570SLai Jiangshan 	 * we apply the policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1826d3eb1570SLai Jiangshan 	 *
1827269fbe72SBen Widawsky 	 * policy->nodes is intersected with node_states[N_MEMORY],
1828f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1829269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1830d3eb1570SLai Jiangshan 	 */
1831269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1832d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1833d3eb1570SLai Jiangshan 
1834d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1835d3eb1570SLai Jiangshan }
1836d3eb1570SLai Jiangshan 
183752cd3b07SLee Schermerhorn /*
183852cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
183952cd3b07SLee Schermerhorn  * page allocation
184052cd3b07SLee Schermerhorn  */
18418ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
184219770b32SMel Gorman {
1843b27abaccSDave Hansen 	int mode = policy->mode;
1844b27abaccSDave Hansen 
184519770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1846b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1847d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1848269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1849269fbe72SBen Widawsky 		return &policy->nodes;
185019770b32SMel Gorman 
1851b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1852b27abaccSDave Hansen 		return &policy->nodes;
1853b27abaccSDave Hansen 
185419770b32SMel Gorman 	return NULL;
185519770b32SMel Gorman }
185619770b32SMel Gorman 
1857b27abaccSDave Hansen /*
1858b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1859b27abaccSDave Hansen  * the given id for all other policies.
1860b27abaccSDave Hansen  *
1861b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1862b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1863b27abaccSDave Hansen  */
1864f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
18651da177e4SLinus Torvalds {
18667858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1867269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18687858d7bcSFeng Tang 	} else {
186919770b32SMel Gorman 		/*
18706d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18716d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18726d840958SMichal Hocko 		 * requested node and not break the policy.
187319770b32SMel Gorman 		 */
18746d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18751da177e4SLinus Torvalds 	}
18766d840958SMichal Hocko 
1877c6018b4bSAneesh Kumar K.V 	if ((policy->mode == MPOL_BIND ||
1878c6018b4bSAneesh Kumar K.V 	     policy->mode == MPOL_PREFERRED_MANY) &&
1879c6018b4bSAneesh Kumar K.V 	    policy->home_node != NUMA_NO_NODE)
1880c6018b4bSAneesh Kumar K.V 		return policy->home_node;
1881c6018b4bSAneesh Kumar K.V 
188204ec6264SVlastimil Babka 	return nd;
18831da177e4SLinus Torvalds }
18841da177e4SLinus Torvalds 
18851da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18861da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18871da177e4SLinus Torvalds {
188845816682SVlastimil Babka 	unsigned next;
18891da177e4SLinus Torvalds 	struct task_struct *me = current;
18901da177e4SLinus Torvalds 
1891269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1892f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
189345816682SVlastimil Babka 		me->il_prev = next;
189445816682SVlastimil Babka 	return next;
18951da177e4SLinus Torvalds }
18961da177e4SLinus Torvalds 
1897dc85da15SChristoph Lameter /*
1898dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1899dc85da15SChristoph Lameter  * next slab entry.
1900dc85da15SChristoph Lameter  */
19012a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1902dc85da15SChristoph Lameter {
1903e7b691b0SAndi Kleen 	struct mempolicy *policy;
19042a389610SDavid Rientjes 	int node = numa_mem_id();
1905e7b691b0SAndi Kleen 
190638b031ddSVasily Averin 	if (!in_task())
19072a389610SDavid Rientjes 		return node;
1908e7b691b0SAndi Kleen 
1909e7b691b0SAndi Kleen 	policy = current->mempolicy;
19107858d7bcSFeng Tang 	if (!policy)
19112a389610SDavid Rientjes 		return node;
1912765c4507SChristoph Lameter 
1913bea904d5SLee Schermerhorn 	switch (policy->mode) {
1914bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1915269fbe72SBen Widawsky 		return first_node(policy->nodes);
1916bea904d5SLee Schermerhorn 
1917dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1918dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1919dc85da15SChristoph Lameter 
1920b27abaccSDave Hansen 	case MPOL_BIND:
1921b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1922b27abaccSDave Hansen 	{
1923c33d6c06SMel Gorman 		struct zoneref *z;
1924c33d6c06SMel Gorman 
1925dc85da15SChristoph Lameter 		/*
1926dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1927dc85da15SChristoph Lameter 		 * first node.
1928dc85da15SChristoph Lameter 		 */
192919770b32SMel Gorman 		struct zonelist *zonelist;
193019770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1931c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1932c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1933269fbe72SBen Widawsky 							&policy->nodes);
1934c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1935dd1a239fSMel Gorman 	}
19367858d7bcSFeng Tang 	case MPOL_LOCAL:
19377858d7bcSFeng Tang 		return node;
1938dc85da15SChristoph Lameter 
1939dc85da15SChristoph Lameter 	default:
1940bea904d5SLee Schermerhorn 		BUG();
1941dc85da15SChristoph Lameter 	}
1942dc85da15SChristoph Lameter }
1943dc85da15SChristoph Lameter 
1944fee83b3aSAndrew Morton /*
1945fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1946269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1947fee83b3aSAndrew Morton  * number of present nodes.
1948fee83b3aSAndrew Morton  */
194998c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
19501da177e4SLinus Torvalds {
1951276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1952276aeee1Syanghui 	unsigned int target, nnodes;
1953fee83b3aSAndrew Morton 	int i;
1954fee83b3aSAndrew Morton 	int nid;
1955276aeee1Syanghui 	/*
1956276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1957276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1958276aeee1Syanghui 	 *
1959276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1960276aeee1Syanghui 	 * by other threads.  So we copy pol->nodes into a local variable on the stack.
1961276aeee1Syanghui 	 */
1962276aeee1Syanghui 	barrier();
19631da177e4SLinus Torvalds 
1964276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1965f5b087b5SDavid Rientjes 	if (!nnodes)
1966f5b087b5SDavid Rientjes 		return numa_node_id();
1967fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1968276aeee1Syanghui 	nid = first_node(nodemask);
1969fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1970276aeee1Syanghui 		nid = next_node(nid, nodemask);
19711da177e4SLinus Torvalds 	return nid;
19721da177e4SLinus Torvalds }
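
/*
 * Worked example for offset_il_node() (assuming a pol->nodes of {1,3,5}):
 * for n = 7, nnodes = 3 and target = 7 % 3 = 1, so we start at node 1
 * and advance once, returning node 3.  A given offset always maps to
 * the same node for a given nodemask, which is what makes VMA-based
 * interleaving stable across faults.
 */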
19731da177e4SLinus Torvalds 
19745da7ca86SChristoph Lameter /* Determine a node number for interleave */
19755da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19765da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19775da7ca86SChristoph Lameter {
19785da7ca86SChristoph Lameter 	if (vma) {
19795da7ca86SChristoph Lameter 		unsigned long off;
19805da7ca86SChristoph Lameter 
19813b98b087SNishanth Aravamudan 		/*
19823b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19833b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19843b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19853b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19863b98b087SNishanth Aravamudan 		 * a useful offset.
19873b98b087SNishanth Aravamudan 		 */
19883b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19893b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19905da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
199198c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19925da7ca86SChristoph Lameter 	} else
19935da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19945da7ca86SChristoph Lameter }
19955da7ca86SChristoph Lameter 
199600ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1997480eccf9SLee Schermerhorn /*
199804ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1999b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
2000b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
2001b46e14acSFabian Frederick  * @gfp_flags: for requested zone
2002b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2003b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2004480eccf9SLee Schermerhorn  *
200504ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
200652cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
2007b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2008b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
2009c0ff7453SMiao Xie  *
2010d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
2011480eccf9SLee Schermerhorn  */
201204ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
201304ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
20145da7ca86SChristoph Lameter {
201504ec6264SVlastimil Babka 	int nid;
2016b27abaccSDave Hansen 	int mode;
20175da7ca86SChristoph Lameter 
2018dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
2019b27abaccSDave Hansen 	*nodemask = NULL;
2020b27abaccSDave Hansen 	mode = (*mpol)->mode;
20215da7ca86SChristoph Lameter 
2022b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
202304ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
202404ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
202552cd3b07SLee Schermerhorn 	} else {
202604ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2027b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
2028269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
2029480eccf9SLee Schermerhorn 	}
203004ec6264SVlastimil Babka 	return nid;
20315da7ca86SChristoph Lameter }
203206808b08SLee Schermerhorn 
203306808b08SLee Schermerhorn /*
203406808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
203506808b08SLee Schermerhorn  *
203606808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
203706808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
203806808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
203906808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
204006808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
204106808b08SLee Schermerhorn  * of non-default mempolicy.
204206808b08SLee Schermerhorn  *
204306808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
204406808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
204506808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
204606808b08SLee Schermerhorn  *
204706808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
204806808b08SLee Schermerhorn  */
204906808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
205006808b08SLee Schermerhorn {
205106808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
205206808b08SLee Schermerhorn 
205306808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
205406808b08SLee Schermerhorn 		return false;
205506808b08SLee Schermerhorn 
2056c0ff7453SMiao Xie 	task_lock(current);
205706808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
205806808b08SLee Schermerhorn 	switch (mempolicy->mode) {
205906808b08SLee Schermerhorn 	case MPOL_PREFERRED:
2060b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
206106808b08SLee Schermerhorn 	case MPOL_BIND:
206206808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
2063269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
206406808b08SLee Schermerhorn 		break;
206506808b08SLee Schermerhorn 
20667858d7bcSFeng Tang 	case MPOL_LOCAL:
2067269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
20687858d7bcSFeng Tang 		break;
20697858d7bcSFeng Tang 
207006808b08SLee Schermerhorn 	default:
207106808b08SLee Schermerhorn 		BUG();
207206808b08SLee Schermerhorn 	}
2073c0ff7453SMiao Xie 	task_unlock(current);
207406808b08SLee Schermerhorn 
207506808b08SLee Schermerhorn 	return true;
207606808b08SLee Schermerhorn }
207700ac59adSChen, Kenneth W #endif
20785da7ca86SChristoph Lameter 
20796f48d0ebSDavid Rientjes /*
2080b26e517aSFeng Tang  * mempolicy_in_oom_domain
20816f48d0ebSDavid Rientjes  *
2082b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2083b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2084b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2085b26e517aSFeng Tang  * memory allocated from all nodes in system.
20866f48d0ebSDavid Rientjes  *
20876f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20886f48d0ebSDavid Rientjes  */
2089b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20906f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20916f48d0ebSDavid Rientjes {
20926f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20936f48d0ebSDavid Rientjes 	bool ret = true;
20946f48d0ebSDavid Rientjes 
20956f48d0ebSDavid Rientjes 	if (!mask)
20966f48d0ebSDavid Rientjes 		return ret;
2097b26e517aSFeng Tang 
20986f48d0ebSDavid Rientjes 	task_lock(tsk);
20996f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2100b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2101269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
21026f48d0ebSDavid Rientjes 	task_unlock(tsk);
2103b26e517aSFeng Tang 
21046f48d0ebSDavid Rientjes 	return ret;
21056f48d0ebSDavid Rientjes }
21066f48d0ebSDavid Rientjes 
21071da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
21081da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2109662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2110662f3a0bSAndi Kleen 					unsigned nid)
21111da177e4SLinus Torvalds {
21121da177e4SLinus Torvalds 	struct page *page;
21131da177e4SLinus Torvalds 
211484172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
21154518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
21164518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
21174518085eSKemi Wang 		return page;
2118de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2119de55c8b2SAndrey Ryabinin 		preempt_disable();
2120f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2121de55c8b2SAndrey Ryabinin 		preempt_enable();
2122de55c8b2SAndrey Ryabinin 	}
21231da177e4SLinus Torvalds 	return page;
21241da177e4SLinus Torvalds }
21251da177e4SLinus Torvalds 
21264c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
21274c54d949SFeng Tang 						int nid, struct mempolicy *pol)
21284c54d949SFeng Tang {
21294c54d949SFeng Tang 	struct page *page;
21304c54d949SFeng Tang 	gfp_t preferred_gfp;
21314c54d949SFeng Tang 
21324c54d949SFeng Tang 	/*
21334c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
21344c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
21354c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
21364c54d949SFeng Tang 	 * nodes in the system.
21374c54d949SFeng Tang 	 */
21384c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
21394c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
21404c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
21414c54d949SFeng Tang 	if (!page)
2142c0455116SAneesh Kumar K.V 		page = __alloc_pages(gfp, order, nid, NULL);
21434c54d949SFeng Tang 
21444c54d949SFeng Tang 	return page;
21454c54d949SFeng Tang }
21464c54d949SFeng Tang 
21471da177e4SLinus Torvalds /**
2148adf88aa8SMatthew Wilcox (Oracle)  * vma_alloc_folio - Allocate a folio for a VMA.
2149eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
2150adf88aa8SMatthew Wilcox (Oracle)  * @order: Order of the folio.
21511da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2152eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2153eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
21541da177e4SLinus Torvalds  *
2155adf88aa8SMatthew Wilcox (Oracle)  * Allocate a folio for a specific address in @vma, using the appropriate
2156eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2157eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2158adf88aa8SMatthew Wilcox (Oracle)  * used for all allocations for folios that will be mapped into user space.
2159eb350739SMatthew Wilcox (Oracle)  *
2160adf88aa8SMatthew Wilcox (Oracle)  * Return: The folio on success or NULL if allocation fails.
21611da177e4SLinus Torvalds  */
2162adf88aa8SMatthew Wilcox (Oracle) struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
2163be1a13ebSMichal Hocko 		unsigned long addr, bool hugepage)
21641da177e4SLinus Torvalds {
2165cc9a6c87SMel Gorman 	struct mempolicy *pol;
2166be1a13ebSMichal Hocko 	int node = numa_node_id();
2167adf88aa8SMatthew Wilcox (Oracle) 	struct folio *folio;
216804ec6264SVlastimil Babka 	int preferred_nid;
2169be97a41bSVlastimil Babka 	nodemask_t *nmask;
21701da177e4SLinus Torvalds 
2171dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2172cc9a6c87SMel Gorman 
2173be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
2174adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
21751da177e4SLinus Torvalds 		unsigned nid;
21765da7ca86SChristoph Lameter 
21778eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
217852cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
2179adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21800bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2181adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2182adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2183adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
2184be97a41bSVlastimil Babka 		goto out;
21851da177e4SLinus Torvalds 	}
21861da177e4SLinus Torvalds 
21874c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
2188adf88aa8SMatthew Wilcox (Oracle) 		struct page *page;
2189adf88aa8SMatthew Wilcox (Oracle) 
2190c0455116SAneesh Kumar K.V 		node = policy_node(gfp, pol, node);
2191adf88aa8SMatthew Wilcox (Oracle) 		gfp |= __GFP_COMP;
21924c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21934c54d949SFeng Tang 		mpol_cond_put(pol);
2194adf88aa8SMatthew Wilcox (Oracle) 		if (page && order > 1)
2195adf88aa8SMatthew Wilcox (Oracle) 			prep_transhuge_page(page);
2196adf88aa8SMatthew Wilcox (Oracle) 		folio = (struct folio *)page;
21974c54d949SFeng Tang 		goto out;
21984c54d949SFeng Tang 	}
21994c54d949SFeng Tang 
220019deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
220119deb769SDavid Rientjes 		int hpage_node = node;
220219deb769SDavid Rientjes 
220319deb769SDavid Rientjes 		/*
220419deb769SDavid Rientjes 		 * For hugepage allocation with a non-interleave policy that
220519deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
220619deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
220719deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
220819deb769SDavid Rientjes 		 * remote accesses would likely offset the THP benefits.
220919deb769SDavid Rientjes 		 *
2210b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
221119deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
221219deb769SDavid Rientjes 		 */
22137858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2214269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
221519deb769SDavid Rientjes 
221619deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
221719deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
221819deb769SDavid Rientjes 			mpol_cond_put(pol);
2219cc638f32SVlastimil Babka 			/*
2220cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2221cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2222cc638f32SVlastimil Babka 			 */
2223adf88aa8SMatthew Wilcox (Oracle) 			folio = __folio_alloc_node(gfp | __GFP_THISNODE |
2224adf88aa8SMatthew Wilcox (Oracle) 					__GFP_NORETRY, order, hpage_node);
222576e654ccSDavid Rientjes 
222676e654ccSDavid Rientjes 			/*
222776e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
222876e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
222976e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2230cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
223176e654ccSDavid Rientjes 			 */
2232adf88aa8SMatthew Wilcox (Oracle) 			if (!folio && (gfp & __GFP_DIRECT_RECLAIM))
2233adf88aa8SMatthew Wilcox (Oracle) 				folio = __folio_alloc(gfp, order, hpage_node,
2234adf88aa8SMatthew Wilcox (Oracle) 						      nmask);
223576e654ccSDavid Rientjes 
223619deb769SDavid Rientjes 			goto out;
223719deb769SDavid Rientjes 		}
223819deb769SDavid Rientjes 	}
223919deb769SDavid Rientjes 
2240077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
224104ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
2242adf88aa8SMatthew Wilcox (Oracle) 	folio = __folio_alloc(gfp, order, preferred_nid, nmask);
2243d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2244be97a41bSVlastimil Babka out:
2245f584b680SMatthew Wilcox (Oracle) 	return folio;
2246f584b680SMatthew Wilcox (Oracle) }
2247adf88aa8SMatthew Wilcox (Oracle) EXPORT_SYMBOL(vma_alloc_folio);
2248f584b680SMatthew Wilcox (Oracle) 
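/*
 * A minimal sketch of how a page-fault-path caller might use the
 * vma_alloc_folio() API above for an order-0 anonymous folio.  The helper
 * name example_alloc_anon_folio() and its calling context are hypothetical;
 * only the vma_alloc_folio() signature and locking rule come from this file.
 */
static inline struct folio *example_alloc_anon_folio(struct vm_area_struct *vma,
						     unsigned long addr)
{
	/* The caller must hold the mmap_lock of vma->vm_mm, per the kernel-doc. */
	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}
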
22491da177e4SLinus Torvalds /**
2250d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
22516421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
22526421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
22531da177e4SLinus Torvalds  *
22546421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
22556421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (eg an order-3 allocation will be aligned
22566421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
22576421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
22581da177e4SLinus Torvalds  *
22596421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
22606421ec76SMatthew Wilcox (Oracle)  * flags are used.
22616421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
22621da177e4SLinus Torvalds  */
2263d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
22641da177e4SLinus Torvalds {
22658d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2266c0ff7453SMiao Xie 	struct page *page;
22671da177e4SLinus Torvalds 
22688d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
22698d90274bSOleg Nesterov 		pol = get_task_policy(current);
227052cd3b07SLee Schermerhorn 
227152cd3b07SLee Schermerhorn 	/*
227252cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
227352cd3b07SLee Schermerhorn 	 * nor system default_policy
227452cd3b07SLee Schermerhorn 	 */
227545c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2276c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
22774c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
22784c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
2279c0455116SAneesh Kumar K.V 				  policy_node(gfp, pol, numa_node_id()), pol);
2280c0ff7453SMiao Xie 	else
228184172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
228204ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
22835c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2284cc9a6c87SMel Gorman 
2285c0ff7453SMiao Xie 	return page;
22861da177e4SLinus Torvalds }
2287d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
22881da177e4SLinus Torvalds 
2289cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2290cc09cb13SMatthew Wilcox (Oracle) {
2291cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2292cc09cb13SMatthew Wilcox (Oracle) 
2293cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2294cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2295cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2296cc09cb13SMatthew Wilcox (Oracle) }
2297cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
2298cc09cb13SMatthew Wilcox (Oracle) 
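/*
 * A minimal sketch of a typical alloc_pages() caller: grab an order-2
 * (four page, physically contiguous) buffer and return its kernel virtual
 * address.  The helper name example_alloc_contig_buffer() is hypothetical.
 */
static inline void *example_alloc_contig_buffer(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	/* Release later with __free_pages(virt_to_page(addr), 2). */
	return page_address(page);
}
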
2299c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp,
2300c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2301c00b6b96SChen Wandun 		struct page **page_array)
2302c00b6b96SChen Wandun {
2303c00b6b96SChen Wandun 	int nodes;
2304c00b6b96SChen Wandun 	unsigned long nr_pages_per_node;
2305c00b6b96SChen Wandun 	int delta;
2306c00b6b96SChen Wandun 	int i;
2307c00b6b96SChen Wandun 	unsigned long nr_allocated;
2308c00b6b96SChen Wandun 	unsigned long total_allocated = 0;
2309c00b6b96SChen Wandun 
2310c00b6b96SChen Wandun 	nodes = nodes_weight(pol->nodes);
2311c00b6b96SChen Wandun 	nr_pages_per_node = nr_pages / nodes;
2312c00b6b96SChen Wandun 	delta = nr_pages - nodes * nr_pages_per_node;
2313c00b6b96SChen Wandun 
2314c00b6b96SChen Wandun 	for (i = 0; i < nodes; i++) {
2315c00b6b96SChen Wandun 		if (delta) {
2316c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2317c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2318c00b6b96SChen Wandun 					nr_pages_per_node + 1, NULL,
2319c00b6b96SChen Wandun 					page_array);
2320c00b6b96SChen Wandun 			delta--;
2321c00b6b96SChen Wandun 		} else {
2322c00b6b96SChen Wandun 			nr_allocated = __alloc_pages_bulk(gfp,
2323c00b6b96SChen Wandun 					interleave_nodes(pol), NULL,
2324c00b6b96SChen Wandun 					nr_pages_per_node, NULL, page_array);
2325c00b6b96SChen Wandun 		}
2326c00b6b96SChen Wandun 
2327c00b6b96SChen Wandun 		page_array += nr_allocated;
2328c00b6b96SChen Wandun 		total_allocated += nr_allocated;
2329c00b6b96SChen Wandun 	}
2330c00b6b96SChen Wandun 
2331c00b6b96SChen Wandun 	return total_allocated;
2332c00b6b96SChen Wandun }
2333c00b6b96SChen Wandun 
2334c00b6b96SChen Wandun static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid,
2335c00b6b96SChen Wandun 		struct mempolicy *pol, unsigned long nr_pages,
2336c00b6b96SChen Wandun 		struct page **page_array)
2337c00b6b96SChen Wandun {
2338c00b6b96SChen Wandun 	gfp_t preferred_gfp;
2339c00b6b96SChen Wandun 	unsigned long nr_allocated = 0;
2340c00b6b96SChen Wandun 
2341c00b6b96SChen Wandun 	preferred_gfp = gfp | __GFP_NOWARN;
2342c00b6b96SChen Wandun 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2343c00b6b96SChen Wandun 
2344c00b6b96SChen Wandun 	nr_allocated  = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2345c00b6b96SChen Wandun 					   nr_pages, NULL, page_array);
2346c00b6b96SChen Wandun 
2347c00b6b96SChen Wandun 	if (nr_allocated < nr_pages)
2348c00b6b96SChen Wandun 		nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL,
2349c00b6b96SChen Wandun 				nr_pages - nr_allocated, NULL,
2350c00b6b96SChen Wandun 				page_array + nr_allocated);
2351c00b6b96SChen Wandun 	return nr_allocated;
2352c00b6b96SChen Wandun }
2353c00b6b96SChen Wandun 
2354c00b6b96SChen Wandun /* Bulk page allocation and the mempolicy need to be considered at the
2355c00b6b96SChen Wandun  * same time in some situations, such as vmalloc.
2356c00b6b96SChen Wandun  *
2357c00b6b96SChen Wandun  * Doing so can accelerate memory allocation, especially for
2358c00b6b96SChen Wandun  * interleaved allocations.
2359c00b6b96SChen Wandun  */
2360c00b6b96SChen Wandun unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
2361c00b6b96SChen Wandun 		unsigned long nr_pages, struct page **page_array)
2362c00b6b96SChen Wandun {
2363c00b6b96SChen Wandun 	struct mempolicy *pol = &default_policy;
2364c00b6b96SChen Wandun 
2365c00b6b96SChen Wandun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2366c00b6b96SChen Wandun 		pol = get_task_policy(current);
2367c00b6b96SChen Wandun 
2368c00b6b96SChen Wandun 	if (pol->mode == MPOL_INTERLEAVE)
2369c00b6b96SChen Wandun 		return alloc_pages_bulk_array_interleave(gfp, pol,
2370c00b6b96SChen Wandun 							 nr_pages, page_array);
2371c00b6b96SChen Wandun 
2372c00b6b96SChen Wandun 	if (pol->mode == MPOL_PREFERRED_MANY)
2373c00b6b96SChen Wandun 		return alloc_pages_bulk_array_preferred_many(gfp,
2374c00b6b96SChen Wandun 				numa_node_id(), pol, nr_pages, page_array);
2375c00b6b96SChen Wandun 
2376c00b6b96SChen Wandun 	return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2377c00b6b96SChen Wandun 				  policy_nodemask(gfp, pol), nr_pages, NULL,
2378c00b6b96SChen Wandun 				  page_array);
2379c00b6b96SChen Wandun }
2380c00b6b96SChen Wandun 
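/*
 * A minimal sketch of a vmalloc-style user of the bulk interface above:
 * fill a zeroed array of page pointers in one call.  The helper name
 * example_bulk_alloc() and its error handling are hypothetical.
 */
static inline unsigned long example_bulk_alloc(unsigned long nr_pages,
					       struct page **page_array)
{
	/* @page_array must hold nr_pages zeroed slots, e.g. from kcalloc(). */
	return alloc_pages_bulk_array_mempolicy(GFP_KERNEL, nr_pages,
						page_array);
}
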
2381ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2382ef0855d3SOleg Nesterov {
2383ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2384ef0855d3SOleg Nesterov 
2385ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2386ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2387ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2388ef0855d3SOleg Nesterov 	return 0;
2389ef0855d3SOleg Nesterov }
2390ef0855d3SOleg Nesterov 
23914225399aSPaul Jackson /*
2392846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
23934225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
23944225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
23954225399aSPaul Jackson  * keeps mempolicies cpuset-relative after their cpuset moves.  See
23964225399aSPaul Jackson  * also update_nodemask() in kernel/cpuset.c.
2397708c1bbcSMiao Xie  *
2398708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2399708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
24004225399aSPaul Jackson  */
24014225399aSPaul Jackson 
2402846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2403846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
24041da177e4SLinus Torvalds {
24051da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
24061da177e4SLinus Torvalds 
24071da177e4SLinus Torvalds 	if (!new)
24081da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2409708c1bbcSMiao Xie 
2410708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2411708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2412708c1bbcSMiao Xie 		task_lock(current);
2413708c1bbcSMiao Xie 		*new = *old;
2414708c1bbcSMiao Xie 		task_unlock(current);
2415708c1bbcSMiao Xie 	} else
2416708c1bbcSMiao Xie 		*new = *old;
2417708c1bbcSMiao Xie 
24184225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
24194225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2420213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
24214225399aSPaul Jackson 	}
24221da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
24231da177e4SLinus Torvalds 	return new;
24241da177e4SLinus Torvalds }
24251da177e4SLinus Torvalds 
24261da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2427fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
24281da177e4SLinus Torvalds {
24291da177e4SLinus Torvalds 	if (!a || !b)
2430fcfb4dccSKOSAKI Motohiro 		return false;
243145c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2432fcfb4dccSKOSAKI Motohiro 		return false;
243319800502SBob Liu 	if (a->flags != b->flags)
2434fcfb4dccSKOSAKI Motohiro 		return false;
2435c6018b4bSAneesh Kumar K.V 	if (a->home_node != b->home_node)
2436c6018b4bSAneesh Kumar K.V 		return false;
243719800502SBob Liu 	if (mpol_store_user_nodemask(a))
243819800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2439fcfb4dccSKOSAKI Motohiro 			return false;
244019800502SBob Liu 
244145c4745aSLee Schermerhorn 	switch (a->mode) {
244219770b32SMel Gorman 	case MPOL_BIND:
24431da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
24441da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2445b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2446269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
24477858d7bcSFeng Tang 	case MPOL_LOCAL:
24487858d7bcSFeng Tang 		return true;
24491da177e4SLinus Torvalds 	default:
24501da177e4SLinus Torvalds 		BUG();
2451fcfb4dccSKOSAKI Motohiro 		return false;
24521da177e4SLinus Torvalds 	}
24531da177e4SLinus Torvalds }
24541da177e4SLinus Torvalds 
24551da177e4SLinus Torvalds /*
24561da177e4SLinus Torvalds  * Shared memory backing store policy support.
24571da177e4SLinus Torvalds  *
24581da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
24591da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
24604a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
24611da177e4SLinus Torvalds  * for any accesses to the tree.
24621da177e4SLinus Torvalds  */
24631da177e4SLinus Torvalds 
24644a8c7bb5SNathan Zimmer /*
24654a8c7bb5SNathan Zimmer  * lookup first element intersecting start-end.  Caller holds sp->lock for
24664a8c7bb5SNathan Zimmer  * reading or for writing
24674a8c7bb5SNathan Zimmer  */
24681da177e4SLinus Torvalds static struct sp_node *
24691da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
24701da177e4SLinus Torvalds {
24711da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
24721da177e4SLinus Torvalds 
24731da177e4SLinus Torvalds 	while (n) {
24741da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
24751da177e4SLinus Torvalds 
24761da177e4SLinus Torvalds 		if (start >= p->end)
24771da177e4SLinus Torvalds 			n = n->rb_right;
24781da177e4SLinus Torvalds 		else if (end <= p->start)
24791da177e4SLinus Torvalds 			n = n->rb_left;
24801da177e4SLinus Torvalds 		else
24811da177e4SLinus Torvalds 			break;
24821da177e4SLinus Torvalds 	}
24831da177e4SLinus Torvalds 	if (!n)
24841da177e4SLinus Torvalds 		return NULL;
24851da177e4SLinus Torvalds 	for (;;) {
24861da177e4SLinus Torvalds 		struct sp_node *w = NULL;
24871da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
24881da177e4SLinus Torvalds 		if (!prev)
24891da177e4SLinus Torvalds 			break;
24901da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
24911da177e4SLinus Torvalds 		if (w->end <= start)
24921da177e4SLinus Torvalds 			break;
24931da177e4SLinus Torvalds 		n = prev;
24941da177e4SLinus Torvalds 	}
24951da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
24961da177e4SLinus Torvalds }
24971da177e4SLinus Torvalds 
24984a8c7bb5SNathan Zimmer /*
24994a8c7bb5SNathan Zimmer  * Insert a new shared policy into the tree.  Caller holds sp->lock for
25004a8c7bb5SNathan Zimmer  * writing.
25014a8c7bb5SNathan Zimmer  */
25021da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
25031da177e4SLinus Torvalds {
25041da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
25051da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
25061da177e4SLinus Torvalds 	struct sp_node *nd;
25071da177e4SLinus Torvalds 
25081da177e4SLinus Torvalds 	while (*p) {
25091da177e4SLinus Torvalds 		parent = *p;
25101da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
25111da177e4SLinus Torvalds 		if (new->start < nd->start)
25121da177e4SLinus Torvalds 			p = &(*p)->rb_left;
25131da177e4SLinus Torvalds 		else if (new->end > nd->end)
25141da177e4SLinus Torvalds 			p = &(*p)->rb_right;
25151da177e4SLinus Torvalds 		else
25161da177e4SLinus Torvalds 			BUG();
25171da177e4SLinus Torvalds 	}
25181da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
25191da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2520140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
252145c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
25221da177e4SLinus Torvalds }
25231da177e4SLinus Torvalds 
25241da177e4SLinus Torvalds /* Find shared policy intersecting idx */
25251da177e4SLinus Torvalds struct mempolicy *
25261da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
25271da177e4SLinus Torvalds {
25281da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
25291da177e4SLinus Torvalds 	struct sp_node *sn;
25301da177e4SLinus Torvalds 
25311da177e4SLinus Torvalds 	if (!sp->root.rb_node)
25321da177e4SLinus Torvalds 		return NULL;
25334a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
25341da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
25351da177e4SLinus Torvalds 	if (sn) {
25361da177e4SLinus Torvalds 		mpol_get(sn->policy);
25371da177e4SLinus Torvalds 		pol = sn->policy;
25381da177e4SLinus Torvalds 	}
25394a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
25401da177e4SLinus Torvalds 	return pol;
25411da177e4SLinus Torvalds }
25421da177e4SLinus Torvalds 
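/*
 * A minimal sketch of a shmem-style consumer of the shared policy tree:
 * look up the policy for a file offset and drop the reference the lookup
 * took.  The helper name example_has_shared_policy() is hypothetical.
 */
static inline bool example_has_shared_policy(struct shared_policy *sp,
					     pgoff_t index)
{
	struct mempolicy *mpol = mpol_shared_policy_lookup(sp, index);

	/* Policies in the tree carry MPOL_F_SHARED, so this drops the ref. */
	mpol_cond_put(mpol);
	return mpol != NULL;
}
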
254363f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
254463f74ca2SKOSAKI Motohiro {
254563f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
254663f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
254763f74ca2SKOSAKI Motohiro }
254863f74ca2SKOSAKI Motohiro 
2549771fb4d8SLee Schermerhorn /**
2550771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2551771fb4d8SLee Schermerhorn  *
2552b46e14acSFabian Frederick  * @page: page to be checked
2553b46e14acSFabian Frederick  * @vma: vm area where page mapped
2554b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2555771fb4d8SLee Schermerhorn  *
2556771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and "compare" it to the
25575f076944SMatthew Wilcox (Oracle)  * page's node id.  Policy determination "mimics" alloc_page_vma().
2558771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
25595f076944SMatthew Wilcox (Oracle)  *
2560062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2561062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2562771fb4d8SLee Schermerhorn  */
2563771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2564771fb4d8SLee Schermerhorn {
2565771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2566c33d6c06SMel Gorman 	struct zoneref *z;
2567771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2568771fb4d8SLee Schermerhorn 	unsigned long pgoff;
256990572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
257090572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
257198fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2572062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2573771fb4d8SLee Schermerhorn 
2574dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2575771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2576771fb4d8SLee Schermerhorn 		goto out;
2577771fb4d8SLee Schermerhorn 
2578771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2579771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2580771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2581771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
258298c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2583771fb4d8SLee Schermerhorn 		break;
2584771fb4d8SLee Schermerhorn 
2585771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2586b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2587b27abaccSDave Hansen 			goto out;
2588269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2589771fb4d8SLee Schermerhorn 		break;
2590771fb4d8SLee Schermerhorn 
25917858d7bcSFeng Tang 	case MPOL_LOCAL:
25927858d7bcSFeng Tang 		polnid = numa_node_id();
25937858d7bcSFeng Tang 		break;
25947858d7bcSFeng Tang 
2595771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2596bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2597bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2598269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2599bda420b9SHuang Ying 				break;
2600bda420b9SHuang Ying 			goto out;
2601bda420b9SHuang Ying 		}
2602b27abaccSDave Hansen 		fallthrough;
2603c33d6c06SMel Gorman 
2604b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2605771fb4d8SLee Schermerhorn 		/*
2606771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2607771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2608771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2609771fb4d8SLee Schermerhorn 		 */
2610269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2611771fb4d8SLee Schermerhorn 			goto out;
2612c33d6c06SMel Gorman 		z = first_zones_zonelist(
2613771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2614771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2615269fbe72SBen Widawsky 				&pol->nodes);
2616c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2617771fb4d8SLee Schermerhorn 		break;
2618771fb4d8SLee Schermerhorn 
2619771fb4d8SLee Schermerhorn 	default:
2620771fb4d8SLee Schermerhorn 		BUG();
2621771fb4d8SLee Schermerhorn 	}
26225606e387SMel Gorman 
26235606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2624e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
262590572890SPeter Zijlstra 		polnid = thisnid;
26265606e387SMel Gorman 
262710f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2628de1c9ce6SRik van Riel 			goto out;
2629de1c9ce6SRik van Riel 	}
2630e42c8ff2SMel Gorman 
2631771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2632771fb4d8SLee Schermerhorn 		ret = polnid;
2633771fb4d8SLee Schermerhorn out:
2634771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2635771fb4d8SLee Schermerhorn 
2636771fb4d8SLee Schermerhorn 	return ret;
2637771fb4d8SLee Schermerhorn }
2638771fb4d8SLee Schermerhorn 
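/*
 * A minimal sketch of the shape of a NUMA-hinting-fault caller of
 * mpol_misplaced().  The helper name example_numa_hint_fault() is
 * hypothetical, and the use of migrate_misplaced_page() assumes that
 * API is available to the caller.
 */
static inline void example_numa_hint_fault(struct page *page,
					   struct vm_area_struct *vma,
					   unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid == NUMA_NO_NODE)
		return;		/* page is already on an acceptable node */

	/* Otherwise try to move the page towards the referencing node. */
	migrate_misplaced_page(page, vma, target_nid);
}
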
2639c11600e4SDavid Rientjes /*
2640c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2641c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2642c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2643c11600e4SDavid Rientjes  * policy.
2644c11600e4SDavid Rientjes  */
2645c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2646c11600e4SDavid Rientjes {
2647c11600e4SDavid Rientjes 	struct mempolicy *pol;
2648c11600e4SDavid Rientjes 
2649c11600e4SDavid Rientjes 	task_lock(task);
2650c11600e4SDavid Rientjes 	pol = task->mempolicy;
2651c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2652c11600e4SDavid Rientjes 	task_unlock(task);
2653c11600e4SDavid Rientjes 	mpol_put(pol);
2654c11600e4SDavid Rientjes }
2655c11600e4SDavid Rientjes 
26561da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
26571da177e4SLinus Torvalds {
2658140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
26591da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
266063f74ca2SKOSAKI Motohiro 	sp_free(n);
26611da177e4SLinus Torvalds }
26621da177e4SLinus Torvalds 
266342288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
266442288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
266542288fe3SMel Gorman {
266642288fe3SMel Gorman 	node->start = start;
266742288fe3SMel Gorman 	node->end = end;
266842288fe3SMel Gorman 	node->policy = pol;
266942288fe3SMel Gorman }
267042288fe3SMel Gorman 
2671dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2672dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
26731da177e4SLinus Torvalds {
2674869833f2SKOSAKI Motohiro 	struct sp_node *n;
2675869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
26761da177e4SLinus Torvalds 
2677869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
26781da177e4SLinus Torvalds 	if (!n)
26791da177e4SLinus Torvalds 		return NULL;
2680869833f2SKOSAKI Motohiro 
2681869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2682869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2683869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2684869833f2SKOSAKI Motohiro 		return NULL;
2685869833f2SKOSAKI Motohiro 	}
2686869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
268742288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2688869833f2SKOSAKI Motohiro 
26891da177e4SLinus Torvalds 	return n;
26901da177e4SLinus Torvalds }
26911da177e4SLinus Torvalds 
26921da177e4SLinus Torvalds /* Replace a policy range. */
26931da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
26941da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
26951da177e4SLinus Torvalds {
2696b22d127aSMel Gorman 	struct sp_node *n;
269742288fe3SMel Gorman 	struct sp_node *n_new = NULL;
269842288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2699b22d127aSMel Gorman 	int ret = 0;
27001da177e4SLinus Torvalds 
270142288fe3SMel Gorman restart:
27024a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
27031da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
27041da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
27051da177e4SLinus Torvalds 	while (n && n->start < end) {
27061da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
27071da177e4SLinus Torvalds 		if (n->start >= start) {
27081da177e4SLinus Torvalds 			if (n->end <= end)
27091da177e4SLinus Torvalds 				sp_delete(sp, n);
27101da177e4SLinus Torvalds 			else
27111da177e4SLinus Torvalds 				n->start = end;
27121da177e4SLinus Torvalds 		} else {
27131da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
27141da177e4SLinus Torvalds 			if (n->end > end) {
271542288fe3SMel Gorman 				if (!n_new)
271642288fe3SMel Gorman 					goto alloc_new;
271742288fe3SMel Gorman 
271842288fe3SMel Gorman 				*mpol_new = *n->policy;
271942288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
27207880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
27211da177e4SLinus Torvalds 				n->end = start;
27225ca39575SHillf Danton 				sp_insert(sp, n_new);
272342288fe3SMel Gorman 				n_new = NULL;
272442288fe3SMel Gorman 				mpol_new = NULL;
27251da177e4SLinus Torvalds 				break;
27261da177e4SLinus Torvalds 			} else
27271da177e4SLinus Torvalds 				n->end = start;
27281da177e4SLinus Torvalds 		}
27291da177e4SLinus Torvalds 		if (!next)
27301da177e4SLinus Torvalds 			break;
27311da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
27321da177e4SLinus Torvalds 	}
27331da177e4SLinus Torvalds 	if (new)
27341da177e4SLinus Torvalds 		sp_insert(sp, new);
27354a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
273642288fe3SMel Gorman 	ret = 0;
273742288fe3SMel Gorman 
273842288fe3SMel Gorman err_out:
273942288fe3SMel Gorman 	if (mpol_new)
274042288fe3SMel Gorman 		mpol_put(mpol_new);
274142288fe3SMel Gorman 	if (n_new)
274242288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
274342288fe3SMel Gorman 
2744b22d127aSMel Gorman 	return ret;
274542288fe3SMel Gorman 
274642288fe3SMel Gorman alloc_new:
27474a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
274842288fe3SMel Gorman 	ret = -ENOMEM;
274942288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
275042288fe3SMel Gorman 	if (!n_new)
275142288fe3SMel Gorman 		goto err_out;
275242288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275342288fe3SMel Gorman 	if (!mpol_new)
275442288fe3SMel Gorman 		goto err_out;
27554ad09955SMiaohe Lin 	atomic_set(&mpol_new->refcnt, 1);
275642288fe3SMel Gorman 	goto restart;
27571da177e4SLinus Torvalds }
27581da177e4SLinus Torvalds 
275971fe804bSLee Schermerhorn /**
276071fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
276171fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
276271fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
276371fe804bSLee Schermerhorn  *
276471fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
276571fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
276671fe804bSLee Schermerhorn  * This must be released on exit.
27674bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode() calls, so we can use GFP_KERNEL.
276871fe804bSLee Schermerhorn  */
276971fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
27707339ff83SRobin Holt {
277158568d2aSMiao Xie 	int ret;
277258568d2aSMiao Xie 
277371fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
27744a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
27757339ff83SRobin Holt 
277671fe804bSLee Schermerhorn 	if (mpol) {
27777339ff83SRobin Holt 		struct vm_area_struct pvma;
277871fe804bSLee Schermerhorn 		struct mempolicy *new;
27794bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
27807339ff83SRobin Holt 
27814bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
27825c0c1654SLee Schermerhorn 			goto put_mpol;
278371fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
278471fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
278515d77835SLee Schermerhorn 		if (IS_ERR(new))
27860cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
278758568d2aSMiao Xie 
278858568d2aSMiao Xie 		task_lock(current);
27894bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
279058568d2aSMiao Xie 		task_unlock(current);
279115d77835SLee Schermerhorn 		if (ret)
27925c0c1654SLee Schermerhorn 			goto put_new;
279371fe804bSLee Schermerhorn 
279471fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
27952c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
279671fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
279771fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
279815d77835SLee Schermerhorn 
27995c0c1654SLee Schermerhorn put_new:
280071fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
28010cae3457SDan Carpenter free_scratch:
28024bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
28035c0c1654SLee Schermerhorn put_mpol:
28045c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
28057339ff83SRobin Holt 	}
28067339ff83SRobin Holt }
28077339ff83SRobin Holt 
28081da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
28091da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
28101da177e4SLinus Torvalds {
28111da177e4SLinus Torvalds 	int err;
28121da177e4SLinus Torvalds 	struct sp_node *new = NULL;
28131da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
28141da177e4SLinus Torvalds 
2815028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
28161da177e4SLinus Torvalds 		 vma->vm_pgoff,
281745c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2818028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2819269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
28201da177e4SLinus Torvalds 
28211da177e4SLinus Torvalds 	if (npol) {
28221da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
28231da177e4SLinus Torvalds 		if (!new)
28241da177e4SLinus Torvalds 			return -ENOMEM;
28251da177e4SLinus Torvalds 	}
28261da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
28271da177e4SLinus Torvalds 	if (err && new)
282863f74ca2SKOSAKI Motohiro 		sp_free(new);
28291da177e4SLinus Torvalds 	return err;
28301da177e4SLinus Torvalds }
28311da177e4SLinus Torvalds 
28321da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
28331da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
28341da177e4SLinus Torvalds {
28351da177e4SLinus Torvalds 	struct sp_node *n;
28361da177e4SLinus Torvalds 	struct rb_node *next;
28371da177e4SLinus Torvalds 
28381da177e4SLinus Torvalds 	if (!p->root.rb_node)
28391da177e4SLinus Torvalds 		return;
28404a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
28411da177e4SLinus Torvalds 	next = rb_first(&p->root);
28421da177e4SLinus Torvalds 	while (next) {
28431da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
28441da177e4SLinus Torvalds 		next = rb_next(&n->nd);
284563f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
28461da177e4SLinus Torvalds 	}
28474a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
28481da177e4SLinus Torvalds }
28491da177e4SLinus Torvalds 
28501a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2851c297663cSMel Gorman static int __initdata numabalancing_override;
28521a687c2eSMel Gorman 
28531a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
28541a687c2eSMel Gorman {
28551a687c2eSMel Gorman 	bool numabalancing_default = false;
28561a687c2eSMel Gorman 
28571a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
28581a687c2eSMel Gorman 		numabalancing_default = true;
28591a687c2eSMel Gorman 
2860c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2861c297663cSMel Gorman 	if (numabalancing_override)
2862c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2863c297663cSMel Gorman 
2864b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2865756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2866c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
28671a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
28681a687c2eSMel Gorman 	}
28691a687c2eSMel Gorman }
28701a687c2eSMel Gorman 
28711a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
28721a687c2eSMel Gorman {
28731a687c2eSMel Gorman 	int ret = 0;
28741a687c2eSMel Gorman 	if (!str)
28751a687c2eSMel Gorman 		goto out;
28761a687c2eSMel Gorman 
28771a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2878c297663cSMel Gorman 		numabalancing_override = 1;
28791a687c2eSMel Gorman 		ret = 1;
28801a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2881c297663cSMel Gorman 		numabalancing_override = -1;
28821a687c2eSMel Gorman 		ret = 1;
28831a687c2eSMel Gorman 	}
28841a687c2eSMel Gorman out:
28851a687c2eSMel Gorman 	if (!ret)
28864a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
28871a687c2eSMel Gorman 
28881a687c2eSMel Gorman 	return ret;
28891a687c2eSMel Gorman }
28901a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
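
/*
 * The handler above consumes a boot command line option such as
 *
 *	numa_balancing=disable
 *
 * "enable" and "disable" are the only values it accepts; anything else
 * produces the "Unable to parse" warning.
 */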
28911a687c2eSMel Gorman #else
28921a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
28931a687c2eSMel Gorman {
28941a687c2eSMel Gorman }
28951a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
28961a687c2eSMel Gorman 
28971da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
28981da177e4SLinus Torvalds void __init numa_policy_init(void)
28991da177e4SLinus Torvalds {
2900b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2901b71636e2SPaul Mundt 	unsigned long largest = 0;
2902b71636e2SPaul Mundt 	int nid, prefer = 0;
2903b71636e2SPaul Mundt 
29041da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
29051da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
290620c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
29071da177e4SLinus Torvalds 
29081da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
29091da177e4SLinus Torvalds 				     sizeof(struct sp_node),
291020c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
29111da177e4SLinus Torvalds 
29125606e387SMel Gorman 	for_each_node(nid) {
29135606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
29145606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
29155606e387SMel Gorman 			.mode = MPOL_PREFERRED,
29165606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2917269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
29185606e387SMel Gorman 		};
29195606e387SMel Gorman 	}
29205606e387SMel Gorman 
2921b71636e2SPaul Mundt 	/*
2922b71636e2SPaul Mundt 	 * Set an interleave policy for system init. Interleaving is only
2923b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), falling
2924b71636e2SPaul Mundt 	 * back to the largest node if they're all smaller.
2925b71636e2SPaul Mundt 	 */
2926b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
292701f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2928b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
29291da177e4SLinus Torvalds 
2930b71636e2SPaul Mundt 		/* Preserve the largest node */
2931b71636e2SPaul Mundt 		if (largest < total_pages) {
2932b71636e2SPaul Mundt 			largest = total_pages;
2933b71636e2SPaul Mundt 			prefer = nid;
2934b71636e2SPaul Mundt 		}
2935b71636e2SPaul Mundt 
2936b71636e2SPaul Mundt 		/* Interleave this node? */
2937b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2938b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2939b71636e2SPaul Mundt 	}
2940b71636e2SPaul Mundt 
2941b71636e2SPaul Mundt 	/* All too small, use the largest */
2942b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2943b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2944b71636e2SPaul Mundt 
2945028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2946b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
29471a687c2eSMel Gorman 
29481a687c2eSMel Gorman 	check_numabalancing_enable();
29491da177e4SLinus Torvalds }
29501da177e4SLinus Torvalds 
29518bccd85fSChristoph Lameter /* Reset policy of current process to default */
29521da177e4SLinus Torvalds void numa_default_policy(void)
29531da177e4SLinus Torvalds {
2954028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
29551da177e4SLinus Torvalds }
295668860ec1SPaul Jackson 
29574225399aSPaul Jackson /*
2958095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2959095f1fc4SLee Schermerhorn  */
2960095f1fc4SLee Schermerhorn 
2961345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2962345ace9cSLee Schermerhorn {
2963345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2964345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2965345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2966345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2967d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2968b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2969345ace9cSLee Schermerhorn };
29701a75a6c8SChristoph Lameter 
2971095f1fc4SLee Schermerhorn 
2972095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2973095f1fc4SLee Schermerhorn /**
2974f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2975095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
297671fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2977095f1fc4SLee Schermerhorn  *
2978095f1fc4SLee Schermerhorn  * Format of input:
2979095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2980095f1fc4SLee Schermerhorn  *
2981dad5b023SRandy Dunlap  * Return: %0 on success, else %1
2982095f1fc4SLee Schermerhorn  */
2983a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2984095f1fc4SLee Schermerhorn {
298571fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2986f2a07f40SHugh Dickins 	unsigned short mode_flags;
298771fe804bSLee Schermerhorn 	nodemask_t nodes;
2988095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2989095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2990dedf2c73Szhong jiang 	int err = 1, mode;
2991095f1fc4SLee Schermerhorn 
2992c7a91bc7SDan Carpenter 	if (flags)
2993c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2994c7a91bc7SDan Carpenter 
2995095f1fc4SLee Schermerhorn 	if (nodelist) {
2996095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2997095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
299871fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2999095f1fc4SLee Schermerhorn 			goto out;
300001f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
3001095f1fc4SLee Schermerhorn 			goto out;
300271fe804bSLee Schermerhorn 	} else
300371fe804bSLee Schermerhorn 		nodes_clear(nodes);
300471fe804bSLee Schermerhorn 
3005dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
3006dedf2c73Szhong jiang 	if (mode < 0)
3007095f1fc4SLee Schermerhorn 		goto out;
3008095f1fc4SLee Schermerhorn 
300971fe804bSLee Schermerhorn 	switch (mode) {
3010095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
301171fe804bSLee Schermerhorn 		/*
3012aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only, although later
3013aa9f7d51SRandy Dunlap 		 * we use first_node(nodes) to grab a single node, so here
3014aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
301571fe804bSLee Schermerhorn 		 */
3016095f1fc4SLee Schermerhorn 		if (nodelist) {
3017095f1fc4SLee Schermerhorn 			char *rest = nodelist;
3018095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
3019095f1fc4SLee Schermerhorn 				rest++;
3020926f2ae0SKOSAKI Motohiro 			if (*rest)
3021926f2ae0SKOSAKI Motohiro 				goto out;
3022aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
3023aa9f7d51SRandy Dunlap 				goto out;
3024095f1fc4SLee Schermerhorn 		}
3025095f1fc4SLee Schermerhorn 		break;
3026095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
3027095f1fc4SLee Schermerhorn 		/*
3028095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
3029095f1fc4SLee Schermerhorn 		 */
3030095f1fc4SLee Schermerhorn 		if (!nodelist)
303101f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
30323f226aa1SLee Schermerhorn 		break;
303371fe804bSLee Schermerhorn 	case MPOL_LOCAL:
30343f226aa1SLee Schermerhorn 		/*
303571fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
30363f226aa1SLee Schermerhorn 		 */
303771fe804bSLee Schermerhorn 		if (nodelist)
30383f226aa1SLee Schermerhorn 			goto out;
30393f226aa1SLee Schermerhorn 		break;
3040413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
3041413b43deSRavikiran G Thirumalai 		/*
3042413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
3043413b43deSRavikiran G Thirumalai 		 */
3044413b43deSRavikiran G Thirumalai 		if (!nodelist)
3045413b43deSRavikiran G Thirumalai 			err = 0;
3046413b43deSRavikiran G Thirumalai 		goto out;
3047b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
3048d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
304971fe804bSLee Schermerhorn 		/*
3050d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
305171fe804bSLee Schermerhorn 		 */
3052d69b2e63SKOSAKI Motohiro 		if (!nodelist)
3053d69b2e63SKOSAKI Motohiro 			goto out;
3054095f1fc4SLee Schermerhorn 	}
3055095f1fc4SLee Schermerhorn 
305671fe804bSLee Schermerhorn 	mode_flags = 0;
3057095f1fc4SLee Schermerhorn 	if (flags) {
3058095f1fc4SLee Schermerhorn 		/*
3059095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
3060095f1fc4SLee Schermerhorn 		 * mode flags.
3061095f1fc4SLee Schermerhorn 		 */
3062095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
306371fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
3064095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
306571fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
3066095f1fc4SLee Schermerhorn 		else
3067926f2ae0SKOSAKI Motohiro 			goto out;
3068095f1fc4SLee Schermerhorn 	}
306971fe804bSLee Schermerhorn 
307071fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
307171fe804bSLee Schermerhorn 	if (IS_ERR(new))
3072926f2ae0SKOSAKI Motohiro 		goto out;
3073926f2ae0SKOSAKI Motohiro 
3074f2a07f40SHugh Dickins 	/*
3075f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
3076f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
3077f2a07f40SHugh Dickins 	 */
3078269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
3079269fbe72SBen Widawsky 		new->nodes = nodes;
3080269fbe72SBen Widawsky 	} else if (nodelist) {
3081269fbe72SBen Widawsky 		nodes_clear(new->nodes);
3082269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
3083269fbe72SBen Widawsky 	} else {
30847858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
3085269fbe72SBen Widawsky 	}
3086f2a07f40SHugh Dickins 
3087f2a07f40SHugh Dickins 	/*
3088f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
3089f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
3090f2a07f40SHugh Dickins 	 */
3091e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
3092f2a07f40SHugh Dickins 
3093926f2ae0SKOSAKI Motohiro 	err = 0;
309471fe804bSLee Schermerhorn 
3095095f1fc4SLee Schermerhorn out:
3096095f1fc4SLee Schermerhorn 	/* Restore string for error message */
3097095f1fc4SLee Schermerhorn 	if (nodelist)
3098095f1fc4SLee Schermerhorn 		*--nodelist = ':';
3099095f1fc4SLee Schermerhorn 	if (flags)
3100095f1fc4SLee Schermerhorn 		*--flags = '=';
310171fe804bSLee Schermerhorn 	if (!err)
310271fe804bSLee Schermerhorn 		*mpol = new;
3103095f1fc4SLee Schermerhorn 	return err;
3104095f1fc4SLee Schermerhorn }
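
/*
 * A minimal sketch of parsing a tmpfs-style "mpol=" option string with
 * mpol_parse_str().  The buffer must be writable because the parser
 * NUL-terminates substrings in place; the helper name example_parse_mpol()
 * is hypothetical.
 */
static inline struct mempolicy *example_parse_mpol(void)
{
	char opt[] = "interleave:0-3";	/* <mode>[=<flags>][:<nodelist>] */
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(opt, &mpol))
		return NULL;		/* parse error */

	/* The caller now owns a reference; drop it later with mpol_put(). */
	return mpol;
}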
3105095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
3106095f1fc4SLee Schermerhorn 
310771fe804bSLee Schermerhorn /**
310871fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
310971fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
311071fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
311171fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
311271fe804bSLee Schermerhorn  *
3113948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3114948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3115948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
31161a75a6c8SChristoph Lameter  */
3117948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
31181a75a6c8SChristoph Lameter {
31191a75a6c8SChristoph Lameter 	char *p = buffer;
3120948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
3121948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
3122948927eeSDavid Rientjes 	unsigned short flags = 0;
31231a75a6c8SChristoph Lameter 
31248790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3125bea904d5SLee Schermerhorn 		mode = pol->mode;
3126948927eeSDavid Rientjes 		flags = pol->flags;
3127948927eeSDavid Rientjes 	}
3128bea904d5SLee Schermerhorn 
31291a75a6c8SChristoph Lameter 	switch (mode) {
31301a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
31317858d7bcSFeng Tang 	case MPOL_LOCAL:
31321a75a6c8SChristoph Lameter 		break;
31331a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
3134b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
31351a75a6c8SChristoph Lameter 	case MPOL_BIND:
31361a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
3137269fbe72SBen Widawsky 		nodes = pol->nodes;
31381a75a6c8SChristoph Lameter 		break;
31391a75a6c8SChristoph Lameter 	default:
3140948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
3141948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
3142948927eeSDavid Rientjes 		return;
31431a75a6c8SChristoph Lameter 	}
31441a75a6c8SChristoph Lameter 
3145b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
31461a75a6c8SChristoph Lameter 
3147fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
3148948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
3149f5b087b5SDavid Rientjes 
31502291990aSLee Schermerhorn 		/*
31512291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
31522291990aSLee Schermerhorn 		 */
3153f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
31542291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
31552291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
31562291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
3157f5b087b5SDavid Rientjes 	}
3158f5b087b5SDavid Rientjes 
31599e763e0fSTejun Heo 	if (!nodes_empty(nodes))
31609e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
31619e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
31621a75a6c8SChristoph Lameter }
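
/*
 * A minimal sketch of formatting a policy for a diagnostic message, in the
 * style of the /proc numa_maps consumers of mpol_to_str().  The helper name
 * example_print_mempolicy() and the buffer size are hypothetical.
 */
static inline void example_print_mempolicy(struct mempolicy *pol)
{
	char buf[64];	/* >= 32 recommended by the kernel-doc above */

	mpol_to_str(buf, sizeof(buf), pol);
	pr_info("mempolicy: %s\n", buf);
}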
3163