xref: /openbmc/linux/mm/mempolicy.c (revision cc09cb134124a42fbe3bdcebefdc54e286d8f3e5)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * The following memory policies are supported, per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For the process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind truly restricted
268bccd85fSChristoph Lameter  *                the allocation to the given memory nodes instead.
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred       Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local node. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non-default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
34b27abaccSDave Hansen  * preferred many Try a set of nodes first before normal fallback. This is
35b27abaccSDave Hansen  *                similar to preferred without the special case.
36b27abaccSDave Hansen  *
371da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
381da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
391da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
401da177e4SLinus Torvalds  *
411da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
421da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
431da177e4SLinus Torvalds  * try to allocate on the local node. The VMA policy is only applied to
441da177e4SLinus Torvalds  * memory allocations backing a VMA.
451da177e4SLinus Torvalds  *
461da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
471da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When the process
481da177e4SLinus Torvalds  * policy is used it is not remembered across swap out/swap in.
491da177e4SLinus Torvalds  *
501da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
511da177e4SLinus Torvalds  * requesting a lower zone just use the default policy. This implies that
521da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
531da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
541da177e4SLinus Torvalds  *
551da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
561da177e4SLinus Torvalds  * all users and remembered even when nobody has the memory mapped.
571da177e4SLinus Torvalds  */
581da177e4SLinus Torvalds 
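/*
 * Example (userspace sketch, not part of this file): the policy modes above
 * are selected through the set_mempolicy()/mbind() syscalls.  Assuming the
 * <numaif.h> wrappers from libnuma and an already established mapping
 * buf/len, a hypothetical illustration:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *	unsigned long node0 = 1UL << 0;
 *
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));
 *		(process policy: interleave new allocations across nodes 0-1)
 *
 *	mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0);
 *		(VMA policy: restrict this one mapping to node 0, no fallback)
 */
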
591da177e4SLinus Torvalds /* Notebook:
601da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
611da177e4SLinus Torvalds    object
621da177e4SLinus Torvalds    statistics for bigpages
631da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
641da177e4SLinus Torvalds    first item above.
651da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
661da177e4SLinus Torvalds    grows down?
671da177e4SLinus Torvalds    make bind policy root only? It can trigger OOM much faster and the
681da177e4SLinus Torvalds    kernel does not always handle that gracefully.
691da177e4SLinus Torvalds */
701da177e4SLinus Torvalds 
71b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
72b1de0d13SMitchel Humpherys 
731da177e4SLinus Torvalds #include <linux/mempolicy.h>
74a520110eSChristoph Hellwig #include <linux/pagewalk.h>
751da177e4SLinus Torvalds #include <linux/highmem.h>
761da177e4SLinus Torvalds #include <linux/hugetlb.h>
771da177e4SLinus Torvalds #include <linux/kernel.h>
781da177e4SLinus Torvalds #include <linux/sched.h>
796e84f315SIngo Molnar #include <linux/sched/mm.h>
806a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
81f719ff9bSIngo Molnar #include <linux/sched/task.h>
821da177e4SLinus Torvalds #include <linux/nodemask.h>
831da177e4SLinus Torvalds #include <linux/cpuset.h>
841da177e4SLinus Torvalds #include <linux/slab.h>
851da177e4SLinus Torvalds #include <linux/string.h>
86b95f1b31SPaul Gortmaker #include <linux/export.h>
87b488893aSPavel Emelyanov #include <linux/nsproxy.h>
881da177e4SLinus Torvalds #include <linux/interrupt.h>
891da177e4SLinus Torvalds #include <linux/init.h>
901da177e4SLinus Torvalds #include <linux/compat.h>
9131367466SOtto Ebeling #include <linux/ptrace.h>
92dc9aa5b9SChristoph Lameter #include <linux/swap.h>
931a75a6c8SChristoph Lameter #include <linux/seq_file.h>
941a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
95b20a3503SChristoph Lameter #include <linux/migrate.h>
9662b61f61SHugh Dickins #include <linux/ksm.h>
9795a402c3SChristoph Lameter #include <linux/rmap.h>
9886c3a764SDavid Quigley #include <linux/security.h>
99dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
100095f1fc4SLee Schermerhorn #include <linux/ctype.h>
1016d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
102b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
103b1de0d13SMitchel Humpherys #include <linux/printk.h>
104c8633798SNaoya Horiguchi #include <linux/swapops.h>
105dc9aa5b9SChristoph Lameter 
1061da177e4SLinus Torvalds #include <asm/tlbflush.h>
1077c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1081da177e4SLinus Torvalds 
10962695a84SNick Piggin #include "internal.h"
11062695a84SNick Piggin 
11138e35860SChristoph Lameter /* Internal flags */
112dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
11338e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
114dc9aa5b9SChristoph Lameter 
115fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
116fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1171da177e4SLinus Torvalds 
1181da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1191da177e4SLinus Torvalds    policied. */
1206267276fSChristoph Lameter enum zone_type policy_zone = 0;
1211da177e4SLinus Torvalds 
122bea904d5SLee Schermerhorn /*
123bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
124bea904d5SLee Schermerhorn  */
125e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1261da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
1277858d7bcSFeng Tang 	.mode = MPOL_LOCAL,
1281da177e4SLinus Torvalds };
1291da177e4SLinus Torvalds 
1305606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1315606e387SMel Gorman 
132b2ca916cSDan Williams /**
133b2ca916cSDan Williams  * numa_map_to_online_node - Find closest online node
134f6e92f40SKrzysztof Kozlowski  * @node: Node id to start the search
135b2ca916cSDan Williams  *
136b2ca916cSDan Williams  * Look up the next closest node by distance if @node is not online.
137b2ca916cSDan Williams  */
138b2ca916cSDan Williams int numa_map_to_online_node(int node)
139b2ca916cSDan Williams {
1404fcbe96eSDan Williams 	int min_dist = INT_MAX, dist, n, min_node;
141b2ca916cSDan Williams 
1424fcbe96eSDan Williams 	if (node == NUMA_NO_NODE || node_online(node))
1434fcbe96eSDan Williams 		return node;
144b2ca916cSDan Williams 
145b2ca916cSDan Williams 	min_node = node;
146b2ca916cSDan Williams 	for_each_online_node(n) {
147b2ca916cSDan Williams 		dist = node_distance(node, n);
148b2ca916cSDan Williams 		if (dist < min_dist) {
149b2ca916cSDan Williams 			min_dist = dist;
150b2ca916cSDan Williams 			min_node = n;
151b2ca916cSDan Williams 		}
152b2ca916cSDan Williams 	}
153b2ca916cSDan Williams 
154b2ca916cSDan Williams 	return min_node;
155b2ca916cSDan Williams }
156b2ca916cSDan Williams EXPORT_SYMBOL_GPL(numa_map_to_online_node);
157b2ca916cSDan Williams 
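/*
 * Example (illustrative sketch, not from the original source): callers that
 * are handed a possibly-offline node id, e.g. a firmware-provided proximity
 * domain, can use the helper above to pick a usable node before allocating:
 *
 *	int nid = numa_map_to_online_node(phys_node);
 *
 *	if (nid != NUMA_NO_NODE)
 *		page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 */
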
15874d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1595606e387SMel Gorman {
1605606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
161f15ca78eSOleg Nesterov 	int node;
1625606e387SMel Gorman 
163f15ca78eSOleg Nesterov 	if (pol)
164f15ca78eSOleg Nesterov 		return pol;
1655606e387SMel Gorman 
166f15ca78eSOleg Nesterov 	node = numa_node_id();
1671da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1681da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
169f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
170f15ca78eSOleg Nesterov 		if (pol->mode)
171f15ca78eSOleg Nesterov 			return pol;
1721da6f0e1SJianguo Wu 	}
1735606e387SMel Gorman 
174f15ca78eSOleg Nesterov 	return &default_policy;
1755606e387SMel Gorman }
1765606e387SMel Gorman 
17737012946SDavid Rientjes static const struct mempolicy_operations {
17837012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
179213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
18037012946SDavid Rientjes } mpol_ops[MPOL_MAX];
18137012946SDavid Rientjes 
182f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
183f5b087b5SDavid Rientjes {
1846d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1854c50bc01SDavid Rientjes }
1864c50bc01SDavid Rientjes 
1874c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1884c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1894c50bc01SDavid Rientjes {
1904c50bc01SDavid Rientjes 	nodemask_t tmp;
1914c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1924c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
193f5b087b5SDavid Rientjes }
194f5b087b5SDavid Rientjes 
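/*
 * Worked example (illustrative only): with MPOL_F_RELATIVE_NODES the user's
 * nodemask is interpreted relative to the currently allowed nodes.  Assuming
 * *orig = {0,2} and *rel = {4,5,6,7} (weight 4), nodes_fold() wraps *orig
 * modulo 4 (still {0,2}) and nodes_onto() maps those relative offsets onto
 * the set bits of *rel, so *ret = {4,6}:
 *
 *	nodemask_t ret;
 *
 *	mpol_relative_nodemask(&ret, &user_nodes, &allowed_nodes);
 *		(user_nodes = {0,2}, allowed_nodes = {4,5,6,7}  =>  ret = {4,6})
 */
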
195be897d48SFeng Tang static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
19637012946SDavid Rientjes {
19737012946SDavid Rientjes 	if (nodes_empty(*nodes))
19837012946SDavid Rientjes 		return -EINVAL;
199269fbe72SBen Widawsky 	pol->nodes = *nodes;
20037012946SDavid Rientjes 	return 0;
20137012946SDavid Rientjes }
20237012946SDavid Rientjes 
20337012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
20437012946SDavid Rientjes {
2057858d7bcSFeng Tang 	if (nodes_empty(*nodes))
2067858d7bcSFeng Tang 		return -EINVAL;
207269fbe72SBen Widawsky 
208269fbe72SBen Widawsky 	nodes_clear(pol->nodes);
209269fbe72SBen Widawsky 	node_set(first_node(*nodes), pol->nodes);
21037012946SDavid Rientjes 	return 0;
21137012946SDavid Rientjes }
21237012946SDavid Rientjes 
21358568d2aSMiao Xie /*
21458568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
21558568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
2167858d7bcSFeng Tang  * parameter with respect to the policy mode and flags.
21758568d2aSMiao Xie  *
21858568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
219c1e8d7c6SMichel Lespinasse  * and mempolicy.  May also be called holding the mmap_lock for write.
22058568d2aSMiao Xie  */
2214bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2224bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
22358568d2aSMiao Xie {
22458568d2aSMiao Xie 	int ret;
22558568d2aSMiao Xie 
2267858d7bcSFeng Tang 	/*
2277858d7bcSFeng Tang 	 * Default (pol==NULL) and local memory policies are not
2287858d7bcSFeng Tang 	 * subject to any remapping. They also do not need any special
2297858d7bcSFeng Tang 	 * constructor.
2307858d7bcSFeng Tang 	 */
2317858d7bcSFeng Tang 	if (!pol || pol->mode == MPOL_LOCAL)
23258568d2aSMiao Xie 		return 0;
2337858d7bcSFeng Tang 
23401f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2354bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
23601f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
23758568d2aSMiao Xie 
23858568d2aSMiao Xie 	VM_BUG_ON(!nodes);
2397858d7bcSFeng Tang 
24058568d2aSMiao Xie 	if (pol->flags & MPOL_F_RELATIVE_NODES)
2414bfc4495SKAMEZAWA Hiroyuki 		mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
24258568d2aSMiao Xie 	else
2434bfc4495SKAMEZAWA Hiroyuki 		nodes_and(nsc->mask2, *nodes, nsc->mask1);
2444bfc4495SKAMEZAWA Hiroyuki 
24558568d2aSMiao Xie 	if (mpol_store_user_nodemask(pol))
24658568d2aSMiao Xie 		pol->w.user_nodemask = *nodes;
24758568d2aSMiao Xie 	else
2487858d7bcSFeng Tang 		pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
24958568d2aSMiao Xie 
2504bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
25158568d2aSMiao Xie 	return ret;
25258568d2aSMiao Xie }
25358568d2aSMiao Xie 
25458568d2aSMiao Xie /*
25558568d2aSMiao Xie  * This function just creates a new policy, does some checks and simple
25658568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
25758568d2aSMiao Xie  */
258028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
259028fec41SDavid Rientjes 				  nodemask_t *nodes)
2601da177e4SLinus Torvalds {
2611da177e4SLinus Torvalds 	struct mempolicy *policy;
2621da177e4SLinus Torvalds 
263028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
26400ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
265140d5a49SPaul Mundt 
2663e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2673e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
26837012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
269d3a71033SLee Schermerhorn 		return NULL;
27037012946SDavid Rientjes 	}
2713e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2723e1f0645SDavid Rientjes 
2733e1f0645SDavid Rientjes 	/*
2743e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2753e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2763e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2773e1f0645SDavid Rientjes 	 */
2783e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2793e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2803e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2813e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2823e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2837858d7bcSFeng Tang 
2847858d7bcSFeng Tang 			mode = MPOL_LOCAL;
2853e1f0645SDavid Rientjes 		}
286479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2878d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2888d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2898d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
290479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
2913e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2923e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2931da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2941da177e4SLinus Torvalds 	if (!policy)
2951da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2961da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
29745c4745aSLee Schermerhorn 	policy->mode = mode;
29837012946SDavid Rientjes 	policy->flags = flags;
2993e1f0645SDavid Rientjes 
30037012946SDavid Rientjes 	return policy;
30137012946SDavid Rientjes }
30237012946SDavid Rientjes 
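/*
 * Example (sketch of how callers pair mpol_new() with mpol_set_nodemask();
 * do_set_mempolicy() later in this file is the canonical user, and the
 * locking rules documented above mpol_set_nodemask() still apply):
 *
 *	NODEMASK_SCRATCH(scratch);
 *	struct mempolicy *new;
 *	int err;
 *
 *	new = mpol_new(MPOL_BIND, 0, &nodes);
 *	if (!IS_ERR(new) && scratch)
 *		err = mpol_set_nodemask(new, &nodes, scratch);
 *	NODEMASK_SCRATCH_FREE(scratch);
 */
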
30352cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
30452cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
30552cd3b07SLee Schermerhorn {
30652cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
30752cd3b07SLee Schermerhorn 		return;
30852cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
30952cd3b07SLee Schermerhorn }
31052cd3b07SLee Schermerhorn 
311213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
31237012946SDavid Rientjes {
31337012946SDavid Rientjes }
31437012946SDavid Rientjes 
315213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
3161d0d2680SDavid Rientjes {
3171d0d2680SDavid Rientjes 	nodemask_t tmp;
3181d0d2680SDavid Rientjes 
31937012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
32037012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
32137012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
32237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3231d0d2680SDavid Rientjes 	else {
324269fbe72SBen Widawsky 		nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
325213980c0SVlastimil Babka 								*nodes);
32629b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3271d0d2680SDavid Rientjes 	}
32837012946SDavid Rientjes 
329708c1bbcSMiao Xie 	if (nodes_empty(tmp))
330708c1bbcSMiao Xie 		tmp = *nodes;
331708c1bbcSMiao Xie 
332269fbe72SBen Widawsky 	pol->nodes = tmp;
33337012946SDavid Rientjes }
33437012946SDavid Rientjes 
33537012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
336213980c0SVlastimil Babka 						const nodemask_t *nodes)
33737012946SDavid Rientjes {
33837012946SDavid Rientjes 	pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes }
34037012946SDavid Rientjes 
341708c1bbcSMiao Xie /*
342708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
343708c1bbcSMiao Xie  *
344c1e8d7c6SMichel Lespinasse  * Per-vma policies are protected by mmap_lock. Allocations using per-task
345213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
346213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
347708c1bbcSMiao Xie  */
348213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
34937012946SDavid Rientjes {
35037012946SDavid Rientjes 	if (!pol)
35137012946SDavid Rientjes 		return;
3527858d7bcSFeng Tang 	if (!mpol_store_user_nodemask(pol) &&
35337012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35437012946SDavid Rientjes 		return;
355708c1bbcSMiao Xie 
356213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3571d0d2680SDavid Rientjes }
3581d0d2680SDavid Rientjes 
3591d0d2680SDavid Rientjes /*
3601d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires a task
3611d0d2680SDavid Rientjes  * pointer, and updates the task's mempolicy.
36258568d2aSMiao Xie  *
36358568d2aSMiao Xie  * Called with task's alloc_lock held.
3641d0d2680SDavid Rientjes  */
3651d0d2680SDavid Rientjes 
366213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3671d0d2680SDavid Rientjes {
368213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3691d0d2680SDavid Rientjes }
3701d0d2680SDavid Rientjes 
3711d0d2680SDavid Rientjes /*
3721d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3731d0d2680SDavid Rientjes  *
374c1e8d7c6SMichel Lespinasse  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
3751d0d2680SDavid Rientjes  */
3761d0d2680SDavid Rientjes 
3771d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3781d0d2680SDavid Rientjes {
3791d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3801d0d2680SDavid Rientjes 
381d8ed45c5SMichel Lespinasse 	mmap_write_lock(mm);
3821d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
383213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
384d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
3851d0d2680SDavid Rientjes }
3861d0d2680SDavid Rientjes 
38737012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38837012946SDavid Rientjes 	[MPOL_DEFAULT] = {
38937012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39037012946SDavid Rientjes 	},
39137012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
392be897d48SFeng Tang 		.create = mpol_new_nodemask,
39337012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39437012946SDavid Rientjes 	},
39537012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39637012946SDavid Rientjes 		.create = mpol_new_preferred,
39737012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39837012946SDavid Rientjes 	},
39937012946SDavid Rientjes 	[MPOL_BIND] = {
400be897d48SFeng Tang 		.create = mpol_new_nodemask,
40137012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40237012946SDavid Rientjes 	},
4037858d7bcSFeng Tang 	[MPOL_LOCAL] = {
4047858d7bcSFeng Tang 		.rebind = mpol_rebind_default,
4057858d7bcSFeng Tang 	},
406b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY] = {
407be897d48SFeng Tang 		.create = mpol_new_nodemask,
408b27abaccSDave Hansen 		.rebind = mpol_rebind_preferred,
409b27abaccSDave Hansen 	},
41037012946SDavid Rientjes };
41137012946SDavid Rientjes 
412a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
413fc301289SChristoph Lameter 				unsigned long flags);
4141a75a6c8SChristoph Lameter 
4156f4576e3SNaoya Horiguchi struct queue_pages {
4166f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4176f4576e3SNaoya Horiguchi 	unsigned long flags;
4186f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
419f18da660SLi Xinhai 	unsigned long start;
420f18da660SLi Xinhai 	unsigned long end;
421f18da660SLi Xinhai 	struct vm_area_struct *first;
4226f4576e3SNaoya Horiguchi };
4236f4576e3SNaoya Horiguchi 
42498094945SNaoya Horiguchi /*
42588aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
42688aaa2a1SNaoya Horiguchi  *
42788aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, the check is inverted: the page
42888aaa2a1SNaoya Horiguchi  * qualifies only if its nid is *not* in qp->nmask.
42988aaa2a1SNaoya Horiguchi  */
43088aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
43188aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
43288aaa2a1SNaoya Horiguchi {
43388aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
43488aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
43588aaa2a1SNaoya Horiguchi 
43688aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
43788aaa2a1SNaoya Horiguchi }
43888aaa2a1SNaoya Horiguchi 
439a7f40cfeSYang Shi /*
440d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
441e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or a
442e5947d23SYang Shi  *     special page was encountered, i.e. the huge zero page.
443d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
444d8835445SYang Shi  *     specified.
445d8835445SYang Shi  * 2 - THP was split.
446d8835445SYang Shi  * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
447d8835445SYang Shi  *        specified and an existing page was already on a node that does
448d8835445SYang Shi  *        not follow the policy.
449a7f40cfeSYang Shi  */
450c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
452959a7e13SJules Irenge 	__releases(ptl)
453c8633798SNaoya Horiguchi {
454c8633798SNaoya Horiguchi 	int ret = 0;
455c8633798SNaoya Horiguchi 	struct page *page;
456c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
457c8633798SNaoya Horiguchi 	unsigned long flags;
458c8633798SNaoya Horiguchi 
459c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
460a7f40cfeSYang Shi 		ret = -EIO;
461c8633798SNaoya Horiguchi 		goto unlock;
462c8633798SNaoya Horiguchi 	}
463c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
464c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
465c8633798SNaoya Horiguchi 		spin_unlock(ptl);
466e5947d23SYang Shi 		walk->action = ACTION_CONTINUE;
467c8633798SNaoya Horiguchi 		goto out;
468c8633798SNaoya Horiguchi 	}
469d8835445SYang Shi 	if (!queue_pages_required(page, qp))
470c8633798SNaoya Horiguchi 		goto unlock;
471c8633798SNaoya Horiguchi 
472c8633798SNaoya Horiguchi 	flags = qp->flags;
473c8633798SNaoya Horiguchi 	/* go to thp migration */
474a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
475a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
476a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
477d8835445SYang Shi 			ret = 1;
478a7f40cfeSYang Shi 			goto unlock;
479a7f40cfeSYang Shi 		}
480a7f40cfeSYang Shi 	} else
481a7f40cfeSYang Shi 		ret = -EIO;
482c8633798SNaoya Horiguchi unlock:
483c8633798SNaoya Horiguchi 	spin_unlock(ptl);
484c8633798SNaoya Horiguchi out:
485c8633798SNaoya Horiguchi 	return ret;
486c8633798SNaoya Horiguchi }
487c8633798SNaoya Horiguchi 
48888aaa2a1SNaoya Horiguchi /*
48998094945SNaoya Horiguchi  * Scan through pages, checking whether they meet the given conditions,
49098094945SNaoya Horiguchi  * and move them to the pagelist if they do.
491d8835445SYang Shi  *
492d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
493e5947d23SYang Shi  * 0 - pages are placed on the right node or queued successfully, or a
494e5947d23SYang Shi  *     special page was encountered, i.e. the zero page.
495d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
496d8835445SYang Shi  *     specified.
497d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
498d8835445SYang Shi  *        on a node that does not follow the policy.
49998094945SNaoya Horiguchi  */
5006f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
5016f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
5021da177e4SLinus Torvalds {
5036f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
5046f4576e3SNaoya Horiguchi 	struct page *page;
5056f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5066f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
507c8633798SNaoya Horiguchi 	int ret;
508d8835445SYang Shi 	bool has_unmovable = false;
5093f088420SShijie Luo 	pte_t *pte, *mapped_pte;
510705e87c0SHugh Dickins 	spinlock_t *ptl;
511941150a3SHugh Dickins 
512c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
513c8633798SNaoya Horiguchi 	if (ptl) {
514c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
515d8835445SYang Shi 		if (ret != 2)
516a7f40cfeSYang Shi 			return ret;
517248db92dSKirill A. Shutemov 	}
518d8835445SYang Shi 	/* THP was split, fall through to pte walk */
51991612e0dSHugh Dickins 
520337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
521337d9abfSNaoya Horiguchi 		return 0;
52294723aafSMichal Hocko 
5233f088420SShijie Luo 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5246f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
52591612e0dSHugh Dickins 		if (!pte_present(*pte))
52691612e0dSHugh Dickins 			continue;
5276aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5286aab341eSLinus Torvalds 		if (!page)
52991612e0dSHugh Dickins 			continue;
530053837fcSNick Piggin 		/*
53162b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
53262b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
533053837fcSNick Piggin 		 */
534b79bc0a0SHugh Dickins 		if (PageReserved(page))
535f4598c8bSChristoph Lameter 			continue;
53688aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
53738e35860SChristoph Lameter 			continue;
538a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
539d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
540d8835445SYang Shi 			if (!vma_migratable(vma)) {
541d8835445SYang Shi 				has_unmovable = true;
542a7f40cfeSYang Shi 				break;
543d8835445SYang Shi 			}
544a53190a4SYang Shi 
545a53190a4SYang Shi 			/*
546a53190a4SYang Shi 			 * Do not abort immediately since there may be
547a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  Still
548a53190a4SYang Shi 			 * need to migrate other LRU pages.
549a53190a4SYang Shi 			 */
550a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
551a53190a4SYang Shi 				has_unmovable = true;
552a7f40cfeSYang Shi 		} else
553a7f40cfeSYang Shi 			break;
5546f4576e3SNaoya Horiguchi 	}
5553f088420SShijie Luo 	pte_unmap_unlock(mapped_pte, ptl);
5566f4576e3SNaoya Horiguchi 	cond_resched();
557d8835445SYang Shi 
558d8835445SYang Shi 	if (has_unmovable)
559d8835445SYang Shi 		return 1;
560d8835445SYang Shi 
561a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
56291612e0dSHugh Dickins }
56391612e0dSHugh Dickins 
5646f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5656f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5666f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
567e2d8cf40SNaoya Horiguchi {
568dcf17635SLi Xinhai 	int ret = 0;
569e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5706f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
571dcf17635SLi Xinhai 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
572e2d8cf40SNaoya Horiguchi 	struct page *page;
573cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
574d4c54919SNaoya Horiguchi 	pte_t entry;
575e2d8cf40SNaoya Horiguchi 
5766f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5776f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
578d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
579d4c54919SNaoya Horiguchi 		goto unlock;
580d4c54919SNaoya Horiguchi 	page = pte_page(entry);
58188aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
582e2d8cf40SNaoya Horiguchi 		goto unlock;
583dcf17635SLi Xinhai 
584dcf17635SLi Xinhai 	if (flags == MPOL_MF_STRICT) {
585dcf17635SLi Xinhai 		/*
586dcf17635SLi Xinhai 		 * STRICT alone means only detecting misplaced pages and no
587dcf17635SLi Xinhai 		 * need to check other vmas any further.
588dcf17635SLi Xinhai 		 */
589dcf17635SLi Xinhai 		ret = -EIO;
590dcf17635SLi Xinhai 		goto unlock;
591dcf17635SLi Xinhai 	}
592dcf17635SLi Xinhai 
593dcf17635SLi Xinhai 	if (!vma_migratable(walk->vma)) {
594dcf17635SLi Xinhai 		/*
595dcf17635SLi Xinhai 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
596dcf17635SLi Xinhai 		 * stopped walking the current vma.
597dcf17635SLi Xinhai 		 * Detect the misplaced page but allow migrating pages which
598dcf17635SLi Xinhai 		 * have already been queued.
599dcf17635SLi Xinhai 		 */
600dcf17635SLi Xinhai 		ret = 1;
601dcf17635SLi Xinhai 		goto unlock;
602dcf17635SLi Xinhai 	}
603dcf17635SLi Xinhai 
604e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
605e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
606dcf17635SLi Xinhai 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
607dcf17635SLi Xinhai 		if (!isolate_huge_page(page, qp->pagelist) &&
608dcf17635SLi Xinhai 			(flags & MPOL_MF_STRICT))
609dcf17635SLi Xinhai 			 * Failed to isolate the page, but allow migrating pages
610dcf17635SLi Xinhai 			 * which have already been queued.
611dcf17635SLi Xinhai 			 * which have been queued.
612dcf17635SLi Xinhai 			 */
613dcf17635SLi Xinhai 			ret = 1;
614dcf17635SLi Xinhai 	}
615e2d8cf40SNaoya Horiguchi unlock:
616cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
617e2d8cf40SNaoya Horiguchi #else
618e2d8cf40SNaoya Horiguchi 	BUG();
619e2d8cf40SNaoya Horiguchi #endif
620dcf17635SLi Xinhai 	return ret;
6211da177e4SLinus Torvalds }
6221da177e4SLinus Torvalds 
6235877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
624b24f53a0SLee Schermerhorn /*
6254b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
6264b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
6274b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
6284b10e7d5SMel Gorman  *
6294b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
6304b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
6314b10e7d5SMel Gorman  * changes to the core.
632b24f53a0SLee Schermerhorn  */
6334b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
6344b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
635b24f53a0SLee Schermerhorn {
6364b10e7d5SMel Gorman 	int nr_updated;
637b24f53a0SLee Schermerhorn 
63858705444SPeter Xu 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
63903c5a6e1SMel Gorman 	if (nr_updated)
64003c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
641b24f53a0SLee Schermerhorn 
6424b10e7d5SMel Gorman 	return nr_updated;
643b24f53a0SLee Schermerhorn }
644b24f53a0SLee Schermerhorn #else
645b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
646b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
647b24f53a0SLee Schermerhorn {
648b24f53a0SLee Schermerhorn 	return 0;
649b24f53a0SLee Schermerhorn }
6505877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
651b24f53a0SLee Schermerhorn 
6526f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6536f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6541da177e4SLinus Torvalds {
6556f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6566f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6575b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6586f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
659dc9aa5b9SChristoph Lameter 
660a18b3ac2SLi Xinhai 	/* range check first */
661ce33135cSMiaohe Lin 	VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
662f18da660SLi Xinhai 
663f18da660SLi Xinhai 	if (!qp->first) {
664f18da660SLi Xinhai 		qp->first = vma;
665f18da660SLi Xinhai 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
666f18da660SLi Xinhai 			(qp->start < vma->vm_start))
667f18da660SLi Xinhai 			/* hole at head side of range */
668a18b3ac2SLi Xinhai 			return -EFAULT;
669a18b3ac2SLi Xinhai 	}
670f18da660SLi Xinhai 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
671f18da660SLi Xinhai 		((vma->vm_end < qp->end) &&
672f18da660SLi Xinhai 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
673f18da660SLi Xinhai 		/* hole at middle or tail of range */
674f18da660SLi Xinhai 		return -EFAULT;
675a18b3ac2SLi Xinhai 
676a7f40cfeSYang Shi 	/*
677a7f40cfeSYang Shi 	 * Need to check MPOL_MF_STRICT so that -EIO can be returned
678a7f40cfeSYang Shi 	 * regardless of vma_migratable()
679a7f40cfeSYang Shi 	 */
680a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
681a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
68248684a65SNaoya Horiguchi 		return 1;
68348684a65SNaoya Horiguchi 
6845b952b3cSAndi Kleen 	if (endvma > end)
6855b952b3cSAndi Kleen 		endvma = end;
686b24f53a0SLee Schermerhorn 
687b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6882c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6893122e80eSAnshuman Khandual 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
6904355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
691b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6926f4576e3SNaoya Horiguchi 		return 1;
693b24f53a0SLee Schermerhorn 	}
694b24f53a0SLee Schermerhorn 
6956f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
696a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
6976f4576e3SNaoya Horiguchi 		return 0;
6986f4576e3SNaoya Horiguchi 	return 1;
6996f4576e3SNaoya Horiguchi }
700b24f53a0SLee Schermerhorn 
7017b86ac33SChristoph Hellwig static const struct mm_walk_ops queue_pages_walk_ops = {
7027b86ac33SChristoph Hellwig 	.hugetlb_entry		= queue_pages_hugetlb,
7037b86ac33SChristoph Hellwig 	.pmd_entry		= queue_pages_pte_range,
7047b86ac33SChristoph Hellwig 	.test_walk		= queue_pages_test_walk,
7057b86ac33SChristoph Hellwig };
7067b86ac33SChristoph Hellwig 
7076f4576e3SNaoya Horiguchi /*
7086f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
7096f4576e3SNaoya Horiguchi  *
7106f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
7116f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued on the pagelist, which
712d8835445SYang Shi  * is passed via @private.
713d8835445SYang Shi  *
714d8835445SYang Shi  * queue_pages_range() has three possible return values:
715d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
716d8835445SYang Shi  *     specified.
717d8835445SYang Shi  * 0 - pages were queued successfully or no misplaced page was found.
718a85dfc30SYang Shi  * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
719a85dfc30SYang Shi  *         memory range specified by nodemask and maxnode points outside
720a85dfc30SYang Shi  *         your accessible address space (-EFAULT)
7216f4576e3SNaoya Horiguchi  */
7226f4576e3SNaoya Horiguchi static int
7236f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
7246f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
7256f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
7266f4576e3SNaoya Horiguchi {
727f18da660SLi Xinhai 	int err;
7286f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
7296f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
7306f4576e3SNaoya Horiguchi 		.flags = flags,
7316f4576e3SNaoya Horiguchi 		.nmask = nodes,
732f18da660SLi Xinhai 		.start = start,
733f18da660SLi Xinhai 		.end = end,
734f18da660SLi Xinhai 		.first = NULL,
7356f4576e3SNaoya Horiguchi 	};
7366f4576e3SNaoya Horiguchi 
737f18da660SLi Xinhai 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
738f18da660SLi Xinhai 
739f18da660SLi Xinhai 	if (!qp.first)
740f18da660SLi Xinhai 		/* whole range in hole */
741f18da660SLi Xinhai 		err = -EFAULT;
742f18da660SLi Xinhai 
743f18da660SLi Xinhai 	return err;
7441da177e4SLinus Torvalds }
7451da177e4SLinus Torvalds 
746869833f2SKOSAKI Motohiro /*
747869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
748c1e8d7c6SMichel Lespinasse  * This must be called with the mmap_lock held for writing.
749869833f2SKOSAKI Motohiro  */
750869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
751869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
7528d34694cSKOSAKI Motohiro {
753869833f2SKOSAKI Motohiro 	int err;
754869833f2SKOSAKI Motohiro 	struct mempolicy *old;
755869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7568d34694cSKOSAKI Motohiro 
7578d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7588d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7598d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7608d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7618d34694cSKOSAKI Motohiro 
762869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
763869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
764869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
765869833f2SKOSAKI Motohiro 
766869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7678d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
768869833f2SKOSAKI Motohiro 		if (err)
769869833f2SKOSAKI Motohiro 			goto err_out;
7708d34694cSKOSAKI Motohiro 	}
771869833f2SKOSAKI Motohiro 
772869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
773c1e8d7c6SMichel Lespinasse 	vma->vm_policy = new; /* protected by mmap_lock */
774869833f2SKOSAKI Motohiro 	mpol_put(old);
775869833f2SKOSAKI Motohiro 
776869833f2SKOSAKI Motohiro 	return 0;
777869833f2SKOSAKI Motohiro  err_out:
778869833f2SKOSAKI Motohiro 	mpol_put(new);
7798d34694cSKOSAKI Motohiro 	return err;
7808d34694cSKOSAKI Motohiro }
7818d34694cSKOSAKI Motohiro 
7821da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7839d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7849d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7851da177e4SLinus Torvalds {
7861da177e4SLinus Torvalds 	struct vm_area_struct *next;
7879d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7889d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7899d8cebd4SKOSAKI Motohiro 	int err = 0;
790e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7919d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7929d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7931da177e4SLinus Torvalds 
794097d5910SLinus Torvalds 	vma = find_vma(mm, start);
795f18da660SLi Xinhai 	VM_BUG_ON(!vma);
7969d8cebd4SKOSAKI Motohiro 
797097d5910SLinus Torvalds 	prev = vma->vm_prev;
798e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
799e26a5114SKOSAKI Motohiro 		prev = vma;
800e26a5114SKOSAKI Motohiro 
8019d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
8021da177e4SLinus Torvalds 		next = vma->vm_next;
8039d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
8049d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
8059d8cebd4SKOSAKI Motohiro 
806e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
807e26a5114SKOSAKI Motohiro 			continue;
808e26a5114SKOSAKI Motohiro 
809e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
810e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
8119d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
812e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
81319a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
8149d8cebd4SKOSAKI Motohiro 		if (prev) {
8159d8cebd4SKOSAKI Motohiro 			vma = prev;
8169d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
8173964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
8189d8cebd4SKOSAKI Motohiro 				continue;
8193964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
8203964acd0SOleg Nesterov 			goto replace;
8211da177e4SLinus Torvalds 		}
8229d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
8239d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
8249d8cebd4SKOSAKI Motohiro 			if (err)
8259d8cebd4SKOSAKI Motohiro 				goto out;
8269d8cebd4SKOSAKI Motohiro 		}
8279d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
8289d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
8299d8cebd4SKOSAKI Motohiro 			if (err)
8309d8cebd4SKOSAKI Motohiro 				goto out;
8319d8cebd4SKOSAKI Motohiro 		}
8323964acd0SOleg Nesterov  replace:
833869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
8349d8cebd4SKOSAKI Motohiro 		if (err)
8359d8cebd4SKOSAKI Motohiro 			goto out;
8369d8cebd4SKOSAKI Motohiro 	}
8379d8cebd4SKOSAKI Motohiro 
8389d8cebd4SKOSAKI Motohiro  out:
8391da177e4SLinus Torvalds 	return err;
8401da177e4SLinus Torvalds }
8411da177e4SLinus Torvalds 
8421da177e4SLinus Torvalds /* Set the process memory policy */
843028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
844028fec41SDavid Rientjes 			     nodemask_t *nodes)
8451da177e4SLinus Torvalds {
84658568d2aSMiao Xie 	struct mempolicy *new, *old;
8474bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
84858568d2aSMiao Xie 	int ret;
8491da177e4SLinus Torvalds 
8504bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
8514bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
852f4e53d91SLee Schermerhorn 
8534bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8544bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8554bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8564bfc4495SKAMEZAWA Hiroyuki 		goto out;
8574bfc4495SKAMEZAWA Hiroyuki 	}
8582c7c3a7dSOleg Nesterov 
859bda420b9SHuang Ying 	if (flags & MPOL_F_NUMA_BALANCING) {
860bda420b9SHuang Ying 		if (new && new->mode == MPOL_BIND) {
861bda420b9SHuang Ying 			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
862bda420b9SHuang Ying 		} else {
863bda420b9SHuang Ying 			ret = -EINVAL;
864bda420b9SHuang Ying 			mpol_put(new);
865bda420b9SHuang Ying 			goto out;
866bda420b9SHuang Ying 		}
867bda420b9SHuang Ying 	}
868bda420b9SHuang Ying 
8694bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
87058568d2aSMiao Xie 	if (ret) {
87158568d2aSMiao Xie 		mpol_put(new);
8724bfc4495SKAMEZAWA Hiroyuki 		goto out;
87358568d2aSMiao Xie 	}
87478b132e9SWei Yang 	task_lock(current);
87558568d2aSMiao Xie 	old = current->mempolicy;
8761da177e4SLinus Torvalds 	current->mempolicy = new;
87745816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
87845816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
87958568d2aSMiao Xie 	task_unlock(current);
88058568d2aSMiao Xie 	mpol_put(old);
8814bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8824bfc4495SKAMEZAWA Hiroyuki out:
8834bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8844bfc4495SKAMEZAWA Hiroyuki 	return ret;
8851da177e4SLinus Torvalds }
8861da177e4SLinus Torvalds 
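/*
 * Example (userspace sketch, illustrative only): the MPOL_F_NUMA_BALANCING
 * handling above corresponds to a caller OR-ing that flag into the mode
 * argument of the set_mempolicy() syscall, e.g.:
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *	set_mempolicy(MPOL_BIND | MPOL_F_NUMA_BALANCING,
 *		      &nodes, 8 * sizeof(nodes));
 *		(bind to nodes 0-1, but let NUMA balancing optimize page
 *		 placement between them based on observed access locality)
 */
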
887bea904d5SLee Schermerhorn /*
888bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
88958568d2aSMiao Xie  *
89058568d2aSMiao Xie  * Called with task's alloc_lock held
891bea904d5SLee Schermerhorn  */
892bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8931da177e4SLinus Torvalds {
894dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
895bea904d5SLee Schermerhorn 	if (p == &default_policy)
896bea904d5SLee Schermerhorn 		return;
897bea904d5SLee Schermerhorn 
89845c4745aSLee Schermerhorn 	switch (p->mode) {
89919770b32SMel Gorman 	case MPOL_BIND:
9001da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
901269fbe72SBen Widawsky 	case MPOL_PREFERRED:
902b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
903269fbe72SBen Widawsky 		*nodes = p->nodes;
9041da177e4SLinus Torvalds 		break;
9057858d7bcSFeng Tang 	case MPOL_LOCAL:
9067858d7bcSFeng Tang 		/* return empty node mask for local allocation */
9077858d7bcSFeng Tang 		break;
9081da177e4SLinus Torvalds 	default:
9091da177e4SLinus Torvalds 		BUG();
9101da177e4SLinus Torvalds 	}
9111da177e4SLinus Torvalds }
9121da177e4SLinus Torvalds 
9133b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
9141da177e4SLinus Torvalds {
915ba841078SPeter Xu 	struct page *p = NULL;
9161da177e4SLinus Torvalds 	int err;
9171da177e4SLinus Torvalds 
9183b9aadf7SAndrea Arcangeli 	int locked = 1;
9193b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
9202d3a36a4SMichal Hocko 	if (err > 0) {
9211da177e4SLinus Torvalds 		err = page_to_nid(p);
9221da177e4SLinus Torvalds 		put_page(p);
9231da177e4SLinus Torvalds 	}
9243b9aadf7SAndrea Arcangeli 	if (locked)
925d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
9261da177e4SLinus Torvalds 	return err;
9271da177e4SLinus Torvalds }
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds /* Retrieve NUMA policy */
930dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
9311da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
9321da177e4SLinus Torvalds {
9338bccd85fSChristoph Lameter 	int err;
9341da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
9351da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
9363b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
9371da177e4SLinus Torvalds 
938754af6f5SLee Schermerhorn 	if (flags &
939754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
9401da177e4SLinus Torvalds 		return -EINVAL;
941754af6f5SLee Schermerhorn 
942754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
943754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
944754af6f5SLee Schermerhorn 			return -EINVAL;
945754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
94658568d2aSMiao Xie 		task_lock(current);
947754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
94858568d2aSMiao Xie 		task_unlock(current);
949754af6f5SLee Schermerhorn 		return 0;
950754af6f5SLee Schermerhorn 	}
951754af6f5SLee Schermerhorn 
9521da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
953bea904d5SLee Schermerhorn 		/*
954bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
955bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
956bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
957bea904d5SLee Schermerhorn 		 */
958d8ed45c5SMichel Lespinasse 		mmap_read_lock(mm);
95933e3575cSLiam Howlett 		vma = vma_lookup(mm, addr);
9601da177e4SLinus Torvalds 		if (!vma) {
961d8ed45c5SMichel Lespinasse 			mmap_read_unlock(mm);
9621da177e4SLinus Torvalds 			return -EFAULT;
9631da177e4SLinus Torvalds 		}
9641da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9651da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9661da177e4SLinus Torvalds 		else
9671da177e4SLinus Torvalds 			pol = vma->vm_policy;
9681da177e4SLinus Torvalds 	} else if (addr)
9691da177e4SLinus Torvalds 		return -EINVAL;
9701da177e4SLinus Torvalds 
9711da177e4SLinus Torvalds 	if (!pol)
972bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9731da177e4SLinus Torvalds 
9741da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9751da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9763b9aadf7SAndrea Arcangeli 			/*
9773b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
978baf2f90bSLu Jialin 			 * will drop the mmap_lock, so after calling
9793b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
9803b9aadf7SAndrea Arcangeli 			 * is stale.
9813b9aadf7SAndrea Arcangeli 			 */
9823b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9833b9aadf7SAndrea Arcangeli 			vma = NULL;
9843b9aadf7SAndrea Arcangeli 			mpol_get(pol);
9853b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9861da177e4SLinus Torvalds 			if (err < 0)
9871da177e4SLinus Torvalds 				goto out;
9888bccd85fSChristoph Lameter 			*policy = err;
9891da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
99045c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
991269fbe72SBen Widawsky 			*policy = next_node_in(current->il_prev, pol->nodes);
9921da177e4SLinus Torvalds 		} else {
9931da177e4SLinus Torvalds 			err = -EINVAL;
9941da177e4SLinus Torvalds 			goto out;
9951da177e4SLinus Torvalds 		}
996bea904d5SLee Schermerhorn 	} else {
997bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
998bea904d5SLee Schermerhorn 						pol->mode;
999d79df630SDavid Rientjes 		/*
1000d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
1001d79df630SDavid Rientjes 		 * the policy to userspace.
1002d79df630SDavid Rientjes 		 */
1003d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1004bea904d5SLee Schermerhorn 	}
10051da177e4SLinus Torvalds 
10061da177e4SLinus Torvalds 	err = 0;
100758568d2aSMiao Xie 	if (nmask) {
1008c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
1009c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
1010c6b6ef8bSLee Schermerhorn 		} else {
101158568d2aSMiao Xie 			task_lock(current);
1012bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
101358568d2aSMiao Xie 			task_unlock(current);
101458568d2aSMiao Xie 		}
1015c6b6ef8bSLee Schermerhorn 	}
10161da177e4SLinus Torvalds 
10171da177e4SLinus Torvalds  out:
101852cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
10191da177e4SLinus Torvalds 	if (vma)
1020d8ed45c5SMichel Lespinasse 		mmap_read_unlock(mm);
10213b9aadf7SAndrea Arcangeli 	if (pol_refcount)
10223b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
10231da177e4SLinus Torvalds 	return err;
10241da177e4SLinus Torvalds }
10251da177e4SLinus Torvalds 
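/*
 * Example (userspace sketch, illustrative only): the flag combinations
 * handled above correspond to get_mempolicy() calls such as:
 *
 *	int mode;
 *	unsigned long mask[4];	(size chosen arbitrarily for illustration)
 *
 *	get_mempolicy(&mode, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR);
 *		(returns, in "mode", the node currently backing the page
 *		 at "addr")
 *
 *	get_mempolicy(&mode, mask, 8 * sizeof(mask), NULL, 0);
 *		(returns the calling task's policy mode and nodemask)
 */
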
1026b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
10278bccd85fSChristoph Lameter /*
1028c8633798SNaoya Horiguchi  * Page migration; THP tail pages can be passed as well.
10296ce3c4c0SChristoph Lameter  */
1030a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1031fc301289SChristoph Lameter 				unsigned long flags)
10326ce3c4c0SChristoph Lameter {
1033c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
10346ce3c4c0SChristoph Lameter 	/*
1035fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
10366ce3c4c0SChristoph Lameter 	 */
1037c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1038c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
1039c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
1040c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
10419de4f22aSHuang Ying 				NR_ISOLATED_ANON + page_is_file_lru(head),
10426c357848SMatthew Wilcox (Oracle) 				thp_nr_pages(head));
1043a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
1044a53190a4SYang Shi 			/*
1045a53190a4SYang Shi 			 * A non-movable page may reach here.  Also, there may
1046a53190a4SYang Shi 			 * be temporarily off-LRU pages or non-LRU movable pages.
1047a53190a4SYang Shi 			 * Treat them as unmovable pages since they can't be
1048a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
1049a53190a4SYang Shi 			 * should return -EIO for this case too.
1050a53190a4SYang Shi 			 */
1051a53190a4SYang Shi 			return -EIO;
105262695a84SNick Piggin 		}
105362695a84SNick Piggin 	}
1054a53190a4SYang Shi 
1055a53190a4SYang Shi 	return 0;
10566ce3c4c0SChristoph Lameter }
10576ce3c4c0SChristoph Lameter 
10586ce3c4c0SChristoph Lameter /*
10597e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10607e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10617e2ab150SChristoph Lameter  */
1062dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1063dbcb0f19SAdrian Bunk 			   int flags)
10647e2ab150SChristoph Lameter {
10657e2ab150SChristoph Lameter 	nodemask_t nmask;
10667e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10677e2ab150SChristoph Lameter 	int err = 0;
1068a0976311SJoonsoo Kim 	struct migration_target_control mtc = {
1069a0976311SJoonsoo Kim 		.nid = dest,
1070a0976311SJoonsoo Kim 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1071a0976311SJoonsoo Kim 	};
10727e2ab150SChristoph Lameter 
10737e2ab150SChristoph Lameter 	nodes_clear(nmask);
10747e2ab150SChristoph Lameter 	node_set(source, nmask);
10757e2ab150SChristoph Lameter 
107608270807SMinchan Kim 	/*
107708270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
107808270807SMinchan Kim 	 * need migration.  Between passing in the full user address
107908270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
108008270807SMinchan Kim 	 */
108108270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
108298094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10837e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10847e2ab150SChristoph Lameter 
1085cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1086a0976311SJoonsoo Kim 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
10875ac95884SYang Shi 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1088cf608ac1SMinchan Kim 		if (err)
1089e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1090cf608ac1SMinchan Kim 	}
109195a402c3SChristoph Lameter 
10927e2ab150SChristoph Lameter 	return err;
10937e2ab150SChristoph Lameter }
10947e2ab150SChristoph Lameter 
10957e2ab150SChristoph Lameter /*
10967e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10977e2ab150SChristoph Lameter  * layout as much as possible.
109839743889SChristoph Lameter  *
109939743889SChristoph Lameter  * Returns the number of pages that could not be moved.
110039743889SChristoph Lameter  */
11010ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
11020ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
110339743889SChristoph Lameter {
11047e2ab150SChristoph Lameter 	int busy = 0;
1105f555befdSJan Stancek 	int err = 0;
11067e2ab150SChristoph Lameter 	nodemask_t tmp;
110739743889SChristoph Lameter 
1108361a2a22SMinchan Kim 	lru_cache_disable();
11090aedadf9SChristoph Lameter 
1110d8ed45c5SMichel Lespinasse 	mmap_read_lock(mm);
1111d4984711SChristoph Lameter 
11127e2ab150SChristoph Lameter 	/*
11137e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
11147e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
11157e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
11167e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
11177e2ab150SChristoph Lameter 	 *
11187e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fall back to picking some
11197e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
11207e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
11217e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
11227e2ab150SChristoph Lameter 	 *
11237e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
11247e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
11257e2ab150SChristoph Lameter 	 * (nothing left to migrate).
11267e2ab150SChristoph Lameter 	 *
11277e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
11287e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
11297e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
11307e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
11317e2ab150SChristoph Lameter 	 * before migrating outgoing memory sourced from that same node.
11327e2ab150SChristoph Lameter 	 *
11337e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
11347e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
11357e2ab150SChristoph Lameter 	 * that not only moved, but, better still, moved to an empty slot
11367e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out immediately with that pair.
1137ae0e47f0SJustin P. Mattock 	 * Otherwise, when we finish scanning tmp, we at least have the
11387e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
11397e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
11407e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11417e2ab150SChristoph Lameter 	 */
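	/*
	 * Illustrative walk-through (not from the original comment): with
	 * from = 0-7 and to = 3-5, nodes 3, 4 and 5 are never picked as
	 * sources because the weights differ and they are already in 'to'.
	 * The first scan remembers <0,3>, <1,4>, <2,5>, <6,3> and <7,4> in
	 * turn; every destination is still set in tmp, so no "empty slot"
	 * is found and the scan ends holding the last pair <7,4>.  Node 7
	 * is then cleared from tmp and migrate_to_node(mm, 7, 4, flags)
	 * runs.  Each later pass peels off one more source the same way,
	 * until only nodes 3-5 remain in tmp and the loop stops.
	 */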
11427e2ab150SChristoph Lameter 
11430ce72d4fSAndrew Morton 	tmp = *from;
11447e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11457e2ab150SChristoph Lameter 		int s, d;
1146b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11477e2ab150SChristoph Lameter 		int dest = 0;
11487e2ab150SChristoph Lameter 
11497e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11504a5b18ccSLarry Woodman 
11514a5b18ccSLarry Woodman 			/*
11524a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11534a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11544a5b18ccSLarry Woodman 			 * threads and memory areas.
11554a5b18ccSLarry Woodman 			 *
11564a5b18ccSLarry Woodman 			 * However if the number of source nodes is not equal to
11574a5b18ccSLarry Woodman 			 * the number of destination nodes we can not preserve
11584a5b18ccSLarry Woodman 			 * this node relative relationship.  In that case, skip
11594a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11604a5b18ccSLarry Woodman 			 * mask.
11614a5b18ccSLarry Woodman 			 *
11624a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11634a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11644a5b18ccSLarry Woodman 			 */
11654a5b18ccSLarry Woodman 
11660ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11670ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11684a5b18ccSLarry Woodman 				continue;
11694a5b18ccSLarry Woodman 
11700ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11717e2ab150SChristoph Lameter 			if (s == d)
11727e2ab150SChristoph Lameter 				continue;
11737e2ab150SChristoph Lameter 
11747e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11757e2ab150SChristoph Lameter 			dest = d;
11767e2ab150SChristoph Lameter 
11777e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11787e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11797e2ab150SChristoph Lameter 				break;
11807e2ab150SChristoph Lameter 		}
1181b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11827e2ab150SChristoph Lameter 			break;
11837e2ab150SChristoph Lameter 
11847e2ab150SChristoph Lameter 		node_clear(source, tmp);
11857e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11867e2ab150SChristoph Lameter 		if (err > 0)
11877e2ab150SChristoph Lameter 			busy += err;
11887e2ab150SChristoph Lameter 		if (err < 0)
11897e2ab150SChristoph Lameter 			break;
119039743889SChristoph Lameter 	}
1191d8ed45c5SMichel Lespinasse 	mmap_read_unlock(mm);
1192d479960eSMinchan Kim 
1193361a2a22SMinchan Kim 	lru_cache_enable();
11947e2ab150SChristoph Lameter 	if (err < 0)
11957e2ab150SChristoph Lameter 		return err;
11967e2ab150SChristoph Lameter 	return busy;
1197b20a3503SChristoph Lameter 
119839743889SChristoph Lameter }
119939743889SChristoph Lameter 
12003ad33b24SLee Schermerhorn /*
12013ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1202d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
12033ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
12043ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
12053ad33b24SLee Schermerhorn  * is in virtual address order.
12063ad33b24SLee Schermerhorn  */
1207666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
120895a402c3SChristoph Lameter {
1209d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
12103f649ab7SKees Cook 	unsigned long address;
121195a402c3SChristoph Lameter 
1212d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
12133ad33b24SLee Schermerhorn 	while (vma) {
12143ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
12153ad33b24SLee Schermerhorn 		if (address != -EFAULT)
12163ad33b24SLee Schermerhorn 			break;
12173ad33b24SLee Schermerhorn 		vma = vma->vm_next;
12183ad33b24SLee Schermerhorn 	}
12193ad33b24SLee Schermerhorn 
122011c731e8SWanpeng Li 	if (PageHuge(page)) {
1221389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1222389c8178SMichal Hocko 				vma, address);
122394723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1224c8633798SNaoya Horiguchi 		struct page *thp;
1225c8633798SNaoya Horiguchi 
122619deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
122719deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1228c8633798SNaoya Horiguchi 		if (!thp)
1229c8633798SNaoya Horiguchi 			return NULL;
1230c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1231c8633798SNaoya Horiguchi 		return thp;
123211c731e8SWanpeng Li 	}
123311c731e8SWanpeng Li 	/*
123411c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
123511c731e8SWanpeng Li 	 */
12360f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
12370f556856SMichal Hocko 			vma, address);
123895a402c3SChristoph Lameter }
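
/*
 * Note (illustrative, not from the original source): new_page() is the
 * allocation callback that do_mbind() passes to migrate_pages() below,
 * with @start as the private argument, e.g.
 *
 *	nr_failed = migrate_pages(&pagelist, new_page, NULL,
 *				  start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
 *
 * which is why it only needs the faulting page and the start address to
 * find a suitable vma and policy.
 */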
1239b20a3503SChristoph Lameter #else
1240b20a3503SChristoph Lameter 
1241a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1242b20a3503SChristoph Lameter 				unsigned long flags)
1243b20a3503SChristoph Lameter {
1244a53190a4SYang Shi 	return -EIO;
1245b20a3503SChristoph Lameter }
1246b20a3503SChristoph Lameter 
12470ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12480ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1249b20a3503SChristoph Lameter {
1250b20a3503SChristoph Lameter 	return -ENOSYS;
1251b20a3503SChristoph Lameter }
125295a402c3SChristoph Lameter 
1253666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
125495a402c3SChristoph Lameter {
125595a402c3SChristoph Lameter 	return NULL;
125695a402c3SChristoph Lameter }
1257b20a3503SChristoph Lameter #endif
1258b20a3503SChristoph Lameter 
1259dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1260028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1261028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12626ce3c4c0SChristoph Lameter {
12636ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12646ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12656ce3c4c0SChristoph Lameter 	unsigned long end;
12666ce3c4c0SChristoph Lameter 	int err;
1267d8835445SYang Shi 	int ret;
12686ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12696ce3c4c0SChristoph Lameter 
1270b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12716ce3c4c0SChristoph Lameter 		return -EINVAL;
127274c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12736ce3c4c0SChristoph Lameter 		return -EPERM;
12746ce3c4c0SChristoph Lameter 
12756ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12766ce3c4c0SChristoph Lameter 		return -EINVAL;
12776ce3c4c0SChristoph Lameter 
12786ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12796ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12806ce3c4c0SChristoph Lameter 
12816ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12826ce3c4c0SChristoph Lameter 	end = start + len;
12836ce3c4c0SChristoph Lameter 
12846ce3c4c0SChristoph Lameter 	if (end < start)
12856ce3c4c0SChristoph Lameter 		return -EINVAL;
12866ce3c4c0SChristoph Lameter 	if (end == start)
12876ce3c4c0SChristoph Lameter 		return 0;
12886ce3c4c0SChristoph Lameter 
1289028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12906ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12916ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12926ce3c4c0SChristoph Lameter 
1293b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1294b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1295b24f53a0SLee Schermerhorn 
12966ce3c4c0SChristoph Lameter 	/*
12976ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operation
12986ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12996ce3c4c0SChristoph Lameter 	 */
13006ce3c4c0SChristoph Lameter 	if (!new)
13016ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
13026ce3c4c0SChristoph Lameter 
1303028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1304028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
130500ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
13066ce3c4c0SChristoph Lameter 
13070aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
13080aedadf9SChristoph Lameter 
1309361a2a22SMinchan Kim 		lru_cache_disable();
13100aedadf9SChristoph Lameter 	}
13114bfc4495SKAMEZAWA Hiroyuki 	{
13124bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
13134bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
1314d8ed45c5SMichel Lespinasse 			mmap_write_lock(mm);
13154bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
13164bfc4495SKAMEZAWA Hiroyuki 			if (err)
1317d8ed45c5SMichel Lespinasse 				mmap_write_unlock(mm);
13184bfc4495SKAMEZAWA Hiroyuki 		} else
13194bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
13204bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
13214bfc4495SKAMEZAWA Hiroyuki 	}
1322b05ca738SKOSAKI Motohiro 	if (err)
1323b05ca738SKOSAKI Motohiro 		goto mpol_out;
1324b05ca738SKOSAKI Motohiro 
1325d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
13266ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1327d8835445SYang Shi 
1328d8835445SYang Shi 	if (ret < 0) {
1329a85dfc30SYang Shi 		err = ret;
1330d8835445SYang Shi 		goto up_out;
1331d8835445SYang Shi 	}
1332d8835445SYang Shi 
13339d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
13347e2ab150SChristoph Lameter 
1335b24f53a0SLee Schermerhorn 	if (!err) {
1336b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1337b24f53a0SLee Schermerhorn 
1338cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1339b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1340d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
13415ac95884SYang Shi 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1342cf608ac1SMinchan Kim 			if (nr_failed)
134374060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1344cf608ac1SMinchan Kim 		}
13456ce3c4c0SChristoph Lameter 
1346d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13476ce3c4c0SChristoph Lameter 			err = -EIO;
1348a85dfc30SYang Shi 	} else {
1349d8835445SYang Shi up_out:
1350a85dfc30SYang Shi 		if (!list_empty(&pagelist))
1351a85dfc30SYang Shi 			putback_movable_pages(&pagelist);
1352a85dfc30SYang Shi 	}
1353a85dfc30SYang Shi 
1354d8ed45c5SMichel Lespinasse 	mmap_write_unlock(mm);
1355b05ca738SKOSAKI Motohiro mpol_out:
1356f0be3d32SLee Schermerhorn 	mpol_put(new);
1357d479960eSMinchan Kim 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1358361a2a22SMinchan Kim 		lru_cache_enable();
13596ce3c4c0SChristoph Lameter 	return err;
13606ce3c4c0SChristoph Lameter }
13616ce3c4c0SChristoph Lameter 
136239743889SChristoph Lameter /*
13638bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13648bccd85fSChristoph Lameter  */
1365e130242dSArnd Bergmann static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1366e130242dSArnd Bergmann 		      unsigned long maxnode)
1367e130242dSArnd Bergmann {
1368e130242dSArnd Bergmann 	unsigned long nlongs = BITS_TO_LONGS(maxnode);
1369e130242dSArnd Bergmann 	int ret;
1370e130242dSArnd Bergmann 
1371e130242dSArnd Bergmann 	if (in_compat_syscall())
1372e130242dSArnd Bergmann 		ret = compat_get_bitmap(mask,
1373e130242dSArnd Bergmann 					(const compat_ulong_t __user *)nmask,
1374e130242dSArnd Bergmann 					maxnode);
1375e130242dSArnd Bergmann 	else
1376e130242dSArnd Bergmann 		ret = copy_from_user(mask, nmask,
1377e130242dSArnd Bergmann 				     nlongs * sizeof(unsigned long));
1378e130242dSArnd Bergmann 
1379e130242dSArnd Bergmann 	if (ret)
1380e130242dSArnd Bergmann 		return -EFAULT;
1381e130242dSArnd Bergmann 
1382e130242dSArnd Bergmann 	if (maxnode % BITS_PER_LONG)
1383e130242dSArnd Bergmann 		mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1384e130242dSArnd Bergmann 
1385e130242dSArnd Bergmann 	return 0;
1386e130242dSArnd Bergmann }
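
/*
 * Worked example (illustrative): on a 64-bit kernel, get_bitmap() with
 * maxnode == 10 copies BITS_TO_LONGS(10) == 1 word from user space and,
 * because 10 is not a multiple of BITS_PER_LONG, masks the copied word
 * with (1UL << 10) - 1 so that only bits 0-9 survive.
 */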
13878bccd85fSChristoph Lameter 
13888bccd85fSChristoph Lameter /* Copy a node mask from user space. */
138939743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13908bccd85fSChristoph Lameter 		     unsigned long maxnode)
13918bccd85fSChristoph Lameter {
13928bccd85fSChristoph Lameter 	--maxnode;
13938bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13948bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13958bccd85fSChristoph Lameter 		return 0;
1396a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1397636f13c1SChris Wright 		return -EINVAL;
13988bccd85fSChristoph Lameter 
139956521e7aSYisheng Xie 	/*
140056521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
1401e130242dSArnd Bergmann 	 * that the unsupported part is all zero, one word at a time,
1402e130242dSArnd Bergmann 	 * starting at the end.
140356521e7aSYisheng Xie 	 */
1404e130242dSArnd Bergmann 	while (maxnode > MAX_NUMNODES) {
1405e130242dSArnd Bergmann 		unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1406e130242dSArnd Bergmann 		unsigned long t;
14078bccd85fSChristoph Lameter 
1408e130242dSArnd Bergmann 		if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
140956521e7aSYisheng Xie 			return -EFAULT;
1410e130242dSArnd Bergmann 
1411e130242dSArnd Bergmann 		if (maxnode - bits >= MAX_NUMNODES) {
1412e130242dSArnd Bergmann 			maxnode -= bits;
1413e130242dSArnd Bergmann 		} else {
1414e130242dSArnd Bergmann 			maxnode = MAX_NUMNODES;
1415e130242dSArnd Bergmann 			t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1416e130242dSArnd Bergmann 		}
1417e130242dSArnd Bergmann 		if (t)
141856521e7aSYisheng Xie 			return -EINVAL;
141956521e7aSYisheng Xie 	}
142056521e7aSYisheng Xie 
1421e130242dSArnd Bergmann 	return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
14228bccd85fSChristoph Lameter }
14238bccd85fSChristoph Lameter 
14248bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
14258bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
14268bccd85fSChristoph Lameter 			      nodemask_t *nodes)
14278bccd85fSChristoph Lameter {
14288bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1429050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1430e130242dSArnd Bergmann 	bool compat = in_compat_syscall();
1431e130242dSArnd Bergmann 
1432e130242dSArnd Bergmann 	if (compat)
1433e130242dSArnd Bergmann 		nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
14348bccd85fSChristoph Lameter 
14358bccd85fSChristoph Lameter 	if (copy > nbytes) {
14368bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
14378bccd85fSChristoph Lameter 			return -EINVAL;
14388bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
14398bccd85fSChristoph Lameter 			return -EFAULT;
14408bccd85fSChristoph Lameter 		copy = nbytes;
1441e130242dSArnd Bergmann 		maxnode = nr_node_ids;
14428bccd85fSChristoph Lameter 	}
1443e130242dSArnd Bergmann 
1444e130242dSArnd Bergmann 	if (compat)
1445e130242dSArnd Bergmann 		return compat_put_bitmap((compat_ulong_t __user *)mask,
1446e130242dSArnd Bergmann 					 nodes_addr(*nodes), maxnode);
1447e130242dSArnd Bergmann 
14488bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
14498bccd85fSChristoph Lameter }
14508bccd85fSChristoph Lameter 
145195837924SFeng Tang /* Basic parameter sanity check used by both mbind() and set_mempolicy() */
145295837924SFeng Tang static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
145395837924SFeng Tang {
145495837924SFeng Tang 	*flags = *mode & MPOL_MODE_FLAGS;
145595837924SFeng Tang 	*mode &= ~MPOL_MODE_FLAGS;
1456b27abaccSDave Hansen 
1457a38a59fdSBen Widawsky 	if ((unsigned int)(*mode) >=  MPOL_MAX)
145895837924SFeng Tang 		return -EINVAL;
145995837924SFeng Tang 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
146095837924SFeng Tang 		return -EINVAL;
146195837924SFeng Tang 
146295837924SFeng Tang 	return 0;
146395837924SFeng Tang }
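
/*
 * Example (illustrative): a caller of sanitize_mpol_flags() passing
 * MPOL_INTERLEAVE | MPOL_F_STATIC_NODES ends up with
 * *mode == MPOL_INTERLEAVE and *flags == MPOL_F_STATIC_NODES, while
 * combining MPOL_F_STATIC_NODES with MPOL_F_RELATIVE_NODES is rejected
 * with -EINVAL.
 */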
146495837924SFeng Tang 
1465e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1466e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1467e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14688bccd85fSChristoph Lameter {
1469028fec41SDavid Rientjes 	unsigned short mode_flags;
147095837924SFeng Tang 	nodemask_t nodes;
147195837924SFeng Tang 	int lmode = mode;
147295837924SFeng Tang 	int err;
14738bccd85fSChristoph Lameter 
1474057d3389SAndrey Konovalov 	start = untagged_addr(start);
147595837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
147695837924SFeng Tang 	if (err)
147795837924SFeng Tang 		return err;
147895837924SFeng Tang 
14798bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14808bccd85fSChristoph Lameter 	if (err)
14818bccd85fSChristoph Lameter 		return err;
148295837924SFeng Tang 
148395837924SFeng Tang 	return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
14848bccd85fSChristoph Lameter }
14858bccd85fSChristoph Lameter 
1486e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1487e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1488e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1489e7dc9ad6SDominik Brodowski {
1490e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1491e7dc9ad6SDominik Brodowski }
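
/*
 * Illustrative user-space usage (assumed, not part of this file): binding
 * a freshly mapped region to node 1 and moving any misplaced pages could
 * look like
 *
 *	unsigned long nodemask = 1UL << 1;
 *
 *	mbind(addr, length, MPOL_BIND, &nodemask,
 *	      sizeof(nodemask) * 8, MPOL_MF_MOVE);
 *
 * which ends up in kernel_mbind() and then do_mbind() above.
 */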
1492e7dc9ad6SDominik Brodowski 
14938bccd85fSChristoph Lameter /* Set the process memory policy */
1494af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1495af03c4acSDominik Brodowski 				 unsigned long maxnode)
14968bccd85fSChristoph Lameter {
149795837924SFeng Tang 	unsigned short mode_flags;
14988bccd85fSChristoph Lameter 	nodemask_t nodes;
149995837924SFeng Tang 	int lmode = mode;
150095837924SFeng Tang 	int err;
15018bccd85fSChristoph Lameter 
150295837924SFeng Tang 	err = sanitize_mpol_flags(&lmode, &mode_flags);
150395837924SFeng Tang 	if (err)
150495837924SFeng Tang 		return err;
150595837924SFeng Tang 
15068bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
15078bccd85fSChristoph Lameter 	if (err)
15088bccd85fSChristoph Lameter 		return err;
150995837924SFeng Tang 
151095837924SFeng Tang 	return do_set_mempolicy(lmode, mode_flags, &nodes);
15118bccd85fSChristoph Lameter }
15128bccd85fSChristoph Lameter 
1513af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1514af03c4acSDominik Brodowski 		unsigned long, maxnode)
1515af03c4acSDominik Brodowski {
1516af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1517af03c4acSDominik Brodowski }
1518af03c4acSDominik Brodowski 
1519b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1520b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1521b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
152239743889SChristoph Lameter {
1523596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
152439743889SChristoph Lameter 	struct task_struct *task;
152539743889SChristoph Lameter 	nodemask_t task_nodes;
152639743889SChristoph Lameter 	int err;
1527596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1528596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1529596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
153039743889SChristoph Lameter 
1531596d7cfaSKOSAKI Motohiro 	if (!scratch)
1532596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
153339743889SChristoph Lameter 
1534596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1535596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1536596d7cfaSKOSAKI Motohiro 
1537596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
153839743889SChristoph Lameter 	if (err)
1539596d7cfaSKOSAKI Motohiro 		goto out;
1540596d7cfaSKOSAKI Motohiro 
1541596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1542596d7cfaSKOSAKI Motohiro 	if (err)
1543596d7cfaSKOSAKI Motohiro 		goto out;
154439743889SChristoph Lameter 
154539743889SChristoph Lameter 	/* Find the mm_struct */
154655cfaa3cSZeng Zhaoming 	rcu_read_lock();
1547228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
154839743889SChristoph Lameter 	if (!task) {
154955cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1550596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1551596d7cfaSKOSAKI Motohiro 		goto out;
155239743889SChristoph Lameter 	}
15533268c63eSChristoph Lameter 	get_task_struct(task);
155439743889SChristoph Lameter 
1555596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
155639743889SChristoph Lameter 
155739743889SChristoph Lameter 	/*
155831367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
155931367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
156039743889SChristoph Lameter 	 */
156131367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1562c69e8d9cSDavid Howells 		rcu_read_unlock();
156339743889SChristoph Lameter 		err = -EPERM;
15643268c63eSChristoph Lameter 		goto out_put;
156539743889SChristoph Lameter 	}
1566c69e8d9cSDavid Howells 	rcu_read_unlock();
156739743889SChristoph Lameter 
156839743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
156939743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1570596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
157139743889SChristoph Lameter 		err = -EPERM;
15723268c63eSChristoph Lameter 		goto out_put;
157339743889SChristoph Lameter 	}
157439743889SChristoph Lameter 
15750486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15760486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15770486a38bSYisheng Xie 	if (nodes_empty(*new))
15783268c63eSChristoph Lameter 		goto out_put;
15790486a38bSYisheng Xie 
158086c3a764SDavid Quigley 	err = security_task_movememory(task);
158186c3a764SDavid Quigley 	if (err)
15823268c63eSChristoph Lameter 		goto out_put;
158386c3a764SDavid Quigley 
15843268c63eSChristoph Lameter 	mm = get_task_mm(task);
15853268c63eSChristoph Lameter 	put_task_struct(task);
1586f2a9ef88SSasha Levin 
1587f2a9ef88SSasha Levin 	if (!mm) {
1588f2a9ef88SSasha Levin 		err = -EINVAL;
1589f2a9ef88SSasha Levin 		goto out;
1590f2a9ef88SSasha Levin 	}
1591f2a9ef88SSasha Levin 
1592596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
159374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15943268c63eSChristoph Lameter 
159539743889SChristoph Lameter 	mmput(mm);
15963268c63eSChristoph Lameter out:
1597596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1598596d7cfaSKOSAKI Motohiro 
159939743889SChristoph Lameter 	return err;
16003268c63eSChristoph Lameter 
16013268c63eSChristoph Lameter out_put:
16023268c63eSChristoph Lameter 	put_task_struct(task);
16033268c63eSChristoph Lameter 	goto out;
16043268c63eSChristoph Lameter 
160539743889SChristoph Lameter }
160639743889SChristoph Lameter 
1607b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1608b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1609b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1610b6e9b0baSDominik Brodowski {
1611b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1612b6e9b0baSDominik Brodowski }
1613b6e9b0baSDominik Brodowski 
161439743889SChristoph Lameter 
16158bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1616af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1617af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1618af03c4acSDominik Brodowski 				unsigned long maxnode,
1619af03c4acSDominik Brodowski 				unsigned long addr,
1620af03c4acSDominik Brodowski 				unsigned long flags)
16218bccd85fSChristoph Lameter {
1622dbcb0f19SAdrian Bunk 	int err;
16233f649ab7SKees Cook 	int pval;
16248bccd85fSChristoph Lameter 	nodemask_t nodes;
16258bccd85fSChristoph Lameter 
1626050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
16278bccd85fSChristoph Lameter 		return -EINVAL;
16288bccd85fSChristoph Lameter 
16294605f057SWenchao Hao 	addr = untagged_addr(addr);
16304605f057SWenchao Hao 
16318bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
16328bccd85fSChristoph Lameter 
16338bccd85fSChristoph Lameter 	if (err)
16348bccd85fSChristoph Lameter 		return err;
16358bccd85fSChristoph Lameter 
16368bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
16378bccd85fSChristoph Lameter 		return -EFAULT;
16388bccd85fSChristoph Lameter 
16398bccd85fSChristoph Lameter 	if (nmask)
16408bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
16418bccd85fSChristoph Lameter 
16428bccd85fSChristoph Lameter 	return err;
16438bccd85fSChristoph Lameter }
16448bccd85fSChristoph Lameter 
1645af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1646af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1647af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1648af03c4acSDominik Brodowski {
1649af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1650af03c4acSDominik Brodowski }
1651af03c4acSDominik Brodowski 
165220ca87f2SLi Xinhai bool vma_migratable(struct vm_area_struct *vma)
165320ca87f2SLi Xinhai {
165420ca87f2SLi Xinhai 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
165520ca87f2SLi Xinhai 		return false;
165620ca87f2SLi Xinhai 
165720ca87f2SLi Xinhai 	/*
165820ca87f2SLi Xinhai 	 * DAX device mappings require predictable access latency, so avoid
165920ca87f2SLi Xinhai 	 * incurring periodic faults.
166020ca87f2SLi Xinhai 	 */
166120ca87f2SLi Xinhai 	if (vma_is_dax(vma))
166220ca87f2SLi Xinhai 		return false;
166320ca87f2SLi Xinhai 
166420ca87f2SLi Xinhai 	if (is_vm_hugetlb_page(vma) &&
166520ca87f2SLi Xinhai 		!hugepage_migration_supported(hstate_vma(vma)))
166620ca87f2SLi Xinhai 		return false;
166720ca87f2SLi Xinhai 
166820ca87f2SLi Xinhai 	/*
166920ca87f2SLi Xinhai 	 * Migration allocates pages in the highest zone. If we cannot
167020ca87f2SLi Xinhai 	 * do so then migration (at least from node to node) is not
167120ca87f2SLi Xinhai 	 * possible.
167220ca87f2SLi Xinhai 	 */
167320ca87f2SLi Xinhai 	if (vma->vm_file &&
167420ca87f2SLi Xinhai 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
167520ca87f2SLi Xinhai 			< policy_zone)
167620ca87f2SLi Xinhai 		return false;
167720ca87f2SLi Xinhai 	return true;
167820ca87f2SLi Xinhai }
167920ca87f2SLi Xinhai 
168074d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
168174d2c3a0SOleg Nesterov 						unsigned long addr)
16821da177e4SLinus Torvalds {
16838d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
16841da177e4SLinus Torvalds 
16851da177e4SLinus Torvalds 	if (vma) {
1686480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
16878d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
168800442ad0SMel Gorman 		} else if (vma->vm_policy) {
16891da177e4SLinus Torvalds 			pol = vma->vm_policy;
169000442ad0SMel Gorman 
169100442ad0SMel Gorman 			/*
169200442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
169300442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
169400442ad0SMel Gorman 			 * count on these policies which will be dropped by
169500442ad0SMel Gorman 			 * mpol_cond_put() later
169600442ad0SMel Gorman 			 */
169700442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
169800442ad0SMel Gorman 				mpol_get(pol);
169900442ad0SMel Gorman 		}
17001da177e4SLinus Torvalds 	}
1701f15ca78eSOleg Nesterov 
170274d2c3a0SOleg Nesterov 	return pol;
170374d2c3a0SOleg Nesterov }
170474d2c3a0SOleg Nesterov 
170574d2c3a0SOleg Nesterov /*
1706dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
170774d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
170874d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
170974d2c3a0SOleg Nesterov  *
171074d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1711dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
171274d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
171374d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
171474d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
171574d2c3a0SOleg Nesterov  * extra reference for shared policies.
171674d2c3a0SOleg Nesterov  */
1717ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1718dd6eecb9SOleg Nesterov 						unsigned long addr)
171974d2c3a0SOleg Nesterov {
172074d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
172174d2c3a0SOleg Nesterov 
17228d90274bSOleg Nesterov 	if (!pol)
1723dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17248d90274bSOleg Nesterov 
17251da177e4SLinus Torvalds 	return pol;
17261da177e4SLinus Torvalds }
17271da177e4SLinus Torvalds 
17286b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1729fc314724SMel Gorman {
17306b6482bbSOleg Nesterov 	struct mempolicy *pol;
1731f15ca78eSOleg Nesterov 
1732fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1733fc314724SMel Gorman 		bool ret = false;
1734fc314724SMel Gorman 
1735fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1736fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1737fc314724SMel Gorman 			ret = true;
1738fc314724SMel Gorman 		mpol_cond_put(pol);
1739fc314724SMel Gorman 
1740fc314724SMel Gorman 		return ret;
17418d90274bSOleg Nesterov 	}
17428d90274bSOleg Nesterov 
1743fc314724SMel Gorman 	pol = vma->vm_policy;
17448d90274bSOleg Nesterov 	if (!pol)
17456b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1746fc314724SMel Gorman 
1747fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1748fc314724SMel Gorman }
1749fc314724SMel Gorman 
1750d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1751d3eb1570SLai Jiangshan {
1752d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1753d3eb1570SLai Jiangshan 
1754d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1755d3eb1570SLai Jiangshan 
1756d3eb1570SLai Jiangshan 	/*
1757269fbe72SBen Widawsky 	 * if policy->nodes has movable memory only,
1758d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1759d3eb1570SLai Jiangshan 	 *
1760269fbe72SBen Widawsky 	 * policy->nodes is intersect with node_states[N_MEMORY].
1761f0953a1bSIngo Molnar 	 * so if the following test fails, it implies
1762269fbe72SBen Widawsky 	 * policy->nodes has movable memory only.
1763d3eb1570SLai Jiangshan 	 */
1764269fbe72SBen Widawsky 	if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1765d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1766d3eb1570SLai Jiangshan 
1767d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1768d3eb1570SLai Jiangshan }
1769d3eb1570SLai Jiangshan 
177052cd3b07SLee Schermerhorn /*
177152cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
177252cd3b07SLee Schermerhorn  * page allocation
177352cd3b07SLee Schermerhorn  */
17748ca39e68SMuchun Song nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
177519770b32SMel Gorman {
1776b27abaccSDave Hansen 	int mode = policy->mode;
1777b27abaccSDave Hansen 
177819770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1779b27abaccSDave Hansen 	if (unlikely(mode == MPOL_BIND) &&
1780d3eb1570SLai Jiangshan 		apply_policy_zone(policy, gfp_zone(gfp)) &&
1781269fbe72SBen Widawsky 		cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1782269fbe72SBen Widawsky 		return &policy->nodes;
178319770b32SMel Gorman 
1784b27abaccSDave Hansen 	if (mode == MPOL_PREFERRED_MANY)
1785b27abaccSDave Hansen 		return &policy->nodes;
1786b27abaccSDave Hansen 
178719770b32SMel Gorman 	return NULL;
178819770b32SMel Gorman }
178919770b32SMel Gorman 
1790b27abaccSDave Hansen /*
1791b27abaccSDave Hansen  * Return the preferred node id for 'prefer' mempolicy, and return
1792b27abaccSDave Hansen  * the given id for all other policies.
1793b27abaccSDave Hansen  *
1794b27abaccSDave Hansen  * policy_node() is always coupled with policy_nodemask(), which
1795b27abaccSDave Hansen  * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1796b27abaccSDave Hansen  */
1797f8fd5253SWei Yang static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
17981da177e4SLinus Torvalds {
17997858d7bcSFeng Tang 	if (policy->mode == MPOL_PREFERRED) {
1800269fbe72SBen Widawsky 		nd = first_node(policy->nodes);
18017858d7bcSFeng Tang 	} else {
180219770b32SMel Gorman 		/*
18036d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18046d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18056d840958SMichal Hocko 		 * requested node and not break the policy.
180619770b32SMel Gorman 		 */
18076d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18081da177e4SLinus Torvalds 	}
18096d840958SMichal Hocko 
181004ec6264SVlastimil Babka 	return nd;
18111da177e4SLinus Torvalds }
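
/*
 * Illustrative only: the two helpers above are meant to be used together,
 * mirroring what alloc_pages() does later in this file:
 *
 *	page = __alloc_pages(gfp, order,
 *			     policy_node(gfp, pol, numa_node_id()),
 *			     policy_nodemask(gfp, pol));
 *
 * 'bind' and 'prefer-many' constrain the nodemask, while 'preferred'
 * only redirects the starting node and leaves the nodemask NULL.
 */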
18121da177e4SLinus Torvalds 
18131da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18141da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18151da177e4SLinus Torvalds {
181645816682SVlastimil Babka 	unsigned next;
18171da177e4SLinus Torvalds 	struct task_struct *me = current;
18181da177e4SLinus Torvalds 
1819269fbe72SBen Widawsky 	next = next_node_in(me->il_prev, policy->nodes);
1820f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
182145816682SVlastimil Babka 		me->il_prev = next;
182245816682SVlastimil Babka 	return next;
18231da177e4SLinus Torvalds }
18241da177e4SLinus Torvalds 
1825dc85da15SChristoph Lameter /*
1826dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1827dc85da15SChristoph Lameter  * next slab entry.
1828dc85da15SChristoph Lameter  */
18292a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1830dc85da15SChristoph Lameter {
1831e7b691b0SAndi Kleen 	struct mempolicy *policy;
18322a389610SDavid Rientjes 	int node = numa_mem_id();
1833e7b691b0SAndi Kleen 
183438b031ddSVasily Averin 	if (!in_task())
18352a389610SDavid Rientjes 		return node;
1836e7b691b0SAndi Kleen 
1837e7b691b0SAndi Kleen 	policy = current->mempolicy;
18387858d7bcSFeng Tang 	if (!policy)
18392a389610SDavid Rientjes 		return node;
1840765c4507SChristoph Lameter 
1841bea904d5SLee Schermerhorn 	switch (policy->mode) {
1842bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1843269fbe72SBen Widawsky 		return first_node(policy->nodes);
1844bea904d5SLee Schermerhorn 
1845dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1846dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1847dc85da15SChristoph Lameter 
1848b27abaccSDave Hansen 	case MPOL_BIND:
1849b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
1850b27abaccSDave Hansen 	{
1851c33d6c06SMel Gorman 		struct zoneref *z;
1852c33d6c06SMel Gorman 
1853dc85da15SChristoph Lameter 		/*
1854dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1855dc85da15SChristoph Lameter 		 * first node.
1856dc85da15SChristoph Lameter 		 */
185719770b32SMel Gorman 		struct zonelist *zonelist;
185819770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1859c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1860c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1861269fbe72SBen Widawsky 							&policy->nodes);
1862c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1863dd1a239fSMel Gorman 	}
18647858d7bcSFeng Tang 	case MPOL_LOCAL:
18657858d7bcSFeng Tang 		return node;
1866dc85da15SChristoph Lameter 
1867dc85da15SChristoph Lameter 	default:
1868bea904d5SLee Schermerhorn 		BUG();
1869dc85da15SChristoph Lameter 	}
1870dc85da15SChristoph Lameter }
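
/*
 * Illustrative summary (not from the original source): for a task running
 * on node 0 with an MPOL_INTERLEAVE policy over nodes 0-3,
 * mempolicy_slab_node() round-robins 0, 1, 2, 3, 0, ... across calls,
 * whereas MPOL_BIND to nodes 2-3 picks the first node of that set found
 * in the local node's fallback zonelist, and MPOL_LOCAL simply returns
 * the local node.
 */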
1871dc85da15SChristoph Lameter 
1872fee83b3aSAndrew Morton /*
1873fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1874269fbe72SBen Widawsky  * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1875fee83b3aSAndrew Morton  * number of present nodes.
1876fee83b3aSAndrew Morton  */
187798c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
18781da177e4SLinus Torvalds {
1879276aeee1Syanghui 	nodemask_t nodemask = pol->nodes;
1880276aeee1Syanghui 	unsigned int target, nnodes;
1881fee83b3aSAndrew Morton 	int i;
1882fee83b3aSAndrew Morton 	int nid;
1883276aeee1Syanghui 	/*
1884276aeee1Syanghui 	 * The barrier will stabilize the nodemask in a register or on
1885276aeee1Syanghui 	 * the stack so that it will stop changing under the code.
1886276aeee1Syanghui 	 *
1887276aeee1Syanghui 	 * Between first_node() and next_node(), pol->nodes could be changed
1888276aeee1Syanghui 	 * by other threads. So we put pol->nodes in a local stack.
1889276aeee1Syanghui 	 */
1890276aeee1Syanghui 	barrier();
18911da177e4SLinus Torvalds 
1892276aeee1Syanghui 	nnodes = nodes_weight(nodemask);
1893f5b087b5SDavid Rientjes 	if (!nnodes)
1894f5b087b5SDavid Rientjes 		return numa_node_id();
1895fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1896276aeee1Syanghui 	nid = first_node(nodemask);
1897fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1898276aeee1Syanghui 		nid = next_node(nid, nodemask);
18991da177e4SLinus Torvalds 	return nid;
19001da177e4SLinus Torvalds }
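
/*
 * Worked example (illustrative): with pol->nodes = {0,2,3} and n = 7,
 * nnodes is 3, target is 7 % 3 == 1, and starting from first_node() == 0
 * a single next_node() step lands on node 2, so offset 7 interleaves
 * onto node 2.
 */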
19011da177e4SLinus Torvalds 
19025da7ca86SChristoph Lameter /* Determine a node number for interleave */
19035da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19045da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19055da7ca86SChristoph Lameter {
19065da7ca86SChristoph Lameter 	if (vma) {
19075da7ca86SChristoph Lameter 		unsigned long off;
19085da7ca86SChristoph Lameter 
19093b98b087SNishanth Aravamudan 		/*
19103b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19113b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19123b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19133b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19143b98b087SNishanth Aravamudan 		 * a useful offset.
19153b98b087SNishanth Aravamudan 		 */
19163b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19173b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19185da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
191998c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19205da7ca86SChristoph Lameter 	} else
19215da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19225da7ca86SChristoph Lameter }
19235da7ca86SChristoph Lameter 
192400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1925480eccf9SLee Schermerhorn /*
192604ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1927b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1928b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1929b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1930b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1931b27abaccSDave Hansen  * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1932480eccf9SLee Schermerhorn  *
193304ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
193452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
1935b27abaccSDave Hansen  * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1936b27abaccSDave Hansen  * to the mempolicy's @nodemask for filtering the zonelist.
1937c0ff7453SMiao Xie  *
1938d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1939480eccf9SLee Schermerhorn  */
194004ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
194104ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
19425da7ca86SChristoph Lameter {
194304ec6264SVlastimil Babka 	int nid;
1944b27abaccSDave Hansen 	int mode;
19455da7ca86SChristoph Lameter 
1946dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
1947b27abaccSDave Hansen 	*nodemask = NULL;
1948b27abaccSDave Hansen 	mode = (*mpol)->mode;
19495da7ca86SChristoph Lameter 
1950b27abaccSDave Hansen 	if (unlikely(mode == MPOL_INTERLEAVE)) {
195104ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
195204ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
195352cd3b07SLee Schermerhorn 	} else {
195404ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
1955b27abaccSDave Hansen 		if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
1956269fbe72SBen Widawsky 			*nodemask = &(*mpol)->nodes;
1957480eccf9SLee Schermerhorn 	}
195804ec6264SVlastimil Babka 	return nid;
19595da7ca86SChristoph Lameter }
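
/*
 * Typical caller pattern (illustrative sketch, assumed rather than quoted
 * from mm/hugetlb.c):
 *
 *	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... allocate the huge page from nid, filtered by nodemask ...
 *	mpol_cond_put(mpol);
 *
 * The mpol_cond_put() drops the extra reference taken for shared
 * policies, as described above.
 */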
196006808b08SLee Schermerhorn 
196106808b08SLee Schermerhorn /*
196206808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
196306808b08SLee Schermerhorn  *
196406808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
196506808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
196606808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
196706808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
196806808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
196906808b08SLee Schermerhorn  * of non-default mempolicy.
197006808b08SLee Schermerhorn  *
197106808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
197206808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
197306808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
197406808b08SLee Schermerhorn  *
197506808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
197606808b08SLee Schermerhorn  */
197706808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
197806808b08SLee Schermerhorn {
197906808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
198006808b08SLee Schermerhorn 
198106808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
198206808b08SLee Schermerhorn 		return false;
198306808b08SLee Schermerhorn 
1984c0ff7453SMiao Xie 	task_lock(current);
198506808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
198606808b08SLee Schermerhorn 	switch (mempolicy->mode) {
198706808b08SLee Schermerhorn 	case MPOL_PREFERRED:
1988b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
198906808b08SLee Schermerhorn 	case MPOL_BIND:
199006808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
1991269fbe72SBen Widawsky 		*mask = mempolicy->nodes;
199206808b08SLee Schermerhorn 		break;
199306808b08SLee Schermerhorn 
19947858d7bcSFeng Tang 	case MPOL_LOCAL:
1995269fbe72SBen Widawsky 		init_nodemask_of_node(mask, numa_node_id());
19967858d7bcSFeng Tang 		break;
19977858d7bcSFeng Tang 
199806808b08SLee Schermerhorn 	default:
199906808b08SLee Schermerhorn 		BUG();
200006808b08SLee Schermerhorn 	}
2001c0ff7453SMiao Xie 	task_unlock(current);
200206808b08SLee Schermerhorn 
200306808b08SLee Schermerhorn 	return true;
200406808b08SLee Schermerhorn }
200500ac59adSChen, Kenneth W #endif
20065da7ca86SChristoph Lameter 
20076f48d0ebSDavid Rientjes /*
2008b26e517aSFeng Tang  * mempolicy_in_oom_domain
20096f48d0ebSDavid Rientjes  *
2010b26e517aSFeng Tang  * If tsk's mempolicy is "bind", check for intersection between mask and
2011b26e517aSFeng Tang  * the policy nodemask. Otherwise, return true for all other policies
2012b26e517aSFeng Tang  * including "interleave", as a tsk with "interleave" policy may have
2013b26e517aSFeng Tang  * memory allocated from all nodes in the system.
20146f48d0ebSDavid Rientjes  *
20156f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20166f48d0ebSDavid Rientjes  */
2017b26e517aSFeng Tang bool mempolicy_in_oom_domain(struct task_struct *tsk,
20186f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20196f48d0ebSDavid Rientjes {
20206f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20216f48d0ebSDavid Rientjes 	bool ret = true;
20226f48d0ebSDavid Rientjes 
20236f48d0ebSDavid Rientjes 	if (!mask)
20246f48d0ebSDavid Rientjes 		return ret;
2025b26e517aSFeng Tang 
20266f48d0ebSDavid Rientjes 	task_lock(tsk);
20276f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
2028b26e517aSFeng Tang 	if (mempolicy && mempolicy->mode == MPOL_BIND)
2029269fbe72SBen Widawsky 		ret = nodes_intersects(mempolicy->nodes, *mask);
20306f48d0ebSDavid Rientjes 	task_unlock(tsk);
2031b26e517aSFeng Tang 
20326f48d0ebSDavid Rientjes 	return ret;
20336f48d0ebSDavid Rientjes }
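
/*
 * Example (illustrative): a task with MPOL_BIND to nodes 0-1 is outside
 * the OOM domain of a constrained OOM on nodes 2-3 (no intersection, so
 * the function returns false), while the same task under MPOL_INTERLEAVE
 * or MPOL_PREFERRED is always considered in-domain and returns true.
 */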
20346f48d0ebSDavid Rientjes 
20351da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
20361da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2037662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2038662f3a0bSAndi Kleen 					unsigned nid)
20391da177e4SLinus Torvalds {
20401da177e4SLinus Torvalds 	struct page *page;
20411da177e4SLinus Torvalds 
204284172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, nid, NULL);
20434518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
20444518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
20454518085eSKemi Wang 		return page;
2046de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2047de55c8b2SAndrey Ryabinin 		preempt_disable();
2048f19298b9SMel Gorman 		__count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2049de55c8b2SAndrey Ryabinin 		preempt_enable();
2050de55c8b2SAndrey Ryabinin 	}
20511da177e4SLinus Torvalds 	return page;
20521da177e4SLinus Torvalds }
20531da177e4SLinus Torvalds 
20544c54d949SFeng Tang static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
20554c54d949SFeng Tang 						int nid, struct mempolicy *pol)
20564c54d949SFeng Tang {
20574c54d949SFeng Tang 	struct page *page;
20584c54d949SFeng Tang 	gfp_t preferred_gfp;
20594c54d949SFeng Tang 
20604c54d949SFeng Tang 	/*
20614c54d949SFeng Tang 	 * This is a two pass approach. The first pass will only try the
20624c54d949SFeng Tang 	 * preferred nodes but skip the direct reclaim and allow the
20634c54d949SFeng Tang 	 * allocation to fail, while the second pass will try all the
20644c54d949SFeng Tang 	 * nodes in the system.
20654c54d949SFeng Tang 	 */
20664c54d949SFeng Tang 	preferred_gfp = gfp | __GFP_NOWARN;
20674c54d949SFeng Tang 	preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
20684c54d949SFeng Tang 	page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
20694c54d949SFeng Tang 	if (!page)
20704c54d949SFeng Tang 		page = __alloc_pages(gfp, order, numa_node_id(), NULL);
20714c54d949SFeng Tang 
20724c54d949SFeng Tang 	return page;
20734c54d949SFeng Tang }
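
/*
 * Illustrative example of the two passes above: for gfp == GFP_HIGHUSER_MOVABLE
 * the first call drops __GFP_DIRECT_RECLAIM (and __GFP_NOFAIL) and adds
 * __GFP_NOWARN, so it only takes what is readily free on the preferred
 * nodes; only if that fails does the second call retry with the original
 * flags and no nodemask restriction.
 */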
20744c54d949SFeng Tang 
20751da177e4SLinus Torvalds /**
20760bbbc0b3SAndrea Arcangeli  * alloc_pages_vma - Allocate a page for a VMA.
2077eb350739SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
20780bbbc0b3SAndrea Arcangeli  * @order: Order of the GFP allocation.
20791da177e4SLinus Torvalds  * @vma: Pointer to VMA or NULL if not available.
2080eb350739SMatthew Wilcox (Oracle)  * @addr: Virtual address of the allocation.  Must be inside @vma.
2081be97a41bSVlastimil Babka  * @node: Which node to prefer for allocation (modulo policy).
2082eb350739SMatthew Wilcox (Oracle)  * @hugepage: For hugepages try only the preferred node if possible.
20831da177e4SLinus Torvalds  *
2084eb350739SMatthew Wilcox (Oracle)  * Allocate a page for a specific address in @vma, using the appropriate
2085eb350739SMatthew Wilcox (Oracle)  * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2086eb350739SMatthew Wilcox (Oracle)  * of the mm_struct of the VMA to prevent it from going away.  Should be
2087eb350739SMatthew Wilcox (Oracle)  * used for all allocations for pages that will be mapped into user space.
2088eb350739SMatthew Wilcox (Oracle)  *
2089eb350739SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
20901da177e4SLinus Torvalds  */
2091eb350739SMatthew Wilcox (Oracle) struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
209219deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
20931da177e4SLinus Torvalds {
2094cc9a6c87SMel Gorman 	struct mempolicy *pol;
2095c0ff7453SMiao Xie 	struct page *page;
209604ec6264SVlastimil Babka 	int preferred_nid;
2097be97a41bSVlastimil Babka 	nodemask_t *nmask;
20981da177e4SLinus Torvalds 
2099dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2100cc9a6c87SMel Gorman 
2101be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
21021da177e4SLinus Torvalds 		unsigned nid;
21035da7ca86SChristoph Lameter 
21048eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
210552cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
21060bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2107be97a41bSVlastimil Babka 		goto out;
21081da177e4SLinus Torvalds 	}
21091da177e4SLinus Torvalds 
21104c54d949SFeng Tang 	if (pol->mode == MPOL_PREFERRED_MANY) {
21114c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order, node, pol);
21124c54d949SFeng Tang 		mpol_cond_put(pol);
21134c54d949SFeng Tang 		goto out;
21144c54d949SFeng Tang 	}
21154c54d949SFeng Tang 
211619deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
211719deb769SDavid Rientjes 		int hpage_node = node;
211819deb769SDavid Rientjes 
211919deb769SDavid Rientjes 		/*
212019deb769SDavid Rientjes 		 * For hugepage allocation and non-interleave policy which
212119deb769SDavid Rientjes 		 * allows the current node (or other explicitly preferred
212219deb769SDavid Rientjes 		 * node) we only try to allocate from the current/preferred
212319deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
212419deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
212519deb769SDavid Rientjes 		 *
2126b27abaccSDave Hansen 		 * If the policy is interleave or does not allow the current
212719deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
212819deb769SDavid Rientjes 		 */
21297858d7bcSFeng Tang 		if (pol->mode == MPOL_PREFERRED)
2130269fbe72SBen Widawsky 			hpage_node = first_node(pol->nodes);
213119deb769SDavid Rientjes 
213219deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
213319deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
213419deb769SDavid Rientjes 			mpol_cond_put(pol);
2135cc638f32SVlastimil Babka 			/*
2136cc638f32SVlastimil Babka 			 * First, try to allocate THP only on local node, but
2137cc638f32SVlastimil Babka 			 * don't reclaim unnecessarily, just compact.
2138cc638f32SVlastimil Babka 			 */
213919deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2140cc638f32SVlastimil Babka 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
214176e654ccSDavid Rientjes 
214276e654ccSDavid Rientjes 			/*
214376e654ccSDavid Rientjes 			 * If hugepage allocations are configured to always
214476e654ccSDavid Rientjes 			 * synchronous compact or the vma has been madvised
214576e654ccSDavid Rientjes 			 * to prefer hugepage backing, retry allowing remote
2146cc638f32SVlastimil Babka 			 * memory with both reclaim and compact as well.
214776e654ccSDavid Rientjes 			 */
214876e654ccSDavid Rientjes 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
214976e654ccSDavid Rientjes 				page = __alloc_pages_node(hpage_node,
2150cc638f32SVlastimil Babka 								gfp, order);
215176e654ccSDavid Rientjes 
215219deb769SDavid Rientjes 			goto out;
215319deb769SDavid Rientjes 		}
215419deb769SDavid Rientjes 	}
215519deb769SDavid Rientjes 
2156077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
215704ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
215884172f4bSMatthew Wilcox (Oracle) 	page = __alloc_pages(gfp, order, preferred_nid, nmask);
2159d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2160be97a41bSVlastimil Babka out:
2161077fcf11SAneesh Kumar K.V 	return page;
2162077fcf11SAneesh Kumar K.V }
216369262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
2164077fcf11SAneesh Kumar K.V 
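/*
 * Editor's illustrative sketch, not part of the original file: a hypothetical
 * fault-path style caller of alloc_pages_vma() above.  It requests a single
 * page for a user address so that the VMA's (or the task's) mempolicy is
 * applied; the helper name is made up purely for illustration.
 */
static __maybe_unused struct page *
example_alloc_user_page(struct vm_area_struct *vma, unsigned long addr)
{
	/* order 0, prefer the local node, no THP attempt */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}
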
21651da177e4SLinus Torvalds /**
2166d7f946d0SMatthew Wilcox (Oracle)  * alloc_pages - Allocate pages.
21676421ec76SMatthew Wilcox (Oracle)  * @gfp: GFP flags.
21686421ec76SMatthew Wilcox (Oracle)  * @order: Power of two of number of pages to allocate.
21691da177e4SLinus Torvalds  *
21706421ec76SMatthew Wilcox (Oracle)  * Allocate 1 << @order contiguous pages.  The physical address of the
21716421ec76SMatthew Wilcox (Oracle)  * first page is naturally aligned (e.g. an order-3 allocation will be aligned
21726421ec76SMatthew Wilcox (Oracle)  * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
21736421ec76SMatthew Wilcox (Oracle)  * process is honoured when in process context.
21741da177e4SLinus Torvalds  *
21756421ec76SMatthew Wilcox (Oracle)  * Context: Can be called from any context, providing the appropriate GFP
21766421ec76SMatthew Wilcox (Oracle)  * flags are used.
21776421ec76SMatthew Wilcox (Oracle)  * Return: The page on success or NULL if allocation fails.
21781da177e4SLinus Torvalds  */
2179d7f946d0SMatthew Wilcox (Oracle) struct page *alloc_pages(gfp_t gfp, unsigned order)
21801da177e4SLinus Torvalds {
21818d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2182c0ff7453SMiao Xie 	struct page *page;
21831da177e4SLinus Torvalds 
21848d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
21858d90274bSOleg Nesterov 		pol = get_task_policy(current);
218652cd3b07SLee Schermerhorn 
218752cd3b07SLee Schermerhorn 	/*
218852cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
218952cd3b07SLee Schermerhorn 	 * nor system default_policy
219052cd3b07SLee Schermerhorn 	 */
219145c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2192c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
21934c54d949SFeng Tang 	else if (pol->mode == MPOL_PREFERRED_MANY)
21944c54d949SFeng Tang 		page = alloc_pages_preferred_many(gfp, order,
21954c54d949SFeng Tang 				numa_node_id(), pol);
2196c0ff7453SMiao Xie 	else
219784172f4bSMatthew Wilcox (Oracle) 		page = __alloc_pages(gfp, order,
219804ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
21995c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2200cc9a6c87SMel Gorman 
2201c0ff7453SMiao Xie 	return page;
22021da177e4SLinus Torvalds }
2203d7f946d0SMatthew Wilcox (Oracle) EXPORT_SYMBOL(alloc_pages);
22041da177e4SLinus Torvalds 
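/*
 * Editor's illustrative sketch, not part of the original file: a minimal,
 * hypothetical use of alloc_pages() above.  Order 1 requests two physically
 * contiguous, zeroed pages; the current task's mempolicy is honoured because
 * the call is made from process context.
 */
static int __maybe_unused example_alloc_pages_usage(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 1);

	if (!page)
		return -ENOMEM;		/* allocation can fail; always check */

	/* ... use page_address(page) for the kernel mapping ... */

	__free_pages(page, 1);		/* free with the same order */
	return 0;
}
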
2205*cc09cb13SMatthew Wilcox (Oracle) struct folio *folio_alloc(gfp_t gfp, unsigned order)
2206*cc09cb13SMatthew Wilcox (Oracle) {
2207*cc09cb13SMatthew Wilcox (Oracle) 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2208*cc09cb13SMatthew Wilcox (Oracle) 
2209*cc09cb13SMatthew Wilcox (Oracle) 	if (page && order > 1)
2210*cc09cb13SMatthew Wilcox (Oracle) 		prep_transhuge_page(page);
2211*cc09cb13SMatthew Wilcox (Oracle) 	return (struct folio *)page;
2212*cc09cb13SMatthew Wilcox (Oracle) }
2213*cc09cb13SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_alloc);
2214*cc09cb13SMatthew Wilcox (Oracle) 
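/*
 * Editor's illustrative sketch, not part of the original file: hypothetical
 * use of folio_alloc() above.  An order-2 folio covers four contiguous pages
 * managed as one reference-counted unit; folio_put() drops that reference
 * and frees it.
 */
static void __maybe_unused example_folio_alloc_usage(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 2);

	if (!folio)
		return;			/* NULL on allocation failure */

	/* folio_alloc() added __GFP_COMP, so this is a compound allocation */

	folio_put(folio);
}
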
2215ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2216ef0855d3SOleg Nesterov {
2217ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2218ef0855d3SOleg Nesterov 
2219ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2220ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2221ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2222ef0855d3SOleg Nesterov 	return 0;
2223ef0855d3SOleg Nesterov }
2224ef0855d3SOleg Nesterov 
22254225399aSPaul Jackson /*
2226846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
22274225399aSPaul Jackson  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
22284225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
22294225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
22304225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2231708c1bbcSMiao Xie  *
2232708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2233708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do the rebind work for the current task.
22344225399aSPaul Jackson  */
22354225399aSPaul Jackson 
2236846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2237846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
22381da177e4SLinus Torvalds {
22391da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
22401da177e4SLinus Torvalds 
22411da177e4SLinus Torvalds 	if (!new)
22421da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2243708c1bbcSMiao Xie 
2244708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2245708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2246708c1bbcSMiao Xie 		task_lock(current);
2247708c1bbcSMiao Xie 		*new = *old;
2248708c1bbcSMiao Xie 		task_unlock(current);
2249708c1bbcSMiao Xie 	} else
2250708c1bbcSMiao Xie 		*new = *old;
2251708c1bbcSMiao Xie 
22524225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
22534225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2254213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
22554225399aSPaul Jackson 	}
22561da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
22571da177e4SLinus Torvalds 	return new;
22581da177e4SLinus Torvalds }
22591da177e4SLinus Torvalds 
22601da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2261fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
22621da177e4SLinus Torvalds {
22631da177e4SLinus Torvalds 	if (!a || !b)
2264fcfb4dccSKOSAKI Motohiro 		return false;
226545c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2266fcfb4dccSKOSAKI Motohiro 		return false;
226719800502SBob Liu 	if (a->flags != b->flags)
2268fcfb4dccSKOSAKI Motohiro 		return false;
226919800502SBob Liu 	if (mpol_store_user_nodemask(a))
227019800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2271fcfb4dccSKOSAKI Motohiro 			return false;
227219800502SBob Liu 
227345c4745aSLee Schermerhorn 	switch (a->mode) {
227419770b32SMel Gorman 	case MPOL_BIND:
22751da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
22761da177e4SLinus Torvalds 	case MPOL_PREFERRED:
2277b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2278269fbe72SBen Widawsky 		return !!nodes_equal(a->nodes, b->nodes);
22797858d7bcSFeng Tang 	case MPOL_LOCAL:
22807858d7bcSFeng Tang 		return true;
22811da177e4SLinus Torvalds 	default:
22821da177e4SLinus Torvalds 		BUG();
2283fcfb4dccSKOSAKI Motohiro 		return false;
22841da177e4SLinus Torvalds 	}
22851da177e4SLinus Torvalds }
22861da177e4SLinus Torvalds 
22871da177e4SLinus Torvalds /*
22881da177e4SLinus Torvalds  * Shared memory backing store policy support.
22891da177e4SLinus Torvalds  *
22901da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
22911da177e4SLinus Torvalds  * The policies are kept in a red-black tree linked from the inode.
22924a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
22931da177e4SLinus Torvalds  * for any accesses to the tree.
22941da177e4SLinus Torvalds  */
22951da177e4SLinus Torvalds 
22964a8c7bb5SNathan Zimmer /*
22974a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  The caller holds
22984a8c7bb5SNathan Zimmer  * sp->lock for reading or for writing.
22994a8c7bb5SNathan Zimmer  */
23001da177e4SLinus Torvalds static struct sp_node *
23011da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
23021da177e4SLinus Torvalds {
23031da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
23041da177e4SLinus Torvalds 
23051da177e4SLinus Torvalds 	while (n) {
23061da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
23071da177e4SLinus Torvalds 
23081da177e4SLinus Torvalds 		if (start >= p->end)
23091da177e4SLinus Torvalds 			n = n->rb_right;
23101da177e4SLinus Torvalds 		else if (end <= p->start)
23111da177e4SLinus Torvalds 			n = n->rb_left;
23121da177e4SLinus Torvalds 		else
23131da177e4SLinus Torvalds 			break;
23141da177e4SLinus Torvalds 	}
23151da177e4SLinus Torvalds 	if (!n)
23161da177e4SLinus Torvalds 		return NULL;
23171da177e4SLinus Torvalds 	for (;;) {
23181da177e4SLinus Torvalds 		struct sp_node *w = NULL;
23191da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
23201da177e4SLinus Torvalds 		if (!prev)
23211da177e4SLinus Torvalds 			break;
23221da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
23231da177e4SLinus Torvalds 		if (w->end <= start)
23241da177e4SLinus Torvalds 			break;
23251da177e4SLinus Torvalds 		n = prev;
23261da177e4SLinus Torvalds 	}
23271da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
23281da177e4SLinus Torvalds }
23291da177e4SLinus Torvalds 
23304a8c7bb5SNathan Zimmer /*
23314a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
23324a8c7bb5SNathan Zimmer  * writing.
23334a8c7bb5SNathan Zimmer  */
23341da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
23351da177e4SLinus Torvalds {
23361da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
23371da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
23381da177e4SLinus Torvalds 	struct sp_node *nd;
23391da177e4SLinus Torvalds 
23401da177e4SLinus Torvalds 	while (*p) {
23411da177e4SLinus Torvalds 		parent = *p;
23421da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
23431da177e4SLinus Torvalds 		if (new->start < nd->start)
23441da177e4SLinus Torvalds 			p = &(*p)->rb_left;
23451da177e4SLinus Torvalds 		else if (new->end > nd->end)
23461da177e4SLinus Torvalds 			p = &(*p)->rb_right;
23471da177e4SLinus Torvalds 		else
23481da177e4SLinus Torvalds 			BUG();
23491da177e4SLinus Torvalds 	}
23501da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
23511da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2352140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
235345c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
23541da177e4SLinus Torvalds }
23551da177e4SLinus Torvalds 
23561da177e4SLinus Torvalds /* Find shared policy intersecting idx */
23571da177e4SLinus Torvalds struct mempolicy *
23581da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
23591da177e4SLinus Torvalds {
23601da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
23611da177e4SLinus Torvalds 	struct sp_node *sn;
23621da177e4SLinus Torvalds 
23631da177e4SLinus Torvalds 	if (!sp->root.rb_node)
23641da177e4SLinus Torvalds 		return NULL;
23654a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
23661da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
23671da177e4SLinus Torvalds 	if (sn) {
23681da177e4SLinus Torvalds 		mpol_get(sn->policy);
23691da177e4SLinus Torvalds 		pol = sn->policy;
23701da177e4SLinus Torvalds 	}
23714a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
23721da177e4SLinus Torvalds 	return pol;
23731da177e4SLinus Torvalds }
23741da177e4SLinus Torvalds 
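/*
 * Editor's illustrative sketch, not part of the original file: the lookup
 * above returns a referenced policy (or NULL if none is installed for the
 * index), so a hypothetical caller must drop that reference when done.
 */
static unsigned short __maybe_unused
example_shared_policy_mode(struct shared_policy *sp, unsigned long idx)
{
	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
	unsigned short mode = pol ? pol->mode : MPOL_DEFAULT;

	mpol_put(pol);			/* mpol_put() tolerates NULL */
	return mode;
}
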
237563f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
237663f74ca2SKOSAKI Motohiro {
237763f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
237863f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
237963f74ca2SKOSAKI Motohiro }
238063f74ca2SKOSAKI Motohiro 
2381771fb4d8SLee Schermerhorn /**
2382771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2383771fb4d8SLee Schermerhorn  *
2384b46e14acSFabian Frederick  * @page: page to be checked
2385b46e14acSFabian Frederick  * @vma: vm area where page mapped
2386b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2387771fb4d8SLee Schermerhorn  *
2388771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma, addr and "compare to" the page's
23895f076944SMatthew Wilcox (Oracle)  * node id.  Policy determination "mimics" alloc_page_vma().
2390771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
23915f076944SMatthew Wilcox (Oracle)  *
2392062db293SBaolin Wang  * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2393062db293SBaolin Wang  * policy, or a suitable node ID to allocate a replacement page from.
2394771fb4d8SLee Schermerhorn  */
2395771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2396771fb4d8SLee Schermerhorn {
2397771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2398c33d6c06SMel Gorman 	struct zoneref *z;
2399771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2400771fb4d8SLee Schermerhorn 	unsigned long pgoff;
240190572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
240290572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
240398fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2404062db293SBaolin Wang 	int ret = NUMA_NO_NODE;
2405771fb4d8SLee Schermerhorn 
2406dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2407771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2408771fb4d8SLee Schermerhorn 		goto out;
2409771fb4d8SLee Schermerhorn 
2410771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2411771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2412771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2413771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
241498c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2415771fb4d8SLee Schermerhorn 		break;
2416771fb4d8SLee Schermerhorn 
2417771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2418b27abaccSDave Hansen 		if (node_isset(curnid, pol->nodes))
2419b27abaccSDave Hansen 			goto out;
2420269fbe72SBen Widawsky 		polnid = first_node(pol->nodes);
2421771fb4d8SLee Schermerhorn 		break;
2422771fb4d8SLee Schermerhorn 
24237858d7bcSFeng Tang 	case MPOL_LOCAL:
24247858d7bcSFeng Tang 		polnid = numa_node_id();
24257858d7bcSFeng Tang 		break;
24267858d7bcSFeng Tang 
2427771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2428bda420b9SHuang Ying 		/* Optimize placement among multiple nodes via NUMA balancing */
2429bda420b9SHuang Ying 		if (pol->flags & MPOL_F_MORON) {
2430269fbe72SBen Widawsky 			if (node_isset(thisnid, pol->nodes))
2431bda420b9SHuang Ying 				break;
2432bda420b9SHuang Ying 			goto out;
2433bda420b9SHuang Ying 		}
2434b27abaccSDave Hansen 		fallthrough;
2435c33d6c06SMel Gorman 
2436b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2437771fb4d8SLee Schermerhorn 		/*
2438771fb4d8SLee Schermerhorn 		 * use current page if in policy nodemask,
2439771fb4d8SLee Schermerhorn 		 * else select nearest allowed node, if any.
2440771fb4d8SLee Schermerhorn 		 * If no allowed nodes, use current [!misplaced].
2441771fb4d8SLee Schermerhorn 		 */
2442269fbe72SBen Widawsky 		if (node_isset(curnid, pol->nodes))
2443771fb4d8SLee Schermerhorn 			goto out;
2444c33d6c06SMel Gorman 		z = first_zones_zonelist(
2445771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2446771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2447269fbe72SBen Widawsky 				&pol->nodes);
2448c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2449771fb4d8SLee Schermerhorn 		break;
2450771fb4d8SLee Schermerhorn 
2451771fb4d8SLee Schermerhorn 	default:
2452771fb4d8SLee Schermerhorn 		BUG();
2453771fb4d8SLee Schermerhorn 	}
24545606e387SMel Gorman 
24555606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2456e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
245790572890SPeter Zijlstra 		polnid = thisnid;
24585606e387SMel Gorman 
245910f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2460de1c9ce6SRik van Riel 			goto out;
2461de1c9ce6SRik van Riel 	}
2462e42c8ff2SMel Gorman 
2463771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2464771fb4d8SLee Schermerhorn 		ret = polnid;
2465771fb4d8SLee Schermerhorn out:
2466771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2467771fb4d8SLee Schermerhorn 
2468771fb4d8SLee Schermerhorn 	return ret;
2469771fb4d8SLee Schermerhorn }
2470771fb4d8SLee Schermerhorn 
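/*
 * Editor's illustrative sketch, not part of the original file: how a caller
 * in the NUMA hinting fault path (the context the comment above describes)
 * might consume mpol_misplaced().  The helper and its out-parameter are
 * hypothetical.
 */
static bool __maybe_unused example_page_needs_migration(struct page *page,
		struct vm_area_struct *vma, unsigned long addr, int *target_nid)
{
	*target_nid = mpol_misplaced(page, vma, addr);

	/* NUMA_NO_NODE means the page already sits on an acceptable node */
	return *target_nid != NUMA_NO_NODE;
}
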
2471c11600e4SDavid Rientjes /*
2472c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2473c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2474c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2475c11600e4SDavid Rientjes  * policy.
2476c11600e4SDavid Rientjes  */
2477c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2478c11600e4SDavid Rientjes {
2479c11600e4SDavid Rientjes 	struct mempolicy *pol;
2480c11600e4SDavid Rientjes 
2481c11600e4SDavid Rientjes 	task_lock(task);
2482c11600e4SDavid Rientjes 	pol = task->mempolicy;
2483c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2484c11600e4SDavid Rientjes 	task_unlock(task);
2485c11600e4SDavid Rientjes 	mpol_put(pol);
2486c11600e4SDavid Rientjes }
2487c11600e4SDavid Rientjes 
24881da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
24891da177e4SLinus Torvalds {
2490140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
24911da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
249263f74ca2SKOSAKI Motohiro 	sp_free(n);
24931da177e4SLinus Torvalds }
24941da177e4SLinus Torvalds 
249542288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
249642288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
249742288fe3SMel Gorman {
249842288fe3SMel Gorman 	node->start = start;
249942288fe3SMel Gorman 	node->end = end;
250042288fe3SMel Gorman 	node->policy = pol;
250142288fe3SMel Gorman }
250242288fe3SMel Gorman 
2503dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2504dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
25051da177e4SLinus Torvalds {
2506869833f2SKOSAKI Motohiro 	struct sp_node *n;
2507869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
25081da177e4SLinus Torvalds 
2509869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
25101da177e4SLinus Torvalds 	if (!n)
25111da177e4SLinus Torvalds 		return NULL;
2512869833f2SKOSAKI Motohiro 
2513869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2514869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2515869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2516869833f2SKOSAKI Motohiro 		return NULL;
2517869833f2SKOSAKI Motohiro 	}
2518869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
251942288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2520869833f2SKOSAKI Motohiro 
25211da177e4SLinus Torvalds 	return n;
25221da177e4SLinus Torvalds }
25231da177e4SLinus Torvalds 
25241da177e4SLinus Torvalds /* Replace a policy range. */
25251da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
25261da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
25271da177e4SLinus Torvalds {
2528b22d127aSMel Gorman 	struct sp_node *n;
252942288fe3SMel Gorman 	struct sp_node *n_new = NULL;
253042288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2531b22d127aSMel Gorman 	int ret = 0;
25321da177e4SLinus Torvalds 
253342288fe3SMel Gorman restart:
25344a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
25351da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
25361da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
25371da177e4SLinus Torvalds 	while (n && n->start < end) {
25381da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
25391da177e4SLinus Torvalds 		if (n->start >= start) {
25401da177e4SLinus Torvalds 			if (n->end <= end)
25411da177e4SLinus Torvalds 				sp_delete(sp, n);
25421da177e4SLinus Torvalds 			else
25431da177e4SLinus Torvalds 				n->start = end;
25441da177e4SLinus Torvalds 		} else {
25451da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
25461da177e4SLinus Torvalds 			if (n->end > end) {
254742288fe3SMel Gorman 				if (!n_new)
254842288fe3SMel Gorman 					goto alloc_new;
254942288fe3SMel Gorman 
255042288fe3SMel Gorman 				*mpol_new = *n->policy;
255142288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
25527880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
25531da177e4SLinus Torvalds 				n->end = start;
25545ca39575SHillf Danton 				sp_insert(sp, n_new);
255542288fe3SMel Gorman 				n_new = NULL;
255642288fe3SMel Gorman 				mpol_new = NULL;
25571da177e4SLinus Torvalds 				break;
25581da177e4SLinus Torvalds 			} else
25591da177e4SLinus Torvalds 				n->end = start;
25601da177e4SLinus Torvalds 		}
25611da177e4SLinus Torvalds 		if (!next)
25621da177e4SLinus Torvalds 			break;
25631da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25641da177e4SLinus Torvalds 	}
25651da177e4SLinus Torvalds 	if (new)
25661da177e4SLinus Torvalds 		sp_insert(sp, new);
25674a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
256842288fe3SMel Gorman 	ret = 0;
256942288fe3SMel Gorman 
257042288fe3SMel Gorman err_out:
257142288fe3SMel Gorman 	if (mpol_new)
257242288fe3SMel Gorman 		mpol_put(mpol_new);
257342288fe3SMel Gorman 	if (n_new)
257442288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
257542288fe3SMel Gorman 
2576b22d127aSMel Gorman 	return ret;
257742288fe3SMel Gorman 
257842288fe3SMel Gorman alloc_new:
25794a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
258042288fe3SMel Gorman 	ret = -ENOMEM;
258142288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
258242288fe3SMel Gorman 	if (!n_new)
258342288fe3SMel Gorman 		goto err_out;
258442288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
258542288fe3SMel Gorman 	if (!mpol_new)
258642288fe3SMel Gorman 		goto err_out;
258742288fe3SMel Gorman 	goto restart;
25881da177e4SLinus Torvalds }
25891da177e4SLinus Torvalds 
259071fe804bSLee Schermerhorn /**
259171fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
259271fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
259371fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
259471fe804bSLee Schermerhorn  *
259571fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
259671fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
259771fe804bSLee Schermerhorn  * This must be released on exit.
25984bfc4495SKAMEZAWA Hiroyuki  * This is called from get_inode() context, so we can use GFP_KERNEL.
259971fe804bSLee Schermerhorn  */
260071fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
26017339ff83SRobin Holt {
260258568d2aSMiao Xie 	int ret;
260358568d2aSMiao Xie 
260471fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
26054a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
26067339ff83SRobin Holt 
260771fe804bSLee Schermerhorn 	if (mpol) {
26087339ff83SRobin Holt 		struct vm_area_struct pvma;
260971fe804bSLee Schermerhorn 		struct mempolicy *new;
26104bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
26117339ff83SRobin Holt 
26124bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
26135c0c1654SLee Schermerhorn 			goto put_mpol;
261471fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
261571fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
261615d77835SLee Schermerhorn 		if (IS_ERR(new))
26170cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
261858568d2aSMiao Xie 
261958568d2aSMiao Xie 		task_lock(current);
26204bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
262158568d2aSMiao Xie 		task_unlock(current);
262215d77835SLee Schermerhorn 		if (ret)
26235c0c1654SLee Schermerhorn 			goto put_new;
262471fe804bSLee Schermerhorn 
262571fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
26262c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
262771fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
262871fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
262915d77835SLee Schermerhorn 
26305c0c1654SLee Schermerhorn put_new:
263171fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
26320cae3457SDan Carpenter free_scratch:
26334bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
26345c0c1654SLee Schermerhorn put_mpol:
26355c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
26367339ff83SRobin Holt 	}
26377339ff83SRobin Holt }
26387339ff83SRobin Holt 
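/*
 * Editor's illustrative sketch, not part of the original file: the lifetime
 * pairing an in-tree shared memory filesystem (e.g. shmem) follows around
 * the helper above.  The helper names and @inode_sp/@sb_mpol are hypothetical.
 */
static void __maybe_unused example_inode_policy_setup(struct shared_policy *inode_sp,
						      struct mempolicy *sb_mpol)
{
	/* At inode creation: install the mount policy; the ref on @sb_mpol is consumed. */
	mpol_shared_policy_init(inode_sp, sb_mpol);
}

static void __maybe_unused example_inode_policy_teardown(struct shared_policy *inode_sp)
{
	/* At inode eviction: free every range policy left in the tree. */
	mpol_free_shared_policy(inode_sp);
}
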
26391da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
26401da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
26411da177e4SLinus Torvalds {
26421da177e4SLinus Torvalds 	int err;
26431da177e4SLinus Torvalds 	struct sp_node *new = NULL;
26441da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
26451da177e4SLinus Torvalds 
2646028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
26471da177e4SLinus Torvalds 		 vma->vm_pgoff,
264845c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2649028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
2650269fbe72SBen Widawsky 		 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
26511da177e4SLinus Torvalds 
26521da177e4SLinus Torvalds 	if (npol) {
26531da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
26541da177e4SLinus Torvalds 		if (!new)
26551da177e4SLinus Torvalds 			return -ENOMEM;
26561da177e4SLinus Torvalds 	}
26571da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
26581da177e4SLinus Torvalds 	if (err && new)
265963f74ca2SKOSAKI Motohiro 		sp_free(new);
26601da177e4SLinus Torvalds 	return err;
26611da177e4SLinus Torvalds }
26621da177e4SLinus Torvalds 
26631da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
26641da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
26651da177e4SLinus Torvalds {
26661da177e4SLinus Torvalds 	struct sp_node *n;
26671da177e4SLinus Torvalds 	struct rb_node *next;
26681da177e4SLinus Torvalds 
26691da177e4SLinus Torvalds 	if (!p->root.rb_node)
26701da177e4SLinus Torvalds 		return;
26714a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
26721da177e4SLinus Torvalds 	next = rb_first(&p->root);
26731da177e4SLinus Torvalds 	while (next) {
26741da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26751da177e4SLinus Torvalds 		next = rb_next(&n->nd);
267663f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
26771da177e4SLinus Torvalds 	}
26784a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
26791da177e4SLinus Torvalds }
26801da177e4SLinus Torvalds 
26811a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2682c297663cSMel Gorman static int __initdata numabalancing_override;
26831a687c2eSMel Gorman 
26841a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
26851a687c2eSMel Gorman {
26861a687c2eSMel Gorman 	bool numabalancing_default = false;
26871a687c2eSMel Gorman 
26881a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
26891a687c2eSMel Gorman 		numabalancing_default = true;
26901a687c2eSMel Gorman 
2691c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2692c297663cSMel Gorman 	if (numabalancing_override)
2693c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2694c297663cSMel Gorman 
2695b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2696756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2697c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
26981a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
26991a687c2eSMel Gorman 	}
27001a687c2eSMel Gorman }
27011a687c2eSMel Gorman 
27021a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
27031a687c2eSMel Gorman {
27041a687c2eSMel Gorman 	int ret = 0;
27051a687c2eSMel Gorman 	if (!str)
27061a687c2eSMel Gorman 		goto out;
27071a687c2eSMel Gorman 
27081a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2709c297663cSMel Gorman 		numabalancing_override = 1;
27101a687c2eSMel Gorman 		ret = 1;
27111a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2712c297663cSMel Gorman 		numabalancing_override = -1;
27131a687c2eSMel Gorman 		ret = 1;
27141a687c2eSMel Gorman 	}
27151a687c2eSMel Gorman out:
27161a687c2eSMel Gorman 	if (!ret)
27174a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
27181a687c2eSMel Gorman 
27191a687c2eSMel Gorman 	return ret;
27201a687c2eSMel Gorman }
27211a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
27221a687c2eSMel Gorman #else
27231a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
27241a687c2eSMel Gorman {
27251a687c2eSMel Gorman }
27261a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
27271a687c2eSMel Gorman 
27281da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
27291da177e4SLinus Torvalds void __init numa_policy_init(void)
27301da177e4SLinus Torvalds {
2731b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2732b71636e2SPaul Mundt 	unsigned long largest = 0;
2733b71636e2SPaul Mundt 	int nid, prefer = 0;
2734b71636e2SPaul Mundt 
27351da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
27361da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
273720c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
27381da177e4SLinus Torvalds 
27391da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
27401da177e4SLinus Torvalds 				     sizeof(struct sp_node),
274120c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
27421da177e4SLinus Torvalds 
27435606e387SMel Gorman 	for_each_node(nid) {
27445606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
27455606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
27465606e387SMel Gorman 			.mode = MPOL_PREFERRED,
27475606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2748269fbe72SBen Widawsky 			.nodes = nodemask_of_node(nid),
27495606e387SMel Gorman 		};
27505606e387SMel Gorman 	}
27515606e387SMel Gorman 
2752b71636e2SPaul Mundt 	/*
2753b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2754b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2755b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2756b71636e2SPaul Mundt 	 */
2757b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
275801f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2759b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
27601da177e4SLinus Torvalds 
2761b71636e2SPaul Mundt 		/* Preserve the largest node */
2762b71636e2SPaul Mundt 		if (largest < total_pages) {
2763b71636e2SPaul Mundt 			largest = total_pages;
2764b71636e2SPaul Mundt 			prefer = nid;
2765b71636e2SPaul Mundt 		}
2766b71636e2SPaul Mundt 
2767b71636e2SPaul Mundt 		/* Interleave this node? */
2768b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2769b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2770b71636e2SPaul Mundt 	}
2771b71636e2SPaul Mundt 
2772b71636e2SPaul Mundt 	/* All too small, use the largest */
2773b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2774b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2775b71636e2SPaul Mundt 
2776028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2777b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
27781a687c2eSMel Gorman 
27791a687c2eSMel Gorman 	check_numabalancing_enable();
27801da177e4SLinus Torvalds }
27811da177e4SLinus Torvalds 
27828bccd85fSChristoph Lameter /* Reset policy of current process to default */
27831da177e4SLinus Torvalds void numa_default_policy(void)
27841da177e4SLinus Torvalds {
2785028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
27861da177e4SLinus Torvalds }
278768860ec1SPaul Jackson 
27884225399aSPaul Jackson /*
2789095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2790095f1fc4SLee Schermerhorn  */
2791095f1fc4SLee Schermerhorn 
2792345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2793345ace9cSLee Schermerhorn {
2794345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2795345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2796345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2797345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2798d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2799b27abaccSDave Hansen 	[MPOL_PREFERRED_MANY]  = "prefer (many)",
2800345ace9cSLee Schermerhorn };
28011a75a6c8SChristoph Lameter 
2802095f1fc4SLee Schermerhorn 
2803095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2804095f1fc4SLee Schermerhorn /**
2805f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2806095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
280771fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2808095f1fc4SLee Schermerhorn  *
2809095f1fc4SLee Schermerhorn  * Format of input:
2810095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2811095f1fc4SLee Schermerhorn  *
281271fe804bSLee Schermerhorn  * On success, returns 0, else 1
2813095f1fc4SLee Schermerhorn  */
2814a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2815095f1fc4SLee Schermerhorn {
281671fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2817f2a07f40SHugh Dickins 	unsigned short mode_flags;
281871fe804bSLee Schermerhorn 	nodemask_t nodes;
2819095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2820095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2821dedf2c73Szhong jiang 	int err = 1, mode;
2822095f1fc4SLee Schermerhorn 
2823c7a91bc7SDan Carpenter 	if (flags)
2824c7a91bc7SDan Carpenter 		*flags++ = '\0';	/* terminate mode string */
2825c7a91bc7SDan Carpenter 
2826095f1fc4SLee Schermerhorn 	if (nodelist) {
2827095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2828095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
282971fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2830095f1fc4SLee Schermerhorn 			goto out;
283101f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2832095f1fc4SLee Schermerhorn 			goto out;
283371fe804bSLee Schermerhorn 	} else
283471fe804bSLee Schermerhorn 		nodes_clear(nodes);
283571fe804bSLee Schermerhorn 
2836dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2837dedf2c73Szhong jiang 	if (mode < 0)
2838095f1fc4SLee Schermerhorn 		goto out;
2839095f1fc4SLee Schermerhorn 
284071fe804bSLee Schermerhorn 	switch (mode) {
2841095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
284271fe804bSLee Schermerhorn 		/*
2843aa9f7d51SRandy Dunlap 		 * Insist on a nodelist of one node only, although later
2844aa9f7d51SRandy Dunlap 		 * we use first_node(nodes) to grab a single node, so here
2845aa9f7d51SRandy Dunlap 		 * nodelist (or nodes) cannot be empty.
284671fe804bSLee Schermerhorn 		 */
2847095f1fc4SLee Schermerhorn 		if (nodelist) {
2848095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2849095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2850095f1fc4SLee Schermerhorn 				rest++;
2851926f2ae0SKOSAKI Motohiro 			if (*rest)
2852926f2ae0SKOSAKI Motohiro 				goto out;
2853aa9f7d51SRandy Dunlap 			if (nodes_empty(nodes))
2854aa9f7d51SRandy Dunlap 				goto out;
2855095f1fc4SLee Schermerhorn 		}
2856095f1fc4SLee Schermerhorn 		break;
2857095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2858095f1fc4SLee Schermerhorn 		/*
2859095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2860095f1fc4SLee Schermerhorn 		 */
2861095f1fc4SLee Schermerhorn 		if (!nodelist)
286201f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
28633f226aa1SLee Schermerhorn 		break;
286471fe804bSLee Schermerhorn 	case MPOL_LOCAL:
28653f226aa1SLee Schermerhorn 		/*
286671fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
28673f226aa1SLee Schermerhorn 		 */
286871fe804bSLee Schermerhorn 		if (nodelist)
28693f226aa1SLee Schermerhorn 			goto out;
28703f226aa1SLee Schermerhorn 		break;
2871413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2872413b43deSRavikiran G Thirumalai 		/*
2873413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2874413b43deSRavikiran G Thirumalai 		 */
2875413b43deSRavikiran G Thirumalai 		if (!nodelist)
2876413b43deSRavikiran G Thirumalai 			err = 0;
2877413b43deSRavikiran G Thirumalai 		goto out;
2878b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
2879d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
288071fe804bSLee Schermerhorn 		/*
2881d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
288271fe804bSLee Schermerhorn 		 */
2883d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2884d69b2e63SKOSAKI Motohiro 			goto out;
2885095f1fc4SLee Schermerhorn 	}
2886095f1fc4SLee Schermerhorn 
288771fe804bSLee Schermerhorn 	mode_flags = 0;
2888095f1fc4SLee Schermerhorn 	if (flags) {
2889095f1fc4SLee Schermerhorn 		/*
2890095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2891095f1fc4SLee Schermerhorn 		 * mode flags.
2892095f1fc4SLee Schermerhorn 		 */
2893095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
289471fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2895095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
289671fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2897095f1fc4SLee Schermerhorn 		else
2898926f2ae0SKOSAKI Motohiro 			goto out;
2899095f1fc4SLee Schermerhorn 	}
290071fe804bSLee Schermerhorn 
290171fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
290271fe804bSLee Schermerhorn 	if (IS_ERR(new))
2903926f2ae0SKOSAKI Motohiro 		goto out;
2904926f2ae0SKOSAKI Motohiro 
2905f2a07f40SHugh Dickins 	/*
2906f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2907f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2908f2a07f40SHugh Dickins 	 */
2909269fbe72SBen Widawsky 	if (mode != MPOL_PREFERRED) {
2910269fbe72SBen Widawsky 		new->nodes = nodes;
2911269fbe72SBen Widawsky 	} else if (nodelist) {
2912269fbe72SBen Widawsky 		nodes_clear(new->nodes);
2913269fbe72SBen Widawsky 		node_set(first_node(nodes), new->nodes);
2914269fbe72SBen Widawsky 	} else {
29157858d7bcSFeng Tang 		new->mode = MPOL_LOCAL;
2916269fbe72SBen Widawsky 	}
2917f2a07f40SHugh Dickins 
2918f2a07f40SHugh Dickins 	/*
2919f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2920f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2921f2a07f40SHugh Dickins 	 */
2922e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2923f2a07f40SHugh Dickins 
2924926f2ae0SKOSAKI Motohiro 	err = 0;
292571fe804bSLee Schermerhorn 
2926095f1fc4SLee Schermerhorn out:
2927095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2928095f1fc4SLee Schermerhorn 	if (nodelist)
2929095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2930095f1fc4SLee Schermerhorn 	if (flags)
2931095f1fc4SLee Schermerhorn 		*--flags = '=';
293271fe804bSLee Schermerhorn 	if (!err)
293371fe804bSLee Schermerhorn 		*mpol = new;
2934095f1fc4SLee Schermerhorn 	return err;
2935095f1fc4SLee Schermerhorn }
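
/*
 * Editor's illustrative sketch, not part of the original file: parsing a
 * tmpfs-style "mpol=" string with mpol_parse_str() above.  The buffer is
 * hypothetical and must be writable, since substrings are NUL-terminated in
 * place; node 0 is used so the nodelist is valid on any machine with memory
 * on node 0.
 */
static void __maybe_unused example_parse_mpol_string(void)
{
	struct mempolicy *mpol;
	char str[] = "bind=static:0";	/* <mode>[=<flags>][:<nodelist>] */

	if (mpol_parse_str(str, &mpol))
		return;			/* non-zero return: string was invalid */

	/* *mpol now holds a reference on the parsed policy. */
	mpol_put(mpol);
}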
2936095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
2937095f1fc4SLee Schermerhorn 
293871fe804bSLee Schermerhorn /**
293971fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
294071fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
294171fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
294271fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
294371fe804bSLee Schermerhorn  *
2944948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2945948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2946948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
29471a75a6c8SChristoph Lameter  */
2948948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
29491a75a6c8SChristoph Lameter {
29501a75a6c8SChristoph Lameter 	char *p = buffer;
2951948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2952948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2953948927eeSDavid Rientjes 	unsigned short flags = 0;
29541a75a6c8SChristoph Lameter 
29558790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2956bea904d5SLee Schermerhorn 		mode = pol->mode;
2957948927eeSDavid Rientjes 		flags = pol->flags;
2958948927eeSDavid Rientjes 	}
2959bea904d5SLee Schermerhorn 
29601a75a6c8SChristoph Lameter 	switch (mode) {
29611a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
29627858d7bcSFeng Tang 	case MPOL_LOCAL:
29631a75a6c8SChristoph Lameter 		break;
29641a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2965b27abaccSDave Hansen 	case MPOL_PREFERRED_MANY:
29661a75a6c8SChristoph Lameter 	case MPOL_BIND:
29671a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
2968269fbe72SBen Widawsky 		nodes = pol->nodes;
29691a75a6c8SChristoph Lameter 		break;
29701a75a6c8SChristoph Lameter 	default:
2971948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2972948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2973948927eeSDavid Rientjes 		return;
29741a75a6c8SChristoph Lameter 	}
29751a75a6c8SChristoph Lameter 
2976b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
29771a75a6c8SChristoph Lameter 
2978fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2979948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2980f5b087b5SDavid Rientjes 
29812291990aSLee Schermerhorn 		/*
29822291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
29832291990aSLee Schermerhorn 		 */
2984f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
29852291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
29862291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
29872291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2988f5b087b5SDavid Rientjes 	}
2989f5b087b5SDavid Rientjes 
29909e763e0fSTejun Heo 	if (!nodes_empty(nodes))
29919e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
29929e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
29931a75a6c8SChristoph Lameter }
299420b51af1SHuang Ying 
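/*
 * Editor's illustrative sketch, not part of the original file: formatting the
 * current task's policy for a debug message.  The 64-byte buffer comfortably
 * exceeds the recommended minimum of 32 bytes noted above; the printed form
 * might look like "interleave:0-3".
 */
static void __maybe_unused example_print_task_policy(void)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), current->mempolicy);
	pr_debug("current mempolicy: %s\n", buf);
}
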
299520b51af1SHuang Ying bool numa_demotion_enabled = false;
299620b51af1SHuang Ying 
299720b51af1SHuang Ying #ifdef CONFIG_SYSFS
299820b51af1SHuang Ying static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
299920b51af1SHuang Ying 					  struct kobj_attribute *attr, char *buf)
300020b51af1SHuang Ying {
300120b51af1SHuang Ying 	return sysfs_emit(buf, "%s\n",
300220b51af1SHuang Ying 			  numa_demotion_enabled ? "true" : "false");
300320b51af1SHuang Ying }
300420b51af1SHuang Ying 
300520b51af1SHuang Ying static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
300620b51af1SHuang Ying 					   struct kobj_attribute *attr,
300720b51af1SHuang Ying 					   const char *buf, size_t count)
300820b51af1SHuang Ying {
300920b51af1SHuang Ying 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
301020b51af1SHuang Ying 		numa_demotion_enabled = true;
301120b51af1SHuang Ying 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
301220b51af1SHuang Ying 		numa_demotion_enabled = false;
301320b51af1SHuang Ying 	else
301420b51af1SHuang Ying 		return -EINVAL;
301520b51af1SHuang Ying 
301620b51af1SHuang Ying 	return count;
301720b51af1SHuang Ying }
301820b51af1SHuang Ying 
301920b51af1SHuang Ying static struct kobj_attribute numa_demotion_enabled_attr =
302020b51af1SHuang Ying 	__ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
302120b51af1SHuang Ying 	       numa_demotion_enabled_store);
302220b51af1SHuang Ying 
302320b51af1SHuang Ying static struct attribute *numa_attrs[] = {
302420b51af1SHuang Ying 	&numa_demotion_enabled_attr.attr,
302520b51af1SHuang Ying 	NULL,
302620b51af1SHuang Ying };
302720b51af1SHuang Ying 
302820b51af1SHuang Ying static const struct attribute_group numa_attr_group = {
302920b51af1SHuang Ying 	.attrs = numa_attrs,
303020b51af1SHuang Ying };
303120b51af1SHuang Ying 
303220b51af1SHuang Ying static int __init numa_init_sysfs(void)
303320b51af1SHuang Ying {
303420b51af1SHuang Ying 	int err;
303520b51af1SHuang Ying 	struct kobject *numa_kobj;
303620b51af1SHuang Ying 
303720b51af1SHuang Ying 	numa_kobj = kobject_create_and_add("numa", mm_kobj);
303820b51af1SHuang Ying 	if (!numa_kobj) {
303920b51af1SHuang Ying 		pr_err("failed to create numa kobject\n");
304020b51af1SHuang Ying 		return -ENOMEM;
304120b51af1SHuang Ying 	}
304220b51af1SHuang Ying 	err = sysfs_create_group(numa_kobj, &numa_attr_group);
304320b51af1SHuang Ying 	if (err) {
304420b51af1SHuang Ying 		pr_err("failed to register numa group\n");
304520b51af1SHuang Ying 		goto delete_obj;
304620b51af1SHuang Ying 	}
304720b51af1SHuang Ying 	return 0;
304820b51af1SHuang Ying 
304920b51af1SHuang Ying delete_obj:
305020b51af1SHuang Ying 	kobject_put(numa_kobj);
305120b51af1SHuang Ying 	return err;
305220b51af1SHuang Ying }
305320b51af1SHuang Ying subsys_initcall(numa_init_sysfs);
305420b51af1SHuang Ying #endif
3055