xref: /openbmc/linux/mm/mempolicy.c (revision 19deb7695e072deaff025e03de40c61b525bd57e)
146aeb7e6SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Simple NUMA memory policy for the Linux kernel.
41da177e4SLinus Torvalds  *
51da177e4SLinus Torvalds  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
68bccd85fSChristoph Lameter  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * NUMA policy allows the user to give hints in which node(s) memory should
91da177e4SLinus Torvalds  * be allocated.
101da177e4SLinus Torvalds  *
111da177e4SLinus Torvalds  * Support four policies per VMA and per process:
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  * The VMA policy has priority over the process policy for a page fault.
141da177e4SLinus Torvalds  *
151da177e4SLinus Torvalds  * interleave     Allocate memory interleaved over a set of nodes,
161da177e4SLinus Torvalds  *                with normal fallback if it fails.
171da177e4SLinus Torvalds  *                For VMA based allocations this interleaves based on the
181da177e4SLinus Torvalds  *                offset into the backing object or offset into the mapping
191da177e4SLinus Torvalds  *                for anonymous memory. For process policy a process counter
201da177e4SLinus Torvalds  *                is used.
218bccd85fSChristoph Lameter  *
221da177e4SLinus Torvalds  * bind           Only allocate memory on a specific set of nodes,
231da177e4SLinus Torvalds  *                no fallback.
248bccd85fSChristoph Lameter  *                FIXME: memory is allocated starting with the first node
258bccd85fSChristoph Lameter  *                to the last. It would be better if bind would truly restrict
268bccd85fSChristoph Lameter  *                the allocation to memory nodes instead
278bccd85fSChristoph Lameter  *
281da177e4SLinus Torvalds  * preferred      Try a specific node first before normal fallback.
2900ef2d2fSDavid Rientjes  *                As a special case NUMA_NO_NODE here means do the allocation
301da177e4SLinus Torvalds  *                on the local CPU. This is normally identical to default,
311da177e4SLinus Torvalds  *                but useful to set in a VMA when you have a non default
321da177e4SLinus Torvalds  *                process policy.
338bccd85fSChristoph Lameter  *
341da177e4SLinus Torvalds  * default        Allocate on the local node first, or when on a VMA
351da177e4SLinus Torvalds  *                use the process policy. This is what Linux always did
361da177e4SLinus Torvalds  *		  in a NUMA aware kernel and still does by, ahem, default.
371da177e4SLinus Torvalds  *
381da177e4SLinus Torvalds  * The process policy is applied for most non-interrupt memory allocations
391da177e4SLinus Torvalds  * in that process' context. Interrupts ignore the policies and always
401da177e4SLinus Torvalds  * try to allocate on the local CPU. The VMA policy is only applied for memory
411da177e4SLinus Torvalds  * allocations for a VMA in the VM.
421da177e4SLinus Torvalds  *
431da177e4SLinus Torvalds  * Currently there are a few corner cases in swapping where the policy
441da177e4SLinus Torvalds  * is not applied, but the majority should be handled. When process policy
451da177e4SLinus Torvalds  * is used it is not remembered over swap outs/swap ins.
461da177e4SLinus Torvalds  *
471da177e4SLinus Torvalds  * Only the highest zone in the zone hierarchy gets policied. Allocations
481da177e4SLinus Torvalds  * requesting a lower zone just use default policy. This implies that
491da177e4SLinus Torvalds  * on systems with highmem, kernel lowmem allocations don't get policied.
501da177e4SLinus Torvalds  * Same with GFP_DMA allocations.
511da177e4SLinus Torvalds  *
521da177e4SLinus Torvalds  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
531da177e4SLinus Torvalds  * all users and remembered even when nobody has memory mapped.
541da177e4SLinus Torvalds  */
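/*
 * Illustrative userspace sketch (not part of this file): the policies
 * described above are normally selected from user space through the
 * set_mempolicy() and mbind() system calls declared in <numaif.h>.
 * The exact maxnode handling is glossed over here; see the man pages.
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// per-process policy: interleave new allocations over nodes 0,1
 *	set_mempolicy(MPOL_INTERLEAVE, &mask, 8 * sizeof(mask));
 *
 *	// per-VMA policy: restrict an existing mapping to nodes 0,1
 *	mbind(addr, length, MPOL_BIND, &mask, 8 * sizeof(mask), 0);
 */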
551da177e4SLinus Torvalds 
561da177e4SLinus Torvalds /* Notebook:
571da177e4SLinus Torvalds    fix mmap readahead to honour policy and enable policy for any page cache
581da177e4SLinus Torvalds    object
591da177e4SLinus Torvalds    statistics for bigpages
601da177e4SLinus Torvalds    global policy for page cache? currently it uses process policy. Requires
611da177e4SLinus Torvalds    first item above.
621da177e4SLinus Torvalds    handle mremap for shared memory (currently ignored for the policy)
631da177e4SLinus Torvalds    grows down?
641da177e4SLinus Torvalds    make bind policy root only? It can trigger oom much faster and the
651da177e4SLinus Torvalds    kernel is not always graceful about that.
661da177e4SLinus Torvalds */
671da177e4SLinus Torvalds 
68b1de0d13SMitchel Humpherys #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69b1de0d13SMitchel Humpherys 
701da177e4SLinus Torvalds #include <linux/mempolicy.h>
711da177e4SLinus Torvalds #include <linux/mm.h>
721da177e4SLinus Torvalds #include <linux/highmem.h>
731da177e4SLinus Torvalds #include <linux/hugetlb.h>
741da177e4SLinus Torvalds #include <linux/kernel.h>
751da177e4SLinus Torvalds #include <linux/sched.h>
766e84f315SIngo Molnar #include <linux/sched/mm.h>
776a3827d7SIngo Molnar #include <linux/sched/numa_balancing.h>
78f719ff9bSIngo Molnar #include <linux/sched/task.h>
791da177e4SLinus Torvalds #include <linux/nodemask.h>
801da177e4SLinus Torvalds #include <linux/cpuset.h>
811da177e4SLinus Torvalds #include <linux/slab.h>
821da177e4SLinus Torvalds #include <linux/string.h>
83b95f1b31SPaul Gortmaker #include <linux/export.h>
84b488893aSPavel Emelyanov #include <linux/nsproxy.h>
851da177e4SLinus Torvalds #include <linux/interrupt.h>
861da177e4SLinus Torvalds #include <linux/init.h>
871da177e4SLinus Torvalds #include <linux/compat.h>
8831367466SOtto Ebeling #include <linux/ptrace.h>
89dc9aa5b9SChristoph Lameter #include <linux/swap.h>
901a75a6c8SChristoph Lameter #include <linux/seq_file.h>
911a75a6c8SChristoph Lameter #include <linux/proc_fs.h>
92b20a3503SChristoph Lameter #include <linux/migrate.h>
9362b61f61SHugh Dickins #include <linux/ksm.h>
9495a402c3SChristoph Lameter #include <linux/rmap.h>
9586c3a764SDavid Quigley #include <linux/security.h>
96dbcb0f19SAdrian Bunk #include <linux/syscalls.h>
97095f1fc4SLee Schermerhorn #include <linux/ctype.h>
986d9c285aSKOSAKI Motohiro #include <linux/mm_inline.h>
99b24f53a0SLee Schermerhorn #include <linux/mmu_notifier.h>
100b1de0d13SMitchel Humpherys #include <linux/printk.h>
101c8633798SNaoya Horiguchi #include <linux/swapops.h>
102dc9aa5b9SChristoph Lameter 
1031da177e4SLinus Torvalds #include <asm/tlbflush.h>
1047c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
1051da177e4SLinus Torvalds 
10662695a84SNick Piggin #include "internal.h"
10762695a84SNick Piggin 
10838e35860SChristoph Lameter /* Internal flags */
109dc9aa5b9SChristoph Lameter #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for contiguous vmas */
11038e35860SChristoph Lameter #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111dc9aa5b9SChristoph Lameter 
112fcc234f8SPekka Enberg static struct kmem_cache *policy_cache;
113fcc234f8SPekka Enberg static struct kmem_cache *sn_cache;
1141da177e4SLinus Torvalds 
1151da177e4SLinus Torvalds /* Highest zone. A specific allocation for a zone below that is not
1161da177e4SLinus Torvalds    policied. */
1176267276fSChristoph Lameter enum zone_type policy_zone = 0;
1181da177e4SLinus Torvalds 
119bea904d5SLee Schermerhorn /*
120bea904d5SLee Schermerhorn  * run-time system-wide default policy => local allocation
121bea904d5SLee Schermerhorn  */
122e754d79dSH Hartley Sweeten static struct mempolicy default_policy = {
1231da177e4SLinus Torvalds 	.refcnt = ATOMIC_INIT(1), /* never free it */
124bea904d5SLee Schermerhorn 	.mode = MPOL_PREFERRED,
125fc36b8d3SLee Schermerhorn 	.flags = MPOL_F_LOCAL,
1261da177e4SLinus Torvalds };
1271da177e4SLinus Torvalds 
1285606e387SMel Gorman static struct mempolicy preferred_node_policy[MAX_NUMNODES];
1295606e387SMel Gorman 
13074d2c3a0SOleg Nesterov struct mempolicy *get_task_policy(struct task_struct *p)
1315606e387SMel Gorman {
1325606e387SMel Gorman 	struct mempolicy *pol = p->mempolicy;
133f15ca78eSOleg Nesterov 	int node;
1345606e387SMel Gorman 
135f15ca78eSOleg Nesterov 	if (pol)
136f15ca78eSOleg Nesterov 		return pol;
1375606e387SMel Gorman 
138f15ca78eSOleg Nesterov 	node = numa_node_id();
1391da6f0e1SJianguo Wu 	if (node != NUMA_NO_NODE) {
1401da6f0e1SJianguo Wu 		pol = &preferred_node_policy[node];
141f15ca78eSOleg Nesterov 		/* preferred_node_policy is not initialised early in boot */
142f15ca78eSOleg Nesterov 		if (pol->mode)
143f15ca78eSOleg Nesterov 			return pol;
1441da6f0e1SJianguo Wu 	}
1455606e387SMel Gorman 
146f15ca78eSOleg Nesterov 	return &default_policy;
1475606e387SMel Gorman }
1485606e387SMel Gorman 
14937012946SDavid Rientjes static const struct mempolicy_operations {
15037012946SDavid Rientjes 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
151213980c0SVlastimil Babka 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
15237012946SDavid Rientjes } mpol_ops[MPOL_MAX];
15337012946SDavid Rientjes 
154f5b087b5SDavid Rientjes static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
155f5b087b5SDavid Rientjes {
1566d556294SBob Liu 	return pol->flags & MPOL_MODE_FLAGS;
1574c50bc01SDavid Rientjes }
1584c50bc01SDavid Rientjes 
1594c50bc01SDavid Rientjes static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
1604c50bc01SDavid Rientjes 				   const nodemask_t *rel)
1614c50bc01SDavid Rientjes {
1624c50bc01SDavid Rientjes 	nodemask_t tmp;
1634c50bc01SDavid Rientjes 	nodes_fold(tmp, *orig, nodes_weight(*rel));
1644c50bc01SDavid Rientjes 	nodes_onto(*ret, tmp, *rel);
165f5b087b5SDavid Rientjes }
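/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {4,5,6},
 * nodes_fold() folds orig modulo nodes_weight(*rel) = 3, giving {0,2};
 * nodes_onto() then maps bit n of that result to the n-th set bit of
 * *rel, so bit 0 -> node 4 and bit 2 -> node 6, i.e. *ret = {4,6}.
 * The user's mask is thus interpreted relative to the allowed nodes.
 */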
166f5b087b5SDavid Rientjes 
16737012946SDavid Rientjes static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
16837012946SDavid Rientjes {
16937012946SDavid Rientjes 	if (nodes_empty(*nodes))
17037012946SDavid Rientjes 		return -EINVAL;
17137012946SDavid Rientjes 	pol->v.nodes = *nodes;
17237012946SDavid Rientjes 	return 0;
17337012946SDavid Rientjes }
17437012946SDavid Rientjes 
17537012946SDavid Rientjes static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
17637012946SDavid Rientjes {
17737012946SDavid Rientjes 	if (!nodes)
178fc36b8d3SLee Schermerhorn 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
17937012946SDavid Rientjes 	else if (nodes_empty(*nodes))
18037012946SDavid Rientjes 		return -EINVAL;			/*  no allowed nodes */
18137012946SDavid Rientjes 	else
18237012946SDavid Rientjes 		pol->v.preferred_node = first_node(*nodes);
18337012946SDavid Rientjes 	return 0;
18437012946SDavid Rientjes }
18537012946SDavid Rientjes 
18637012946SDavid Rientjes static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
18737012946SDavid Rientjes {
188859f7ef1SZhihui Zhang 	if (nodes_empty(*nodes))
18937012946SDavid Rientjes 		return -EINVAL;
19037012946SDavid Rientjes 	pol->v.nodes = *nodes;
19137012946SDavid Rientjes 	return 0;
19237012946SDavid Rientjes }
19337012946SDavid Rientjes 
19458568d2aSMiao Xie /*
19558568d2aSMiao Xie  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
19658568d2aSMiao Xie  * any, for the new policy.  mpol_new() has already validated the nodes
19758568d2aSMiao Xie  * parameter with respect to the policy mode and flags.  But we need to
19858568d2aSMiao Xie  * handle an empty nodemask with MPOL_PREFERRED here.
19958568d2aSMiao Xie  *
20058568d2aSMiao Xie  * Must be called holding task's alloc_lock to protect task's mems_allowed
20158568d2aSMiao Xie  * and mempolicy.  May also be called holding the mmap_sem for write.
20258568d2aSMiao Xie  */
2034bfc4495SKAMEZAWA Hiroyuki static int mpol_set_nodemask(struct mempolicy *pol,
2044bfc4495SKAMEZAWA Hiroyuki 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
20558568d2aSMiao Xie {
20658568d2aSMiao Xie 	int ret;
20758568d2aSMiao Xie 
20858568d2aSMiao Xie 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
20958568d2aSMiao Xie 	if (pol == NULL)
21058568d2aSMiao Xie 		return 0;
21101f13bd6SLai Jiangshan 	/* Check N_MEMORY */
2124bfc4495SKAMEZAWA Hiroyuki 	nodes_and(nsc->mask1,
21301f13bd6SLai Jiangshan 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
21458568d2aSMiao Xie 
21558568d2aSMiao Xie 	VM_BUG_ON(!nodes);
21658568d2aSMiao Xie 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
21758568d2aSMiao Xie 		nodes = NULL;	/* explicit local allocation */
21858568d2aSMiao Xie 	else {
21958568d2aSMiao Xie 		if (pol->flags & MPOL_F_RELATIVE_NODES)
2204bfc4495SKAMEZAWA Hiroyuki 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
22158568d2aSMiao Xie 		else
2224bfc4495SKAMEZAWA Hiroyuki 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
2234bfc4495SKAMEZAWA Hiroyuki 
22458568d2aSMiao Xie 		if (mpol_store_user_nodemask(pol))
22558568d2aSMiao Xie 			pol->w.user_nodemask = *nodes;
22658568d2aSMiao Xie 		else
22758568d2aSMiao Xie 			pol->w.cpuset_mems_allowed =
22858568d2aSMiao Xie 						cpuset_current_mems_allowed;
22958568d2aSMiao Xie 	}
23058568d2aSMiao Xie 
2314bfc4495SKAMEZAWA Hiroyuki 	if (nodes)
2324bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
2334bfc4495SKAMEZAWA Hiroyuki 	else
2344bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_ops[pol->mode].create(pol, NULL);
23558568d2aSMiao Xie 	return ret;
23658568d2aSMiao Xie }
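/*
 * Example (illustrative, assuming nodes 0 and 1 both have memory): with
 * cpuset_current_mems_allowed = {0,1} and a caller requesting MPOL_BIND
 * on {1,2} without MPOL_F_STATIC_NODES/MPOL_F_RELATIVE_NODES, nsc->mask2
 * becomes the intersection {1}, mpol_ops[MPOL_BIND].create() records it
 * in the policy, and the current cpuset mask is saved in
 * pol->w.cpuset_mems_allowed for later rebinds.  An empty nodemask with
 * MPOL_PREFERRED instead means explicit local allocation.
 */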
23758568d2aSMiao Xie 
23858568d2aSMiao Xie /*
23958568d2aSMiao Xie  * This function just creates a new policy, does some basic checks and simple
24058568d2aSMiao Xie  * initialization. You must invoke mpol_set_nodemask() to set nodes.
24158568d2aSMiao Xie  */
242028fec41SDavid Rientjes static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
243028fec41SDavid Rientjes 				  nodemask_t *nodes)
2441da177e4SLinus Torvalds {
2451da177e4SLinus Torvalds 	struct mempolicy *policy;
2461da177e4SLinus Torvalds 
247028fec41SDavid Rientjes 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
24800ef2d2fSDavid Rientjes 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
249140d5a49SPaul Mundt 
2503e1f0645SDavid Rientjes 	if (mode == MPOL_DEFAULT) {
2513e1f0645SDavid Rientjes 		if (nodes && !nodes_empty(*nodes))
25237012946SDavid Rientjes 			return ERR_PTR(-EINVAL);
253d3a71033SLee Schermerhorn 		return NULL;
25437012946SDavid Rientjes 	}
2553e1f0645SDavid Rientjes 	VM_BUG_ON(!nodes);
2563e1f0645SDavid Rientjes 
2573e1f0645SDavid Rientjes 	/*
2583e1f0645SDavid Rientjes 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
2593e1f0645SDavid Rientjes 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
2603e1f0645SDavid Rientjes 	 * All other modes require a valid pointer to a non-empty nodemask.
2613e1f0645SDavid Rientjes 	 */
2623e1f0645SDavid Rientjes 	if (mode == MPOL_PREFERRED) {
2633e1f0645SDavid Rientjes 		if (nodes_empty(*nodes)) {
2643e1f0645SDavid Rientjes 			if (((flags & MPOL_F_STATIC_NODES) ||
2653e1f0645SDavid Rientjes 			     (flags & MPOL_F_RELATIVE_NODES)))
2663e1f0645SDavid Rientjes 				return ERR_PTR(-EINVAL);
2673e1f0645SDavid Rientjes 		}
268479e2802SPeter Zijlstra 	} else if (mode == MPOL_LOCAL) {
2698d303e44SPiotr Kwapulinski 		if (!nodes_empty(*nodes) ||
2708d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_STATIC_NODES) ||
2718d303e44SPiotr Kwapulinski 		    (flags & MPOL_F_RELATIVE_NODES))
272479e2802SPeter Zijlstra 			return ERR_PTR(-EINVAL);
273479e2802SPeter Zijlstra 		mode = MPOL_PREFERRED;
2743e1f0645SDavid Rientjes 	} else if (nodes_empty(*nodes))
2753e1f0645SDavid Rientjes 		return ERR_PTR(-EINVAL);
2761da177e4SLinus Torvalds 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2771da177e4SLinus Torvalds 	if (!policy)
2781da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2791da177e4SLinus Torvalds 	atomic_set(&policy->refcnt, 1);
28045c4745aSLee Schermerhorn 	policy->mode = mode;
28137012946SDavid Rientjes 	policy->flags = flags;
2823e1f0645SDavid Rientjes 
28337012946SDavid Rientjes 	return policy;
28437012946SDavid Rientjes }
28537012946SDavid Rientjes 
28652cd3b07SLee Schermerhorn /* Slow path of a mpol destructor. */
28752cd3b07SLee Schermerhorn void __mpol_put(struct mempolicy *p)
28852cd3b07SLee Schermerhorn {
28952cd3b07SLee Schermerhorn 	if (!atomic_dec_and_test(&p->refcnt))
29052cd3b07SLee Schermerhorn 		return;
29152cd3b07SLee Schermerhorn 	kmem_cache_free(policy_cache, p);
29252cd3b07SLee Schermerhorn }
29352cd3b07SLee Schermerhorn 
294213980c0SVlastimil Babka static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
29537012946SDavid Rientjes {
29637012946SDavid Rientjes }
29737012946SDavid Rientjes 
298213980c0SVlastimil Babka static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
2991d0d2680SDavid Rientjes {
3001d0d2680SDavid Rientjes 	nodemask_t tmp;
3011d0d2680SDavid Rientjes 
30237012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES)
30337012946SDavid Rientjes 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
30437012946SDavid Rientjes 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
30537012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3061d0d2680SDavid Rientjes 	else {
307213980c0SVlastimil Babka 		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
308213980c0SVlastimil Babka 								*nodes);
30929b190faSzhong jiang 		pol->w.cpuset_mems_allowed = *nodes;
3101d0d2680SDavid Rientjes 	}
31137012946SDavid Rientjes 
312708c1bbcSMiao Xie 	if (nodes_empty(tmp))
313708c1bbcSMiao Xie 		tmp = *nodes;
314708c1bbcSMiao Xie 
3151d0d2680SDavid Rientjes 	pol->v.nodes = tmp;
31637012946SDavid Rientjes }
31737012946SDavid Rientjes 
31837012946SDavid Rientjes static void mpol_rebind_preferred(struct mempolicy *pol,
319213980c0SVlastimil Babka 						const nodemask_t *nodes)
32037012946SDavid Rientjes {
32137012946SDavid Rientjes 	nodemask_t tmp;
32237012946SDavid Rientjes 
32337012946SDavid Rientjes 	if (pol->flags & MPOL_F_STATIC_NODES) {
3241d0d2680SDavid Rientjes 		int node = first_node(pol->w.user_nodemask);
3251d0d2680SDavid Rientjes 
326fc36b8d3SLee Schermerhorn 		if (node_isset(node, *nodes)) {
3271d0d2680SDavid Rientjes 			pol->v.preferred_node = node;
328fc36b8d3SLee Schermerhorn 			pol->flags &= ~MPOL_F_LOCAL;
329fc36b8d3SLee Schermerhorn 		} else
330fc36b8d3SLee Schermerhorn 			pol->flags |= MPOL_F_LOCAL;
33137012946SDavid Rientjes 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
33237012946SDavid Rientjes 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
3331d0d2680SDavid Rientjes 		pol->v.preferred_node = first_node(tmp);
334fc36b8d3SLee Schermerhorn 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
3351d0d2680SDavid Rientjes 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
33637012946SDavid Rientjes 						   pol->w.cpuset_mems_allowed,
33737012946SDavid Rientjes 						   *nodes);
33837012946SDavid Rientjes 		pol->w.cpuset_mems_allowed = *nodes;
3391d0d2680SDavid Rientjes 	}
3401d0d2680SDavid Rientjes }
34137012946SDavid Rientjes 
342708c1bbcSMiao Xie /*
343708c1bbcSMiao Xie  * mpol_rebind_policy - Migrate a policy to a different set of nodes
344708c1bbcSMiao Xie  *
345213980c0SVlastimil Babka  * Per-vma policies are protected by mmap_sem. Allocations using per-task
346213980c0SVlastimil Babka  * policies are protected by task->mems_allowed_seq to prevent a premature
347213980c0SVlastimil Babka  * OOM/allocation failure due to parallel nodemask modification.
348708c1bbcSMiao Xie  */
349213980c0SVlastimil Babka static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
35037012946SDavid Rientjes {
35137012946SDavid Rientjes 	if (!pol)
35237012946SDavid Rientjes 		return;
3532e25644eSVlastimil Babka 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
35437012946SDavid Rientjes 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
35537012946SDavid Rientjes 		return;
356708c1bbcSMiao Xie 
357213980c0SVlastimil Babka 	mpol_ops[pol->mode].rebind(pol, newmask);
3581d0d2680SDavid Rientjes }
3591d0d2680SDavid Rientjes 
3601d0d2680SDavid Rientjes /*
3611d0d2680SDavid Rientjes  * Wrapper for mpol_rebind_policy() that just requires task
3621d0d2680SDavid Rientjes  * pointer, and updates task mempolicy.
36358568d2aSMiao Xie  *
36458568d2aSMiao Xie  * Called with task's alloc_lock held.
3651d0d2680SDavid Rientjes  */
3661d0d2680SDavid Rientjes 
367213980c0SVlastimil Babka void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
3681d0d2680SDavid Rientjes {
369213980c0SVlastimil Babka 	mpol_rebind_policy(tsk->mempolicy, new);
3701d0d2680SDavid Rientjes }
3711d0d2680SDavid Rientjes 
3721d0d2680SDavid Rientjes /*
3731d0d2680SDavid Rientjes  * Rebind each vma in mm to new nodemask.
3741d0d2680SDavid Rientjes  *
3751d0d2680SDavid Rientjes  * Call holding a reference to mm.  Takes mm->mmap_sem during call.
3761d0d2680SDavid Rientjes  */
3771d0d2680SDavid Rientjes 
3781d0d2680SDavid Rientjes void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
3791d0d2680SDavid Rientjes {
3801d0d2680SDavid Rientjes 	struct vm_area_struct *vma;
3811d0d2680SDavid Rientjes 
3821d0d2680SDavid Rientjes 	down_write(&mm->mmap_sem);
3831d0d2680SDavid Rientjes 	for (vma = mm->mmap; vma; vma = vma->vm_next)
384213980c0SVlastimil Babka 		mpol_rebind_policy(vma->vm_policy, new);
3851d0d2680SDavid Rientjes 	up_write(&mm->mmap_sem);
3861d0d2680SDavid Rientjes }
3871d0d2680SDavid Rientjes 
38837012946SDavid Rientjes static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
38937012946SDavid Rientjes 	[MPOL_DEFAULT] = {
39037012946SDavid Rientjes 		.rebind = mpol_rebind_default,
39137012946SDavid Rientjes 	},
39237012946SDavid Rientjes 	[MPOL_INTERLEAVE] = {
39337012946SDavid Rientjes 		.create = mpol_new_interleave,
39437012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
39537012946SDavid Rientjes 	},
39637012946SDavid Rientjes 	[MPOL_PREFERRED] = {
39737012946SDavid Rientjes 		.create = mpol_new_preferred,
39837012946SDavid Rientjes 		.rebind = mpol_rebind_preferred,
39937012946SDavid Rientjes 	},
40037012946SDavid Rientjes 	[MPOL_BIND] = {
40137012946SDavid Rientjes 		.create = mpol_new_bind,
40237012946SDavid Rientjes 		.rebind = mpol_rebind_nodemask,
40337012946SDavid Rientjes 	},
40437012946SDavid Rientjes };
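/*
 * Typical flow (sketch): a caller such as do_set_mempolicy() below uses
 * mpol_new(mode, flags, nodes) to allocate and validate a policy, then
 * mpol_set_nodemask(), which dispatches to mpol_ops[mode].create() above
 * to record the effective nodemask.  Later cpuset/nodemask changes reach
 * mpol_rebind_policy(), which dispatches to mpol_ops[mode].rebind().
 */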
40537012946SDavid Rientjes 
406a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
407fc301289SChristoph Lameter 				unsigned long flags);
4081a75a6c8SChristoph Lameter 
4096f4576e3SNaoya Horiguchi struct queue_pages {
4106f4576e3SNaoya Horiguchi 	struct list_head *pagelist;
4116f4576e3SNaoya Horiguchi 	unsigned long flags;
4126f4576e3SNaoya Horiguchi 	nodemask_t *nmask;
4136f4576e3SNaoya Horiguchi 	struct vm_area_struct *prev;
4146f4576e3SNaoya Horiguchi };
4156f4576e3SNaoya Horiguchi 
41698094945SNaoya Horiguchi /*
41788aaa2a1SNaoya Horiguchi  * Check if the page's nid is in qp->nmask.
41888aaa2a1SNaoya Horiguchi  *
41988aaa2a1SNaoya Horiguchi  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
42088aaa2a1SNaoya Horiguchi  * in the complement of qp->nmask.
42188aaa2a1SNaoya Horiguchi  */
42288aaa2a1SNaoya Horiguchi static inline bool queue_pages_required(struct page *page,
42388aaa2a1SNaoya Horiguchi 					struct queue_pages *qp)
42488aaa2a1SNaoya Horiguchi {
42588aaa2a1SNaoya Horiguchi 	int nid = page_to_nid(page);
42688aaa2a1SNaoya Horiguchi 	unsigned long flags = qp->flags;
42788aaa2a1SNaoya Horiguchi 
42888aaa2a1SNaoya Horiguchi 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
42988aaa2a1SNaoya Horiguchi }
43088aaa2a1SNaoya Horiguchi 
431a7f40cfeSYang Shi /*
432d8835445SYang Shi  * queue_pages_pmd() has four possible return values:
433d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
434d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435d8835445SYang Shi  *     specified.
436d8835445SYang Shi  * 2 - THP was split.
437d8835445SYang Shi  * -EIO - the pmd is a migration entry, or only MPOL_MF_STRICT was specified and an
438d8835445SYang Shi  *        existing page was already on a node that does not follow the
439d8835445SYang Shi  *        policy.
440a7f40cfeSYang Shi  */
441c8633798SNaoya Horiguchi static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
442c8633798SNaoya Horiguchi 				unsigned long end, struct mm_walk *walk)
443c8633798SNaoya Horiguchi {
444c8633798SNaoya Horiguchi 	int ret = 0;
445c8633798SNaoya Horiguchi 	struct page *page;
446c8633798SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
447c8633798SNaoya Horiguchi 	unsigned long flags;
448c8633798SNaoya Horiguchi 
449c8633798SNaoya Horiguchi 	if (unlikely(is_pmd_migration_entry(*pmd))) {
450a7f40cfeSYang Shi 		ret = -EIO;
451c8633798SNaoya Horiguchi 		goto unlock;
452c8633798SNaoya Horiguchi 	}
453c8633798SNaoya Horiguchi 	page = pmd_page(*pmd);
454c8633798SNaoya Horiguchi 	if (is_huge_zero_page(page)) {
455c8633798SNaoya Horiguchi 		spin_unlock(ptl);
456c8633798SNaoya Horiguchi 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457d8835445SYang Shi 		ret = 2;
458c8633798SNaoya Horiguchi 		goto out;
459c8633798SNaoya Horiguchi 	}
460d8835445SYang Shi 	if (!queue_pages_required(page, qp))
461c8633798SNaoya Horiguchi 		goto unlock;
462c8633798SNaoya Horiguchi 
463c8633798SNaoya Horiguchi 	flags = qp->flags;
464c8633798SNaoya Horiguchi 	/* go to thp migration */
465a7f40cfeSYang Shi 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
466a53190a4SYang Shi 		if (!vma_migratable(walk->vma) ||
467a53190a4SYang Shi 		    migrate_page_add(page, qp->pagelist, flags)) {
468d8835445SYang Shi 			ret = 1;
469a7f40cfeSYang Shi 			goto unlock;
470a7f40cfeSYang Shi 		}
471a7f40cfeSYang Shi 	} else
472a7f40cfeSYang Shi 		ret = -EIO;
473c8633798SNaoya Horiguchi unlock:
474c8633798SNaoya Horiguchi 	spin_unlock(ptl);
475c8633798SNaoya Horiguchi out:
476c8633798SNaoya Horiguchi 	return ret;
477c8633798SNaoya Horiguchi }
478c8633798SNaoya Horiguchi 
47988aaa2a1SNaoya Horiguchi /*
48098094945SNaoya Horiguchi  * Scan through pages checking if pages follow certain conditions,
48198094945SNaoya Horiguchi  * and move them to the pagelist if they do.
482d8835445SYang Shi  *
483d8835445SYang Shi  * queue_pages_pte_range() has three possible return values:
484d8835445SYang Shi  * 0 - pages are placed on the right node or queued successfully.
485d8835445SYang Shi  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486d8835445SYang Shi  *     specified.
487d8835445SYang Shi  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488d8835445SYang Shi  *        on a node that does not follow the policy.
48998094945SNaoya Horiguchi  */
4906f4576e3SNaoya Horiguchi static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
4916f4576e3SNaoya Horiguchi 			unsigned long end, struct mm_walk *walk)
4921da177e4SLinus Torvalds {
4936f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
4946f4576e3SNaoya Horiguchi 	struct page *page;
4956f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
4966f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
497c8633798SNaoya Horiguchi 	int ret;
498d8835445SYang Shi 	bool has_unmovable = false;
49991612e0dSHugh Dickins 	pte_t *pte;
500705e87c0SHugh Dickins 	spinlock_t *ptl;
501941150a3SHugh Dickins 
502c8633798SNaoya Horiguchi 	ptl = pmd_trans_huge_lock(pmd, vma);
503c8633798SNaoya Horiguchi 	if (ptl) {
504c8633798SNaoya Horiguchi 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
505d8835445SYang Shi 		if (ret != 2)
506a7f40cfeSYang Shi 			return ret;
507248db92dSKirill A. Shutemov 	}
508d8835445SYang Shi 	/* THP was split, fall through to pte walk */
50991612e0dSHugh Dickins 
510337d9abfSNaoya Horiguchi 	if (pmd_trans_unstable(pmd))
511337d9abfSNaoya Horiguchi 		return 0;
51294723aafSMichal Hocko 
5136f4576e3SNaoya Horiguchi 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
5146f4576e3SNaoya Horiguchi 	for (; addr != end; pte++, addr += PAGE_SIZE) {
51591612e0dSHugh Dickins 		if (!pte_present(*pte))
51691612e0dSHugh Dickins 			continue;
5176aab341eSLinus Torvalds 		page = vm_normal_page(vma, addr, *pte);
5186aab341eSLinus Torvalds 		if (!page)
51991612e0dSHugh Dickins 			continue;
520053837fcSNick Piggin 		/*
52162b61f61SHugh Dickins 		 * vm_normal_page() filters out zero pages, but there might
52262b61f61SHugh Dickins 		 * still be PageReserved pages to skip, perhaps in a VDSO.
523053837fcSNick Piggin 		 */
524b79bc0a0SHugh Dickins 		if (PageReserved(page))
525f4598c8bSChristoph Lameter 			continue;
52688aaa2a1SNaoya Horiguchi 		if (!queue_pages_required(page, qp))
52738e35860SChristoph Lameter 			continue;
528a7f40cfeSYang Shi 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
529d8835445SYang Shi 			/* MPOL_MF_STRICT must be specified if we get here */
530d8835445SYang Shi 			if (!vma_migratable(vma)) {
531d8835445SYang Shi 				has_unmovable = true;
532a7f40cfeSYang Shi 				break;
533d8835445SYang Shi 			}
534a53190a4SYang Shi 
535a53190a4SYang Shi 			/*
536a53190a4SYang Shi 			 * Do not abort immediately since there may be
537a53190a4SYang Shi 			 * temporarily off-LRU pages in the range.  We still
538a53190a4SYang Shi 			 * need to migrate the other LRU pages.
539a53190a4SYang Shi 			 */
540a53190a4SYang Shi 			if (migrate_page_add(page, qp->pagelist, flags))
541a53190a4SYang Shi 				has_unmovable = true;
542a7f40cfeSYang Shi 		} else
543a7f40cfeSYang Shi 			break;
5446f4576e3SNaoya Horiguchi 	}
5456f4576e3SNaoya Horiguchi 	pte_unmap_unlock(pte - 1, ptl);
5466f4576e3SNaoya Horiguchi 	cond_resched();
547d8835445SYang Shi 
548d8835445SYang Shi 	if (has_unmovable)
549d8835445SYang Shi 		return 1;
550d8835445SYang Shi 
551a7f40cfeSYang Shi 	return addr != end ? -EIO : 0;
55291612e0dSHugh Dickins }
55391612e0dSHugh Dickins 
5546f4576e3SNaoya Horiguchi static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
5556f4576e3SNaoya Horiguchi 			       unsigned long addr, unsigned long end,
5566f4576e3SNaoya Horiguchi 			       struct mm_walk *walk)
557e2d8cf40SNaoya Horiguchi {
558e2d8cf40SNaoya Horiguchi #ifdef CONFIG_HUGETLB_PAGE
5596f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
5606f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
561e2d8cf40SNaoya Horiguchi 	struct page *page;
562cb900f41SKirill A. Shutemov 	spinlock_t *ptl;
563d4c54919SNaoya Horiguchi 	pte_t entry;
564e2d8cf40SNaoya Horiguchi 
5656f4576e3SNaoya Horiguchi 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
5666f4576e3SNaoya Horiguchi 	entry = huge_ptep_get(pte);
567d4c54919SNaoya Horiguchi 	if (!pte_present(entry))
568d4c54919SNaoya Horiguchi 		goto unlock;
569d4c54919SNaoya Horiguchi 	page = pte_page(entry);
57088aaa2a1SNaoya Horiguchi 	if (!queue_pages_required(page, qp))
571e2d8cf40SNaoya Horiguchi 		goto unlock;
572e2d8cf40SNaoya Horiguchi 	/* With MPOL_MF_MOVE, we migrate only unshared hugepages. */
573e2d8cf40SNaoya Horiguchi 	if (flags & (MPOL_MF_MOVE_ALL) ||
574e2d8cf40SNaoya Horiguchi 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
5756f4576e3SNaoya Horiguchi 		isolate_huge_page(page, qp->pagelist);
576e2d8cf40SNaoya Horiguchi unlock:
577cb900f41SKirill A. Shutemov 	spin_unlock(ptl);
578e2d8cf40SNaoya Horiguchi #else
579e2d8cf40SNaoya Horiguchi 	BUG();
580e2d8cf40SNaoya Horiguchi #endif
58191612e0dSHugh Dickins 	return 0;
5821da177e4SLinus Torvalds }
5831da177e4SLinus Torvalds 
5845877231fSAneesh Kumar K.V #ifdef CONFIG_NUMA_BALANCING
585b24f53a0SLee Schermerhorn /*
5864b10e7d5SMel Gorman  * This is used to mark a range of virtual addresses to be inaccessible.
5874b10e7d5SMel Gorman  * These are later cleared by a NUMA hinting fault. Depending on these
5884b10e7d5SMel Gorman  * faults, pages may be migrated for better NUMA placement.
5894b10e7d5SMel Gorman  *
5904b10e7d5SMel Gorman  * This is assuming that NUMA faults are handled using PROT_NONE. If
5914b10e7d5SMel Gorman  * an architecture makes a different choice, it will need further
5924b10e7d5SMel Gorman  * changes to the core.
593b24f53a0SLee Schermerhorn  */
5944b10e7d5SMel Gorman unsigned long change_prot_numa(struct vm_area_struct *vma,
5954b10e7d5SMel Gorman 			unsigned long addr, unsigned long end)
596b24f53a0SLee Schermerhorn {
5974b10e7d5SMel Gorman 	int nr_updated;
598b24f53a0SLee Schermerhorn 
5994d942466SMel Gorman 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
60003c5a6e1SMel Gorman 	if (nr_updated)
60103c5a6e1SMel Gorman 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
602b24f53a0SLee Schermerhorn 
6034b10e7d5SMel Gorman 	return nr_updated;
604b24f53a0SLee Schermerhorn }
605b24f53a0SLee Schermerhorn #else
606b24f53a0SLee Schermerhorn static unsigned long change_prot_numa(struct vm_area_struct *vma,
607b24f53a0SLee Schermerhorn 			unsigned long addr, unsigned long end)
608b24f53a0SLee Schermerhorn {
609b24f53a0SLee Schermerhorn 	return 0;
610b24f53a0SLee Schermerhorn }
6115877231fSAneesh Kumar K.V #endif /* CONFIG_NUMA_BALANCING */
612b24f53a0SLee Schermerhorn 
6136f4576e3SNaoya Horiguchi static int queue_pages_test_walk(unsigned long start, unsigned long end,
6146f4576e3SNaoya Horiguchi 				struct mm_walk *walk)
6151da177e4SLinus Torvalds {
6166f4576e3SNaoya Horiguchi 	struct vm_area_struct *vma = walk->vma;
6176f4576e3SNaoya Horiguchi 	struct queue_pages *qp = walk->private;
6185b952b3cSAndi Kleen 	unsigned long endvma = vma->vm_end;
6196f4576e3SNaoya Horiguchi 	unsigned long flags = qp->flags;
620dc9aa5b9SChristoph Lameter 
621a7f40cfeSYang Shi 	/*
622a7f40cfeSYang Shi 	 * We need to check MPOL_MF_STRICT so -EIO can be returned
623a7f40cfeSYang Shi 	 * regardless of vma_migratable()
624a7f40cfeSYang Shi 	 */
625a7f40cfeSYang Shi 	if (!vma_migratable(vma) &&
626a7f40cfeSYang Shi 	    !(flags & MPOL_MF_STRICT))
62748684a65SNaoya Horiguchi 		return 1;
62848684a65SNaoya Horiguchi 
6295b952b3cSAndi Kleen 	if (endvma > end)
6305b952b3cSAndi Kleen 		endvma = end;
6315b952b3cSAndi Kleen 	if (vma->vm_start > start)
6325b952b3cSAndi Kleen 		start = vma->vm_start;
633b24f53a0SLee Schermerhorn 
634b24f53a0SLee Schermerhorn 	if (!(flags & MPOL_MF_DISCONTIG_OK)) {
635b24f53a0SLee Schermerhorn 		if (!vma->vm_next && vma->vm_end < end)
636d05f0cdcSHugh Dickins 			return -EFAULT;
6376f4576e3SNaoya Horiguchi 		if (qp->prev && qp->prev->vm_end < vma->vm_start)
638d05f0cdcSHugh Dickins 			return -EFAULT;
639b24f53a0SLee Schermerhorn 	}
640b24f53a0SLee Schermerhorn 
6416f4576e3SNaoya Horiguchi 	qp->prev = vma;
6426f4576e3SNaoya Horiguchi 
643b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY) {
6442c0346a3SMel Gorman 		/* Similar to task_numa_work, skip inaccessible VMAs */
6454355c018SLiang Chen 		if (!is_vm_hugetlb_page(vma) &&
6464355c018SLiang Chen 			(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
6474355c018SLiang Chen 			!(vma->vm_flags & VM_MIXEDMAP))
648b24f53a0SLee Schermerhorn 			change_prot_numa(vma, start, endvma);
6496f4576e3SNaoya Horiguchi 		return 1;
650b24f53a0SLee Schermerhorn 	}
651b24f53a0SLee Schermerhorn 
6526f4576e3SNaoya Horiguchi 	/* queue pages from current vma */
653a7f40cfeSYang Shi 	if (flags & MPOL_MF_VALID)
6546f4576e3SNaoya Horiguchi 		return 0;
6556f4576e3SNaoya Horiguchi 	return 1;
6566f4576e3SNaoya Horiguchi }
657b24f53a0SLee Schermerhorn 
6586f4576e3SNaoya Horiguchi /*
6596f4576e3SNaoya Horiguchi  * Walk through page tables and collect pages to be migrated.
6606f4576e3SNaoya Horiguchi  *
6616f4576e3SNaoya Horiguchi  * If pages found in a given range are on a set of nodes (determined by
6626f4576e3SNaoya Horiguchi  * @nodes and @flags), they are isolated and queued to the pagelist, which is
663d8835445SYang Shi  * passed via @private.
664d8835445SYang Shi  *
665d8835445SYang Shi  * queue_pages_range() has three possible return values:
666d8835445SYang Shi  * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
667d8835445SYang Shi  *     specified.
668d8835445SYang Shi  * 0 - pages queued successfully or no misplaced page found.
669d8835445SYang Shi  * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
6706f4576e3SNaoya Horiguchi  */
6716f4576e3SNaoya Horiguchi static int
6726f4576e3SNaoya Horiguchi queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
6736f4576e3SNaoya Horiguchi 		nodemask_t *nodes, unsigned long flags,
6746f4576e3SNaoya Horiguchi 		struct list_head *pagelist)
6756f4576e3SNaoya Horiguchi {
6766f4576e3SNaoya Horiguchi 	struct queue_pages qp = {
6776f4576e3SNaoya Horiguchi 		.pagelist = pagelist,
6786f4576e3SNaoya Horiguchi 		.flags = flags,
6796f4576e3SNaoya Horiguchi 		.nmask = nodes,
6806f4576e3SNaoya Horiguchi 		.prev = NULL,
6816f4576e3SNaoya Horiguchi 	};
6826f4576e3SNaoya Horiguchi 	struct mm_walk queue_pages_walk = {
6836f4576e3SNaoya Horiguchi 		.hugetlb_entry = queue_pages_hugetlb,
6846f4576e3SNaoya Horiguchi 		.pmd_entry = queue_pages_pte_range,
6856f4576e3SNaoya Horiguchi 		.test_walk = queue_pages_test_walk,
6866f4576e3SNaoya Horiguchi 		.mm = mm,
6876f4576e3SNaoya Horiguchi 		.private = &qp,
6886f4576e3SNaoya Horiguchi 	};
6896f4576e3SNaoya Horiguchi 
6906f4576e3SNaoya Horiguchi 	return walk_page_range(start, end, &queue_pages_walk);
6911da177e4SLinus Torvalds }
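/*
 * Usage sketch: migrate_to_node() below isolates every page of the mm
 * that currently sits on @source roughly like this:
 *
 *	nodes_clear(nmask);
 *	node_set(source, nmask);
 *	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 *			  flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 *
 * and then hands &pagelist to migrate_pages().
 */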
6921da177e4SLinus Torvalds 
693869833f2SKOSAKI Motohiro /*
694869833f2SKOSAKI Motohiro  * Apply policy to a single VMA
695869833f2SKOSAKI Motohiro  * This must be called with the mmap_sem held for writing.
696869833f2SKOSAKI Motohiro  */
697869833f2SKOSAKI Motohiro static int vma_replace_policy(struct vm_area_struct *vma,
698869833f2SKOSAKI Motohiro 						struct mempolicy *pol)
6998d34694cSKOSAKI Motohiro {
700869833f2SKOSAKI Motohiro 	int err;
701869833f2SKOSAKI Motohiro 	struct mempolicy *old;
702869833f2SKOSAKI Motohiro 	struct mempolicy *new;
7038d34694cSKOSAKI Motohiro 
7048d34694cSKOSAKI Motohiro 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
7058d34694cSKOSAKI Motohiro 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
7068d34694cSKOSAKI Motohiro 		 vma->vm_ops, vma->vm_file,
7078d34694cSKOSAKI Motohiro 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
7088d34694cSKOSAKI Motohiro 
709869833f2SKOSAKI Motohiro 	new = mpol_dup(pol);
710869833f2SKOSAKI Motohiro 	if (IS_ERR(new))
711869833f2SKOSAKI Motohiro 		return PTR_ERR(new);
712869833f2SKOSAKI Motohiro 
713869833f2SKOSAKI Motohiro 	if (vma->vm_ops && vma->vm_ops->set_policy) {
7148d34694cSKOSAKI Motohiro 		err = vma->vm_ops->set_policy(vma, new);
715869833f2SKOSAKI Motohiro 		if (err)
716869833f2SKOSAKI Motohiro 			goto err_out;
7178d34694cSKOSAKI Motohiro 	}
718869833f2SKOSAKI Motohiro 
719869833f2SKOSAKI Motohiro 	old = vma->vm_policy;
720869833f2SKOSAKI Motohiro 	vma->vm_policy = new; /* protected by mmap_sem */
721869833f2SKOSAKI Motohiro 	mpol_put(old);
722869833f2SKOSAKI Motohiro 
723869833f2SKOSAKI Motohiro 	return 0;
724869833f2SKOSAKI Motohiro  err_out:
725869833f2SKOSAKI Motohiro 	mpol_put(new);
7268d34694cSKOSAKI Motohiro 	return err;
7278d34694cSKOSAKI Motohiro }
7288d34694cSKOSAKI Motohiro 
7291da177e4SLinus Torvalds /* Step 2: apply policy to a range and do splits. */
7309d8cebd4SKOSAKI Motohiro static int mbind_range(struct mm_struct *mm, unsigned long start,
7319d8cebd4SKOSAKI Motohiro 		       unsigned long end, struct mempolicy *new_pol)
7321da177e4SLinus Torvalds {
7331da177e4SLinus Torvalds 	struct vm_area_struct *next;
7349d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *prev;
7359d8cebd4SKOSAKI Motohiro 	struct vm_area_struct *vma;
7369d8cebd4SKOSAKI Motohiro 	int err = 0;
737e26a5114SKOSAKI Motohiro 	pgoff_t pgoff;
7389d8cebd4SKOSAKI Motohiro 	unsigned long vmstart;
7399d8cebd4SKOSAKI Motohiro 	unsigned long vmend;
7401da177e4SLinus Torvalds 
741097d5910SLinus Torvalds 	vma = find_vma(mm, start);
7429d8cebd4SKOSAKI Motohiro 	if (!vma || vma->vm_start > start)
7439d8cebd4SKOSAKI Motohiro 		return -EFAULT;
7449d8cebd4SKOSAKI Motohiro 
745097d5910SLinus Torvalds 	prev = vma->vm_prev;
746e26a5114SKOSAKI Motohiro 	if (start > vma->vm_start)
747e26a5114SKOSAKI Motohiro 		prev = vma;
748e26a5114SKOSAKI Motohiro 
7499d8cebd4SKOSAKI Motohiro 	for (; vma && vma->vm_start < end; prev = vma, vma = next) {
7501da177e4SLinus Torvalds 		next = vma->vm_next;
7519d8cebd4SKOSAKI Motohiro 		vmstart = max(start, vma->vm_start);
7529d8cebd4SKOSAKI Motohiro 		vmend   = min(end, vma->vm_end);
7539d8cebd4SKOSAKI Motohiro 
754e26a5114SKOSAKI Motohiro 		if (mpol_equal(vma_policy(vma), new_pol))
755e26a5114SKOSAKI Motohiro 			continue;
756e26a5114SKOSAKI Motohiro 
757e26a5114SKOSAKI Motohiro 		pgoff = vma->vm_pgoff +
758e26a5114SKOSAKI Motohiro 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
7599d8cebd4SKOSAKI Motohiro 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
760e26a5114SKOSAKI Motohiro 				 vma->anon_vma, vma->vm_file, pgoff,
76119a809afSAndrea Arcangeli 				 new_pol, vma->vm_userfaultfd_ctx);
7629d8cebd4SKOSAKI Motohiro 		if (prev) {
7639d8cebd4SKOSAKI Motohiro 			vma = prev;
7649d8cebd4SKOSAKI Motohiro 			next = vma->vm_next;
7653964acd0SOleg Nesterov 			if (mpol_equal(vma_policy(vma), new_pol))
7669d8cebd4SKOSAKI Motohiro 				continue;
7673964acd0SOleg Nesterov 			/* vma_merge() joined vma && vma->next, case 8 */
7683964acd0SOleg Nesterov 			goto replace;
7691da177e4SLinus Torvalds 		}
7709d8cebd4SKOSAKI Motohiro 		if (vma->vm_start != vmstart) {
7719d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
7729d8cebd4SKOSAKI Motohiro 			if (err)
7739d8cebd4SKOSAKI Motohiro 				goto out;
7749d8cebd4SKOSAKI Motohiro 		}
7759d8cebd4SKOSAKI Motohiro 		if (vma->vm_end != vmend) {
7769d8cebd4SKOSAKI Motohiro 			err = split_vma(vma->vm_mm, vma, vmend, 0);
7779d8cebd4SKOSAKI Motohiro 			if (err)
7789d8cebd4SKOSAKI Motohiro 				goto out;
7799d8cebd4SKOSAKI Motohiro 		}
7803964acd0SOleg Nesterov  replace:
781869833f2SKOSAKI Motohiro 		err = vma_replace_policy(vma, new_pol);
7829d8cebd4SKOSAKI Motohiro 		if (err)
7839d8cebd4SKOSAKI Motohiro 			goto out;
7849d8cebd4SKOSAKI Motohiro 	}
7859d8cebd4SKOSAKI Motohiro 
7869d8cebd4SKOSAKI Motohiro  out:
7871da177e4SLinus Torvalds 	return err;
7881da177e4SLinus Torvalds }
7891da177e4SLinus Torvalds 
7901da177e4SLinus Torvalds /* Set the process memory policy */
791028fec41SDavid Rientjes static long do_set_mempolicy(unsigned short mode, unsigned short flags,
792028fec41SDavid Rientjes 			     nodemask_t *nodes)
7931da177e4SLinus Torvalds {
79458568d2aSMiao Xie 	struct mempolicy *new, *old;
7954bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH(scratch);
79658568d2aSMiao Xie 	int ret;
7971da177e4SLinus Torvalds 
7984bfc4495SKAMEZAWA Hiroyuki 	if (!scratch)
7994bfc4495SKAMEZAWA Hiroyuki 		return -ENOMEM;
800f4e53d91SLee Schermerhorn 
8014bfc4495SKAMEZAWA Hiroyuki 	new = mpol_new(mode, flags, nodes);
8024bfc4495SKAMEZAWA Hiroyuki 	if (IS_ERR(new)) {
8034bfc4495SKAMEZAWA Hiroyuki 		ret = PTR_ERR(new);
8044bfc4495SKAMEZAWA Hiroyuki 		goto out;
8054bfc4495SKAMEZAWA Hiroyuki 	}
8062c7c3a7dSOleg Nesterov 
80758568d2aSMiao Xie 	task_lock(current);
8084bfc4495SKAMEZAWA Hiroyuki 	ret = mpol_set_nodemask(new, nodes, scratch);
80958568d2aSMiao Xie 	if (ret) {
81058568d2aSMiao Xie 		task_unlock(current);
81158568d2aSMiao Xie 		mpol_put(new);
8124bfc4495SKAMEZAWA Hiroyuki 		goto out;
81358568d2aSMiao Xie 	}
81458568d2aSMiao Xie 	old = current->mempolicy;
8151da177e4SLinus Torvalds 	current->mempolicy = new;
81645816682SVlastimil Babka 	if (new && new->mode == MPOL_INTERLEAVE)
81745816682SVlastimil Babka 		current->il_prev = MAX_NUMNODES-1;
81858568d2aSMiao Xie 	task_unlock(current);
81958568d2aSMiao Xie 	mpol_put(old);
8204bfc4495SKAMEZAWA Hiroyuki 	ret = 0;
8214bfc4495SKAMEZAWA Hiroyuki out:
8224bfc4495SKAMEZAWA Hiroyuki 	NODEMASK_SCRATCH_FREE(scratch);
8234bfc4495SKAMEZAWA Hiroyuki 	return ret;
8241da177e4SLinus Torvalds }
8251da177e4SLinus Torvalds 
826bea904d5SLee Schermerhorn /*
827bea904d5SLee Schermerhorn  * Return nodemask for policy for get_mempolicy() query
82858568d2aSMiao Xie  *
82958568d2aSMiao Xie  * Called with task's alloc_lock held
830bea904d5SLee Schermerhorn  */
831bea904d5SLee Schermerhorn static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
8321da177e4SLinus Torvalds {
833dfcd3c0dSAndi Kleen 	nodes_clear(*nodes);
834bea904d5SLee Schermerhorn 	if (p == &default_policy)
835bea904d5SLee Schermerhorn 		return;
836bea904d5SLee Schermerhorn 
83745c4745aSLee Schermerhorn 	switch (p->mode) {
83819770b32SMel Gorman 	case MPOL_BIND:
83919770b32SMel Gorman 		/* Fall through */
8401da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
841dfcd3c0dSAndi Kleen 		*nodes = p->v.nodes;
8421da177e4SLinus Torvalds 		break;
8431da177e4SLinus Torvalds 	case MPOL_PREFERRED:
844fc36b8d3SLee Schermerhorn 		if (!(p->flags & MPOL_F_LOCAL))
845dfcd3c0dSAndi Kleen 			node_set(p->v.preferred_node, *nodes);
84653f2556bSLee Schermerhorn 		/* else return empty node mask for local allocation */
8471da177e4SLinus Torvalds 		break;
8481da177e4SLinus Torvalds 	default:
8491da177e4SLinus Torvalds 		BUG();
8501da177e4SLinus Torvalds 	}
8511da177e4SLinus Torvalds }
8521da177e4SLinus Torvalds 
8533b9aadf7SAndrea Arcangeli static int lookup_node(struct mm_struct *mm, unsigned long addr)
8541da177e4SLinus Torvalds {
8551da177e4SLinus Torvalds 	struct page *p;
8561da177e4SLinus Torvalds 	int err;
8571da177e4SLinus Torvalds 
8583b9aadf7SAndrea Arcangeli 	int locked = 1;
8593b9aadf7SAndrea Arcangeli 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
8601da177e4SLinus Torvalds 	if (err >= 0) {
8611da177e4SLinus Torvalds 		err = page_to_nid(p);
8621da177e4SLinus Torvalds 		put_page(p);
8631da177e4SLinus Torvalds 	}
8643b9aadf7SAndrea Arcangeli 	if (locked)
8653b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
8661da177e4SLinus Torvalds 	return err;
8671da177e4SLinus Torvalds }
8681da177e4SLinus Torvalds 
8691da177e4SLinus Torvalds /* Retrieve NUMA policy */
870dbcb0f19SAdrian Bunk static long do_get_mempolicy(int *policy, nodemask_t *nmask,
8711da177e4SLinus Torvalds 			     unsigned long addr, unsigned long flags)
8721da177e4SLinus Torvalds {
8738bccd85fSChristoph Lameter 	int err;
8741da177e4SLinus Torvalds 	struct mm_struct *mm = current->mm;
8751da177e4SLinus Torvalds 	struct vm_area_struct *vma = NULL;
8763b9aadf7SAndrea Arcangeli 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
8771da177e4SLinus Torvalds 
878754af6f5SLee Schermerhorn 	if (flags &
879754af6f5SLee Schermerhorn 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
8801da177e4SLinus Torvalds 		return -EINVAL;
881754af6f5SLee Schermerhorn 
882754af6f5SLee Schermerhorn 	if (flags & MPOL_F_MEMS_ALLOWED) {
883754af6f5SLee Schermerhorn 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
884754af6f5SLee Schermerhorn 			return -EINVAL;
885754af6f5SLee Schermerhorn 		*policy = 0;	/* just so it's initialized */
88658568d2aSMiao Xie 		task_lock(current);
887754af6f5SLee Schermerhorn 		*nmask  = cpuset_current_mems_allowed;
88858568d2aSMiao Xie 		task_unlock(current);
889754af6f5SLee Schermerhorn 		return 0;
890754af6f5SLee Schermerhorn 	}
891754af6f5SLee Schermerhorn 
8921da177e4SLinus Torvalds 	if (flags & MPOL_F_ADDR) {
893bea904d5SLee Schermerhorn 		/*
894bea904d5SLee Schermerhorn 		 * Do NOT fall back to task policy if the
895bea904d5SLee Schermerhorn 		 * vma/shared policy at addr is NULL.  We
896bea904d5SLee Schermerhorn 		 * want to return MPOL_DEFAULT in this case.
897bea904d5SLee Schermerhorn 		 */
8981da177e4SLinus Torvalds 		down_read(&mm->mmap_sem);
8991da177e4SLinus Torvalds 		vma = find_vma_intersection(mm, addr, addr+1);
9001da177e4SLinus Torvalds 		if (!vma) {
9011da177e4SLinus Torvalds 			up_read(&mm->mmap_sem);
9021da177e4SLinus Torvalds 			return -EFAULT;
9031da177e4SLinus Torvalds 		}
9041da177e4SLinus Torvalds 		if (vma->vm_ops && vma->vm_ops->get_policy)
9051da177e4SLinus Torvalds 			pol = vma->vm_ops->get_policy(vma, addr);
9061da177e4SLinus Torvalds 		else
9071da177e4SLinus Torvalds 			pol = vma->vm_policy;
9081da177e4SLinus Torvalds 	} else if (addr)
9091da177e4SLinus Torvalds 		return -EINVAL;
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds 	if (!pol)
912bea904d5SLee Schermerhorn 		pol = &default_policy;	/* indicates default behavior */
9131da177e4SLinus Torvalds 
9141da177e4SLinus Torvalds 	if (flags & MPOL_F_NODE) {
9151da177e4SLinus Torvalds 		if (flags & MPOL_F_ADDR) {
9163b9aadf7SAndrea Arcangeli 			/*
9173b9aadf7SAndrea Arcangeli 			 * Take a refcount on the mpol, lookup_node()
9183b9aadf7SAndrea Arcangeli 			 * will drop the mmap_sem, so after calling
9193b9aadf7SAndrea Arcangeli 			 * lookup_node() only "pol" remains valid, "vma"
9203b9aadf7SAndrea Arcangeli 			 * is stale.
9213b9aadf7SAndrea Arcangeli 			 */
9223b9aadf7SAndrea Arcangeli 			pol_refcount = pol;
9233b9aadf7SAndrea Arcangeli 			vma = NULL;
9243b9aadf7SAndrea Arcangeli 			mpol_get(pol);
9253b9aadf7SAndrea Arcangeli 			err = lookup_node(mm, addr);
9261da177e4SLinus Torvalds 			if (err < 0)
9271da177e4SLinus Torvalds 				goto out;
9288bccd85fSChristoph Lameter 			*policy = err;
9291da177e4SLinus Torvalds 		} else if (pol == current->mempolicy &&
93045c4745aSLee Schermerhorn 				pol->mode == MPOL_INTERLEAVE) {
93145816682SVlastimil Babka 			*policy = next_node_in(current->il_prev, pol->v.nodes);
9321da177e4SLinus Torvalds 		} else {
9331da177e4SLinus Torvalds 			err = -EINVAL;
9341da177e4SLinus Torvalds 			goto out;
9351da177e4SLinus Torvalds 		}
936bea904d5SLee Schermerhorn 	} else {
937bea904d5SLee Schermerhorn 		*policy = pol == &default_policy ? MPOL_DEFAULT :
938bea904d5SLee Schermerhorn 						pol->mode;
939d79df630SDavid Rientjes 		/*
940d79df630SDavid Rientjes 		 * Internal mempolicy flags must be masked off before exposing
941d79df630SDavid Rientjes 		 * the policy to userspace.
942d79df630SDavid Rientjes 		 */
943d79df630SDavid Rientjes 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
944bea904d5SLee Schermerhorn 	}
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	err = 0;
94758568d2aSMiao Xie 	if (nmask) {
948c6b6ef8bSLee Schermerhorn 		if (mpol_store_user_nodemask(pol)) {
949c6b6ef8bSLee Schermerhorn 			*nmask = pol->w.user_nodemask;
950c6b6ef8bSLee Schermerhorn 		} else {
95158568d2aSMiao Xie 			task_lock(current);
952bea904d5SLee Schermerhorn 			get_policy_nodemask(pol, nmask);
95358568d2aSMiao Xie 			task_unlock(current);
95458568d2aSMiao Xie 		}
955c6b6ef8bSLee Schermerhorn 	}
9561da177e4SLinus Torvalds 
9571da177e4SLinus Torvalds  out:
95852cd3b07SLee Schermerhorn 	mpol_cond_put(pol);
9591da177e4SLinus Torvalds 	if (vma)
9603b9aadf7SAndrea Arcangeli 		up_read(&mm->mmap_sem);
9613b9aadf7SAndrea Arcangeli 	if (pol_refcount)
9623b9aadf7SAndrea Arcangeli 		mpol_put(pol_refcount);
9631da177e4SLinus Torvalds 	return err;
9641da177e4SLinus Torvalds }
9651da177e4SLinus Torvalds 
966b20a3503SChristoph Lameter #ifdef CONFIG_MIGRATION
9678bccd85fSChristoph Lameter /*
968c8633798SNaoya Horiguchi  * page migration; thp tail pages can be passed.
9696ce3c4c0SChristoph Lameter  */
970a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
971fc301289SChristoph Lameter 				unsigned long flags)
9726ce3c4c0SChristoph Lameter {
973c8633798SNaoya Horiguchi 	struct page *head = compound_head(page);
9746ce3c4c0SChristoph Lameter 	/*
975fc301289SChristoph Lameter 	 * Avoid migrating a page that is shared with others.
9766ce3c4c0SChristoph Lameter 	 */
977c8633798SNaoya Horiguchi 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
978c8633798SNaoya Horiguchi 		if (!isolate_lru_page(head)) {
979c8633798SNaoya Horiguchi 			list_add_tail(&head->lru, pagelist);
980c8633798SNaoya Horiguchi 			mod_node_page_state(page_pgdat(head),
981c8633798SNaoya Horiguchi 				NR_ISOLATED_ANON + page_is_file_cache(head),
982c8633798SNaoya Horiguchi 				hpage_nr_pages(head));
983a53190a4SYang Shi 		} else if (flags & MPOL_MF_STRICT) {
984a53190a4SYang Shi 			/*
985a53190a4SYang Shi 			 * A non-movable page may reach here.  There may also be
986a53190a4SYang Shi 			 * temporarily off-LRU pages or non-LRU movable pages.
987a53190a4SYang Shi 			 * Treat them as unmovable pages since they can't be
988a53190a4SYang Shi 			 * isolated, so they can't be moved at the moment.  It
989a53190a4SYang Shi 			 * should return -EIO for this case too.
990a53190a4SYang Shi 			 */
991a53190a4SYang Shi 			return -EIO;
99262695a84SNick Piggin 		}
99362695a84SNick Piggin 	}
994a53190a4SYang Shi 
995a53190a4SYang Shi 	return 0;
9966ce3c4c0SChristoph Lameter }
9976ce3c4c0SChristoph Lameter 
998a49bd4d7SMichal Hocko /* page allocation callback for NUMA node migration */
999666feb21SMichal Hocko struct page *alloc_new_node_page(struct page *page, unsigned long node)
100095a402c3SChristoph Lameter {
1001e2d8cf40SNaoya Horiguchi 	if (PageHuge(page))
1002e2d8cf40SNaoya Horiguchi 		return alloc_huge_page_node(page_hstate(compound_head(page)),
1003e2d8cf40SNaoya Horiguchi 					node);
100494723aafSMichal Hocko 	else if (PageTransHuge(page)) {
1005c8633798SNaoya Horiguchi 		struct page *thp;
1006c8633798SNaoya Horiguchi 
1007c8633798SNaoya Horiguchi 		thp = alloc_pages_node(node,
1008c8633798SNaoya Horiguchi 			(GFP_TRANSHUGE | __GFP_THISNODE),
1009c8633798SNaoya Horiguchi 			HPAGE_PMD_ORDER);
1010c8633798SNaoya Horiguchi 		if (!thp)
1011c8633798SNaoya Horiguchi 			return NULL;
1012c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1013c8633798SNaoya Horiguchi 		return thp;
1014c8633798SNaoya Horiguchi 	} else
101596db800fSVlastimil Babka 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
1016b360edb4SDavid Rientjes 						    __GFP_THISNODE, 0);
101795a402c3SChristoph Lameter }
101895a402c3SChristoph Lameter 
10196ce3c4c0SChristoph Lameter /*
10207e2ab150SChristoph Lameter  * Migrate pages from one node to a target node.
10217e2ab150SChristoph Lameter  * Returns error or the number of pages not migrated.
10227e2ab150SChristoph Lameter  */
1023dbcb0f19SAdrian Bunk static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1024dbcb0f19SAdrian Bunk 			   int flags)
10257e2ab150SChristoph Lameter {
10267e2ab150SChristoph Lameter 	nodemask_t nmask;
10277e2ab150SChristoph Lameter 	LIST_HEAD(pagelist);
10287e2ab150SChristoph Lameter 	int err = 0;
10297e2ab150SChristoph Lameter 
10307e2ab150SChristoph Lameter 	nodes_clear(nmask);
10317e2ab150SChristoph Lameter 	node_set(source, nmask);
10327e2ab150SChristoph Lameter 
103308270807SMinchan Kim 	/*
103408270807SMinchan Kim 	 * This does not "check" the range but isolates all pages that
103508270807SMinchan Kim 	 * need migration.  Between passing in the full user address
103608270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
103608270807SMinchan Kim 	 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
103808270807SMinchan Kim 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
103998094945SNaoya Horiguchi 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
10407e2ab150SChristoph Lameter 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
10417e2ab150SChristoph Lameter 
1042cf608ac1SMinchan Kim 	if (!list_empty(&pagelist)) {
1043a49bd4d7SMichal Hocko 		err = migrate_pages(&pagelist, alloc_new_node_page, NULL, dest,
10449c620e2bSHugh Dickins 					MIGRATE_SYNC, MR_SYSCALL);
1045cf608ac1SMinchan Kim 		if (err)
1046e2d8cf40SNaoya Horiguchi 			putback_movable_pages(&pagelist);
1047cf608ac1SMinchan Kim 	}
104895a402c3SChristoph Lameter 
10497e2ab150SChristoph Lameter 	return err;
10507e2ab150SChristoph Lameter }
10517e2ab150SChristoph Lameter 
10527e2ab150SChristoph Lameter /*
10537e2ab150SChristoph Lameter  * Move pages between the two nodesets so as to preserve the physical
10547e2ab150SChristoph Lameter  * layout as much as possible.
105539743889SChristoph Lameter  *
105739743889SChristoph Lameter  * Returns the number of pages that could not be moved.
105739743889SChristoph Lameter  */
10580ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
10590ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
106039743889SChristoph Lameter {
10617e2ab150SChristoph Lameter 	int busy = 0;
10620aedadf9SChristoph Lameter 	int err;
10637e2ab150SChristoph Lameter 	nodemask_t tmp;
106439743889SChristoph Lameter 
10650aedadf9SChristoph Lameter 	err = migrate_prep();
10660aedadf9SChristoph Lameter 	if (err)
10670aedadf9SChristoph Lameter 		return err;
10680aedadf9SChristoph Lameter 
106939743889SChristoph Lameter 	down_read(&mm->mmap_sem);
1070d4984711SChristoph Lameter 
10717e2ab150SChristoph Lameter 	/*
10727e2ab150SChristoph Lameter 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
10737e2ab150SChristoph Lameter 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
10747e2ab150SChristoph Lameter 	 * bit in 'tmp', and return that <source, dest> pair for migration.
10757e2ab150SChristoph Lameter 	 * The pair of nodemasks 'to' and 'from' define the map.
10767e2ab150SChristoph Lameter 	 *
10777e2ab150SChristoph Lameter 	 * If no pair of bits is found that way, fallback to picking some
10787e2ab150SChristoph Lameter 	 * pair of 'source' and 'dest' bits that are not the same.  If the
10797e2ab150SChristoph Lameter 	 * 'source' and 'dest' bits are the same, this represents a node
10807e2ab150SChristoph Lameter 	 * that will be migrating to itself, so no pages need move.
10817e2ab150SChristoph Lameter 	 *
10827e2ab150SChristoph Lameter 	 * If no bits are left in 'tmp', or if all remaining bits left
10837e2ab150SChristoph Lameter 	 * in 'tmp' correspond to the same bit in 'to', return false
10847e2ab150SChristoph Lameter 	 * (nothing left to migrate).
10857e2ab150SChristoph Lameter 	 *
10867e2ab150SChristoph Lameter 	 * This lets us pick a pair of nodes to migrate between, such that
10877e2ab150SChristoph Lameter 	 * if possible the dest node is not already occupied by some other
10887e2ab150SChristoph Lameter 	 * source node, minimizing the risk of overloading the memory on a
10897e2ab150SChristoph Lameter 	 * node that would happen if we migrated incoming memory to a node
10907e2ab150SChristoph Lameter 	 * before migrating outgoing memory from that same node.
10917e2ab150SChristoph Lameter 	 *
10927e2ab150SChristoph Lameter 	 * A single scan of tmp is sufficient.  As we go, we remember the
10937e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
10947e2ab150SChristoph Lameter 	 * that not only moved, but what's better, moved to an empty slot
10957e2ab150SChristoph Lameter 	 * (d is not set in tmp), then we break out then, with that pair.
1096ae0e47f0SJustin P. Mattock 	 * Otherwise when we finish scanning tmp, we at least have the
10977e2ab150SChristoph Lameter 	 * most recent <s, d> pair that moved.  If we get all the way through
10987e2ab150SChristoph Lameter 	 * the scan of tmp without finding any node that moved, much less
10997e2ab150SChristoph Lameter 	 * moved to an empty node, then there is nothing left worth migrating.
11007e2ab150SChristoph Lameter 	 */
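
	/*
	 * Worked example (added for illustration, not part of the original
	 * comment): with *from = {0,1} and *to = {1,2}, the scan first
	 * finds <0,1>, but node 1 is still set in tmp, so it keeps going
	 * and settles on <1,2>, an empty destination.  Node 1 is therefore
	 * drained to node 2 before node 0's pages are moved onto node 1,
	 * which is exactly the overload avoidance described above.
	 */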
11017e2ab150SChristoph Lameter 
11020ce72d4fSAndrew Morton 	tmp = *from;
11037e2ab150SChristoph Lameter 	while (!nodes_empty(tmp)) {
11047e2ab150SChristoph Lameter 		int s, d;
1105b76ac7e7SJianguo Wu 		int source = NUMA_NO_NODE;
11067e2ab150SChristoph Lameter 		int dest = 0;
11077e2ab150SChristoph Lameter 
11087e2ab150SChristoph Lameter 		for_each_node_mask(s, tmp) {
11094a5b18ccSLarry Woodman 
11104a5b18ccSLarry Woodman 			/*
11114a5b18ccSLarry Woodman 			 * do_migrate_pages() tries to maintain the relative
11124a5b18ccSLarry Woodman 			 * node relationship of the pages established between
11134a5b18ccSLarry Woodman 			 * threads and memory areas.
11144a5b18ccSLarry Woodman 			 *
11154a5b18ccSLarry Woodman 			 * However, if the number of source nodes is not equal to
11164a5b18ccSLarry Woodman 			 * the number of destination nodes, we cannot preserve
11174a5b18ccSLarry Woodman 			 * this node-relative relationship.  In that case, skip
11184a5b18ccSLarry Woodman 			 * copying memory from a node that is in the destination
11194a5b18ccSLarry Woodman 			 * mask.
11204a5b18ccSLarry Woodman 			 *
11214a5b18ccSLarry Woodman 			 * Example: [2,3,4] -> [3,4,5] moves everything.
11224a5b18ccSLarry Woodman 			 *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
11234a5b18ccSLarry Woodman 			 */
11244a5b18ccSLarry Woodman 
11250ce72d4fSAndrew Morton 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
11260ce72d4fSAndrew Morton 						(node_isset(s, *to)))
11274a5b18ccSLarry Woodman 				continue;
11284a5b18ccSLarry Woodman 
11290ce72d4fSAndrew Morton 			d = node_remap(s, *from, *to);
11307e2ab150SChristoph Lameter 			if (s == d)
11317e2ab150SChristoph Lameter 				continue;
11327e2ab150SChristoph Lameter 
11337e2ab150SChristoph Lameter 			source = s;	/* Node moved. Memorize */
11347e2ab150SChristoph Lameter 			dest = d;
11357e2ab150SChristoph Lameter 
11367e2ab150SChristoph Lameter 			/* dest not in remaining from nodes? */
11377e2ab150SChristoph Lameter 			if (!node_isset(dest, tmp))
11387e2ab150SChristoph Lameter 				break;
11397e2ab150SChristoph Lameter 		}
1140b76ac7e7SJianguo Wu 		if (source == NUMA_NO_NODE)
11417e2ab150SChristoph Lameter 			break;
11427e2ab150SChristoph Lameter 
11437e2ab150SChristoph Lameter 		node_clear(source, tmp);
11447e2ab150SChristoph Lameter 		err = migrate_to_node(mm, source, dest, flags);
11457e2ab150SChristoph Lameter 		if (err > 0)
11467e2ab150SChristoph Lameter 			busy += err;
11477e2ab150SChristoph Lameter 		if (err < 0)
11487e2ab150SChristoph Lameter 			break;
114939743889SChristoph Lameter 	}
115039743889SChristoph Lameter 	up_read(&mm->mmap_sem);
11517e2ab150SChristoph Lameter 	if (err < 0)
11527e2ab150SChristoph Lameter 		return err;
11537e2ab150SChristoph Lameter 	return busy;
1154b20a3503SChristoph Lameter 
115539743889SChristoph Lameter }
115639743889SChristoph Lameter 
11573ad33b24SLee Schermerhorn /*
11583ad33b24SLee Schermerhorn  * Allocate a new page for page migration based on vma policy.
1159d05f0cdcSHugh Dickins  * Start by assuming the page is mapped by the same vma as contains @start.
11603ad33b24SLee Schermerhorn  * Search forward from there, if not.  N.B., this assumes that the
11613ad33b24SLee Schermerhorn  * list of pages handed to migrate_pages()--which is how we get here--
11623ad33b24SLee Schermerhorn  * is in virtual address order.
11633ad33b24SLee Schermerhorn  */
1164666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
116595a402c3SChristoph Lameter {
1166d05f0cdcSHugh Dickins 	struct vm_area_struct *vma;
11673ad33b24SLee Schermerhorn 	unsigned long uninitialized_var(address);
116895a402c3SChristoph Lameter 
1169d05f0cdcSHugh Dickins 	vma = find_vma(current->mm, start);
11703ad33b24SLee Schermerhorn 	while (vma) {
11713ad33b24SLee Schermerhorn 		address = page_address_in_vma(page, vma);
11723ad33b24SLee Schermerhorn 		if (address != -EFAULT)
11733ad33b24SLee Schermerhorn 			break;
11743ad33b24SLee Schermerhorn 		vma = vma->vm_next;
11753ad33b24SLee Schermerhorn 	}
11763ad33b24SLee Schermerhorn 
117711c731e8SWanpeng Li 	if (PageHuge(page)) {
1178389c8178SMichal Hocko 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1179389c8178SMichal Hocko 				vma, address);
118094723aafSMichal Hocko 	} else if (PageTransHuge(page)) {
1181c8633798SNaoya Horiguchi 		struct page *thp;
1182c8633798SNaoya Horiguchi 
1183*19deb769SDavid Rientjes 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1184*19deb769SDavid Rientjes 					 HPAGE_PMD_ORDER);
1185c8633798SNaoya Horiguchi 		if (!thp)
1186c8633798SNaoya Horiguchi 			return NULL;
1187c8633798SNaoya Horiguchi 		prep_transhuge_page(thp);
1188c8633798SNaoya Horiguchi 		return thp;
118911c731e8SWanpeng Li 	}
119011c731e8SWanpeng Li 	/*
119111c731e8SWanpeng Li 	 * if !vma, alloc_page_vma() will use task or system default policy
119211c731e8SWanpeng Li 	 */
11930f556856SMichal Hocko 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
11940f556856SMichal Hocko 			vma, address);
119595a402c3SChristoph Lameter }
1196b20a3503SChristoph Lameter #else
1197b20a3503SChristoph Lameter 
1198a53190a4SYang Shi static int migrate_page_add(struct page *page, struct list_head *pagelist,
1199b20a3503SChristoph Lameter 				unsigned long flags)
1200b20a3503SChristoph Lameter {
1201a53190a4SYang Shi 	return -EIO;
1202b20a3503SChristoph Lameter }
1203b20a3503SChristoph Lameter 
12040ce72d4fSAndrew Morton int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
12050ce72d4fSAndrew Morton 		     const nodemask_t *to, int flags)
1206b20a3503SChristoph Lameter {
1207b20a3503SChristoph Lameter 	return -ENOSYS;
1208b20a3503SChristoph Lameter }
120995a402c3SChristoph Lameter 
1210666feb21SMichal Hocko static struct page *new_page(struct page *page, unsigned long start)
121195a402c3SChristoph Lameter {
121295a402c3SChristoph Lameter 	return NULL;
121395a402c3SChristoph Lameter }
1214b20a3503SChristoph Lameter #endif
1215b20a3503SChristoph Lameter 
1216dbcb0f19SAdrian Bunk static long do_mbind(unsigned long start, unsigned long len,
1217028fec41SDavid Rientjes 		     unsigned short mode, unsigned short mode_flags,
1218028fec41SDavid Rientjes 		     nodemask_t *nmask, unsigned long flags)
12196ce3c4c0SChristoph Lameter {
12206ce3c4c0SChristoph Lameter 	struct mm_struct *mm = current->mm;
12216ce3c4c0SChristoph Lameter 	struct mempolicy *new;
12226ce3c4c0SChristoph Lameter 	unsigned long end;
12236ce3c4c0SChristoph Lameter 	int err;
1224d8835445SYang Shi 	int ret;
12256ce3c4c0SChristoph Lameter 	LIST_HEAD(pagelist);
12266ce3c4c0SChristoph Lameter 
1227b24f53a0SLee Schermerhorn 	if (flags & ~(unsigned long)MPOL_MF_VALID)
12286ce3c4c0SChristoph Lameter 		return -EINVAL;
122974c00241SChristoph Lameter 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
12306ce3c4c0SChristoph Lameter 		return -EPERM;
12316ce3c4c0SChristoph Lameter 
12326ce3c4c0SChristoph Lameter 	if (start & ~PAGE_MASK)
12336ce3c4c0SChristoph Lameter 		return -EINVAL;
12346ce3c4c0SChristoph Lameter 
12356ce3c4c0SChristoph Lameter 	if (mode == MPOL_DEFAULT)
12366ce3c4c0SChristoph Lameter 		flags &= ~MPOL_MF_STRICT;
12376ce3c4c0SChristoph Lameter 
12386ce3c4c0SChristoph Lameter 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
12396ce3c4c0SChristoph Lameter 	end = start + len;
12406ce3c4c0SChristoph Lameter 
12416ce3c4c0SChristoph Lameter 	if (end < start)
12426ce3c4c0SChristoph Lameter 		return -EINVAL;
12436ce3c4c0SChristoph Lameter 	if (end == start)
12446ce3c4c0SChristoph Lameter 		return 0;
12456ce3c4c0SChristoph Lameter 
1246028fec41SDavid Rientjes 	new = mpol_new(mode, mode_flags, nmask);
12476ce3c4c0SChristoph Lameter 	if (IS_ERR(new))
12486ce3c4c0SChristoph Lameter 		return PTR_ERR(new);
12496ce3c4c0SChristoph Lameter 
1250b24f53a0SLee Schermerhorn 	if (flags & MPOL_MF_LAZY)
1251b24f53a0SLee Schermerhorn 		new->flags |= MPOL_F_MOF;
1252b24f53a0SLee Schermerhorn 
12536ce3c4c0SChristoph Lameter 	/*
12546ce3c4c0SChristoph Lameter 	 * If we are using the default policy then operating
12556ce3c4c0SChristoph Lameter 	 * on discontinuous address spaces is okay after all
12566ce3c4c0SChristoph Lameter 	 */
12576ce3c4c0SChristoph Lameter 	if (!new)
12586ce3c4c0SChristoph Lameter 		flags |= MPOL_MF_DISCONTIG_OK;
12596ce3c4c0SChristoph Lameter 
1260028fec41SDavid Rientjes 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1261028fec41SDavid Rientjes 		 start, start + len, mode, mode_flags,
126200ef2d2fSDavid Rientjes 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
12636ce3c4c0SChristoph Lameter 
12640aedadf9SChristoph Lameter 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
12650aedadf9SChristoph Lameter 
12660aedadf9SChristoph Lameter 		err = migrate_prep();
12670aedadf9SChristoph Lameter 		if (err)
1268b05ca738SKOSAKI Motohiro 			goto mpol_out;
12690aedadf9SChristoph Lameter 	}
12704bfc4495SKAMEZAWA Hiroyuki 	{
12714bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
12724bfc4495SKAMEZAWA Hiroyuki 		if (scratch) {
12736ce3c4c0SChristoph Lameter 			down_write(&mm->mmap_sem);
127458568d2aSMiao Xie 			task_lock(current);
12754bfc4495SKAMEZAWA Hiroyuki 			err = mpol_set_nodemask(new, nmask, scratch);
127658568d2aSMiao Xie 			task_unlock(current);
12774bfc4495SKAMEZAWA Hiroyuki 			if (err)
127858568d2aSMiao Xie 				up_write(&mm->mmap_sem);
12794bfc4495SKAMEZAWA Hiroyuki 		} else
12804bfc4495SKAMEZAWA Hiroyuki 			err = -ENOMEM;
12814bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
12824bfc4495SKAMEZAWA Hiroyuki 	}
1283b05ca738SKOSAKI Motohiro 	if (err)
1284b05ca738SKOSAKI Motohiro 		goto mpol_out;
1285b05ca738SKOSAKI Motohiro 
1286d8835445SYang Shi 	ret = queue_pages_range(mm, start, end, nmask,
12876ce3c4c0SChristoph Lameter 			  flags | MPOL_MF_INVERT, &pagelist);
1288d8835445SYang Shi 
1289d8835445SYang Shi 	if (ret < 0) {
1290d8835445SYang Shi 		err = -EIO;
1291d8835445SYang Shi 		goto up_out;
1292d8835445SYang Shi 	}
1293d8835445SYang Shi 
12949d8cebd4SKOSAKI Motohiro 	err = mbind_range(mm, start, end, new);
12957e2ab150SChristoph Lameter 
1296b24f53a0SLee Schermerhorn 	if (!err) {
1297b24f53a0SLee Schermerhorn 		int nr_failed = 0;
1298b24f53a0SLee Schermerhorn 
1299cf608ac1SMinchan Kim 		if (!list_empty(&pagelist)) {
1300b24f53a0SLee Schermerhorn 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1301d05f0cdcSHugh Dickins 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1302d05f0cdcSHugh Dickins 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1303cf608ac1SMinchan Kim 			if (nr_failed)
130474060e4dSNaoya Horiguchi 				putback_movable_pages(&pagelist);
1305cf608ac1SMinchan Kim 		}
13066ce3c4c0SChristoph Lameter 
1307d8835445SYang Shi 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
13086ce3c4c0SChristoph Lameter 			err = -EIO;
1309ab8a3e14SKOSAKI Motohiro 	} else
1310b0e5fd73SJoonsoo Kim 		putback_movable_pages(&pagelist);
1311b20a3503SChristoph Lameter 
1312d8835445SYang Shi up_out:
13136ce3c4c0SChristoph Lameter 	up_write(&mm->mmap_sem);
1314b05ca738SKOSAKI Motohiro mpol_out:
1315f0be3d32SLee Schermerhorn 	mpol_put(new);
13166ce3c4c0SChristoph Lameter 	return err;
13176ce3c4c0SChristoph Lameter }
13186ce3c4c0SChristoph Lameter 
131939743889SChristoph Lameter /*
13208bccd85fSChristoph Lameter  * User space interface with variable sized bitmaps for nodelists.
13218bccd85fSChristoph Lameter  */
13228bccd85fSChristoph Lameter 
13238bccd85fSChristoph Lameter /* Copy a node mask from user space. */
132439743889SChristoph Lameter static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
13258bccd85fSChristoph Lameter 		     unsigned long maxnode)
13268bccd85fSChristoph Lameter {
13278bccd85fSChristoph Lameter 	unsigned long k;
132856521e7aSYisheng Xie 	unsigned long t;
13298bccd85fSChristoph Lameter 	unsigned long nlongs;
13308bccd85fSChristoph Lameter 	unsigned long endmask;
13318bccd85fSChristoph Lameter 
13328bccd85fSChristoph Lameter 	--maxnode;
13338bccd85fSChristoph Lameter 	nodes_clear(*nodes);
13348bccd85fSChristoph Lameter 	if (maxnode == 0 || !nmask)
13358bccd85fSChristoph Lameter 		return 0;
1336a9c930baSAndi Kleen 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1337636f13c1SChris Wright 		return -EINVAL;
13388bccd85fSChristoph Lameter 
13398bccd85fSChristoph Lameter 	nlongs = BITS_TO_LONGS(maxnode);
13408bccd85fSChristoph Lameter 	if ((maxnode % BITS_PER_LONG) == 0)
13418bccd85fSChristoph Lameter 		endmask = ~0UL;
13428bccd85fSChristoph Lameter 	else
13438bccd85fSChristoph Lameter 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
13448bccd85fSChristoph Lameter 
134556521e7aSYisheng Xie 	/*
134656521e7aSYisheng Xie 	 * When the user specifies more nodes than supported, just check
134756521e7aSYisheng Xie 	 * whether the unsupported part is all zero.
134856521e7aSYisheng Xie 	 *
134956521e7aSYisheng Xie 	 * If maxnode covers more longs than MAX_NUMNODES, check
135056521e7aSYisheng Xie 	 * the bits in that area first, and then go on to check
135156521e7aSYisheng Xie 	 * the remaining bits, which are at or above MAX_NUMNODES.
135256521e7aSYisheng Xie 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
135356521e7aSYisheng Xie 	 */
13548bccd85fSChristoph Lameter 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
13558bccd85fSChristoph Lameter 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
13568bccd85fSChristoph Lameter 			if (get_user(t, nmask + k))
13578bccd85fSChristoph Lameter 				return -EFAULT;
13588bccd85fSChristoph Lameter 			if (k == nlongs - 1) {
13598bccd85fSChristoph Lameter 				if (t & endmask)
13608bccd85fSChristoph Lameter 					return -EINVAL;
13618bccd85fSChristoph Lameter 			} else if (t)
13628bccd85fSChristoph Lameter 				return -EINVAL;
13638bccd85fSChristoph Lameter 		}
13648bccd85fSChristoph Lameter 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
13658bccd85fSChristoph Lameter 		endmask = ~0UL;
13668bccd85fSChristoph Lameter 	}
13678bccd85fSChristoph Lameter 
136856521e7aSYisheng Xie 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
136956521e7aSYisheng Xie 		unsigned long valid_mask = endmask;
137056521e7aSYisheng Xie 
137156521e7aSYisheng Xie 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
137256521e7aSYisheng Xie 		if (get_user(t, nmask + nlongs - 1))
137356521e7aSYisheng Xie 			return -EFAULT;
137456521e7aSYisheng Xie 		if (t & valid_mask)
137556521e7aSYisheng Xie 			return -EINVAL;
137656521e7aSYisheng Xie 	}
137756521e7aSYisheng Xie 
13788bccd85fSChristoph Lameter 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
13798bccd85fSChristoph Lameter 		return -EFAULT;
13808bccd85fSChristoph Lameter 	nodes_addr(*nodes)[nlongs-1] &= endmask;
13818bccd85fSChristoph Lameter 	return 0;
13828bccd85fSChristoph Lameter }
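
/*
 * Worked example (added for illustration): on a 64-bit kernel a caller
 * passing maxnode == 9 describes node bits 0..7.  After the --maxnode
 * above, nlongs = BITS_TO_LONGS(8) = 1 and endmask = (1UL << 8) - 1 = 0xff,
 * so only the low eight bits of the single user long end up in *nodes.
 */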
13838bccd85fSChristoph Lameter 
13848bccd85fSChristoph Lameter /* Copy a kernel node mask to user space */
13858bccd85fSChristoph Lameter static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
13868bccd85fSChristoph Lameter 			      nodemask_t *nodes)
13878bccd85fSChristoph Lameter {
13888bccd85fSChristoph Lameter 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1389050c17f2SRalph Campbell 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
13908bccd85fSChristoph Lameter 
13918bccd85fSChristoph Lameter 	if (copy > nbytes) {
13928bccd85fSChristoph Lameter 		if (copy > PAGE_SIZE)
13938bccd85fSChristoph Lameter 			return -EINVAL;
13948bccd85fSChristoph Lameter 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
13958bccd85fSChristoph Lameter 			return -EFAULT;
13968bccd85fSChristoph Lameter 		copy = nbytes;
13978bccd85fSChristoph Lameter 	}
13988bccd85fSChristoph Lameter 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
13998bccd85fSChristoph Lameter }
14008bccd85fSChristoph Lameter 
1401e7dc9ad6SDominik Brodowski static long kernel_mbind(unsigned long start, unsigned long len,
1402e7dc9ad6SDominik Brodowski 			 unsigned long mode, const unsigned long __user *nmask,
1403e7dc9ad6SDominik Brodowski 			 unsigned long maxnode, unsigned int flags)
14048bccd85fSChristoph Lameter {
14058bccd85fSChristoph Lameter 	nodemask_t nodes;
14068bccd85fSChristoph Lameter 	int err;
1407028fec41SDavid Rientjes 	unsigned short mode_flags;
14088bccd85fSChristoph Lameter 
1409028fec41SDavid Rientjes 	mode_flags = mode & MPOL_MODE_FLAGS;
1410028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1411a3b51e01SDavid Rientjes 	if (mode >= MPOL_MAX)
1412a3b51e01SDavid Rientjes 		return -EINVAL;
14134c50bc01SDavid Rientjes 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
14144c50bc01SDavid Rientjes 	    (mode_flags & MPOL_F_RELATIVE_NODES))
14154c50bc01SDavid Rientjes 		return -EINVAL;
14168bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14178bccd85fSChristoph Lameter 	if (err)
14188bccd85fSChristoph Lameter 		return err;
1419028fec41SDavid Rientjes 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
14208bccd85fSChristoph Lameter }
14218bccd85fSChristoph Lameter 
1422e7dc9ad6SDominik Brodowski SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1423e7dc9ad6SDominik Brodowski 		unsigned long, mode, const unsigned long __user *, nmask,
1424e7dc9ad6SDominik Brodowski 		unsigned long, maxnode, unsigned int, flags)
1425e7dc9ad6SDominik Brodowski {
1426e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1427e7dc9ad6SDominik Brodowski }
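
/*
 * Illustrative userspace sketch (not part of this file): how the mbind()
 * wrapper above is typically reached from user space.  Assumes the
 * mbind(2) wrapper and MPOL_* constants from libnuma's <numaif.h>
 * (link with -lnuma); binds a fresh anonymous mapping to node 0.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1 << 20;
 *		unsigned long nodemask = 1UL << 0;
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		if (mbind(buf, len, MPOL_BIND, &nodemask,
 *			  8 * sizeof(nodemask), MPOL_MF_STRICT))
 *			perror("mbind");
 *		return 0;
 *	}
 */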
1428e7dc9ad6SDominik Brodowski 
14298bccd85fSChristoph Lameter /* Set the process memory policy */
1430af03c4acSDominik Brodowski static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1431af03c4acSDominik Brodowski 				 unsigned long maxnode)
14328bccd85fSChristoph Lameter {
14338bccd85fSChristoph Lameter 	int err;
14348bccd85fSChristoph Lameter 	nodemask_t nodes;
1435028fec41SDavid Rientjes 	unsigned short flags;
14368bccd85fSChristoph Lameter 
1437028fec41SDavid Rientjes 	flags = mode & MPOL_MODE_FLAGS;
1438028fec41SDavid Rientjes 	mode &= ~MPOL_MODE_FLAGS;
1439028fec41SDavid Rientjes 	if ((unsigned int)mode >= MPOL_MAX)
14408bccd85fSChristoph Lameter 		return -EINVAL;
14414c50bc01SDavid Rientjes 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
14424c50bc01SDavid Rientjes 		return -EINVAL;
14438bccd85fSChristoph Lameter 	err = get_nodes(&nodes, nmask, maxnode);
14448bccd85fSChristoph Lameter 	if (err)
14458bccd85fSChristoph Lameter 		return err;
1446028fec41SDavid Rientjes 	return do_set_mempolicy(mode, flags, &nodes);
14478bccd85fSChristoph Lameter }
14488bccd85fSChristoph Lameter 
1449af03c4acSDominik Brodowski SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1450af03c4acSDominik Brodowski 		unsigned long, maxnode)
1451af03c4acSDominik Brodowski {
1452af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nmask, maxnode);
1453af03c4acSDominik Brodowski }
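
/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <numaif.h> from libnuma, link with -lnuma): make all further
 * allocations of the calling task interleave over nodes 0 and 1.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long nodes = (1UL << 0) | (1UL << 1);
 *
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes)))
 *			perror("set_mempolicy");
 *		return 0;
 *	}
 */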
1454af03c4acSDominik Brodowski 
1455b6e9b0baSDominik Brodowski static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1456b6e9b0baSDominik Brodowski 				const unsigned long __user *old_nodes,
1457b6e9b0baSDominik Brodowski 				const unsigned long __user *new_nodes)
145839743889SChristoph Lameter {
1459596d7cfaSKOSAKI Motohiro 	struct mm_struct *mm = NULL;
146039743889SChristoph Lameter 	struct task_struct *task;
146139743889SChristoph Lameter 	nodemask_t task_nodes;
146239743889SChristoph Lameter 	int err;
1463596d7cfaSKOSAKI Motohiro 	nodemask_t *old;
1464596d7cfaSKOSAKI Motohiro 	nodemask_t *new;
1465596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH(scratch);
146639743889SChristoph Lameter 
1467596d7cfaSKOSAKI Motohiro 	if (!scratch)
1468596d7cfaSKOSAKI Motohiro 		return -ENOMEM;
146939743889SChristoph Lameter 
1470596d7cfaSKOSAKI Motohiro 	old = &scratch->mask1;
1471596d7cfaSKOSAKI Motohiro 	new = &scratch->mask2;
1472596d7cfaSKOSAKI Motohiro 
1473596d7cfaSKOSAKI Motohiro 	err = get_nodes(old, old_nodes, maxnode);
147439743889SChristoph Lameter 	if (err)
1475596d7cfaSKOSAKI Motohiro 		goto out;
1476596d7cfaSKOSAKI Motohiro 
1477596d7cfaSKOSAKI Motohiro 	err = get_nodes(new, new_nodes, maxnode);
1478596d7cfaSKOSAKI Motohiro 	if (err)
1479596d7cfaSKOSAKI Motohiro 		goto out;
148039743889SChristoph Lameter 
148139743889SChristoph Lameter 	/* Find the mm_struct */
148255cfaa3cSZeng Zhaoming 	rcu_read_lock();
1483228ebcbeSPavel Emelyanov 	task = pid ? find_task_by_vpid(pid) : current;
148439743889SChristoph Lameter 	if (!task) {
148555cfaa3cSZeng Zhaoming 		rcu_read_unlock();
1486596d7cfaSKOSAKI Motohiro 		err = -ESRCH;
1487596d7cfaSKOSAKI Motohiro 		goto out;
148839743889SChristoph Lameter 	}
14893268c63eSChristoph Lameter 	get_task_struct(task);
149039743889SChristoph Lameter 
1491596d7cfaSKOSAKI Motohiro 	err = -EINVAL;
149239743889SChristoph Lameter 
149339743889SChristoph Lameter 	/*
149431367466SOtto Ebeling 	 * Check if this process has the right to modify the specified process.
149531367466SOtto Ebeling 	 * Use the regular "ptrace_may_access()" checks.
149639743889SChristoph Lameter 	 */
149731367466SOtto Ebeling 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1498c69e8d9cSDavid Howells 		rcu_read_unlock();
149939743889SChristoph Lameter 		err = -EPERM;
15003268c63eSChristoph Lameter 		goto out_put;
150139743889SChristoph Lameter 	}
1502c69e8d9cSDavid Howells 	rcu_read_unlock();
150339743889SChristoph Lameter 
150439743889SChristoph Lameter 	task_nodes = cpuset_mems_allowed(task);
150539743889SChristoph Lameter 	/* Is the user allowed to access the target nodes? */
1506596d7cfaSKOSAKI Motohiro 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
150739743889SChristoph Lameter 		err = -EPERM;
15083268c63eSChristoph Lameter 		goto out_put;
150939743889SChristoph Lameter 	}
151039743889SChristoph Lameter 
15110486a38bSYisheng Xie 	task_nodes = cpuset_mems_allowed(current);
15120486a38bSYisheng Xie 	nodes_and(*new, *new, task_nodes);
15130486a38bSYisheng Xie 	if (nodes_empty(*new))
15143268c63eSChristoph Lameter 		goto out_put;
15150486a38bSYisheng Xie 
15160486a38bSYisheng Xie 	nodes_and(*new, *new, node_states[N_MEMORY]);
15170486a38bSYisheng Xie 	if (nodes_empty(*new))
15180486a38bSYisheng Xie 		goto out_put;
15193b42d28bSChristoph Lameter 
152086c3a764SDavid Quigley 	err = security_task_movememory(task);
152186c3a764SDavid Quigley 	if (err)
15223268c63eSChristoph Lameter 		goto out_put;
152386c3a764SDavid Quigley 
15243268c63eSChristoph Lameter 	mm = get_task_mm(task);
15253268c63eSChristoph Lameter 	put_task_struct(task);
1526f2a9ef88SSasha Levin 
1527f2a9ef88SSasha Levin 	if (!mm) {
1528f2a9ef88SSasha Levin 		err = -EINVAL;
1529f2a9ef88SSasha Levin 		goto out;
1530f2a9ef88SSasha Levin 	}
1531f2a9ef88SSasha Levin 
1532596d7cfaSKOSAKI Motohiro 	err = do_migrate_pages(mm, old, new,
153374c00241SChristoph Lameter 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
15343268c63eSChristoph Lameter 
153539743889SChristoph Lameter 	mmput(mm);
15363268c63eSChristoph Lameter out:
1537596d7cfaSKOSAKI Motohiro 	NODEMASK_SCRATCH_FREE(scratch);
1538596d7cfaSKOSAKI Motohiro 
153939743889SChristoph Lameter 	return err;
15403268c63eSChristoph Lameter 
15413268c63eSChristoph Lameter out_put:
15423268c63eSChristoph Lameter 	put_task_struct(task);
15433268c63eSChristoph Lameter 	goto out;
15443268c63eSChristoph Lameter 
154539743889SChristoph Lameter }
154639743889SChristoph Lameter 
1547b6e9b0baSDominik Brodowski SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1548b6e9b0baSDominik Brodowski 		const unsigned long __user *, old_nodes,
1549b6e9b0baSDominik Brodowski 		const unsigned long __user *, new_nodes)
1550b6e9b0baSDominik Brodowski {
1551b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1552b6e9b0baSDominik Brodowski }
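
/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <numaif.h> from libnuma, link with -lnuma): ask the kernel to move
 * the pages of process 'pid' from node 0 to node 1.  On success the
 * wrapper returns the number of pages that could not be moved.
 *
 *	#include <numaif.h>
 *
 *	static long move_to_node1(int pid)
 *	{
 *		unsigned long from = 1UL << 0;
 *		unsigned long to = 1UL << 1;
 *
 *		return migrate_pages(pid, 8 * sizeof(from), &from, &to);
 *	}
 */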
1553b6e9b0baSDominik Brodowski 
155439743889SChristoph Lameter 
15558bccd85fSChristoph Lameter /* Retrieve NUMA policy */
1556af03c4acSDominik Brodowski static int kernel_get_mempolicy(int __user *policy,
1557af03c4acSDominik Brodowski 				unsigned long __user *nmask,
1558af03c4acSDominik Brodowski 				unsigned long maxnode,
1559af03c4acSDominik Brodowski 				unsigned long addr,
1560af03c4acSDominik Brodowski 				unsigned long flags)
15618bccd85fSChristoph Lameter {
1562dbcb0f19SAdrian Bunk 	int err;
1563dbcb0f19SAdrian Bunk 	int uninitialized_var(pval);
15648bccd85fSChristoph Lameter 	nodemask_t nodes;
15658bccd85fSChristoph Lameter 
1566050c17f2SRalph Campbell 	if (nmask != NULL && maxnode < nr_node_ids)
15678bccd85fSChristoph Lameter 		return -EINVAL;
15688bccd85fSChristoph Lameter 
15698bccd85fSChristoph Lameter 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
15708bccd85fSChristoph Lameter 
15718bccd85fSChristoph Lameter 	if (err)
15728bccd85fSChristoph Lameter 		return err;
15738bccd85fSChristoph Lameter 
15748bccd85fSChristoph Lameter 	if (policy && put_user(pval, policy))
15758bccd85fSChristoph Lameter 		return -EFAULT;
15768bccd85fSChristoph Lameter 
15778bccd85fSChristoph Lameter 	if (nmask)
15788bccd85fSChristoph Lameter 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
15798bccd85fSChristoph Lameter 
15808bccd85fSChristoph Lameter 	return err;
15818bccd85fSChristoph Lameter }
15828bccd85fSChristoph Lameter 
1583af03c4acSDominik Brodowski SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1584af03c4acSDominik Brodowski 		unsigned long __user *, nmask, unsigned long, maxnode,
1585af03c4acSDominik Brodowski 		unsigned long, addr, unsigned long, flags)
1586af03c4acSDominik Brodowski {
1587af03c4acSDominik Brodowski 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1588af03c4acSDominik Brodowski }
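
/*
 * Illustrative userspace sketch (not part of this file; assumes
 * <numaif.h> from libnuma, link with -lnuma): with MPOL_F_NODE and
 * MPOL_F_ADDR, get_mempolicy(2) reports the node currently backing
 * the page at a given address.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	static void print_node_of(void *addr)
 *	{
 *		int node = -1;
 *
 *		if (get_mempolicy(&node, NULL, 0, addr,
 *				  MPOL_F_NODE | MPOL_F_ADDR))
 *			perror("get_mempolicy");
 *		else
 *			printf("%p is on node %d\n", addr, node);
 *	}
 */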
1589af03c4acSDominik Brodowski 
15901da177e4SLinus Torvalds #ifdef CONFIG_COMPAT
15911da177e4SLinus Torvalds 
1592c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1593c93e0f6cSHeiko Carstens 		       compat_ulong_t __user *, nmask,
1594c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode,
1595c93e0f6cSHeiko Carstens 		       compat_ulong_t, addr, compat_ulong_t, flags)
15961da177e4SLinus Torvalds {
15971da177e4SLinus Torvalds 	long err;
15981da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
15991da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16001da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16011da177e4SLinus Torvalds 
1602050c17f2SRalph Campbell 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
16031da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16041da177e4SLinus Torvalds 
16051da177e4SLinus Torvalds 	if (nmask)
16061da177e4SLinus Torvalds 		nm = compat_alloc_user_space(alloc_size);
16071da177e4SLinus Torvalds 
1608af03c4acSDominik Brodowski 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
16091da177e4SLinus Torvalds 
16101da177e4SLinus Torvalds 	if (!err && nmask) {
16112bbff6c7SKAMEZAWA Hiroyuki 		unsigned long copy_size;
16122bbff6c7SKAMEZAWA Hiroyuki 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
16132bbff6c7SKAMEZAWA Hiroyuki 		err = copy_from_user(bm, nm, copy_size);
16141da177e4SLinus Torvalds 		/* ensure entire bitmap is zeroed */
16151da177e4SLinus Torvalds 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
16161da177e4SLinus Torvalds 		err |= compat_put_bitmap(nmask, bm, nr_bits);
16171da177e4SLinus Torvalds 	}
16181da177e4SLinus Torvalds 
16191da177e4SLinus Torvalds 	return err;
16201da177e4SLinus Torvalds }
16211da177e4SLinus Torvalds 
1622c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1623c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode)
16241da177e4SLinus Torvalds {
16251da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16261da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
16271da177e4SLinus Torvalds 	DECLARE_BITMAP(bm, MAX_NUMNODES);
16281da177e4SLinus Torvalds 
16291da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16301da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16311da177e4SLinus Torvalds 
16321da177e4SLinus Torvalds 	if (nmask) {
1633cf01fb99SChris Salls 		if (compat_get_bitmap(bm, nmask, nr_bits))
16341da177e4SLinus Torvalds 			return -EFAULT;
1635cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1636cf01fb99SChris Salls 		if (copy_to_user(nm, bm, alloc_size))
1637cf01fb99SChris Salls 			return -EFAULT;
1638cf01fb99SChris Salls 	}
16391da177e4SLinus Torvalds 
1640af03c4acSDominik Brodowski 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
16411da177e4SLinus Torvalds }
16421da177e4SLinus Torvalds 
1643c93e0f6cSHeiko Carstens COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1644c93e0f6cSHeiko Carstens 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1645c93e0f6cSHeiko Carstens 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
16461da177e4SLinus Torvalds {
16471da177e4SLinus Torvalds 	unsigned long __user *nm = NULL;
16481da177e4SLinus Torvalds 	unsigned long nr_bits, alloc_size;
1649dfcd3c0dSAndi Kleen 	nodemask_t bm;
16501da177e4SLinus Torvalds 
16511da177e4SLinus Torvalds 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
16521da177e4SLinus Torvalds 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
16531da177e4SLinus Torvalds 
16541da177e4SLinus Torvalds 	if (nmask) {
1655cf01fb99SChris Salls 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
16561da177e4SLinus Torvalds 			return -EFAULT;
1657cf01fb99SChris Salls 		nm = compat_alloc_user_space(alloc_size);
1658cf01fb99SChris Salls 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1659cf01fb99SChris Salls 			return -EFAULT;
1660cf01fb99SChris Salls 	}
16611da177e4SLinus Torvalds 
1662e7dc9ad6SDominik Brodowski 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
16631da177e4SLinus Torvalds }
16641da177e4SLinus Torvalds 
1665b6e9b0baSDominik Brodowski COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1666b6e9b0baSDominik Brodowski 		       compat_ulong_t, maxnode,
1667b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, old_nodes,
1668b6e9b0baSDominik Brodowski 		       const compat_ulong_t __user *, new_nodes)
1669b6e9b0baSDominik Brodowski {
1670b6e9b0baSDominik Brodowski 	unsigned long __user *old = NULL;
1671b6e9b0baSDominik Brodowski 	unsigned long __user *new = NULL;
1672b6e9b0baSDominik Brodowski 	nodemask_t tmp_mask;
1673b6e9b0baSDominik Brodowski 	unsigned long nr_bits;
1674b6e9b0baSDominik Brodowski 	unsigned long size;
1675b6e9b0baSDominik Brodowski 
1676b6e9b0baSDominik Brodowski 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1677b6e9b0baSDominik Brodowski 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1678b6e9b0baSDominik Brodowski 	if (old_nodes) {
1679b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1680b6e9b0baSDominik Brodowski 			return -EFAULT;
1681b6e9b0baSDominik Brodowski 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1682b6e9b0baSDominik Brodowski 		if (new_nodes)
1683b6e9b0baSDominik Brodowski 			new = old + size / sizeof(unsigned long);
1684b6e9b0baSDominik Brodowski 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1685b6e9b0baSDominik Brodowski 			return -EFAULT;
1686b6e9b0baSDominik Brodowski 	}
1687b6e9b0baSDominik Brodowski 	if (new_nodes) {
1688b6e9b0baSDominik Brodowski 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1689b6e9b0baSDominik Brodowski 			return -EFAULT;
1690b6e9b0baSDominik Brodowski 		if (new == NULL)
1691b6e9b0baSDominik Brodowski 			new = compat_alloc_user_space(size);
1692b6e9b0baSDominik Brodowski 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1693b6e9b0baSDominik Brodowski 			return -EFAULT;
1694b6e9b0baSDominik Brodowski 	}
1695b6e9b0baSDominik Brodowski 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1696b6e9b0baSDominik Brodowski }
1697b6e9b0baSDominik Brodowski 
1698b6e9b0baSDominik Brodowski #endif /* CONFIG_COMPAT */
16991da177e4SLinus Torvalds 
170074d2c3a0SOleg Nesterov struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
170174d2c3a0SOleg Nesterov 						unsigned long addr)
17021da177e4SLinus Torvalds {
17038d90274bSOleg Nesterov 	struct mempolicy *pol = NULL;
17041da177e4SLinus Torvalds 
17051da177e4SLinus Torvalds 	if (vma) {
1706480eccf9SLee Schermerhorn 		if (vma->vm_ops && vma->vm_ops->get_policy) {
17078d90274bSOleg Nesterov 			pol = vma->vm_ops->get_policy(vma, addr);
170800442ad0SMel Gorman 		} else if (vma->vm_policy) {
17091da177e4SLinus Torvalds 			pol = vma->vm_policy;
171000442ad0SMel Gorman 
171100442ad0SMel Gorman 			/*
171200442ad0SMel Gorman 			 * shmem_alloc_page() passes MPOL_F_SHARED policy with
171300442ad0SMel Gorman 			 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
171400442ad0SMel Gorman 			 * count on these policies which will be dropped by
171500442ad0SMel Gorman 			 * mpol_cond_put() later
171600442ad0SMel Gorman 			 */
171700442ad0SMel Gorman 			if (mpol_needs_cond_ref(pol))
171800442ad0SMel Gorman 				mpol_get(pol);
171900442ad0SMel Gorman 		}
17201da177e4SLinus Torvalds 	}
1721f15ca78eSOleg Nesterov 
172274d2c3a0SOleg Nesterov 	return pol;
172374d2c3a0SOleg Nesterov }
172474d2c3a0SOleg Nesterov 
172574d2c3a0SOleg Nesterov /*
1726dd6eecb9SOleg Nesterov  * get_vma_policy(@vma, @addr)
172774d2c3a0SOleg Nesterov  * @vma: virtual memory area whose policy is sought
172874d2c3a0SOleg Nesterov  * @addr: address in @vma for shared policy lookup
172974d2c3a0SOleg Nesterov  *
173074d2c3a0SOleg Nesterov  * Returns effective policy for a VMA at specified address.
1731dd6eecb9SOleg Nesterov  * Falls back to current->mempolicy or system default policy, as necessary.
173274d2c3a0SOleg Nesterov  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
173374d2c3a0SOleg Nesterov  * count--added by the get_policy() vm_op, as appropriate--to protect against
173474d2c3a0SOleg Nesterov  * freeing by another task.  It is the caller's responsibility to free the
173574d2c3a0SOleg Nesterov  * extra reference for shared policies.
173674d2c3a0SOleg Nesterov  */
1737ac79f78dSDavid Rientjes static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1738dd6eecb9SOleg Nesterov 						unsigned long addr)
173974d2c3a0SOleg Nesterov {
174074d2c3a0SOleg Nesterov 	struct mempolicy *pol = __get_vma_policy(vma, addr);
174174d2c3a0SOleg Nesterov 
17428d90274bSOleg Nesterov 	if (!pol)
1743dd6eecb9SOleg Nesterov 		pol = get_task_policy(current);
17448d90274bSOleg Nesterov 
17451da177e4SLinus Torvalds 	return pol;
17461da177e4SLinus Torvalds }
17471da177e4SLinus Torvalds 
17486b6482bbSOleg Nesterov bool vma_policy_mof(struct vm_area_struct *vma)
1749fc314724SMel Gorman {
17506b6482bbSOleg Nesterov 	struct mempolicy *pol;
1751f15ca78eSOleg Nesterov 
1752fc314724SMel Gorman 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1753fc314724SMel Gorman 		bool ret = false;
1754fc314724SMel Gorman 
1755fc314724SMel Gorman 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1756fc314724SMel Gorman 		if (pol && (pol->flags & MPOL_F_MOF))
1757fc314724SMel Gorman 			ret = true;
1758fc314724SMel Gorman 		mpol_cond_put(pol);
1759fc314724SMel Gorman 
1760fc314724SMel Gorman 		return ret;
17618d90274bSOleg Nesterov 	}
17628d90274bSOleg Nesterov 
1763fc314724SMel Gorman 	pol = vma->vm_policy;
17648d90274bSOleg Nesterov 	if (!pol)
17656b6482bbSOleg Nesterov 		pol = get_task_policy(current);
1766fc314724SMel Gorman 
1767fc314724SMel Gorman 	return pol->flags & MPOL_F_MOF;
1768fc314724SMel Gorman }
1769fc314724SMel Gorman 
1770d3eb1570SLai Jiangshan static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1771d3eb1570SLai Jiangshan {
1772d3eb1570SLai Jiangshan 	enum zone_type dynamic_policy_zone = policy_zone;
1773d3eb1570SLai Jiangshan 
1774d3eb1570SLai Jiangshan 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1775d3eb1570SLai Jiangshan 
1776d3eb1570SLai Jiangshan 	/*
1777d3eb1570SLai Jiangshan 	 * if policy->v.nodes has movable memory only,
1778d3eb1570SLai Jiangshan 	 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1779d3eb1570SLai Jiangshan 	 *
1780d3eb1570SLai Jiangshan 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1781d3eb1570SLai Jiangshan 	 * so if the following test fails, it implies
1782d3eb1570SLai Jiangshan 	 * policy->v.nodes has movable memory only.
1783d3eb1570SLai Jiangshan 	 */
1784d3eb1570SLai Jiangshan 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1785d3eb1570SLai Jiangshan 		dynamic_policy_zone = ZONE_MOVABLE;
1786d3eb1570SLai Jiangshan 
1787d3eb1570SLai Jiangshan 	return zone >= dynamic_policy_zone;
1788d3eb1570SLai Jiangshan }
1789d3eb1570SLai Jiangshan 
179052cd3b07SLee Schermerhorn /*
179152cd3b07SLee Schermerhorn  * Return a nodemask representing a mempolicy for filtering nodes for
179252cd3b07SLee Schermerhorn  * page allocation
179352cd3b07SLee Schermerhorn  */
179452cd3b07SLee Schermerhorn static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
179519770b32SMel Gorman {
179619770b32SMel Gorman 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
179745c4745aSLee Schermerhorn 	if (unlikely(policy->mode == MPOL_BIND) &&
1798d3eb1570SLai Jiangshan 			apply_policy_zone(policy, gfp_zone(gfp)) &&
179919770b32SMel Gorman 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
180019770b32SMel Gorman 		return &policy->v.nodes;
180119770b32SMel Gorman 
180219770b32SMel Gorman 	return NULL;
180319770b32SMel Gorman }
180419770b32SMel Gorman 
180504ec6264SVlastimil Babka /* Return the node id preferred by the given mempolicy, or the given id */
180604ec6264SVlastimil Babka static int policy_node(gfp_t gfp, struct mempolicy *policy,
18072f5f9486SAndi Kleen 								int nd)
18081da177e4SLinus Torvalds {
18096d840958SMichal Hocko 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
18101da177e4SLinus Torvalds 		nd = policy->v.preferred_node;
18116d840958SMichal Hocko 	else {
181219770b32SMel Gorman 		/*
18136d840958SMichal Hocko 		 * __GFP_THISNODE shouldn't even be used with the bind policy
18146d840958SMichal Hocko 		 * because we might easily break the expectation to stay on the
18156d840958SMichal Hocko 		 * requested node and not break the policy.
181619770b32SMel Gorman 		 */
18176d840958SMichal Hocko 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
18181da177e4SLinus Torvalds 	}
18196d840958SMichal Hocko 
182004ec6264SVlastimil Babka 	return nd;
18211da177e4SLinus Torvalds }
18221da177e4SLinus Torvalds 
18231da177e4SLinus Torvalds /* Do dynamic interleaving for a process */
18241da177e4SLinus Torvalds static unsigned interleave_nodes(struct mempolicy *policy)
18251da177e4SLinus Torvalds {
182645816682SVlastimil Babka 	unsigned next;
18271da177e4SLinus Torvalds 	struct task_struct *me = current;
18281da177e4SLinus Torvalds 
182945816682SVlastimil Babka 	next = next_node_in(me->il_prev, policy->v.nodes);
1830f5b087b5SDavid Rientjes 	if (next < MAX_NUMNODES)
183145816682SVlastimil Babka 		me->il_prev = next;
183245816682SVlastimil Babka 	return next;
18331da177e4SLinus Torvalds }
18341da177e4SLinus Torvalds 
1835dc85da15SChristoph Lameter /*
1836dc85da15SChristoph Lameter  * Depending on the memory policy provide a node from which to allocate the
1837dc85da15SChristoph Lameter  * next slab entry.
1838dc85da15SChristoph Lameter  */
18392a389610SDavid Rientjes unsigned int mempolicy_slab_node(void)
1840dc85da15SChristoph Lameter {
1841e7b691b0SAndi Kleen 	struct mempolicy *policy;
18422a389610SDavid Rientjes 	int node = numa_mem_id();
1843e7b691b0SAndi Kleen 
1844e7b691b0SAndi Kleen 	if (in_interrupt())
18452a389610SDavid Rientjes 		return node;
1846e7b691b0SAndi Kleen 
1847e7b691b0SAndi Kleen 	policy = current->mempolicy;
1848fc36b8d3SLee Schermerhorn 	if (!policy || policy->flags & MPOL_F_LOCAL)
18492a389610SDavid Rientjes 		return node;
1850765c4507SChristoph Lameter 
1851bea904d5SLee Schermerhorn 	switch (policy->mode) {
1852bea904d5SLee Schermerhorn 	case MPOL_PREFERRED:
1853fc36b8d3SLee Schermerhorn 		/*
1854fc36b8d3SLee Schermerhorn 		 * handled MPOL_F_LOCAL above
1855fc36b8d3SLee Schermerhorn 		 */
1856bea904d5SLee Schermerhorn 		return policy->v.preferred_node;
1857bea904d5SLee Schermerhorn 
1858dc85da15SChristoph Lameter 	case MPOL_INTERLEAVE:
1859dc85da15SChristoph Lameter 		return interleave_nodes(policy);
1860dc85da15SChristoph Lameter 
1861dd1a239fSMel Gorman 	case MPOL_BIND: {
1862c33d6c06SMel Gorman 		struct zoneref *z;
1863c33d6c06SMel Gorman 
1864dc85da15SChristoph Lameter 		/*
1865dc85da15SChristoph Lameter 		 * Follow bind policy behavior and start allocation at the
1866dc85da15SChristoph Lameter 		 * first node.
1867dc85da15SChristoph Lameter 		 */
186819770b32SMel Gorman 		struct zonelist *zonelist;
186919770b32SMel Gorman 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1870c9634cf0SAneesh Kumar K.V 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1871c33d6c06SMel Gorman 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1872c33d6c06SMel Gorman 							&policy->v.nodes);
1873c1093b74SPavel Tatashin 		return z->zone ? zone_to_nid(z->zone) : node;
1874dd1a239fSMel Gorman 	}
1875dc85da15SChristoph Lameter 
1876dc85da15SChristoph Lameter 	default:
1877bea904d5SLee Schermerhorn 		BUG();
1878dc85da15SChristoph Lameter 	}
1879dc85da15SChristoph Lameter }
1880dc85da15SChristoph Lameter 
1881fee83b3aSAndrew Morton /*
1882fee83b3aSAndrew Morton  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1883fee83b3aSAndrew Morton  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1884fee83b3aSAndrew Morton  * number of present nodes.
1885fee83b3aSAndrew Morton  */
188698c70baaSLaurent Dufour static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
18871da177e4SLinus Torvalds {
1888dfcd3c0dSAndi Kleen 	unsigned nnodes = nodes_weight(pol->v.nodes);
1889f5b087b5SDavid Rientjes 	unsigned target;
1890fee83b3aSAndrew Morton 	int i;
1891fee83b3aSAndrew Morton 	int nid;
18921da177e4SLinus Torvalds 
1893f5b087b5SDavid Rientjes 	if (!nnodes)
1894f5b087b5SDavid Rientjes 		return numa_node_id();
1895fee83b3aSAndrew Morton 	target = (unsigned int)n % nnodes;
1896fee83b3aSAndrew Morton 	nid = first_node(pol->v.nodes);
1897fee83b3aSAndrew Morton 	for (i = 0; i < target; i++)
1898dfcd3c0dSAndi Kleen 		nid = next_node(nid, pol->v.nodes);
18991da177e4SLinus Torvalds 	return nid;
19001da177e4SLinus Torvalds }
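
/*
 * Worked example (added for illustration): with pol->v.nodes = {0,2,4}
 * and n = 7, nnodes = 3 and target = 7 % 3 = 1, so the walk starts at
 * node 0 and takes one next_node() step, returning node 2.
 */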
19011da177e4SLinus Torvalds 
19025da7ca86SChristoph Lameter /* Determine a node number for interleave */
19035da7ca86SChristoph Lameter static inline unsigned interleave_nid(struct mempolicy *pol,
19045da7ca86SChristoph Lameter 		 struct vm_area_struct *vma, unsigned long addr, int shift)
19055da7ca86SChristoph Lameter {
19065da7ca86SChristoph Lameter 	if (vma) {
19075da7ca86SChristoph Lameter 		unsigned long off;
19085da7ca86SChristoph Lameter 
19093b98b087SNishanth Aravamudan 		/*
19103b98b087SNishanth Aravamudan 		 * for small pages, there is no difference between
19113b98b087SNishanth Aravamudan 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
19123b98b087SNishanth Aravamudan 		 * for huge pages, since vm_pgoff is in units of small
19133b98b087SNishanth Aravamudan 		 * pages, we need to shift off the always 0 bits to get
19143b98b087SNishanth Aravamudan 		 * a useful offset.
19153b98b087SNishanth Aravamudan 		 */
19163b98b087SNishanth Aravamudan 		BUG_ON(shift < PAGE_SHIFT);
19173b98b087SNishanth Aravamudan 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
19185da7ca86SChristoph Lameter 		off += (addr - vma->vm_start) >> shift;
191998c70baaSLaurent Dufour 		return offset_il_node(pol, off);
19205da7ca86SChristoph Lameter 	} else
19215da7ca86SChristoph Lameter 		return interleave_nodes(pol);
19225da7ca86SChristoph Lameter }
19235da7ca86SChristoph Lameter 
192400ac59adSChen, Kenneth W #ifdef CONFIG_HUGETLBFS
1925480eccf9SLee Schermerhorn /*
192604ec6264SVlastimil Babka  * huge_node(@vma, @addr, @gfp_flags, @mpol)
1927b46e14acSFabian Frederick  * @vma: virtual memory area whose policy is sought
1928b46e14acSFabian Frederick  * @addr: address in @vma for shared policy lookup and interleave policy
1929b46e14acSFabian Frederick  * @gfp_flags: for requested zone
1930b46e14acSFabian Frederick  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1931b46e14acSFabian Frederick  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1932480eccf9SLee Schermerhorn  *
193304ec6264SVlastimil Babka  * Returns a nid suitable for a huge page allocation and a pointer
193452cd3b07SLee Schermerhorn  * to the struct mempolicy for conditional unref after allocation.
193552cd3b07SLee Schermerhorn  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
193652cd3b07SLee Schermerhorn  * @nodemask for filtering the zonelist.
1937c0ff7453SMiao Xie  *
1938d26914d1SMel Gorman  * Must be protected by read_mems_allowed_begin()
1939480eccf9SLee Schermerhorn  */
194004ec6264SVlastimil Babka int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
194104ec6264SVlastimil Babka 				struct mempolicy **mpol, nodemask_t **nodemask)
19425da7ca86SChristoph Lameter {
194304ec6264SVlastimil Babka 	int nid;
19445da7ca86SChristoph Lameter 
1945dd6eecb9SOleg Nesterov 	*mpol = get_vma_policy(vma, addr);
194619770b32SMel Gorman 	*nodemask = NULL;	/* assume !MPOL_BIND */
19475da7ca86SChristoph Lameter 
194852cd3b07SLee Schermerhorn 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
194904ec6264SVlastimil Babka 		nid = interleave_nid(*mpol, vma, addr,
195004ec6264SVlastimil Babka 					huge_page_shift(hstate_vma(vma)));
195152cd3b07SLee Schermerhorn 	} else {
195204ec6264SVlastimil Babka 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
195352cd3b07SLee Schermerhorn 		if ((*mpol)->mode == MPOL_BIND)
195452cd3b07SLee Schermerhorn 			*nodemask = &(*mpol)->v.nodes;
1955480eccf9SLee Schermerhorn 	}
195604ec6264SVlastimil Babka 	return nid;
19575da7ca86SChristoph Lameter }
195806808b08SLee Schermerhorn 
195906808b08SLee Schermerhorn /*
196006808b08SLee Schermerhorn  * init_nodemask_of_mempolicy
196106808b08SLee Schermerhorn  *
196206808b08SLee Schermerhorn  * If the current task's mempolicy is "default" [NULL], return 'false'
196306808b08SLee Schermerhorn  * to indicate default policy.  Otherwise, extract the policy nodemask
196406808b08SLee Schermerhorn  * for 'bind' or 'interleave' policy into the argument nodemask, or
196506808b08SLee Schermerhorn  * initialize the argument nodemask to contain the single node for
196606808b08SLee Schermerhorn  * 'preferred' or 'local' policy and return 'true' to indicate presence
196706808b08SLee Schermerhorn  * of non-default mempolicy.
196806808b08SLee Schermerhorn  *
196906808b08SLee Schermerhorn  * We don't bother with reference counting the mempolicy [mpol_get/put]
197006808b08SLee Schermerhorn  * because the current task is examining its own mempolicy and a task's
197106808b08SLee Schermerhorn  * mempolicy is only ever changed by the task itself.
197206808b08SLee Schermerhorn  *
197306808b08SLee Schermerhorn  * N.B., it is the caller's responsibility to free a returned nodemask.
197406808b08SLee Schermerhorn  */
197506808b08SLee Schermerhorn bool init_nodemask_of_mempolicy(nodemask_t *mask)
197606808b08SLee Schermerhorn {
197706808b08SLee Schermerhorn 	struct mempolicy *mempolicy;
197806808b08SLee Schermerhorn 	int nid;
197906808b08SLee Schermerhorn 
198006808b08SLee Schermerhorn 	if (!(mask && current->mempolicy))
198106808b08SLee Schermerhorn 		return false;
198206808b08SLee Schermerhorn 
1983c0ff7453SMiao Xie 	task_lock(current);
198406808b08SLee Schermerhorn 	mempolicy = current->mempolicy;
198506808b08SLee Schermerhorn 	switch (mempolicy->mode) {
198606808b08SLee Schermerhorn 	case MPOL_PREFERRED:
198706808b08SLee Schermerhorn 		if (mempolicy->flags & MPOL_F_LOCAL)
198806808b08SLee Schermerhorn 			nid = numa_node_id();
198906808b08SLee Schermerhorn 		else
199006808b08SLee Schermerhorn 			nid = mempolicy->v.preferred_node;
199106808b08SLee Schermerhorn 		init_nodemask_of_node(mask, nid);
199206808b08SLee Schermerhorn 		break;
199306808b08SLee Schermerhorn 
199406808b08SLee Schermerhorn 	case MPOL_BIND:
199506808b08SLee Schermerhorn 		/* Fall through */
199606808b08SLee Schermerhorn 	case MPOL_INTERLEAVE:
199706808b08SLee Schermerhorn 		*mask =  mempolicy->v.nodes;
199806808b08SLee Schermerhorn 		break;
199906808b08SLee Schermerhorn 
200006808b08SLee Schermerhorn 	default:
200106808b08SLee Schermerhorn 		BUG();
200206808b08SLee Schermerhorn 	}
2003c0ff7453SMiao Xie 	task_unlock(current);
200406808b08SLee Schermerhorn 
200506808b08SLee Schermerhorn 	return true;
200606808b08SLee Schermerhorn }
200700ac59adSChen, Kenneth W #endif
20085da7ca86SChristoph Lameter 
20096f48d0ebSDavid Rientjes /*
20106f48d0ebSDavid Rientjes  * mempolicy_nodemask_intersects
20116f48d0ebSDavid Rientjes  *
20126f48d0ebSDavid Rientjes  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
20136f48d0ebSDavid Rientjes  * policy.  Otherwise, check for intersection between mask and the policy
20146f48d0ebSDavid Rientjes  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
20156f48d0ebSDavid Rientjes  * policy, always return true since it may allocate elsewhere on fallback.
20166f48d0ebSDavid Rientjes  *
20176f48d0ebSDavid Rientjes  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
20186f48d0ebSDavid Rientjes  */
20196f48d0ebSDavid Rientjes bool mempolicy_nodemask_intersects(struct task_struct *tsk,
20206f48d0ebSDavid Rientjes 					const nodemask_t *mask)
20216f48d0ebSDavid Rientjes {
20226f48d0ebSDavid Rientjes 	struct mempolicy *mempolicy;
20236f48d0ebSDavid Rientjes 	bool ret = true;
20246f48d0ebSDavid Rientjes 
20256f48d0ebSDavid Rientjes 	if (!mask)
20266f48d0ebSDavid Rientjes 		return ret;
20276f48d0ebSDavid Rientjes 	task_lock(tsk);
20286f48d0ebSDavid Rientjes 	mempolicy = tsk->mempolicy;
20296f48d0ebSDavid Rientjes 	if (!mempolicy)
20306f48d0ebSDavid Rientjes 		goto out;
20316f48d0ebSDavid Rientjes 
20326f48d0ebSDavid Rientjes 	switch (mempolicy->mode) {
20336f48d0ebSDavid Rientjes 	case MPOL_PREFERRED:
20346f48d0ebSDavid Rientjes 		/*
20356f48d0ebSDavid Rientjes 		 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
20366f48d0ebSDavid Rientjes 		 * allocate from, they may fallback to other nodes when oom.
20376f48d0ebSDavid Rientjes 		 * allocate from; they may fall back to other nodes when OOM.
20386f48d0ebSDavid Rientjes 		 * nodes in mask.
20396f48d0ebSDavid Rientjes 		 */
20406f48d0ebSDavid Rientjes 		break;
20416f48d0ebSDavid Rientjes 	case MPOL_BIND:
20426f48d0ebSDavid Rientjes 	case MPOL_INTERLEAVE:
20436f48d0ebSDavid Rientjes 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
20446f48d0ebSDavid Rientjes 		break;
20456f48d0ebSDavid Rientjes 	default:
20466f48d0ebSDavid Rientjes 		BUG();
20476f48d0ebSDavid Rientjes 	}
20486f48d0ebSDavid Rientjes out:
20496f48d0ebSDavid Rientjes 	task_unlock(tsk);
20506f48d0ebSDavid Rientjes 	return ret;
20516f48d0ebSDavid Rientjes }
20526f48d0ebSDavid Rientjes 
20531da177e4SLinus Torvalds /* Allocate a page in interleaved policy.
20541da177e4SLinus Torvalds    Own path because it needs to do special accounting. */
2055662f3a0bSAndi Kleen static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2056662f3a0bSAndi Kleen 					unsigned nid)
20571da177e4SLinus Torvalds {
20581da177e4SLinus Torvalds 	struct page *page;
20591da177e4SLinus Torvalds 
206004ec6264SVlastimil Babka 	page = __alloc_pages(gfp, order, nid);
20614518085eSKemi Wang 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
20624518085eSKemi Wang 	if (!static_branch_likely(&vm_numa_stat_key))
20634518085eSKemi Wang 		return page;
2064de55c8b2SAndrey Ryabinin 	if (page && page_to_nid(page) == nid) {
2065de55c8b2SAndrey Ryabinin 		preempt_disable();
2066de55c8b2SAndrey Ryabinin 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2067de55c8b2SAndrey Ryabinin 		preempt_enable();
2068de55c8b2SAndrey Ryabinin 	}
20691da177e4SLinus Torvalds 	return page;
20701da177e4SLinus Torvalds }
20711da177e4SLinus Torvalds 
20721da177e4SLinus Torvalds /**
20730bbbc0b3SAndrea Arcangeli  * 	alloc_pages_vma	- Allocate a page for a VMA.
20741da177e4SLinus Torvalds  *
20751da177e4SLinus Torvalds  * 	@gfp:
20761da177e4SLinus Torvalds  *      %GFP_USER    user allocation.
20771da177e4SLinus Torvalds  *      %GFP_KERNEL  kernel allocations,
20781da177e4SLinus Torvalds  *      %GFP_HIGHMEM highmem/user allocations,
20791da177e4SLinus Torvalds  *      %GFP_FS      allocation should not call back into a file system.
20801da177e4SLinus Torvalds  *      %GFP_ATOMIC  don't sleep.
20811da177e4SLinus Torvalds  *
20820bbbc0b3SAndrea Arcangeli  *	@order: Order of the GFP allocation.
20831da177e4SLinus Torvalds  * 	@vma:  Pointer to VMA or NULL if not available.
20841da177e4SLinus Torvalds  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2085be97a41bSVlastimil Babka  *	@node: Which node to prefer for allocation (modulo policy).
2086*19deb769SDavid Rientjes  *	@hugepage: for hugepages try only the preferred node if possible
20871da177e4SLinus Torvalds  *
20881da177e4SLinus Torvalds  * 	This function allocates a page from the kernel page pool and applies
20891da177e4SLinus Torvalds  *	a NUMA policy associated with the VMA or the current process.
20901da177e4SLinus Torvalds  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
20911da177e4SLinus Torvalds  *	mm_struct of the VMA to prevent it from going away. Should be used for
2092be97a41bSVlastimil Babka  *	all allocations for pages that will be mapped into user space. Returns
2093be97a41bSVlastimil Babka  *	NULL when no page can be allocated.
20941da177e4SLinus Torvalds  */
20951da177e4SLinus Torvalds struct page *
20960bbbc0b3SAndrea Arcangeli alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2097*19deb769SDavid Rientjes 		unsigned long addr, int node, bool hugepage)
20981da177e4SLinus Torvalds {
2099cc9a6c87SMel Gorman 	struct mempolicy *pol;
2100c0ff7453SMiao Xie 	struct page *page;
210104ec6264SVlastimil Babka 	int preferred_nid;
2102be97a41bSVlastimil Babka 	nodemask_t *nmask;
21031da177e4SLinus Torvalds 
2104dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2105cc9a6c87SMel Gorman 
2106be97a41bSVlastimil Babka 	if (pol->mode == MPOL_INTERLEAVE) {
21071da177e4SLinus Torvalds 		unsigned nid;
21085da7ca86SChristoph Lameter 
21098eac563cSAndi Kleen 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
211052cd3b07SLee Schermerhorn 		mpol_cond_put(pol);
21110bbbc0b3SAndrea Arcangeli 		page = alloc_page_interleave(gfp, order, nid);
2112be97a41bSVlastimil Babka 		goto out;
21131da177e4SLinus Torvalds 	}
21141da177e4SLinus Torvalds 
2115*19deb769SDavid Rientjes 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2116*19deb769SDavid Rientjes 		int hpage_node = node;
2117*19deb769SDavid Rientjes 
2118*19deb769SDavid Rientjes 		/*
2119*19deb769SDavid Rientjes 		 * For hugepage allocation with a non-interleave policy that
2120*19deb769SDavid Rientjes 		 * allows the current node (or another explicitly preferred
2121*19deb769SDavid Rientjes 		 * node), we only try to allocate from the current/preferred
2122*19deb769SDavid Rientjes 		 * node and don't fall back to other nodes, as the cost of
2123*19deb769SDavid Rientjes 		 * remote accesses would likely offset THP benefits.
2124*19deb769SDavid Rientjes 		 *
2125*19deb769SDavid Rientjes 		 * If the policy is interleave, or does not allow the current
2126*19deb769SDavid Rientjes 		 * node in its nodemask, we allocate the standard way.
2127*19deb769SDavid Rientjes 		 */
2128*19deb769SDavid Rientjes 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2129*19deb769SDavid Rientjes 			hpage_node = pol->v.preferred_node;
2130*19deb769SDavid Rientjes 
2131*19deb769SDavid Rientjes 		nmask = policy_nodemask(gfp, pol);
2132*19deb769SDavid Rientjes 		if (!nmask || node_isset(hpage_node, *nmask)) {
2133*19deb769SDavid Rientjes 			mpol_cond_put(pol);
2134*19deb769SDavid Rientjes 			page = __alloc_pages_node(hpage_node,
2135*19deb769SDavid Rientjes 						gfp | __GFP_THISNODE, order);
2136*19deb769SDavid Rientjes 			goto out;
2137*19deb769SDavid Rientjes 		}
2138*19deb769SDavid Rientjes 	}
2139*19deb769SDavid Rientjes 
2140077fcf11SAneesh Kumar K.V 	nmask = policy_nodemask(gfp, pol);
214104ec6264SVlastimil Babka 	preferred_nid = policy_node(gfp, pol, node);
214204ec6264SVlastimil Babka 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2143d51e9894SVlastimil Babka 	mpol_cond_put(pol);
2144be97a41bSVlastimil Babka out:
2145077fcf11SAneesh Kumar K.V 	return page;
2146077fcf11SAneesh Kumar K.V }
214769262215SChristoph Hellwig EXPORT_SYMBOL(alloc_pages_vma);
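/*
 * A minimal usage sketch (not from this file, caller context assumed): in a
 * page-fault path this is typically reached through the alloc_page_vma()
 * wrapper, roughly as below.
 *
 *	struct page *page;
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */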
2148077fcf11SAneesh Kumar K.V 
21491da177e4SLinus Torvalds /**
21501da177e4SLinus Torvalds  * 	alloc_pages_current - Allocate pages.
21511da177e4SLinus Torvalds  *
21521da177e4SLinus Torvalds  *	@gfp:
21531da177e4SLinus Torvalds  *		%GFP_USER   user allocation,
21541da177e4SLinus Torvalds  *      	%GFP_KERNEL kernel allocation,
21551da177e4SLinus Torvalds  *      	%GFP_HIGHMEM highmem allocation,
21561da177e4SLinus Torvalds  *      	%GFP_FS     don't call back into a file system.
21571da177e4SLinus Torvalds  *      	%GFP_ATOMIC don't sleep.
21581da177e4SLinus Torvalds  *	@order: Power of two of allocation size in pages. 0 is a single page.
21591da177e4SLinus Torvalds  *
21601da177e4SLinus Torvalds  *	Allocate a page from the kernel page pool.  When not in
21611da177e4SLinus Torvalds  *	interrupt context, apply the current process' NUMA policy.
21621da177e4SLinus Torvalds  *	Returns NULL when no page can be allocated.
21631da177e4SLinus Torvalds  */
2164dd0fc66fSAl Viro struct page *alloc_pages_current(gfp_t gfp, unsigned order)
21651da177e4SLinus Torvalds {
21668d90274bSOleg Nesterov 	struct mempolicy *pol = &default_policy;
2167c0ff7453SMiao Xie 	struct page *page;
21681da177e4SLinus Torvalds 
21698d90274bSOleg Nesterov 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
21708d90274bSOleg Nesterov 		pol = get_task_policy(current);
217152cd3b07SLee Schermerhorn 
217252cd3b07SLee Schermerhorn 	/*
217352cd3b07SLee Schermerhorn 	 * No reference counting needed for current->mempolicy
217452cd3b07SLee Schermerhorn 	 * nor system default_policy
217552cd3b07SLee Schermerhorn 	 */
217645c4745aSLee Schermerhorn 	if (pol->mode == MPOL_INTERLEAVE)
2177c0ff7453SMiao Xie 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2178c0ff7453SMiao Xie 	else
2179c0ff7453SMiao Xie 		page = __alloc_pages_nodemask(gfp, order,
218004ec6264SVlastimil Babka 				policy_node(gfp, pol, numa_node_id()),
21815c4b4be3SAndi Kleen 				policy_nodemask(gfp, pol));
2182cc9a6c87SMel Gorman 
2183c0ff7453SMiao Xie 	return page;
21841da177e4SLinus Torvalds }
21851da177e4SLinus Torvalds EXPORT_SYMBOL(alloc_pages_current);
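/*
 * A minimal usage sketch: on NUMA kernels the alloc_pages() wrapper resolves
 * to alloc_pages_current(), so a caller allocating 2^2 = 4 contiguous pages
 * looks roughly like this.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 */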
21861da177e4SLinus Torvalds 
2187ef0855d3SOleg Nesterov int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2188ef0855d3SOleg Nesterov {
2189ef0855d3SOleg Nesterov 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2190ef0855d3SOleg Nesterov 
2191ef0855d3SOleg Nesterov 	if (IS_ERR(pol))
2192ef0855d3SOleg Nesterov 		return PTR_ERR(pol);
2193ef0855d3SOleg Nesterov 	dst->vm_policy = pol;
2194ef0855d3SOleg Nesterov 	return 0;
2195ef0855d3SOleg Nesterov }
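/*
 * A usage sketch, assuming a VMA-splitting caller that has just copied an old
 * vm_area_struct into a new one (the error label belongs to that assumed
 * caller):
 *
 *	err = vma_dup_policy(old_vma, new_vma);
 *	if (err)
 *		goto out_free_vma;
 */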
2196ef0855d3SOleg Nesterov 
21974225399aSPaul Jackson /*
2198846a16bfSLee Schermerhorn  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
21994225399aSPaul Jackson  * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
22004225399aSPaul Jackson  * with the mems_allowed returned by cpuset_mems_allowed().  This
22014225399aSPaul Jackson  * keeps mempolicies cpuset relative after its cpuset moves.  See
22024225399aSPaul Jackson  * further kernel/cpuset.c update_nodemask().
2203708c1bbcSMiao Xie  *
2204708c1bbcSMiao Xie  * current's mempolicy may be rebound by another task (the task that changes
2205708c1bbcSMiao Xie  * the cpuset's mems), so we needn't do rebind work for the current task.
22064225399aSPaul Jackson  */
22074225399aSPaul Jackson 
2208846a16bfSLee Schermerhorn /* Slow path of a mempolicy duplicate */
2209846a16bfSLee Schermerhorn struct mempolicy *__mpol_dup(struct mempolicy *old)
22101da177e4SLinus Torvalds {
22111da177e4SLinus Torvalds 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
22121da177e4SLinus Torvalds 
22131da177e4SLinus Torvalds 	if (!new)
22141da177e4SLinus Torvalds 		return ERR_PTR(-ENOMEM);
2215708c1bbcSMiao Xie 
2216708c1bbcSMiao Xie 	/* task's mempolicy is protected by alloc_lock */
2217708c1bbcSMiao Xie 	if (old == current->mempolicy) {
2218708c1bbcSMiao Xie 		task_lock(current);
2219708c1bbcSMiao Xie 		*new = *old;
2220708c1bbcSMiao Xie 		task_unlock(current);
2221708c1bbcSMiao Xie 	} else
2222708c1bbcSMiao Xie 		*new = *old;
2223708c1bbcSMiao Xie 
22244225399aSPaul Jackson 	if (current_cpuset_is_being_rebound()) {
22254225399aSPaul Jackson 		nodemask_t mems = cpuset_mems_allowed(current);
2226213980c0SVlastimil Babka 		mpol_rebind_policy(new, &mems);
22274225399aSPaul Jackson 	}
22281da177e4SLinus Torvalds 	atomic_set(&new->refcnt, 1);
22291da177e4SLinus Torvalds 	return new;
22301da177e4SLinus Torvalds }
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds /* Slow path of a mempolicy comparison */
2233fcfb4dccSKOSAKI Motohiro bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
22341da177e4SLinus Torvalds {
22351da177e4SLinus Torvalds 	if (!a || !b)
2236fcfb4dccSKOSAKI Motohiro 		return false;
223745c4745aSLee Schermerhorn 	if (a->mode != b->mode)
2238fcfb4dccSKOSAKI Motohiro 		return false;
223919800502SBob Liu 	if (a->flags != b->flags)
2240fcfb4dccSKOSAKI Motohiro 		return false;
224119800502SBob Liu 	if (mpol_store_user_nodemask(a))
224219800502SBob Liu 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2243fcfb4dccSKOSAKI Motohiro 			return false;
224419800502SBob Liu 
224545c4745aSLee Schermerhorn 	switch (a->mode) {
224619770b32SMel Gorman 	case MPOL_BIND:
224719770b32SMel Gorman 		/* Fall through */
22481da177e4SLinus Torvalds 	case MPOL_INTERLEAVE:
2249fcfb4dccSKOSAKI Motohiro 		return !!nodes_equal(a->v.nodes, b->v.nodes);
22501da177e4SLinus Torvalds 	case MPOL_PREFERRED:
22518970a63eSYisheng Xie 		/* a's ->flags is the same as b's */
22528970a63eSYisheng Xie 		if (a->flags & MPOL_F_LOCAL)
22538970a63eSYisheng Xie 			return true;
225475719661SNamhyung Kim 		return a->v.preferred_node == b->v.preferred_node;
22551da177e4SLinus Torvalds 	default:
22561da177e4SLinus Torvalds 		BUG();
2257fcfb4dccSKOSAKI Motohiro 		return false;
22581da177e4SLinus Torvalds 	}
22591da177e4SLinus Torvalds }
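/*
 * A usage sketch: callers normally go through the mpol_equal() wrapper, which
 * short-circuits identical pointers before calling __mpol_equal(). VMA-merging
 * code, for example, checks roughly:
 *
 *	if (!mpol_equal(vma_policy(a), vma_policy(b)))
 *		return false;
 */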
22601da177e4SLinus Torvalds 
22611da177e4SLinus Torvalds /*
22621da177e4SLinus Torvalds  * Shared memory backing store policy support.
22631da177e4SLinus Torvalds  *
22641da177e4SLinus Torvalds  * Remember policies even when nobody has shared memory mapped.
22651da177e4SLinus Torvalds  * The policies are kept in Red-Black tree linked from the inode.
22664a8c7bb5SNathan Zimmer  * They are protected by the sp->lock rwlock, which should be held
22671da177e4SLinus Torvalds  * for any accesses to the tree.
22681da177e4SLinus Torvalds  */
22691da177e4SLinus Torvalds 
22704a8c7bb5SNathan Zimmer /*
22714a8c7bb5SNathan Zimmer  * Look up the first element intersecting start-end.  Caller holds sp->lock
22724a8c7bb5SNathan Zimmer  * for reading or for writing.
22734a8c7bb5SNathan Zimmer  */
22741da177e4SLinus Torvalds static struct sp_node *
22751da177e4SLinus Torvalds sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
22761da177e4SLinus Torvalds {
22771da177e4SLinus Torvalds 	struct rb_node *n = sp->root.rb_node;
22781da177e4SLinus Torvalds 
22791da177e4SLinus Torvalds 	while (n) {
22801da177e4SLinus Torvalds 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
22811da177e4SLinus Torvalds 
22821da177e4SLinus Torvalds 		if (start >= p->end)
22831da177e4SLinus Torvalds 			n = n->rb_right;
22841da177e4SLinus Torvalds 		else if (end <= p->start)
22851da177e4SLinus Torvalds 			n = n->rb_left;
22861da177e4SLinus Torvalds 		else
22871da177e4SLinus Torvalds 			break;
22881da177e4SLinus Torvalds 	}
22891da177e4SLinus Torvalds 	if (!n)
22901da177e4SLinus Torvalds 		return NULL;
22911da177e4SLinus Torvalds 	for (;;) {
22921da177e4SLinus Torvalds 		struct sp_node *w = NULL;
22931da177e4SLinus Torvalds 		struct rb_node *prev = rb_prev(n);
22941da177e4SLinus Torvalds 		if (!prev)
22951da177e4SLinus Torvalds 			break;
22961da177e4SLinus Torvalds 		w = rb_entry(prev, struct sp_node, nd);
22971da177e4SLinus Torvalds 		if (w->end <= start)
22981da177e4SLinus Torvalds 			break;
22991da177e4SLinus Torvalds 		n = prev;
23001da177e4SLinus Torvalds 	}
23011da177e4SLinus Torvalds 	return rb_entry(n, struct sp_node, nd);
23021da177e4SLinus Torvalds }
23031da177e4SLinus Torvalds 
23044a8c7bb5SNathan Zimmer /*
23054a8c7bb5SNathan Zimmer  * Insert a new shared policy into the list.  Caller holds sp->lock for
23064a8c7bb5SNathan Zimmer  * writing.
23074a8c7bb5SNathan Zimmer  */
23081da177e4SLinus Torvalds static void sp_insert(struct shared_policy *sp, struct sp_node *new)
23091da177e4SLinus Torvalds {
23101da177e4SLinus Torvalds 	struct rb_node **p = &sp->root.rb_node;
23111da177e4SLinus Torvalds 	struct rb_node *parent = NULL;
23121da177e4SLinus Torvalds 	struct sp_node *nd;
23131da177e4SLinus Torvalds 
23141da177e4SLinus Torvalds 	while (*p) {
23151da177e4SLinus Torvalds 		parent = *p;
23161da177e4SLinus Torvalds 		nd = rb_entry(parent, struct sp_node, nd);
23171da177e4SLinus Torvalds 		if (new->start < nd->start)
23181da177e4SLinus Torvalds 			p = &(*p)->rb_left;
23191da177e4SLinus Torvalds 		else if (new->end > nd->end)
23201da177e4SLinus Torvalds 			p = &(*p)->rb_right;
23211da177e4SLinus Torvalds 		else
23221da177e4SLinus Torvalds 			BUG();
23231da177e4SLinus Torvalds 	}
23241da177e4SLinus Torvalds 	rb_link_node(&new->nd, parent, p);
23251da177e4SLinus Torvalds 	rb_insert_color(&new->nd, &sp->root);
2326140d5a49SPaul Mundt 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
232745c4745aSLee Schermerhorn 		 new->policy ? new->policy->mode : 0);
23281da177e4SLinus Torvalds }
23291da177e4SLinus Torvalds 
23301da177e4SLinus Torvalds /* Find shared policy intersecting idx */
23311da177e4SLinus Torvalds struct mempolicy *
23321da177e4SLinus Torvalds mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
23331da177e4SLinus Torvalds {
23341da177e4SLinus Torvalds 	struct mempolicy *pol = NULL;
23351da177e4SLinus Torvalds 	struct sp_node *sn;
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds 	if (!sp->root.rb_node)
23381da177e4SLinus Torvalds 		return NULL;
23394a8c7bb5SNathan Zimmer 	read_lock(&sp->lock);
23401da177e4SLinus Torvalds 	sn = sp_lookup(sp, idx, idx+1);
23411da177e4SLinus Torvalds 	if (sn) {
23421da177e4SLinus Torvalds 		mpol_get(sn->policy);
23431da177e4SLinus Torvalds 		pol = sn->policy;
23441da177e4SLinus Torvalds 	}
23454a8c7bb5SNathan Zimmer 	read_unlock(&sp->lock);
23461da177e4SLinus Torvalds 	return pol;
23471da177e4SLinus Torvalds }
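/*
 * A usage sketch, assuming a shmem-style get_policy vm operation: look up the
 * policy for a file index and return it with a reference held (NULL means the
 * default policy applies).
 *
 *	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 */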
23481da177e4SLinus Torvalds 
234963f74ca2SKOSAKI Motohiro static void sp_free(struct sp_node *n)
235063f74ca2SKOSAKI Motohiro {
235163f74ca2SKOSAKI Motohiro 	mpol_put(n->policy);
235263f74ca2SKOSAKI Motohiro 	kmem_cache_free(sn_cache, n);
235363f74ca2SKOSAKI Motohiro }
235463f74ca2SKOSAKI Motohiro 
2355771fb4d8SLee Schermerhorn /**
2356771fb4d8SLee Schermerhorn  * mpol_misplaced - check whether current page node is valid in policy
2357771fb4d8SLee Schermerhorn  *
2358b46e14acSFabian Frederick  * @page: page to be checked
2359b46e14acSFabian Frederick  * @vma: vm area where page mapped
2360b46e14acSFabian Frederick  * @addr: virtual address where page mapped
2361771fb4d8SLee Schermerhorn  *
2362771fb4d8SLee Schermerhorn  * Look up the current policy node id for vma,addr and "compare to" the
2363771fb4d8SLee Schermerhorn  * page's node id.
2364771fb4d8SLee Schermerhorn  *
2365771fb4d8SLee Schermerhorn  * Returns:
2366771fb4d8SLee Schermerhorn  *	-1	- not misplaced, page is in the right node
2367771fb4d8SLee Schermerhorn  *	node	- node id where the page should be
2368771fb4d8SLee Schermerhorn  *
2369771fb4d8SLee Schermerhorn  * Policy determination "mimics" alloc_page_vma().
2370771fb4d8SLee Schermerhorn  * Called from fault path where we know the vma and faulting address.
2371771fb4d8SLee Schermerhorn  */
2372771fb4d8SLee Schermerhorn int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2373771fb4d8SLee Schermerhorn {
2374771fb4d8SLee Schermerhorn 	struct mempolicy *pol;
2375c33d6c06SMel Gorman 	struct zoneref *z;
2376771fb4d8SLee Schermerhorn 	int curnid = page_to_nid(page);
2377771fb4d8SLee Schermerhorn 	unsigned long pgoff;
237890572890SPeter Zijlstra 	int thiscpu = raw_smp_processor_id();
237990572890SPeter Zijlstra 	int thisnid = cpu_to_node(thiscpu);
238098fa15f3SAnshuman Khandual 	int polnid = NUMA_NO_NODE;
2381771fb4d8SLee Schermerhorn 	int ret = -1;
2382771fb4d8SLee Schermerhorn 
2383dd6eecb9SOleg Nesterov 	pol = get_vma_policy(vma, addr);
2384771fb4d8SLee Schermerhorn 	if (!(pol->flags & MPOL_F_MOF))
2385771fb4d8SLee Schermerhorn 		goto out;
2386771fb4d8SLee Schermerhorn 
2387771fb4d8SLee Schermerhorn 	switch (pol->mode) {
2388771fb4d8SLee Schermerhorn 	case MPOL_INTERLEAVE:
2389771fb4d8SLee Schermerhorn 		pgoff = vma->vm_pgoff;
2390771fb4d8SLee Schermerhorn 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
239198c70baaSLaurent Dufour 		polnid = offset_il_node(pol, pgoff);
2392771fb4d8SLee Schermerhorn 		break;
2393771fb4d8SLee Schermerhorn 
2394771fb4d8SLee Schermerhorn 	case MPOL_PREFERRED:
2395771fb4d8SLee Schermerhorn 		if (pol->flags & MPOL_F_LOCAL)
2396771fb4d8SLee Schermerhorn 			polnid = numa_node_id();
2397771fb4d8SLee Schermerhorn 		else
2398771fb4d8SLee Schermerhorn 			polnid = pol->v.preferred_node;
2399771fb4d8SLee Schermerhorn 		break;
2400771fb4d8SLee Schermerhorn 
2401771fb4d8SLee Schermerhorn 	case MPOL_BIND:
2402c33d6c06SMel Gorman 
2403771fb4d8SLee Schermerhorn 		/*
2404771fb4d8SLee Schermerhorn 		 * MPOL_BIND allows binding to multiple nodes.
2405771fb4d8SLee Schermerhorn 		 * Use the current page if it is in the policy nodemask,
2406771fb4d8SLee Schermerhorn 		 * else select the nearest allowed node, if any.
2407771fb4d8SLee Schermerhorn 		 * If there are no allowed nodes, use the current node [!misplaced].
2408771fb4d8SLee Schermerhorn 		 */
2409771fb4d8SLee Schermerhorn 		if (node_isset(curnid, pol->v.nodes))
2410771fb4d8SLee Schermerhorn 			goto out;
2411c33d6c06SMel Gorman 		z = first_zones_zonelist(
2412771fb4d8SLee Schermerhorn 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2413771fb4d8SLee Schermerhorn 				gfp_zone(GFP_HIGHUSER),
2414c33d6c06SMel Gorman 				&pol->v.nodes);
2415c1093b74SPavel Tatashin 		polnid = zone_to_nid(z->zone);
2416771fb4d8SLee Schermerhorn 		break;
2417771fb4d8SLee Schermerhorn 
2418771fb4d8SLee Schermerhorn 	default:
2419771fb4d8SLee Schermerhorn 		BUG();
2420771fb4d8SLee Schermerhorn 	}
24215606e387SMel Gorman 
24225606e387SMel Gorman 	/* Migrate the page towards the node whose CPU is referencing it */
2423e42c8ff2SMel Gorman 	if (pol->flags & MPOL_F_MORON) {
242490572890SPeter Zijlstra 		polnid = thisnid;
24255606e387SMel Gorman 
242610f39042SRik van Riel 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2427de1c9ce6SRik van Riel 			goto out;
2428de1c9ce6SRik van Riel 	}
2429e42c8ff2SMel Gorman 
2430771fb4d8SLee Schermerhorn 	if (curnid != polnid)
2431771fb4d8SLee Schermerhorn 		ret = polnid;
2432771fb4d8SLee Schermerhorn out:
2433771fb4d8SLee Schermerhorn 	mpol_cond_put(pol);
2434771fb4d8SLee Schermerhorn 
2435771fb4d8SLee Schermerhorn 	return ret;
2436771fb4d8SLee Schermerhorn }
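/*
 * A usage sketch, assuming a NUMA hinting-fault caller: -1 means leave the
 * page where it is, otherwise migrate it towards the returned node.
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		migrate_misplaced_page(page, vma, target_nid);
 */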
2437771fb4d8SLee Schermerhorn 
2438c11600e4SDavid Rientjes /*
2439c11600e4SDavid Rientjes  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2440c11600e4SDavid Rientjes  * dropped after task->mempolicy is set to NULL so that any allocation done as
2441c11600e4SDavid Rientjes  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2442c11600e4SDavid Rientjes  * policy.
2443c11600e4SDavid Rientjes  */
2444c11600e4SDavid Rientjes void mpol_put_task_policy(struct task_struct *task)
2445c11600e4SDavid Rientjes {
2446c11600e4SDavid Rientjes 	struct mempolicy *pol;
2447c11600e4SDavid Rientjes 
2448c11600e4SDavid Rientjes 	task_lock(task);
2449c11600e4SDavid Rientjes 	pol = task->mempolicy;
2450c11600e4SDavid Rientjes 	task->mempolicy = NULL;
2451c11600e4SDavid Rientjes 	task_unlock(task);
2452c11600e4SDavid Rientjes 	mpol_put(pol);
2453c11600e4SDavid Rientjes }
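/*
 * A usage sketch: the task-exit path drops the policy once the task can no
 * longer allocate through it.
 *
 *	mpol_put_task_policy(current);
 */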
2454c11600e4SDavid Rientjes 
24551da177e4SLinus Torvalds static void sp_delete(struct shared_policy *sp, struct sp_node *n)
24561da177e4SLinus Torvalds {
2457140d5a49SPaul Mundt 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
24581da177e4SLinus Torvalds 	rb_erase(&n->nd, &sp->root);
245963f74ca2SKOSAKI Motohiro 	sp_free(n);
24601da177e4SLinus Torvalds }
24611da177e4SLinus Torvalds 
246242288fe3SMel Gorman static void sp_node_init(struct sp_node *node, unsigned long start,
246342288fe3SMel Gorman 			unsigned long end, struct mempolicy *pol)
246442288fe3SMel Gorman {
246542288fe3SMel Gorman 	node->start = start;
246642288fe3SMel Gorman 	node->end = end;
246742288fe3SMel Gorman 	node->policy = pol;
246842288fe3SMel Gorman }
246942288fe3SMel Gorman 
2470dbcb0f19SAdrian Bunk static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2471dbcb0f19SAdrian Bunk 				struct mempolicy *pol)
24721da177e4SLinus Torvalds {
2473869833f2SKOSAKI Motohiro 	struct sp_node *n;
2474869833f2SKOSAKI Motohiro 	struct mempolicy *newpol;
24751da177e4SLinus Torvalds 
2476869833f2SKOSAKI Motohiro 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
24771da177e4SLinus Torvalds 	if (!n)
24781da177e4SLinus Torvalds 		return NULL;
2479869833f2SKOSAKI Motohiro 
2480869833f2SKOSAKI Motohiro 	newpol = mpol_dup(pol);
2481869833f2SKOSAKI Motohiro 	if (IS_ERR(newpol)) {
2482869833f2SKOSAKI Motohiro 		kmem_cache_free(sn_cache, n);
2483869833f2SKOSAKI Motohiro 		return NULL;
2484869833f2SKOSAKI Motohiro 	}
2485869833f2SKOSAKI Motohiro 	newpol->flags |= MPOL_F_SHARED;
248642288fe3SMel Gorman 	sp_node_init(n, start, end, newpol);
2487869833f2SKOSAKI Motohiro 
24881da177e4SLinus Torvalds 	return n;
24891da177e4SLinus Torvalds }
24901da177e4SLinus Torvalds 
24911da177e4SLinus Torvalds /* Replace a policy range. */
24921da177e4SLinus Torvalds static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
24931da177e4SLinus Torvalds 				 unsigned long end, struct sp_node *new)
24941da177e4SLinus Torvalds {
2495b22d127aSMel Gorman 	struct sp_node *n;
249642288fe3SMel Gorman 	struct sp_node *n_new = NULL;
249742288fe3SMel Gorman 	struct mempolicy *mpol_new = NULL;
2498b22d127aSMel Gorman 	int ret = 0;
24991da177e4SLinus Torvalds 
250042288fe3SMel Gorman restart:
25014a8c7bb5SNathan Zimmer 	write_lock(&sp->lock);
25021da177e4SLinus Torvalds 	n = sp_lookup(sp, start, end);
25031da177e4SLinus Torvalds 	/* Take care of old policies in the same range. */
25041da177e4SLinus Torvalds 	while (n && n->start < end) {
25051da177e4SLinus Torvalds 		struct rb_node *next = rb_next(&n->nd);
25061da177e4SLinus Torvalds 		if (n->start >= start) {
25071da177e4SLinus Torvalds 			if (n->end <= end)
25081da177e4SLinus Torvalds 				sp_delete(sp, n);
25091da177e4SLinus Torvalds 			else
25101da177e4SLinus Torvalds 				n->start = end;
25111da177e4SLinus Torvalds 		} else {
25121da177e4SLinus Torvalds 			/* Old policy spanning whole new range. */
25131da177e4SLinus Torvalds 			if (n->end > end) {
251442288fe3SMel Gorman 				if (!n_new)
251542288fe3SMel Gorman 					goto alloc_new;
251642288fe3SMel Gorman 
251742288fe3SMel Gorman 				*mpol_new = *n->policy;
251842288fe3SMel Gorman 				atomic_set(&mpol_new->refcnt, 1);
25197880639cSKOSAKI Motohiro 				sp_node_init(n_new, end, n->end, mpol_new);
25201da177e4SLinus Torvalds 				n->end = start;
25215ca39575SHillf Danton 				sp_insert(sp, n_new);
252242288fe3SMel Gorman 				n_new = NULL;
252342288fe3SMel Gorman 				mpol_new = NULL;
25241da177e4SLinus Torvalds 				break;
25251da177e4SLinus Torvalds 			} else
25261da177e4SLinus Torvalds 				n->end = start;
25271da177e4SLinus Torvalds 		}
25281da177e4SLinus Torvalds 		if (!next)
25291da177e4SLinus Torvalds 			break;
25301da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
25311da177e4SLinus Torvalds 	}
25321da177e4SLinus Torvalds 	if (new)
25331da177e4SLinus Torvalds 		sp_insert(sp, new);
25344a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
253542288fe3SMel Gorman 	ret = 0;
253642288fe3SMel Gorman 
253742288fe3SMel Gorman err_out:
253842288fe3SMel Gorman 	if (mpol_new)
253942288fe3SMel Gorman 		mpol_put(mpol_new);
254042288fe3SMel Gorman 	if (n_new)
254142288fe3SMel Gorman 		kmem_cache_free(sn_cache, n_new);
254242288fe3SMel Gorman 
2543b22d127aSMel Gorman 	return ret;
254442288fe3SMel Gorman 
254542288fe3SMel Gorman alloc_new:
25464a8c7bb5SNathan Zimmer 	write_unlock(&sp->lock);
254742288fe3SMel Gorman 	ret = -ENOMEM;
254842288fe3SMel Gorman 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
254942288fe3SMel Gorman 	if (!n_new)
255042288fe3SMel Gorman 		goto err_out;
255142288fe3SMel Gorman 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
255242288fe3SMel Gorman 	if (!mpol_new)
255342288fe3SMel Gorman 		goto err_out;
255442288fe3SMel Gorman 	goto restart;
25551da177e4SLinus Torvalds }
25561da177e4SLinus Torvalds 
255771fe804bSLee Schermerhorn /**
255871fe804bSLee Schermerhorn  * mpol_shared_policy_init - initialize shared policy for inode
255971fe804bSLee Schermerhorn  * @sp: pointer to inode shared policy
256071fe804bSLee Schermerhorn  * @mpol:  struct mempolicy to install
256171fe804bSLee Schermerhorn  *
256271fe804bSLee Schermerhorn  * Install non-NULL @mpol in inode's shared policy rb-tree.
256371fe804bSLee Schermerhorn  * On entry, the current task has a reference on a non-NULL @mpol.
256471fe804bSLee Schermerhorn  * This must be released on exit.
25654bfc4495SKAMEZAWA Hiroyuki  * This is called during get_inode() calls, so GFP_KERNEL can be used.
256671fe804bSLee Schermerhorn  */
256771fe804bSLee Schermerhorn void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
25687339ff83SRobin Holt {
256958568d2aSMiao Xie 	int ret;
257058568d2aSMiao Xie 
257171fe804bSLee Schermerhorn 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
25724a8c7bb5SNathan Zimmer 	rwlock_init(&sp->lock);
25737339ff83SRobin Holt 
257471fe804bSLee Schermerhorn 	if (mpol) {
25757339ff83SRobin Holt 		struct vm_area_struct pvma;
257671fe804bSLee Schermerhorn 		struct mempolicy *new;
25774bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH(scratch);
25787339ff83SRobin Holt 
25794bfc4495SKAMEZAWA Hiroyuki 		if (!scratch)
25805c0c1654SLee Schermerhorn 			goto put_mpol;
258171fe804bSLee Schermerhorn 		/* contextualize the tmpfs mount point mempolicy */
258271fe804bSLee Schermerhorn 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
258315d77835SLee Schermerhorn 		if (IS_ERR(new))
25840cae3457SDan Carpenter 			goto free_scratch; /* no valid nodemask intersection */
258558568d2aSMiao Xie 
258658568d2aSMiao Xie 		task_lock(current);
25874bfc4495SKAMEZAWA Hiroyuki 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
258858568d2aSMiao Xie 		task_unlock(current);
258915d77835SLee Schermerhorn 		if (ret)
25905c0c1654SLee Schermerhorn 			goto put_new;
259171fe804bSLee Schermerhorn 
259271fe804bSLee Schermerhorn 		/* Create pseudo-vma that contains just the policy */
25932c4541e2SKirill A. Shutemov 		vma_init(&pvma, NULL);
259471fe804bSLee Schermerhorn 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
259571fe804bSLee Schermerhorn 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
259615d77835SLee Schermerhorn 
25975c0c1654SLee Schermerhorn put_new:
259871fe804bSLee Schermerhorn 		mpol_put(new);			/* drop initial ref */
25990cae3457SDan Carpenter free_scratch:
26004bfc4495SKAMEZAWA Hiroyuki 		NODEMASK_SCRATCH_FREE(scratch);
26015c0c1654SLee Schermerhorn put_mpol:
26025c0c1654SLee Schermerhorn 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
26037339ff83SRobin Holt 	}
26047339ff83SRobin Holt }
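/*
 * A usage sketch, assuming a tmpfs-style filesystem creating an inode;
 * shmem_get_sbmpol() here stands for whatever returns the mount's mempolicy
 * with a reference, which this call then consumes.
 *
 *	mpol_shared_policy_init(&info->policy, shmem_get_sbmpol(sbinfo));
 */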
26057339ff83SRobin Holt 
26061da177e4SLinus Torvalds int mpol_set_shared_policy(struct shared_policy *info,
26071da177e4SLinus Torvalds 			struct vm_area_struct *vma, struct mempolicy *npol)
26081da177e4SLinus Torvalds {
26091da177e4SLinus Torvalds 	int err;
26101da177e4SLinus Torvalds 	struct sp_node *new = NULL;
26111da177e4SLinus Torvalds 	unsigned long sz = vma_pages(vma);
26121da177e4SLinus Torvalds 
2613028fec41SDavid Rientjes 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
26141da177e4SLinus Torvalds 		 vma->vm_pgoff,
261545c4745aSLee Schermerhorn 		 sz, npol ? npol->mode : -1,
2616028fec41SDavid Rientjes 		 npol ? npol->flags : -1,
261700ef2d2fSDavid Rientjes 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
26181da177e4SLinus Torvalds 
26191da177e4SLinus Torvalds 	if (npol) {
26201da177e4SLinus Torvalds 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
26211da177e4SLinus Torvalds 		if (!new)
26221da177e4SLinus Torvalds 			return -ENOMEM;
26231da177e4SLinus Torvalds 	}
26241da177e4SLinus Torvalds 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
26251da177e4SLinus Torvalds 	if (err && new)
262663f74ca2SKOSAKI Motohiro 		sp_free(new);
26271da177e4SLinus Torvalds 	return err;
26281da177e4SLinus Torvalds }
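/*
 * A usage sketch, assuming a shmem-style set_policy vm operation invoked when
 * mbind() is applied to a mapped shared file:
 *
 *	struct inode *inode = file_inode(vma->vm_file);
 *
 *	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
 */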
26291da177e4SLinus Torvalds 
26301da177e4SLinus Torvalds /* Free a backing policy store on inode delete. */
26311da177e4SLinus Torvalds void mpol_free_shared_policy(struct shared_policy *p)
26321da177e4SLinus Torvalds {
26331da177e4SLinus Torvalds 	struct sp_node *n;
26341da177e4SLinus Torvalds 	struct rb_node *next;
26351da177e4SLinus Torvalds 
26361da177e4SLinus Torvalds 	if (!p->root.rb_node)
26371da177e4SLinus Torvalds 		return;
26384a8c7bb5SNathan Zimmer 	write_lock(&p->lock);
26391da177e4SLinus Torvalds 	next = rb_first(&p->root);
26401da177e4SLinus Torvalds 	while (next) {
26411da177e4SLinus Torvalds 		n = rb_entry(next, struct sp_node, nd);
26421da177e4SLinus Torvalds 		next = rb_next(&n->nd);
264363f74ca2SKOSAKI Motohiro 		sp_delete(p, n);
26441da177e4SLinus Torvalds 	}
26454a8c7bb5SNathan Zimmer 	write_unlock(&p->lock);
26461da177e4SLinus Torvalds }
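/*
 * A usage sketch, assuming a shmem-style inode-eviction path:
 *
 *	mpol_free_shared_policy(&SHMEM_I(inode)->policy);
 */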
26471da177e4SLinus Torvalds 
26481a687c2eSMel Gorman #ifdef CONFIG_NUMA_BALANCING
2649c297663cSMel Gorman static int __initdata numabalancing_override;
26501a687c2eSMel Gorman 
26511a687c2eSMel Gorman static void __init check_numabalancing_enable(void)
26521a687c2eSMel Gorman {
26531a687c2eSMel Gorman 	bool numabalancing_default = false;
26541a687c2eSMel Gorman 
26551a687c2eSMel Gorman 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
26561a687c2eSMel Gorman 		numabalancing_default = true;
26571a687c2eSMel Gorman 
2658c297663cSMel Gorman 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2659c297663cSMel Gorman 	if (numabalancing_override)
2660c297663cSMel Gorman 		set_numabalancing_state(numabalancing_override == 1);
2661c297663cSMel Gorman 
2662b0dc2b9bSMel Gorman 	if (num_online_nodes() > 1 && !numabalancing_override) {
2663756a025fSJoe Perches 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2664c297663cSMel Gorman 			numabalancing_default ? "Enabling" : "Disabling");
26651a687c2eSMel Gorman 		set_numabalancing_state(numabalancing_default);
26661a687c2eSMel Gorman 	}
26671a687c2eSMel Gorman }
26681a687c2eSMel Gorman 
26691a687c2eSMel Gorman static int __init setup_numabalancing(char *str)
26701a687c2eSMel Gorman {
26711a687c2eSMel Gorman 	int ret = 0;
26721a687c2eSMel Gorman 	if (!str)
26731a687c2eSMel Gorman 		goto out;
26741a687c2eSMel Gorman 
26751a687c2eSMel Gorman 	if (!strcmp(str, "enable")) {
2676c297663cSMel Gorman 		numabalancing_override = 1;
26771a687c2eSMel Gorman 		ret = 1;
26781a687c2eSMel Gorman 	} else if (!strcmp(str, "disable")) {
2679c297663cSMel Gorman 		numabalancing_override = -1;
26801a687c2eSMel Gorman 		ret = 1;
26811a687c2eSMel Gorman 	}
26821a687c2eSMel Gorman out:
26831a687c2eSMel Gorman 	if (!ret)
26844a404beaSAndrew Morton 		pr_warn("Unable to parse numa_balancing=\n");
26851a687c2eSMel Gorman 
26861a687c2eSMel Gorman 	return ret;
26871a687c2eSMel Gorman }
26881a687c2eSMel Gorman __setup("numa_balancing=", setup_numabalancing);
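/*
 * Example boot-time usage: passing "numa_balancing=enable" or
 * "numa_balancing=disable" on the kernel command line overrides the
 * CONFIG_NUMA_BALANCING_DEFAULT_ENABLED default checked above.
 */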
26891a687c2eSMel Gorman #else
26901a687c2eSMel Gorman static inline void __init check_numabalancing_enable(void)
26911a687c2eSMel Gorman {
26921a687c2eSMel Gorman }
26931a687c2eSMel Gorman #endif /* CONFIG_NUMA_BALANCING */
26941a687c2eSMel Gorman 
26951da177e4SLinus Torvalds /* assumes fs == KERNEL_DS */
26961da177e4SLinus Torvalds void __init numa_policy_init(void)
26971da177e4SLinus Torvalds {
2698b71636e2SPaul Mundt 	nodemask_t interleave_nodes;
2699b71636e2SPaul Mundt 	unsigned long largest = 0;
2700b71636e2SPaul Mundt 	int nid, prefer = 0;
2701b71636e2SPaul Mundt 
27021da177e4SLinus Torvalds 	policy_cache = kmem_cache_create("numa_policy",
27031da177e4SLinus Torvalds 					 sizeof(struct mempolicy),
270420c2df83SPaul Mundt 					 0, SLAB_PANIC, NULL);
27051da177e4SLinus Torvalds 
27061da177e4SLinus Torvalds 	sn_cache = kmem_cache_create("shared_policy_node",
27071da177e4SLinus Torvalds 				     sizeof(struct sp_node),
270820c2df83SPaul Mundt 				     0, SLAB_PANIC, NULL);
27091da177e4SLinus Torvalds 
27105606e387SMel Gorman 	for_each_node(nid) {
27115606e387SMel Gorman 		preferred_node_policy[nid] = (struct mempolicy) {
27125606e387SMel Gorman 			.refcnt = ATOMIC_INIT(1),
27135606e387SMel Gorman 			.mode = MPOL_PREFERRED,
27145606e387SMel Gorman 			.flags = MPOL_F_MOF | MPOL_F_MORON,
27155606e387SMel Gorman 			.v = { .preferred_node = nid, },
27165606e387SMel Gorman 		};
27175606e387SMel Gorman 	}
27185606e387SMel Gorman 
2719b71636e2SPaul Mundt 	/*
2720b71636e2SPaul Mundt 	 * Set interleaving policy for system init. Interleaving is only
2721b71636e2SPaul Mundt 	 * enabled across suitably sized nodes (default is >= 16MB), or
2722b71636e2SPaul Mundt 	 * fall back to the largest node if they're all smaller.
2723b71636e2SPaul Mundt 	 */
2724b71636e2SPaul Mundt 	nodes_clear(interleave_nodes);
272501f13bd6SLai Jiangshan 	for_each_node_state(nid, N_MEMORY) {
2726b71636e2SPaul Mundt 		unsigned long total_pages = node_present_pages(nid);
27271da177e4SLinus Torvalds 
2728b71636e2SPaul Mundt 		/* Preserve the largest node */
2729b71636e2SPaul Mundt 		if (largest < total_pages) {
2730b71636e2SPaul Mundt 			largest = total_pages;
2731b71636e2SPaul Mundt 			prefer = nid;
2732b71636e2SPaul Mundt 		}
2733b71636e2SPaul Mundt 
2734b71636e2SPaul Mundt 		/* Interleave this node? */
2735b71636e2SPaul Mundt 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2736b71636e2SPaul Mundt 			node_set(nid, interleave_nodes);
2737b71636e2SPaul Mundt 	}
2738b71636e2SPaul Mundt 
2739b71636e2SPaul Mundt 	/* All too small, use the largest */
2740b71636e2SPaul Mundt 	if (unlikely(nodes_empty(interleave_nodes)))
2741b71636e2SPaul Mundt 		node_set(prefer, interleave_nodes);
2742b71636e2SPaul Mundt 
2743028fec41SDavid Rientjes 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2744b1de0d13SMitchel Humpherys 		pr_err("%s: interleaving failed\n", __func__);
27451a687c2eSMel Gorman 
27461a687c2eSMel Gorman 	check_numabalancing_enable();
27471da177e4SLinus Torvalds }
27481da177e4SLinus Torvalds 
27498bccd85fSChristoph Lameter /* Reset policy of current process to default */
27501da177e4SLinus Torvalds void numa_default_policy(void)
27511da177e4SLinus Torvalds {
2752028fec41SDavid Rientjes 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
27531da177e4SLinus Torvalds }
275468860ec1SPaul Jackson 
27554225399aSPaul Jackson /*
2756095f1fc4SLee Schermerhorn  * Parse and format mempolicy from/to strings
2757095f1fc4SLee Schermerhorn  */
2758095f1fc4SLee Schermerhorn 
2759095f1fc4SLee Schermerhorn /*
2760f2a07f40SHugh Dickins  * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
27611a75a6c8SChristoph Lameter  */
2762345ace9cSLee Schermerhorn static const char * const policy_modes[] =
2763345ace9cSLee Schermerhorn {
2764345ace9cSLee Schermerhorn 	[MPOL_DEFAULT]    = "default",
2765345ace9cSLee Schermerhorn 	[MPOL_PREFERRED]  = "prefer",
2766345ace9cSLee Schermerhorn 	[MPOL_BIND]       = "bind",
2767345ace9cSLee Schermerhorn 	[MPOL_INTERLEAVE] = "interleave",
2768d3a71033SLee Schermerhorn 	[MPOL_LOCAL]      = "local",
2769345ace9cSLee Schermerhorn };
27701a75a6c8SChristoph Lameter 
2771095f1fc4SLee Schermerhorn 
2772095f1fc4SLee Schermerhorn #ifdef CONFIG_TMPFS
2773095f1fc4SLee Schermerhorn /**
2774f2a07f40SHugh Dickins  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2775095f1fc4SLee Schermerhorn  * @str:  string containing mempolicy to parse
277671fe804bSLee Schermerhorn  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2777095f1fc4SLee Schermerhorn  *
2778095f1fc4SLee Schermerhorn  * Format of input:
2779095f1fc4SLee Schermerhorn  *	<mode>[=<flags>][:<nodelist>]
2780095f1fc4SLee Schermerhorn  *
278171fe804bSLee Schermerhorn  * On success, returns 0, else 1
2782095f1fc4SLee Schermerhorn  */
2783a7a88b23SHugh Dickins int mpol_parse_str(char *str, struct mempolicy **mpol)
2784095f1fc4SLee Schermerhorn {
278571fe804bSLee Schermerhorn 	struct mempolicy *new = NULL;
2786f2a07f40SHugh Dickins 	unsigned short mode_flags;
278771fe804bSLee Schermerhorn 	nodemask_t nodes;
2788095f1fc4SLee Schermerhorn 	char *nodelist = strchr(str, ':');
2789095f1fc4SLee Schermerhorn 	char *flags = strchr(str, '=');
2790dedf2c73Szhong jiang 	int err = 1, mode;
2791095f1fc4SLee Schermerhorn 
2792095f1fc4SLee Schermerhorn 	if (nodelist) {
2793095f1fc4SLee Schermerhorn 		/* NUL-terminate mode or flags string */
2794095f1fc4SLee Schermerhorn 		*nodelist++ = '\0';
279571fe804bSLee Schermerhorn 		if (nodelist_parse(nodelist, nodes))
2796095f1fc4SLee Schermerhorn 			goto out;
279701f13bd6SLai Jiangshan 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2798095f1fc4SLee Schermerhorn 			goto out;
279971fe804bSLee Schermerhorn 	} else
280071fe804bSLee Schermerhorn 		nodes_clear(nodes);
280171fe804bSLee Schermerhorn 
2802095f1fc4SLee Schermerhorn 	if (flags)
2803095f1fc4SLee Schermerhorn 		*flags++ = '\0';	/* terminate mode string */
2804095f1fc4SLee Schermerhorn 
2805dedf2c73Szhong jiang 	mode = match_string(policy_modes, MPOL_MAX, str);
2806dedf2c73Szhong jiang 	if (mode < 0)
2807095f1fc4SLee Schermerhorn 		goto out;
2808095f1fc4SLee Schermerhorn 
280971fe804bSLee Schermerhorn 	switch (mode) {
2810095f1fc4SLee Schermerhorn 	case MPOL_PREFERRED:
281171fe804bSLee Schermerhorn 		/*
281271fe804bSLee Schermerhorn 		 * Insist on a nodelist of one node only
281371fe804bSLee Schermerhorn 		 */
2814095f1fc4SLee Schermerhorn 		if (nodelist) {
2815095f1fc4SLee Schermerhorn 			char *rest = nodelist;
2816095f1fc4SLee Schermerhorn 			while (isdigit(*rest))
2817095f1fc4SLee Schermerhorn 				rest++;
2818926f2ae0SKOSAKI Motohiro 			if (*rest)
2819926f2ae0SKOSAKI Motohiro 				goto out;
2820095f1fc4SLee Schermerhorn 		}
2821095f1fc4SLee Schermerhorn 		break;
2822095f1fc4SLee Schermerhorn 	case MPOL_INTERLEAVE:
2823095f1fc4SLee Schermerhorn 		/*
2824095f1fc4SLee Schermerhorn 		 * Default to online nodes with memory if no nodelist
2825095f1fc4SLee Schermerhorn 		 */
2826095f1fc4SLee Schermerhorn 		if (!nodelist)
282701f13bd6SLai Jiangshan 			nodes = node_states[N_MEMORY];
28283f226aa1SLee Schermerhorn 		break;
282971fe804bSLee Schermerhorn 	case MPOL_LOCAL:
28303f226aa1SLee Schermerhorn 		/*
283171fe804bSLee Schermerhorn 		 * Don't allow a nodelist;  mpol_new() checks flags
28323f226aa1SLee Schermerhorn 		 */
283371fe804bSLee Schermerhorn 		if (nodelist)
28343f226aa1SLee Schermerhorn 			goto out;
283571fe804bSLee Schermerhorn 		mode = MPOL_PREFERRED;
28363f226aa1SLee Schermerhorn 		break;
2837413b43deSRavikiran G Thirumalai 	case MPOL_DEFAULT:
2838413b43deSRavikiran G Thirumalai 		/*
2839413b43deSRavikiran G Thirumalai 		 * Insist on an empty nodelist
2840413b43deSRavikiran G Thirumalai 		 */
2841413b43deSRavikiran G Thirumalai 		if (!nodelist)
2842413b43deSRavikiran G Thirumalai 			err = 0;
2843413b43deSRavikiran G Thirumalai 		goto out;
2844d69b2e63SKOSAKI Motohiro 	case MPOL_BIND:
284571fe804bSLee Schermerhorn 		/*
2846d69b2e63SKOSAKI Motohiro 		 * Insist on a nodelist
284771fe804bSLee Schermerhorn 		 */
2848d69b2e63SKOSAKI Motohiro 		if (!nodelist)
2849d69b2e63SKOSAKI Motohiro 			goto out;
2850095f1fc4SLee Schermerhorn 	}
2851095f1fc4SLee Schermerhorn 
285271fe804bSLee Schermerhorn 	mode_flags = 0;
2853095f1fc4SLee Schermerhorn 	if (flags) {
2854095f1fc4SLee Schermerhorn 		/*
2855095f1fc4SLee Schermerhorn 		 * Currently, we only support two mutually exclusive
2856095f1fc4SLee Schermerhorn 		 * mode flags.
2857095f1fc4SLee Schermerhorn 		 */
2858095f1fc4SLee Schermerhorn 		if (!strcmp(flags, "static"))
285971fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_STATIC_NODES;
2860095f1fc4SLee Schermerhorn 		else if (!strcmp(flags, "relative"))
286171fe804bSLee Schermerhorn 			mode_flags |= MPOL_F_RELATIVE_NODES;
2862095f1fc4SLee Schermerhorn 		else
2863926f2ae0SKOSAKI Motohiro 			goto out;
2864095f1fc4SLee Schermerhorn 	}
286571fe804bSLee Schermerhorn 
286671fe804bSLee Schermerhorn 	new = mpol_new(mode, mode_flags, &nodes);
286771fe804bSLee Schermerhorn 	if (IS_ERR(new))
2868926f2ae0SKOSAKI Motohiro 		goto out;
2869926f2ae0SKOSAKI Motohiro 
2870f2a07f40SHugh Dickins 	/*
2871f2a07f40SHugh Dickins 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2872f2a07f40SHugh Dickins 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2873f2a07f40SHugh Dickins 	 */
2874f2a07f40SHugh Dickins 	if (mode != MPOL_PREFERRED)
2875f2a07f40SHugh Dickins 		new->v.nodes = nodes;
2876f2a07f40SHugh Dickins 	else if (nodelist)
2877f2a07f40SHugh Dickins 		new->v.preferred_node = first_node(nodes);
2878f2a07f40SHugh Dickins 	else
2879f2a07f40SHugh Dickins 		new->flags |= MPOL_F_LOCAL;
2880f2a07f40SHugh Dickins 
2881f2a07f40SHugh Dickins 	/*
2882f2a07f40SHugh Dickins 	 * Save nodes for contextualization: this will be used to "clone"
2883f2a07f40SHugh Dickins 	 * the mempolicy in a specific context [cpuset] at a later time.
2884f2a07f40SHugh Dickins 	 */
2885e17f74afSLee Schermerhorn 	new->w.user_nodemask = nodes;
2886f2a07f40SHugh Dickins 
2887926f2ae0SKOSAKI Motohiro 	err = 0;
288871fe804bSLee Schermerhorn 
2889095f1fc4SLee Schermerhorn out:
2890095f1fc4SLee Schermerhorn 	/* Restore string for error message */
2891095f1fc4SLee Schermerhorn 	if (nodelist)
2892095f1fc4SLee Schermerhorn 		*--nodelist = ':';
2893095f1fc4SLee Schermerhorn 	if (flags)
2894095f1fc4SLee Schermerhorn 		*--flags = '=';
289571fe804bSLee Schermerhorn 	if (!err)
289671fe804bSLee Schermerhorn 		*mpol = new;
2897095f1fc4SLee Schermerhorn 	return err;
2898095f1fc4SLee Schermerhorn }
2899095f1fc4SLee Schermerhorn #endif /* CONFIG_TMPFS */
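/*
 * A minimal usage sketch for mpol_parse_str(): the string must be writable
 * because parsing NUL-terminates the mode and flags in place. A tmpfs mount
 * option such as "mpol=bind:0-3" would arrive here as the part after "mpol=".
 *
 *	char str[] = "bind:0-3";
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		... use mpol, e.g. install it in the superblock ...
 *		mpol_put(mpol);
 *	}
 */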
2900095f1fc4SLee Schermerhorn 
290171fe804bSLee Schermerhorn /**
290271fe804bSLee Schermerhorn  * mpol_to_str - format a mempolicy structure for printing
290371fe804bSLee Schermerhorn  * @buffer:  to contain formatted mempolicy string
290471fe804bSLee Schermerhorn  * @maxlen:  length of @buffer
290571fe804bSLee Schermerhorn  * @pol:  pointer to mempolicy to be formatted
290671fe804bSLee Schermerhorn  *
2907948927eeSDavid Rientjes  * Convert @pol into a string.  If @buffer is too short, truncate the string.
2908948927eeSDavid Rientjes  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2909948927eeSDavid Rientjes  * longest flag, "relative", and to display at least a few node ids.
29101a75a6c8SChristoph Lameter  */
2911948927eeSDavid Rientjes void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
29121a75a6c8SChristoph Lameter {
29131a75a6c8SChristoph Lameter 	char *p = buffer;
2914948927eeSDavid Rientjes 	nodemask_t nodes = NODE_MASK_NONE;
2915948927eeSDavid Rientjes 	unsigned short mode = MPOL_DEFAULT;
2916948927eeSDavid Rientjes 	unsigned short flags = 0;
29171a75a6c8SChristoph Lameter 
29188790c71aSDavid Rientjes 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2919bea904d5SLee Schermerhorn 		mode = pol->mode;
2920948927eeSDavid Rientjes 		flags = pol->flags;
2921948927eeSDavid Rientjes 	}
2922bea904d5SLee Schermerhorn 
29231a75a6c8SChristoph Lameter 	switch (mode) {
29241a75a6c8SChristoph Lameter 	case MPOL_DEFAULT:
29251a75a6c8SChristoph Lameter 		break;
29261a75a6c8SChristoph Lameter 	case MPOL_PREFERRED:
2927fc36b8d3SLee Schermerhorn 		if (flags & MPOL_F_LOCAL)
2928f2a07f40SHugh Dickins 			mode = MPOL_LOCAL;
292953f2556bSLee Schermerhorn 		else
2930fc36b8d3SLee Schermerhorn 			node_set(pol->v.preferred_node, nodes);
29311a75a6c8SChristoph Lameter 		break;
29321a75a6c8SChristoph Lameter 	case MPOL_BIND:
29331a75a6c8SChristoph Lameter 	case MPOL_INTERLEAVE:
29341a75a6c8SChristoph Lameter 		nodes = pol->v.nodes;
29351a75a6c8SChristoph Lameter 		break;
29361a75a6c8SChristoph Lameter 	default:
2937948927eeSDavid Rientjes 		WARN_ON_ONCE(1);
2938948927eeSDavid Rientjes 		snprintf(p, maxlen, "unknown");
2939948927eeSDavid Rientjes 		return;
29401a75a6c8SChristoph Lameter 	}
29411a75a6c8SChristoph Lameter 
2942b7a9f420SDavid Rientjes 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
29431a75a6c8SChristoph Lameter 
2944fc36b8d3SLee Schermerhorn 	if (flags & MPOL_MODE_FLAGS) {
2945948927eeSDavid Rientjes 		p += snprintf(p, buffer + maxlen - p, "=");
2946f5b087b5SDavid Rientjes 
29472291990aSLee Schermerhorn 		/*
29482291990aSLee Schermerhorn 		 * Currently, the only defined flags are mutually exclusive
29492291990aSLee Schermerhorn 		 */
2950f5b087b5SDavid Rientjes 		if (flags & MPOL_F_STATIC_NODES)
29512291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "static");
29522291990aSLee Schermerhorn 		else if (flags & MPOL_F_RELATIVE_NODES)
29532291990aSLee Schermerhorn 			p += snprintf(p, buffer + maxlen - p, "relative");
2954f5b087b5SDavid Rientjes 	}
2955f5b087b5SDavid Rientjes 
29569e763e0fSTejun Heo 	if (!nodes_empty(nodes))
29579e763e0fSTejun Heo 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
29589e763e0fSTejun Heo 			       nodemask_pr_args(&nodes));
29591a75a6c8SChristoph Lameter }
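/*
 * A minimal usage sketch, assuming a /proc-style caller such as the numa_maps
 * formatter; pol stands for a mempolicy obtained elsewhere (e.g. from
 * get_vma_policy()).
 *
 *	char buffer[64];
 *
 *	mpol_to_str(buffer, sizeof(buffer), pol);
 *	seq_printf(m, "%s", buffer);
 */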
2960